import datetime
from django.conf import settings
from django.core.cache import cache
from django.db import models
from django.db.models import Sum
from django.template import Context, loader
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _lazy
import commonware.log
from olympia import amo
from olympia.amo.models import ManagerBase, ModelBase, skip_cache
from olympia.access.models import Group
from olympia.amo.helpers import absolutify
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import cache_ns_key, send_mail
from olympia.addons.models import Addon, Persona
from olympia.devhub.models import ActivityLog
from olympia.editors.sql_model import RawSQLModel
from olympia.users.models import UserForeignKey, UserProfile
from olympia.versions.models import version_uploaded
user_log = commonware.log.getLogger('z.users')
class CannedResponse(ModelBase):
name = models.CharField(max_length=255)
response = models.TextField()
sort_group = models.CharField(max_length=255)
type = models.PositiveIntegerField(
choices=amo.CANNED_RESPONSE_CHOICES.items(), db_index=True, default=0)
class Meta:
db_table = 'cannedresponses'
def __unicode__(self):
return unicode(self.name)
class AddonCannedResponseManager(ManagerBase):
def get_queryset(self):
qs = super(AddonCannedResponseManager, self).get_queryset()
return qs.filter(type=amo.CANNED_RESPONSE_ADDON)
class AddonCannedResponse(CannedResponse):
objects = AddonCannedResponseManager()
class Meta:
proxy = True
class EventLog(models.Model):
type = models.CharField(max_length=60)
action = models.CharField(max_length=120)
field = models.CharField(max_length=60, blank=True)
user = models.ForeignKey(UserProfile)
changed_id = models.IntegerField()
added = models.CharField(max_length=765, blank=True)
removed = models.CharField(max_length=765, blank=True)
notes = models.TextField(blank=True)
created = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = u'eventlog'
@staticmethod
def new_editors():
action = amo.LOG.GROUP_USER_ADDED
group = Group.objects.get(name='Add-on Reviewers')
items = (ActivityLog.objects.for_group(group)
.filter(action=action.id)
.order_by('-created')[:5])
return [dict(user=i.arguments[1],
created=i.created)
for i in items if i.arguments[1] in group.users.all()]
def get_flags(record):
"""Return a list of tuples (indicating which flags should be displayed for
a particular add-on."""
props = (
('admin_review', 'admin-review', _lazy('Admin Review')),
('is_jetpack', 'jetpack', _lazy('Jetpack Add-on')),
('requires_restart', 'requires_restart',
_lazy('Requires Restart')),
('has_info_request', 'info', _lazy('More Information Requested')),
('has_editor_comment', 'editor', _lazy('Contains Reviewer Comment')),
('sources_provided', 'sources-provided',
_lazy('Sources provided')),
('is_webextension', 'webextension', _lazy('WebExtension')),
)
return [(cls, title) for (prop, cls, title) in props
if getattr(record, prop)]
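# Illustrative sketch of get_flags() above (not part of the original module):
# any object exposing the boolean attributes listed in `props` will do, so a
# plain stub with hypothetical values is enough to show the mapping.
#
#     class _FakeQueueRow(object):
#         admin_review = True
#         is_jetpack = False
#         requires_restart = False
#         has_info_request = False
#         has_editor_comment = False
#         sources_provided = True
#         is_webextension = False
#
#     get_flags(_FakeQueueRow())
#     # -> [('admin-review', 'Admin Review'), ('sources-provided', 'Sources provided')]
#     #    (titles are lazy translation strings)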
class ViewQueue(RawSQLModel):
id = models.IntegerField()
addon_name = models.CharField(max_length=255)
addon_slug = models.CharField(max_length=30)
addon_status = models.IntegerField()
addon_type_id = models.IntegerField()
admin_review = models.BooleanField()
is_restartless = models.BooleanField()
is_jetpack = models.BooleanField()
source = models.CharField(max_length=100)
is_webextension = models.BooleanField()
latest_version = models.CharField(max_length=255)
has_info_request = models.BooleanField()
has_editor_comment = models.BooleanField()
waiting_time_days = models.IntegerField()
waiting_time_hours = models.IntegerField()
waiting_time_min = models.IntegerField()
def base_query(self):
return {
'select': SortedDict([
('id', 'addons.id'),
('addon_name', 'tr.localized_string'),
('addon_status', 'addons.status'),
('addon_type_id', 'addons.addontype_id'),
('addon_slug', 'addons.slug'),
('admin_review', 'addons.adminreview'),
('latest_version', 'versions.version'),
('has_editor_comment', 'versions.has_editor_comment'),
('has_info_request', 'versions.has_info_request'),
('is_jetpack', 'MAX(files.jetpack_version IS NOT NULL)'),
('is_restartless', 'MAX(files.no_restart)'),
('source', 'versions.source'),
('is_webextension', 'MAX(files.is_webextension)'),
('waiting_time_days',
'TIMESTAMPDIFF(DAY, MAX(versions.nomination), NOW())'),
('waiting_time_hours',
'TIMESTAMPDIFF(HOUR, MAX(versions.nomination), NOW())'),
('waiting_time_min',
'TIMESTAMPDIFF(MINUTE, MAX(versions.nomination), NOW())'),
]),
'from': [
'addons',
"""
LEFT JOIN versions ON (addons.id = versions.addon_id)
LEFT JOIN files ON (files.version_id = versions.id)
JOIN translations AS tr ON (
tr.id = addons.name
AND tr.locale = addons.defaultlocale)
"""
],
'where': [
'NOT addons.inactive', # disabled_by_user
'versions.channel = %s' % amo.RELEASE_CHANNEL_LISTED,
'files.status = %s' % amo.STATUS_AWAITING_REVIEW,
],
'group_by': 'id'}
@property
def requires_restart(self):
return not self.is_restartless
@property
def sources_provided(self):
return bool(self.source)
@property
def flags(self):
return get_flags(self)
class ViewFullReviewQueue(ViewQueue):
def base_query(self):
q = super(ViewFullReviewQueue, self).base_query()
q['where'].append('addons.status = %s' % amo.STATUS_NOMINATED)
return q
class ViewPendingQueue(ViewQueue):
def base_query(self):
q = super(ViewPendingQueue, self).base_query()
q['where'].append('addons.status = %s' % amo.STATUS_PUBLIC)
return q
class ViewUnlistedAllList(RawSQLModel):
id = models.IntegerField()
addon_name = models.CharField(max_length=255)
addon_slug = models.CharField(max_length=30)
guid = models.CharField(max_length=255)
version_date = models.DateTimeField()
_author_ids = models.CharField(max_length=255)
_author_usernames = models.CharField()
review_date = models.DateField()
review_version_num = models.CharField(max_length=255)
review_log_id = models.IntegerField()
addon_status = models.IntegerField()
latest_version = models.CharField(max_length=255)
admin_review = models.BooleanField()
is_deleted = models.BooleanField()
def base_query(self):
review_ids = ','.join([str(r) for r in amo.LOG_EDITOR_REVIEW_ACTION])
return {
'select': SortedDict([
('id', 'addons.id'),
('addon_name', 'tr.localized_string'),
('addon_status', 'addons.status'),
('addon_slug', 'addons.slug'),
('latest_version', 'versions.version'),
('guid', 'addons.guid'),
('_author_ids', 'GROUP_CONCAT(authors.user_id)'),
('_author_usernames', 'GROUP_CONCAT(users.username)'),
('admin_review', 'addons.adminreview'),
('is_deleted', 'IF (addons.status=11, true, false)'),
('version_date', 'versions.nomination'),
('review_date', 'reviewed_versions.created'),
('review_version_num', 'reviewed_versions.version'),
('review_log_id', 'reviewed_versions.log_id'),
]),
'from': [
'addons',
"""
JOIN (
SELECT MAX(id) AS latest_version, addon_id FROM versions
WHERE channel = {channel}
GROUP BY addon_id
) AS latest_version
ON latest_version.addon_id = addons.id
LEFT JOIN versions
ON (latest_version.latest_version = versions.id)
JOIN translations AS tr ON (
tr.id = addons.name AND
tr.locale = addons.defaultlocale)
LEFT JOIN addons_users AS authors
ON addons.id = authors.addon_id
LEFT JOIN users as users ON users.id = authors.user_id
LEFT JOIN (
SELECT versions.id AS id, addon_id, log.created, version,
log.id AS log_id
FROM versions
JOIN log_activity_version AS log_v ON (
log_v.version_id=versions.id)
JOIN log_activity as log ON (
log.id=log_v.activity_log_id)
WHERE log.user_id <> {task_user} AND
log.action in ({review_actions}) AND
versions.channel = {channel}
ORDER BY id desc
) AS reviewed_versions
ON reviewed_versions.addon_id = addons.id
""".format(task_user=settings.TASK_USER_ID,
review_actions=review_ids,
channel=amo.RELEASE_CHANNEL_UNLISTED),
],
'where': [
'NOT addons.inactive', # disabled_by_user
'versions.channel = %s' % amo.RELEASE_CHANNEL_UNLISTED,
"""((reviewed_versions.id = (select max(reviewed_versions.id)))
OR
(reviewed_versions.id IS NULL))
""",
'addons.status <> %s' % amo.STATUS_DISABLED
],
'group_by': 'id'}
@property
def authors(self):
ids = self._explode_concat(self._author_ids)
usernames = self._explode_concat(self._author_usernames, cast=unicode)
return list(set(zip(ids, usernames)))
class PerformanceGraph(RawSQLModel):
id = models.IntegerField()
yearmonth = models.CharField(max_length=7)
approval_created = models.DateTimeField()
user_id = models.IntegerField()
total = models.IntegerField()
def base_query(self):
request_ver = amo.LOG.REQUEST_VERSION.id
review_ids = [str(r) for r in amo.LOG_EDITOR_REVIEW_ACTION
if r != request_ver]
return {
'select': SortedDict([
('yearmonth',
"DATE_FORMAT(`log_activity`.`created`, '%%Y-%%m')"),
('approval_created', '`log_activity`.`created`'),
('user_id', '`log_activity`.`user_id`'),
('total', 'COUNT(*)')
]),
'from': [
'log_activity',
],
'where': [
'log_activity.action in (%s)' % ','.join(review_ids),
'user_id <> %s' % settings.TASK_USER_ID # No auto-approvals.
],
'group_by': 'yearmonth, user_id'
}
class EditorSubscription(ModelBase):
user = models.ForeignKey(UserProfile)
addon = models.ForeignKey(Addon)
class Meta:
db_table = 'editor_subscriptions'
def send_notification(self, version):
user_log.info('Sending addon update notice to %s for %s' %
(self.user.email, self.addon.pk))
context = Context({
'name': self.addon.name,
'url': absolutify(reverse('addons.detail', args=[self.addon.pk],
add_prefix=False)),
'number': version.version,
'review': absolutify(reverse('editors.review',
args=[self.addon.pk],
add_prefix=False)),
'SITE_URL': settings.SITE_URL,
})
        # Not being localised because we don't know the editor's locale.
subject = 'Mozilla Add-ons: %s Updated' % self.addon.name
template = loader.get_template('editors/emails/notify_update.ltxt')
        send_mail(subject, template.render(context),
recipient_list=[self.user.email],
from_email=settings.EDITORS_EMAIL,
use_deny_list=False)
def send_notifications(signal=None, sender=None, **kw):
if sender.is_beta:
return
subscribers = sender.addon.editorsubscription_set.all()
if not subscribers:
return
for subscriber in subscribers:
subscriber.send_notification(sender)
subscriber.delete()
version_uploaded.connect(send_notifications, dispatch_uid='send_notifications')
class ReviewerScore(ModelBase):
user = models.ForeignKey(UserProfile, related_name='_reviewer_scores')
addon = models.ForeignKey(Addon, blank=True, null=True, related_name='+')
score = models.SmallIntegerField()
# For automated point rewards.
note_key = models.SmallIntegerField(choices=amo.REVIEWED_CHOICES.items(),
default=0)
# For manual point rewards with a note.
note = models.CharField(max_length=255)
class Meta:
db_table = 'reviewer_scores'
ordering = ('-created',)
@classmethod
def get_key(cls, key=None, invalidate=False):
namespace = 'riscore'
if not key: # Assuming we're invalidating the namespace.
cache_ns_key(namespace, invalidate)
return
else:
# Using cache_ns_key so each cache val is invalidated together.
ns_key = cache_ns_key(namespace, invalidate)
return '%s:%s' % (ns_key, key)
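    # Usage sketch for get_key() above (hypothetical key): point lookups build
    # namespaced cache keys, while calling it with no key and invalidate=True
    # bumps the namespace so every previously built key stops matching.
    #
    #     key = ReviewerScore.get_key('get_total:%s' % user.id)  # '<ns>:get_total:<id>'
    #     ReviewerScore.get_key(invalidate=True)                 # drop all cached scores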
@classmethod
def get_event(cls, addon, status, **kwargs):
"""Return the review event type constant.
This is determined by the addon.type and the queue the addon is
currently in (which is determined from the status).
Note: We're not using addon.status because this is called after the
status has been updated by the reviewer action.
"""
queue = ''
if status == amo.STATUS_NOMINATED:
queue = 'FULL'
elif status == amo.STATUS_PUBLIC:
queue = 'UPDATE'
if (addon.type in [amo.ADDON_EXTENSION, amo.ADDON_PLUGIN,
amo.ADDON_API] and queue):
return getattr(amo, 'REVIEWED_ADDON_%s' % queue)
elif addon.type == amo.ADDON_DICT and queue:
return getattr(amo, 'REVIEWED_DICT_%s' % queue)
elif addon.type in [amo.ADDON_LPAPP, amo.ADDON_LPADDON] and queue:
return getattr(amo, 'REVIEWED_LP_%s' % queue)
elif addon.type == amo.ADDON_PERSONA:
return amo.REVIEWED_PERSONA
elif addon.type == amo.ADDON_SEARCH and queue:
return getattr(amo, 'REVIEWED_SEARCH_%s' % queue)
elif addon.type == amo.ADDON_THEME and queue:
return getattr(amo, 'REVIEWED_THEME_%s' % queue)
else:
return None
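    # Worked example of the branches in get_event() above: a nominated extension
    # (addon.type == amo.ADDON_EXTENSION, status == amo.STATUS_NOMINATED) resolves
    # queue = 'FULL' and returns amo.REVIEWED_ADDON_FULL; a Persona always returns
    # amo.REVIEWED_PERSONA; other types and combinations fall through to None.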
@classmethod
def award_points(cls, user, addon, status, **kwargs):
"""Awards points to user based on an event and the queue.
`event` is one of the `REVIEWED_` keys in constants.
`status` is one of the `STATUS_` keys in constants.
"""
event = cls.get_event(addon, status, **kwargs)
score = amo.REVIEWED_SCORES.get(event)
try:
# Add bonus to reviews greater than our limit to encourage fixing
# old reviews.
vq = ViewQueue.objects.get(
addon_slug=addon.slug,
)
if vq.waiting_time_days > amo.REVIEWED_OVERDUE_LIMIT:
days_over = vq.waiting_time_days - amo.REVIEWED_OVERDUE_LIMIT
bonus = days_over * amo.REVIEWED_OVERDUE_BONUS
score = score + bonus
except ViewQueue.DoesNotExist:
# If the object does not exist then we simply do not add a bonus
pass
if score:
cls.objects.create(user=user, addon=addon, score=score,
note_key=event)
cls.get_key(invalidate=True)
user_log.info(
(u'Awarding %s points to user %s for "%s" for addon %s' % (
score, user, amo.REVIEWED_CHOICES[event], addon.id))
.encode('utf-8'))
return score
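    # Example of the overdue bonus arithmetic in award_points() above (numbers
    # are hypothetical): with a base score of 60, REVIEWED_OVERDUE_LIMIT of 7
    # days and REVIEWED_OVERDUE_BONUS of 2, an add-on waiting 12 days earns
    # 60 + (12 - 7) * 2 = 70 points.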
@classmethod
def award_moderation_points(cls, user, addon, review_id, undo=False):
"""Awards points to user based on moderated review."""
event = (amo.REVIEWED_ADDON_REVIEW if not undo else
amo.REVIEWED_ADDON_REVIEW_POORLY)
score = amo.REVIEWED_SCORES.get(event)
cls.objects.create(user=user, addon=addon, score=score, note_key=event)
cls.get_key(invalidate=True)
user_log.info(
u'Awarding %s points to user %s for "%s" for review %s' % (
score, user, amo.REVIEWED_CHOICES[event], review_id))
@classmethod
def get_total(cls, user):
"""Returns total points by user."""
key = cls.get_key('get_total:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
val = (ReviewerScore.objects.no_cache().filter(user=user)
.aggregate(total=Sum('score'))
.values())[0]
if val is None:
val = 0
cache.set(key, val, None)
return val
@classmethod
def get_recent(cls, user, limit=5, addon_type=None):
"""Returns most recent ReviewerScore records."""
key = cls.get_key('get_recent:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
val = ReviewerScore.objects.no_cache().filter(user=user)
if addon_type is not None:
            val = val.filter(addon__type=addon_type)
val = list(val[:limit])
cache.set(key, val, None)
return val
@classmethod
def get_breakdown(cls, user):
"""Returns points broken down by addon type."""
key = cls.get_key('get_breakdown:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
sql = """
SELECT `reviewer_scores`.*,
SUM(`reviewer_scores`.`score`) AS `total`,
`addons`.`addontype_id` AS `atype`
FROM `reviewer_scores`
LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
WHERE `reviewer_scores`.`user_id` = %s
GROUP BY `addons`.`addontype_id`
ORDER BY `total` DESC
"""
with skip_cache():
val = list(ReviewerScore.objects.raw(sql, [user.id]))
cache.set(key, val, None)
return val
@classmethod
def get_breakdown_since(cls, user, since):
"""
Returns points broken down by addon type since the given datetime.
"""
key = cls.get_key('get_breakdown:%s:%s' % (user.id, since.isoformat()))
val = cache.get(key)
if val is not None:
return val
sql = """
SELECT `reviewer_scores`.*,
SUM(`reviewer_scores`.`score`) AS `total`,
`addons`.`addontype_id` AS `atype`
FROM `reviewer_scores`
LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
WHERE `reviewer_scores`.`user_id` = %s AND
`reviewer_scores`.`created` >= %s
GROUP BY `addons`.`addontype_id`
ORDER BY `total` DESC
"""
with skip_cache():
val = list(ReviewerScore.objects.raw(sql, [user.id, since]))
cache.set(key, val, 3600)
return val
@classmethod
def _leaderboard_query(cls, since=None, types=None, addon_type=None):
"""
Returns common SQL to leaderboard calls.
"""
query = (cls.objects
.values_list('user__id', 'user__display_name')
.annotate(total=Sum('score'))
.exclude(user__groups__name__in=('No Reviewer Incentives',
'Staff', 'Admins'))
.order_by('-total'))
if since is not None:
query = query.filter(created__gte=since)
if types is not None:
query = query.filter(note_key__in=types)
if addon_type is not None:
query = query.filter(addon__type=addon_type)
return query
@classmethod
def get_leaderboards(cls, user, days=7, types=None, addon_type=None):
"""Returns leaderboards with ranking for the past given days.
This will return a dict of 3 items::
{'leader_top': [...],
             'leader_near': [...],
'user_rank': (int)}
If the user is not in the leaderboard, or if the user is in the top 5,
'leader_near' will be an empty list and 'leader_top' will contain 5
elements instead of the normal 3.
"""
key = cls.get_key('get_leaderboards:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
week_ago = datetime.date.today() - datetime.timedelta(days=days)
leader_top = []
leader_near = []
query = cls._leaderboard_query(since=week_ago, types=types,
addon_type=addon_type)
scores = []
user_rank = 0
in_leaderboard = False
for rank, row in enumerate(query, 1):
user_id, name, total = row
scores.append({
'user_id': user_id,
'name': name,
'rank': rank,
'total': int(total),
})
if user_id == user.id:
user_rank = rank
in_leaderboard = True
if not in_leaderboard:
leader_top = scores[:5]
else:
if user_rank <= 5: # User is in top 5, show top 5.
leader_top = scores[:5]
else:
leader_top = scores[:3]
leader_near = [scores[user_rank - 2], scores[user_rank - 1]]
try:
leader_near.append(scores[user_rank])
except IndexError:
pass # User is last on the leaderboard.
val = {
'leader_top': leader_top,
'leader_near': leader_near,
'user_rank': user_rank,
}
cache.set(key, val, None)
return val
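    # Shape of the value cached by get_leaderboards() above (numbers and names
    # are hypothetical):
    #
    #     {'leader_top': [{'user_id': 1, 'name': 'A', 'rank': 1, 'total': 420}, ...],
    #      'leader_near': [],   # empty when the user is absent or already in the top 5
    #      'user_rank': 3}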
@classmethod
def all_users_by_score(cls):
"""
Returns reviewers ordered by highest total points first.
"""
query = cls._leaderboard_query()
scores = []
for row in query:
user_id, name, total = row
user_level = len(amo.REVIEWED_LEVELS) - 1
for i, level in enumerate(amo.REVIEWED_LEVELS):
if total < level['points']:
user_level = i - 1
break
# Only show level if it changes.
if user_level < 0:
level = ''
else:
level = amo.REVIEWED_LEVELS[user_level]['name']
scores.append({
'user_id': user_id,
'name': name,
'total': int(total),
'level': level,
})
prev = None
for score in reversed(scores):
if score['level'] == prev:
score['level'] = ''
else:
prev = score['level']
return scores
class EscalationQueue(ModelBase):
addon = models.ForeignKey(Addon)
class Meta:
db_table = 'escalation_queue'
class RereviewQueueThemeManager(ManagerBase):
def __init__(self, include_deleted=False):
# DO NOT change the default value of include_deleted unless you've read
# through the comment just above the Addon managers
# declaration/instantiation and understand the consequences.
ManagerBase.__init__(self)
self.include_deleted = include_deleted
def get_queryset(self):
qs = super(RereviewQueueThemeManager, self).get_queryset()
if self.include_deleted:
return qs
else:
return qs.exclude(theme__addon__status=amo.STATUS_DELETED)
class RereviewQueueTheme(ModelBase):
theme = models.ForeignKey(Persona)
header = models.CharField(max_length=72, blank=True, default='')
footer = models.CharField(max_length=72, blank=True, default='')
# Holds whether this reuploaded theme is a duplicate.
dupe_persona = models.ForeignKey(Persona, null=True,
related_name='dupepersona')
# The order of those managers is very important: please read the lengthy
# comment above the Addon managers declaration/instantiation.
unfiltered = RereviewQueueThemeManager(include_deleted=True)
objects = RereviewQueueThemeManager()
class Meta:
db_table = 'rereview_queue_theme'
def __str__(self):
return str(self.id)
@property
def header_path(self):
"""Return the path to the header image."""
return self.theme._image_path(self.header or self.theme.header)
@property
def footer_path(self):
"""Return the path to the optional footer image."""
footer = self.footer or self.theme.footer
return footer and self.theme._image_path(footer) or ''
@property
def header_url(self):
"""Return the url of the header imager."""
return self.theme._image_url(self.header or self.theme.header)
@property
def footer_url(self):
"""Return the url of the optional footer image."""
footer = self.footer or self.theme.footer
return footer and self.theme._image_url(footer) or ''
class ThemeLock(ModelBase):
theme = models.OneToOneField('addons.Persona')
reviewer = UserForeignKey()
expiry = models.DateTimeField()
class Meta:
db_table = 'theme_locks'
"""Test service helpers."""
import asyncio
from collections import OrderedDict
from copy import deepcopy
import unittest
from unittest.mock import Mock, patch
import voluptuous as vol
import pytest
# To prevent circular import when running just this file
import homeassistant.components # noqa
from homeassistant import core as ha, exceptions
from homeassistant.const import STATE_ON, STATE_OFF, ATTR_ENTITY_ID
from homeassistant.setup import async_setup_component
import homeassistant.helpers.config_validation as cv
from homeassistant.auth.permissions import PolicyPermissions
from homeassistant.helpers import (
service, template, device_registry as dev_reg, entity_registry as ent_reg)
from tests.common import (
get_test_home_assistant, mock_service, mock_coro, mock_registry,
mock_device_registry)
@pytest.fixture
def mock_service_platform_call():
"""Mock service platform call."""
with patch('homeassistant.helpers.service._handle_service_platform_call',
side_effect=lambda *args: mock_coro()) as mock_call:
yield mock_call
@pytest.fixture
def mock_entities():
"""Return mock entities in an ordered dict."""
kitchen = Mock(
entity_id='light.kitchen',
available=True,
should_poll=False,
supported_features=1,
platform='test_domain',
)
living_room = Mock(
entity_id='light.living_room',
available=True,
should_poll=False,
supported_features=0,
platform='test_domain',
)
entities = OrderedDict()
entities[kitchen.entity_id] = kitchen
entities[living_room.entity_id] = living_room
return entities
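# The two entities above intentionally differ in supported_features (1 vs. 0):
# test_call_with_required_features below relies on that difference to assert
# that only light.kitchen receives the call.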
class TestServiceHelpers(unittest.TestCase):
"""Test the Home Assistant service helpers."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.calls = mock_service(self.hass, 'test_domain', 'test_service')
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_template_service_call(self):
"""Test service call with templating."""
config = {
'service_template': '{{ \'test_domain.test_service\' }}',
'entity_id': 'hello.world',
'data_template': {
'hello': '{{ \'goodbye\' }}',
'data': {
'value': '{{ \'complex\' }}',
'simple': 'simple'
},
'list': ['{{ \'list\' }}', '2'],
},
}
service.call_from_config(self.hass, config)
self.hass.block_till_done()
assert 'goodbye' == self.calls[0].data['hello']
assert 'complex' == self.calls[0].data['data']['value']
assert 'simple' == self.calls[0].data['data']['simple']
assert 'list' == self.calls[0].data['list'][0]
def test_passing_variables_to_templates(self):
"""Test passing variables to templates."""
config = {
'service_template': '{{ var_service }}',
'entity_id': 'hello.world',
'data_template': {
'hello': '{{ var_data }}',
},
}
service.call_from_config(self.hass, config, variables={
'var_service': 'test_domain.test_service',
'var_data': 'goodbye',
})
self.hass.block_till_done()
assert 'goodbye' == self.calls[0].data['hello']
def test_bad_template(self):
"""Test passing bad template."""
config = {
'service_template': '{{ var_service }}',
'entity_id': 'hello.world',
'data_template': {
'hello': '{{ states + unknown_var }}'
}
}
service.call_from_config(self.hass, config, variables={
'var_service': 'test_domain.test_service',
'var_data': 'goodbye',
})
self.hass.block_till_done()
assert len(self.calls) == 0
def test_split_entity_string(self):
"""Test splitting of entity string."""
service.call_from_config(self.hass, {
'service': 'test_domain.test_service',
'entity_id': 'hello.world, sensor.beer'
})
self.hass.block_till_done()
assert ['hello.world', 'sensor.beer'] == \
self.calls[-1].data.get('entity_id')
def test_not_mutate_input(self):
"""Test for immutable input."""
config = cv.SERVICE_SCHEMA({
'service': 'test_domain.test_service',
'entity_id': 'hello.world, sensor.beer',
'data': {
'hello': 1,
},
'data_template': {
'nested': {
'value': '{{ 1 + 1 }}'
}
}
})
orig = deepcopy(config)
# Only change after call is each template getting hass attached
template.attach(self.hass, orig)
service.call_from_config(self.hass, config, validate_config=False)
assert orig == config
@patch('homeassistant.helpers.service._LOGGER.error')
def test_fail_silently_if_no_service(self, mock_log):
"""Test failing if service is missing."""
service.call_from_config(self.hass, None)
assert 1 == mock_log.call_count
service.call_from_config(self.hass, {})
assert 2 == mock_log.call_count
service.call_from_config(self.hass, {
'service': 'invalid'
})
assert 3 == mock_log.call_count
async def test_extract_entity_ids(hass):
"""Test extract_entity_ids method."""
hass.states.async_set('light.Bowl', STATE_ON)
hass.states.async_set('light.Ceiling', STATE_OFF)
hass.states.async_set('light.Kitchen', STATE_OFF)
await hass.components.group.Group.async_create_group(
hass, 'test', ['light.Ceiling', 'light.Kitchen'])
call = ha.ServiceCall('light', 'turn_on',
{ATTR_ENTITY_ID: 'light.Bowl'})
assert {'light.bowl'} == \
await service.async_extract_entity_ids(hass, call)
call = ha.ServiceCall('light', 'turn_on',
{ATTR_ENTITY_ID: 'group.test'})
assert {'light.ceiling', 'light.kitchen'} == \
await service.async_extract_entity_ids(hass, call)
assert {'group.test'} == await service.async_extract_entity_ids(
hass, call, expand_group=False)
async def test_extract_entity_ids_from_area(hass):
"""Test extract_entity_ids method with areas."""
hass.states.async_set('light.Bowl', STATE_ON)
hass.states.async_set('light.Ceiling', STATE_OFF)
hass.states.async_set('light.Kitchen', STATE_OFF)
device_in_area = dev_reg.DeviceEntry(area_id='test-area')
device_no_area = dev_reg.DeviceEntry()
device_diff_area = dev_reg.DeviceEntry(area_id='diff-area')
mock_device_registry(hass, {
device_in_area.id: device_in_area,
device_no_area.id: device_no_area,
device_diff_area.id: device_diff_area,
})
entity_in_area = ent_reg.RegistryEntry(
entity_id='light.in_area',
unique_id='in-area-id',
platform='test',
device_id=device_in_area.id,
)
entity_no_area = ent_reg.RegistryEntry(
entity_id='light.no_area',
unique_id='no-area-id',
platform='test',
device_id=device_no_area.id,
)
entity_diff_area = ent_reg.RegistryEntry(
entity_id='light.diff_area',
unique_id='diff-area-id',
platform='test',
device_id=device_diff_area.id,
)
mock_registry(hass, {
entity_in_area.entity_id: entity_in_area,
entity_no_area.entity_id: entity_no_area,
entity_diff_area.entity_id: entity_diff_area,
})
call = ha.ServiceCall('light', 'turn_on',
{'area_id': 'test-area'})
assert {'light.in_area'} == \
await service.async_extract_entity_ids(hass, call)
call = ha.ServiceCall('light', 'turn_on',
{'area_id': ['test-area', 'diff-area']})
assert {'light.in_area', 'light.diff_area'} == \
await service.async_extract_entity_ids(hass, call)
@asyncio.coroutine
def test_async_get_all_descriptions(hass):
"""Test async_get_all_descriptions."""
group = hass.components.group
group_config = {group.DOMAIN: {}}
yield from async_setup_component(hass, group.DOMAIN, group_config)
descriptions = yield from service.async_get_all_descriptions(hass)
assert len(descriptions) == 1
assert 'description' in descriptions['group']['reload']
assert 'fields' in descriptions['group']['reload']
logger = hass.components.logger
logger_config = {logger.DOMAIN: {}}
yield from async_setup_component(hass, logger.DOMAIN, logger_config)
descriptions = yield from service.async_get_all_descriptions(hass)
assert len(descriptions) == 2
assert 'description' in descriptions[logger.DOMAIN]['set_level']
assert 'fields' in descriptions[logger.DOMAIN]['set_level']
async def test_call_with_required_features(hass, mock_entities):
"""Test service calls invoked only if entity has required feautres."""
test_service_mock = Mock(return_value=mock_coro())
await service.entity_service_call(hass, [
Mock(entities=mock_entities)
], test_service_mock, ha.ServiceCall('test_domain', 'test_service', {
'entity_id': 'all'
}), required_features=[1])
assert len(mock_entities) == 2
# Called once because only one of the entities had the required features
assert test_service_mock.call_count == 1
async def test_call_context_user_not_exist(hass):
"""Check we don't allow deleted users to do things."""
with pytest.raises(exceptions.UnknownUser) as err:
await service.entity_service_call(hass, [], Mock(), ha.ServiceCall(
'test_domain', 'test_service', context=ha.Context(
user_id='non-existing')))
assert err.value.context.user_id == 'non-existing'
async def test_call_context_target_all(hass, mock_service_platform_call,
mock_entities):
"""Check we only target allowed entities if targetting all."""
with patch('homeassistant.auth.AuthManager.async_get_user',
return_value=mock_coro(Mock(permissions=PolicyPermissions({
'entities': {
'entity_ids': {
'light.kitchen': True
}
}
}, None)))):
await service.entity_service_call(hass, [
Mock(entities=mock_entities)
], Mock(), ha.ServiceCall('test_domain', 'test_service',
context=ha.Context(user_id='mock-id')))
assert len(mock_service_platform_call.mock_calls) == 1
entities = mock_service_platform_call.mock_calls[0][1][2]
assert entities == [mock_entities['light.kitchen']]
async def test_call_context_target_specific(hass, mock_service_platform_call,
mock_entities):
"""Check targeting specific entities."""
with patch('homeassistant.auth.AuthManager.async_get_user',
return_value=mock_coro(Mock(permissions=PolicyPermissions({
'entities': {
'entity_ids': {
'light.kitchen': True
}
}
}, None)))):
await service.entity_service_call(hass, [
Mock(entities=mock_entities)
], Mock(), ha.ServiceCall('test_domain', 'test_service', {
'entity_id': 'light.kitchen'
}, context=ha.Context(user_id='mock-id')))
assert len(mock_service_platform_call.mock_calls) == 1
entities = mock_service_platform_call.mock_calls[0][1][2]
assert entities == [mock_entities['light.kitchen']]
async def test_call_context_target_specific_no_auth(
hass, mock_service_platform_call, mock_entities):
"""Check targeting specific entities without auth."""
with pytest.raises(exceptions.Unauthorized) as err:
with patch('homeassistant.auth.AuthManager.async_get_user',
return_value=mock_coro(Mock(
permissions=PolicyPermissions({}, None)))):
await service.entity_service_call(hass, [
Mock(entities=mock_entities)
], Mock(), ha.ServiceCall('test_domain', 'test_service', {
'entity_id': 'light.kitchen'
}, context=ha.Context(user_id='mock-id')))
assert err.value.context.user_id == 'mock-id'
assert err.value.entity_id == 'light.kitchen'
async def test_call_no_context_target_all(hass, mock_service_platform_call,
mock_entities):
"""Check we target all if no user context given."""
await service.entity_service_call(hass, [
Mock(entities=mock_entities)
], Mock(), ha.ServiceCall('test_domain', 'test_service'))
assert len(mock_service_platform_call.mock_calls) == 1
entities = mock_service_platform_call.mock_calls[0][1][2]
assert entities == list(mock_entities.values())
async def test_call_no_context_target_specific(
hass, mock_service_platform_call, mock_entities):
"""Check we can target specified entities."""
await service.entity_service_call(hass, [
Mock(entities=mock_entities)
], Mock(), ha.ServiceCall('test_domain', 'test_service', {
'entity_id': ['light.kitchen', 'light.non-existing']
}))
assert len(mock_service_platform_call.mock_calls) == 1
entities = mock_service_platform_call.mock_calls[0][1][2]
assert entities == [mock_entities['light.kitchen']]
async def test_call_with_match_all(hass, mock_service_platform_call,
mock_entities, caplog):
"""Check we only target allowed entities if targetting all."""
await service.entity_service_call(hass, [
Mock(entities=mock_entities)
], Mock(), ha.ServiceCall('test_domain', 'test_service', {
'entity_id': 'all'
}))
assert len(mock_service_platform_call.mock_calls) == 1
entities = mock_service_platform_call.mock_calls[0][1][2]
assert entities == [
mock_entities['light.kitchen'], mock_entities['light.living_room']]
assert ('Not passing an entity ID to a service to target '
'all entities is deprecated') not in caplog.text
async def test_call_with_omit_entity_id(hass, mock_service_platform_call,
mock_entities, caplog):
"""Check we only target allowed entities if targetting all."""
await service.entity_service_call(hass, [
Mock(entities=mock_entities)
], Mock(), ha.ServiceCall('test_domain', 'test_service'))
assert len(mock_service_platform_call.mock_calls) == 1
entities = mock_service_platform_call.mock_calls[0][1][2]
assert entities == [
mock_entities['light.kitchen'], mock_entities['light.living_room']]
assert ('Not passing an entity ID to a service to target '
'all entities is deprecated') in caplog.text
async def test_register_admin_service(hass, hass_read_only_user,
hass_admin_user):
"""Test the register admin service."""
calls = []
async def mock_service(call):
calls.append(call)
hass.helpers.service.async_register_admin_service(
'test', 'test', mock_service
)
hass.helpers.service.async_register_admin_service(
'test', 'test2', mock_service,
vol.Schema({vol.Required('required'): cv.boolean})
)
with pytest.raises(exceptions.UnknownUser):
await hass.services.async_call(
'test', 'test', {}, blocking=True, context=ha.Context(
user_id='non-existing'
))
assert len(calls) == 0
with pytest.raises(exceptions.Unauthorized):
await hass.services.async_call(
'test', 'test', {}, blocking=True, context=ha.Context(
user_id=hass_read_only_user.id
))
assert len(calls) == 0
with pytest.raises(vol.Invalid):
await hass.services.async_call(
'test', 'test', {'invalid': True}, blocking=True,
context=ha.Context(user_id=hass_admin_user.id))
assert len(calls) == 0
with pytest.raises(vol.Invalid):
await hass.services.async_call(
'test', 'test2', {}, blocking=True, context=ha.Context(
user_id=hass_admin_user.id
))
assert len(calls) == 0
await hass.services.async_call(
'test', 'test2', {'required': True}, blocking=True, context=ha.Context(
user_id=hass_admin_user.id
))
assert len(calls) == 1
assert calls[0].context.user_id == hass_admin_user.id
async def test_domain_control_not_async(hass, mock_entities):
"""Test domain verification in a service call with an unknown user."""
calls = []
def mock_service_log(call):
"""Define a protected service."""
calls.append(call)
with pytest.raises(exceptions.HomeAssistantError):
hass.helpers.service.verify_domain_control(
'test_domain')(mock_service_log)
async def test_domain_control_unknown(hass, mock_entities):
"""Test domain verification in a service call with an unknown user."""
calls = []
async def mock_service_log(call):
"""Define a protected service."""
calls.append(call)
with patch('homeassistant.helpers.entity_registry.async_get_registry',
return_value=mock_coro(Mock(entities=mock_entities))):
protected_mock_service = hass.helpers.service.verify_domain_control(
'test_domain')(mock_service_log)
hass.services.async_register(
'test_domain', 'test_service', protected_mock_service, schema=None)
with pytest.raises(exceptions.UnknownUser):
await hass.services.async_call(
'test_domain',
'test_service', {},
blocking=True,
context=ha.Context(user_id='fake_user_id'))
assert len(calls) == 0
async def test_domain_control_unauthorized(
hass, hass_read_only_user, mock_entities):
"""Test domain verification in a service call with an unauthorized user."""
calls = []
async def mock_service_log(call):
"""Define a protected service."""
calls.append(call)
with patch('homeassistant.helpers.entity_registry.async_get_registry',
return_value=mock_coro(Mock(entities=mock_entities))):
protected_mock_service = hass.helpers.service.verify_domain_control(
'test_domain')(mock_service_log)
hass.services.async_register(
'test_domain', 'test_service', protected_mock_service, schema=None)
with pytest.raises(exceptions.Unauthorized):
await hass.services.async_call(
'test_domain',
'test_service', {},
blocking=True,
context=ha.Context(user_id=hass_read_only_user.id))
async def test_domain_control_admin(hass, hass_admin_user, mock_entities):
"""Test domain verification in a service call with an admin user."""
calls = []
async def mock_service_log(call):
"""Define a protected service."""
calls.append(call)
with patch('homeassistant.helpers.entity_registry.async_get_registry',
return_value=mock_coro(Mock(entities=mock_entities))):
protected_mock_service = hass.helpers.service.verify_domain_control(
'test_domain')(mock_service_log)
hass.services.async_register(
'test_domain', 'test_service', protected_mock_service, schema=None)
await hass.services.async_call(
'test_domain',
'test_service', {},
blocking=True,
context=ha.Context(user_id=hass_admin_user.id))
assert len(calls) == 1
async def test_domain_control_no_user(hass, mock_entities):
"""Test domain verification in a service call with no user."""
calls = []
async def mock_service_log(call):
"""Define a protected service."""
calls.append(call)
with patch('homeassistant.helpers.entity_registry.async_get_registry',
return_value=mock_coro(Mock(entities=mock_entities))):
protected_mock_service = hass.helpers.service.verify_domain_control(
'test_domain')(mock_service_log)
hass.services.async_register(
'test_domain', 'test_service', protected_mock_service, schema=None)
await hass.services.async_call(
'test_domain',
'test_service', {},
blocking=True,
context=ha.Context(user_id=None))
assert len(calls) == 1
import pandas as pd
import numpy as np
import itertools
import warnings
import sys
try:
import matplotlib.pyplot as plt
import seaborn as sns
except ImportError:
print('Importing hier_diff without matplotlib.')
import scipy.cluster.hierarchy as sch
from scipy.spatial import distance
from scipy import stats
try:
from adjustwithin import adjustnonnan
except ImportError:
print('Importing hier_diff without multiplicity adjustment package.')
__all__ = ['testHClusters',
'getClusterMembers',
'plotHClustProportions',
'testCondition',
'testSubset']
def testHClusters(cntsDf, members, cols=None, min_count=5):
"""Test each cluster for disproportionate representation of TCRs
    from a set of conditions (e.g. stimulations). The test is based on the chi-squared statistic,
    comparing observed vs. expected proportions of TCRs
    that are in vs. not in a cluster (i.e. a 2 x N_cols table).
Parameters
----------
cntsDf : pd.DataFrame [TCRs, conditions]
Counts table of TCRs (rows) that have been observed in specific conditions (columns)
Importantly the integer indices of the rows must match those used to define
clusters in members.
members : dict of lists
Each element has a cluster ID (key) and a list of cluster members (indices into cntsDf)
Can be generated from getClusterMembers with the result from calling sch.linkage (Z).
        Clusters need not be mutually exclusive, and are not when using hierarchical clustering.
cols : list
Columns in cntsDf to use as conditions (default: all columns of cntsDf)
min_count : int
Required minimum number of member TCRs in a cluster to run the test.
Returns
-------
resDf : pd.DataFrame [clusters, result columns]
Results from the tests with observed/expected counts and frequencies, Chi2 statistics,
p-values, FWER and FDR adjusted p-values."""
if cols is None:
cols = cntsDf.columns
tot = cntsDf.sum()
Ncells = tot.sum()
uCDR3 = list(cntsDf.index)
results = []
for cid, m in members.items():
notM = [i for i in range(cntsDf.shape[0]) if not i in m]
obs = np.concatenate((np.sum(cntsDf[cols].values[m, :], axis=0, keepdims=True),
np.sum(cntsDf[cols].values[notM, :], axis=0, keepdims=True)), axis=0)
if np.sum(obs, axis=1)[0] > min_count:
"""Inner product of the marginal totals along both axes, divided by total cells"""
expect = np.dot(np.sum(obs, keepdims=True, axis=1),
np.sum(obs, keepdims=True, axis=0)) / Ncells
with warnings.catch_warnings():
warnings.simplefilter('ignore')
chi2 = (obs - expect)**2 / expect
sum_chi2 = np.sum(chi2)
degf = len(cols) - 1
pvalue = 1 - stats.chi2.cdf(sum_chi2, degf)
results.append({'cid':cid,
'chi2':sum_chi2,
'pvalue':pvalue,
'observed':tuple(obs[0, :]),
'observed_prop':(obs / np.sum(obs, axis=0))[0, :],
'expected':tuple(expect[0, :]),
'expected_prop':(expect / np.sum(obs, axis=0))[0, :],
'members':tuple(m),
'labels':cols})
else:
results.append({'cid':cid,
'chi2':np.nan,
'pvalue':np.nan,
'observed':tuple(obs[0, :]),
'observed_prop': (obs / np.sum(obs, axis=0))[0, :],
'expected':(np.nan, )*len(cols),
'expected_prop': (np.nan, )*len(cols),
'members':tuple(m),
'labels':cols})
resDf = pd.DataFrame(results)
if 'adjustwithin' in sys.modules:
resDf.loc[:, 'FWER-pvalue'] = adjustnonnan(resDf['pvalue'], method='holm')
resDf.loc[:, 'FDR-qvalue'] = adjustnonnan(resDf['pvalue'], method='fdr_bh')
return resDf.set_index('cid')
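# A minimal, self-contained usage sketch of testHClusters (added for illustration,
# not part of the original module). It fabricates random counts and distances for
# 10 TCRs across two hypothetical conditions, so the resulting p-values are
# meaningless; it only shows how cntsDf, members and cols fit together.
def _example_testHClusters():
    np.random.seed(0)
    cnts = pd.DataFrame(np.random.randint(0, 20, size=(10, 2)),
                        columns=['stim_A', 'stim_B'])
    # Cluster the 10 TCRs on random pairwise distances (condensed form for linkage).
    Z = sch.linkage(distance.pdist(np.random.rand(10, 3)), method='complete')
    members = getClusterMembers(Z)
    return testHClusters(cnts, members, cols=['stim_A', 'stim_B'], min_count=3)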
def getClusterMembers(Z):
"""Generate dict of lists where each key is a cluster ID from the results
of linkage-based hierarchical clustering with scipy.cluster.hierarchy.linkage (Z)
Parameters
----------
Z : linkage matrix [clusters, 4]
Returns
-------
members : dict of lists
Each element has a cluster ID (key) and a list of
cluster members (indices into the original data matrix)"""
clusters = {}
for i, merge in enumerate(Z):
cid = 1 + i + Z.shape[0]
clusters[cid] = [merge[0], merge[1]]
def _getIndices(clusters, i):
if i <= Z.shape[0]:
return [int(i)]
else:
return _getIndices(clusters, clusters[i][0]) + _getIndices(clusters, clusters[i][1])
members = {i:_getIndices(clusters, i) for i in range(Z.shape[0] + 1, max(clusters.keys()) + 1)}
return members
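# A tiny worked example of getClusterMembers (added for illustration, not part of
# the original module): with three observations, linkage produces two merges, so
# cluster IDs 3 and 4 map back to the leaf indices they contain.
def _example_getClusterMembers():
    # Points 0 and 1 merge first (cluster ID 3), then cluster 3 merges with
    # point 2 (cluster ID 4), giving {3: [0, 1], 4: [2, 0, 1]}.
    toy = np.array([[0.0], [0.1], [5.0]])
    Z = sch.linkage(distance.pdist(toy), method='complete')
    return getClusterMembers(Z)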
def plotHClustProportions(figh, Z, resDf, alpha_col='pvalue', alpha=0.05, colors=None, ann='N', xLim=None, maxY=None, min_count=20):
"""Plot tree of linkage-based hierarchical clustering, with nodes colored with stacked bars
    representing the proportion of cluster members associated with specific conditions. Nodes are also
    optionally annotated with the p-value, number of members or cluster ID.
Parameters
----------
figh : mpl Figure() handle
Z : linkage matrix
Result of calling sch.linkage on a compressed pair-wise distance matrix
resDf : pd.DataFrame
Result from calling testHClusters, with observed/frequencies and p-values for each node
alpha_col : str
Column in resDf to use for 'alpha' annotation
alpha : float
Threshold for plotting the stacked bars and annotation
colors : tuple of valid colors
Used for stacked bars of conditions at each node
labels : list of condition labels
Matched to tuples of colors and frequencies in resDf
ann : str
Indicates how nodes should be annotated: N, alpha, CID supported
xLim : tuple
Apply x-lims after plotting to focus on particular part of the tree"""
nCategories = len(resDf['observed'].iloc[0])
if colors is None:
colors = sns.color_palette('Set1', n_colors=nCategories)
labels = resDf['labels'].iloc[0]
dend = sch.dendrogram(Z, no_plot=True,
color_threshold=None,
link_color_func=lambda lid: hex(lid),
above_threshold_color='FFFFF')
figh.clf()
axh = plt.axes((0.05, 0.07, 0.8, 0.8), facecolor='w')
lowestY = None
annotateCount = 0
for xx, yy, hex_cid in zip(dend['icoord'], dend['dcoord'], dend['color_list']):
cid = int(hex_cid, 16)
xx = np.array(xx) / 10
axh.plot(xx, yy, zorder=1, lw=0.5, color='k', alpha=1)
N = np.sum(resDf.loc[cid, 'observed'])
if alpha is None or resDf.loc[cid, alpha_col] <= alpha and N > min_count:
obs = np.asarray(resDf.loc[cid, 'observed_prop'])
obs = obs / np.sum(obs)
L = (xx[2] - xx[1])
xvec = L * np.concatenate(([0.], obs, [1.]))
curX = xx[1]
for i in range(len(obs)):
c = colors[i]
axh.plot([curX, curX + L*obs[i]],
yy[1:3],
color=c,
lw=10,
solid_capstyle='butt')
curX += L*obs[i]
if ann == 'N':
s = '%1.0f' % N
elif ann == 'CID':
s = cid
elif ann == 'alpha':
if resDf.loc[cid, alpha_col] < 0.001:
s = '< 0.001'
else:
s = '%1.3f' % resDf.loc[cid, alpha_col]
if not ann == '':# and annotateCount < annC:
xy = (xx[1] + L/2, yy[1])
# print(s,np.round(xy[0]), np.round(xy[1]))
annotateCount += 1
axh.annotate(s,
xy=xy,
size='x-small',
horizontalalignment='center',
verticalalignment='center')
if lowestY is None or yy[1] < lowestY:
lowestY = yy[1]
yl = axh.get_ylim()
if not lowestY is None:
yl0 = 0.9*lowestY
else:
yl0 = yl[0]
if not maxY is None:
yl1 = maxY
else:
yl1 = yl[1]
axh.set_ylim((yl0, yl1))
axh.set_yticks(())
if not xLim is None:
if xLim[1] is None:
xl1 = axh.get_xlim()[1]
xLim = (xLim[0], xl1)
axh.set_xlim(xLim)
else:
xLim = axh.get_xlim()
xt = [x for x in range(0, Z.shape[0]) if x <= xLim[1] and x>= xLim[0]]
xt = xt[::len(xt) // 10]
# xtl = [x//10 for x in xt]
axh.set_xticks(xt)
# axh.set_xticklabels(xtl)
legh = axh.legend([plt.Rectangle((0,0), 1, 1, color=c) for c in colors],
labels,
loc='upper left', bbox_to_anchor=(1, 1))
def testCondition(df, indexCol, dmatDf, gbCol, gbValues=None, countCol='Cells', min_count=3):
"""Use hierarchical clustering to cluster data in df based on unique pair-wise distances
in dmatDf. Then test clusters for disproportionate association of members with a condition
indicated in gbCol.
Parameters
----------
df : pd.DataFrame [TCRs, metadata]
        Contains frequency data for TCRs in long form.
May be a subset of the larger dataset that was used for clustering.
indexCol : str
Column to use as the index for individual TCRs
dmatDf : pd.DataFrame [unique indices, unique indices]
Contains pairwise distances among all unique values in the indexCol of df
gbCol : str
Column of metadata in df containing conditions for testing
gbValues : list
List of values relevant for testing. Can be fewer than all values in gbCol to ignore
irrelevant conditions.
countCol : str
Column containing the integer counts for testing
min_count : int
Required minimum number of member TCRs in a cluster to run the test."""
if gbValues is None:
gbValues = sorted(df[gbCol].unique())
cnts = df.groupby([indexCol, gbCol])[countCol].agg(np.sum).unstack(gbCol, fill_value=0)[gbValues]
uIndices = list(df[indexCol].dropna().unique())
dmat = dmatDf.loc[:, uIndices].loc[uIndices, :]
compressedDmat = distance.squareform(dmat.values)
Z = sch.linkage(compressedDmat, method='complete')
members = getClusterMembers(Z)
resDf = testHClusters(cnts, members, gbValues, min_count=min_count)
return Z, resDf, np.array(uIndices)
def testSubset(df, fullIndex, indexCol, members, gbCol='Stimulus', gbValues=None, countCol='Cells', min_count=7, nsamps=None, rseed=110820):
"""Test clusters for disproportionate association of members with a condition indicated in gbCol.
Flexible for testing a subset of the data that was used for clustering
(and which is represented in members). This is helpful when the clustering is more accurate with the
    larger dataset, but a question is asked of only a subset of the data.
    Permutation-based testing has been indistinguishable from analytic Chi2-based testing in preliminary tests.
Parameters
----------
df : pd.DataFrame [TCRs, metadata]
        Contains frequency data for TCRs in long form.
May be a subset of the larger dataset that was used for clustering.
fullIndex : list
List of all unique values of the indexCol in the whole dataset.
Order of values must match the integer indices in members.
indexCol : str
Column to use as the index for individual TCRs
members : dict of lists
Each element has a cluster ID (key) and a list of cluster members (indices into cntsDf)
Can be generated from getClusterMembers with the result from calling sch.linkage (Z).
        Clusters need not be mutually exclusive, and are not when using hierarchical clustering.
gbCol : str
Column of metadata containing conditions for testing
gbValues : list
List of values relevant for testing. Can be fewer than all values in gbCol to ignore
irrelevant conditions.
countCol : str
Column containing the integer counts for testing
min_count : int
Required minimum number of member TCRs in a cluster to run the test.
nsamps : int
Number of permutations for permutation-based testing
rseed : int
        Random number seed for permutation-based testing
if gbValues is None:
gbValues = sorted(df[gbCol].unique())
cnts = df.groupby([indexCol, gbCol])[countCol].agg(np.sum).unstack(gbCol, fill_value=0)[gbValues]
cnts = cnts.reindex(fullIndex, axis=0, fill_value=0)
resDf = testHClusters(cnts, members, gbValues, min_count=min_count)
if not nsamps is None:
"""Preliminarily, permutation-based p-values have correlated perfectly
with the analytic p-values"""
np.random.seed(rseed)
rtmp = df.copy()
rchi2 = np.zeros((resDf.shape[0], nsamps))
rpvalue = np.zeros((resDf.shape[0], nsamps))
for sampi in range(nsamps):
rtmp.loc[:, gbCol] = rtmp[gbCol].values[np.random.permutation(rtmp.shape[0])]
rcnts = rtmp.groupby([indexCol, gbCol])['Cells'].agg(np.sum).unstack(gbCol, fill_value=0)
rcnts = rcnts.reindex(fullIndex, axis=0, fill_value=0)
rres = testHClusters(rcnts, members, gbValues, min_count=min_count)
rchi2[:, sampi] = rres['chi2']
rpvalue[:, sampi] = rres['pvalue']
ppvalue = ((rpvalue <= resDf['pvalue'].values[:, None]).sum(axis=1) + 1) / (nsamps + 1)
pchi2 = ((rchi2 <= resDf['chi2'].values[:, None]).sum(axis=1) + 1) / (nsamps + 1)
ppvalue[np.isnan(resDf['chi2'].values)] = np.nan
pchi2[np.isnan(resDf['chi2'].values)] = np.nan
resDf = resDf.assign(**{'Perm P-pvalue':ppvalue, 'Perm Chi2-pvalue':pchi2})
return resDf
from core.himesis import Himesis
import uuid
class HState2CProcDef(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule State2CProcDef.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HState2CProcDef, self).__init__(name='HState2CProcDef', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """State2CProcDef"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'State2CProcDef')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class State() node
self.add_node()
self.vs[3]["mm__"] = """State"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class State()
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# match class Transition() node
self.add_node()
self.vs[5]["mm__"] = """Transition"""
self.vs[5]["attr1"] = """1"""
# match_contains node for class Transition()
self.add_node()
self.vs[6]["mm__"] = """match_contains"""
# match class EntryPoint() node
self.add_node()
self.vs[7]["mm__"] = """EntryPoint"""
self.vs[7]["attr1"] = """1"""
# match_contains node for class EntryPoint()
self.add_node()
self.vs[8]["mm__"] = """match_contains"""
# match class StateMachine() node
self.add_node()
self.vs[9]["mm__"] = """StateMachine"""
self.vs[9]["attr1"] = """1"""
# match_contains node for class StateMachine()
self.add_node()
self.vs[10]["mm__"] = """match_contains"""
# apply class LocalDef() node
self.add_node()
self.vs[11]["mm__"] = """LocalDef"""
self.vs[11]["attr1"] = """1"""
# apply_contains node for class LocalDef()
self.add_node()
self.vs[12]["mm__"] = """apply_contains"""
# apply class ProcDef() node
self.add_node()
self.vs[13]["mm__"] = """ProcDef"""
self.vs[13]["attr1"] = """1"""
# apply_contains node for class ProcDef()
self.add_node()
self.vs[14]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[15]["mm__"] = """Name"""
self.vs[15]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[16]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[17]["mm__"] = """Name"""
self.vs[17]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[18]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[19]["mm__"] = """Name"""
self.vs[19]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[20]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[21]["mm__"] = """Name"""
self.vs[21]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[22]["mm__"] = """apply_contains"""
# apply class ConditionSet() node
self.add_node()
self.vs[23]["mm__"] = """ConditionSet"""
self.vs[23]["attr1"] = """1"""
# apply_contains node for class ConditionSet()
self.add_node()
self.vs[24]["mm__"] = """apply_contains"""
# apply class Inst() node
self.add_node()
self.vs[25]["mm__"] = """Inst"""
self.vs[25]["attr1"] = """1"""
# apply_contains node for class Inst()
self.add_node()
self.vs[26]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[27]["mm__"] = """Name"""
self.vs[27]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[28]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[29]["mm__"] = """Name"""
self.vs[29]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[30]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[31]["mm__"] = """Name"""
self.vs[31]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[32]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[33]["mm__"] = """Name"""
self.vs[33]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[34]["mm__"] = """apply_contains"""
# match association State--initialTransition-->Transition node
self.add_node()
self.vs[35]["attr1"] = """initialTransition"""
self.vs[35]["mm__"] = """directLink_S"""
# match association Transition--dest-->EntryPoint node
self.add_node()
self.vs[36]["attr1"] = """dest"""
self.vs[36]["mm__"] = """directLink_S"""
# match association EntryPoint--owningStateMachine-->StateMachine node
self.add_node()
self.vs[37]["attr1"] = """owningStateMachine"""
self.vs[37]["mm__"] = """directLink_S"""
# apply association LocalDef--def-->ProcDef node
self.add_node()
self.vs[38]["attr1"] = """def"""
self.vs[38]["mm__"] = """directLink_T"""
# apply association ProcDef--channelNames-->Name node
self.add_node()
self.vs[39]["attr1"] = """channelNames"""
self.vs[39]["mm__"] = """directLink_T"""
# apply association ProcDef--channelNames-->Name node
self.add_node()
self.vs[40]["attr1"] = """channelNames"""
self.vs[40]["mm__"] = """directLink_T"""
# apply association ProcDef--channelNames-->Name node
self.add_node()
self.vs[41]["attr1"] = """channelNames"""
self.vs[41]["mm__"] = """directLink_T"""
# apply association ProcDef--channelNames-->Name node
self.add_node()
self.vs[42]["attr1"] = """channelNames"""
self.vs[42]["mm__"] = """directLink_T"""
# apply association ProcDef--p-->ConditionSet node
self.add_node()
self.vs[43]["attr1"] = """p"""
self.vs[43]["mm__"] = """directLink_T"""
# apply association ConditionSet--alternative-->Inst node
self.add_node()
self.vs[44]["attr1"] = """alternative"""
self.vs[44]["mm__"] = """directLink_T"""
# apply association Inst--channelNames-->Name node
self.add_node()
self.vs[45]["attr1"] = """channelNames"""
self.vs[45]["mm__"] = """directLink_T"""
# apply association Inst--channelNames-->Name node
self.add_node()
self.vs[46]["attr1"] = """channelNames"""
self.vs[46]["mm__"] = """directLink_T"""
# apply association Inst--channelNames-->Name node
self.add_node()
self.vs[47]["attr1"] = """channelNames"""
self.vs[47]["mm__"] = """directLink_T"""
# apply association Inst--channelNames-->Name node
self.add_node()
self.vs[48]["attr1"] = """channelNames"""
self.vs[48]["mm__"] = """directLink_T"""
# backward association State---->LocalDef node
self.add_node()
self.vs[49]["mm__"] = """backward_link"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class State()
(0,6), # matchmodel -> match_contains
(6,5), # match_contains -> match_class Transition()
(0,8), # matchmodel -> match_contains
(8,7), # match_contains -> match_class EntryPoint()
(0,10), # matchmodel -> match_contains
(10,9), # match_contains -> match_class StateMachine()
(1,12), # applymodel -> apply_contains
(12,11), # apply_contains -> apply_class LocalDef()
(1,14), # applymodel -> apply_contains
(14,13), # apply_contains -> apply_class ProcDef()
(1,16), # applymodel -> apply_contains
(16,15), # apply_contains -> apply_class Name()
(1,18), # applymodel -> apply_contains
(18,17), # apply_contains -> apply_class Name()
(1,20), # applymodel -> apply_contains
(20,19), # apply_contains -> apply_class Name()
(1,22), # applymodel -> apply_contains
(22,21), # apply_contains -> apply_class Name()
(1,24), # applymodel -> apply_contains
(24,23), # apply_contains -> apply_class ConditionSet()
(1,26), # applymodel -> apply_contains
(26,25), # apply_contains -> apply_class Inst()
(1,28), # applymodel -> apply_contains
(28,27), # apply_contains -> apply_class Name()
(1,30), # applymodel -> apply_contains
(30,29), # apply_contains -> apply_class Name()
(1,32), # applymodel -> apply_contains
(32,31), # apply_contains -> apply_class Name()
(1,34), # applymodel -> apply_contains
(34,33), # apply_contains -> apply_class Name()
(3,35), # match_class State() -> association initialTransition
(35,5), # association initialTransition -> match_class Transition()
(5,36), # match_class Transition() -> association dest
(36,7), # association dest -> match_class EntryPoint()
(7,37), # match_class EntryPoint() -> association owningStateMachine
(37,9), # association owningStateMachine -> match_class StateMachine()
(11,38), # apply_class LocalDef() -> association def
(38,13), # association def -> apply_class ProcDef()
(13,39), # apply_class ProcDef() -> association channelNames
(39,15), # association channelNames -> apply_class Name()
(13,40), # apply_class ProcDef() -> association channelNames
(40,17), # association channelNames -> apply_class Name()
(13,41), # apply_class ProcDef() -> association channelNames
(41,19), # association channelNames -> apply_class Name()
(13,42), # apply_class ProcDef() -> association channelNames
(42,21), # association channelNames -> apply_class Name()
(13,43), # apply_class ProcDef() -> association p
(43,23), # association p -> apply_class ConditionSet()
(23,44), # apply_class ConditionSet() -> association alternative
(44,25), # association alternative -> apply_class Inst()
(25,45), # apply_class Inst() -> association channelNames
(45,27), # association channelNames -> apply_class Name()
(25,46), # apply_class Inst() -> association channelNames
(46,29), # association channelNames -> apply_class Name()
(25,47), # apply_class Inst() -> association channelNames
(47,31), # association channelNames -> apply_class Name()
(25,48), # apply_class Inst() -> association channelNames
(48,33), # association channelNames -> apply_class Name()
(11,49), # apply_class LocalDef() -> backward_association
            (49,3), # backward_association -> match_class State()
            (0,2), # matchmodel -> pairedwith
            (2,1) # pairedwith -> applymodel
])
# Add the attribute equations
        self["equations"] = [
            ((3,'isComposite'),('constant','true')),
            ((11,'__ApplyAttribute'),('constant','localdefcompstate')),
            ((13,'name'),('constant','C')),
            ((15,'literal'),('constant','exit')),
            ((17,'literal'),('constant','exack')),
            ((19,'literal'),('constant','enp')),
            ((21,'literal'),('constant','sh')),
            ((23,'__ApplyAttribute'),('constant','condsetcompstate')),
            ((25,'name'),('concat',(('constant','S'),(9,'name')))),
            ((27,'literal'),('constant','exit_in')),
            ((29,'literal'),('constant','exack_in')),
            ((31,'literal'),('concat',(('constant','A'),('concat',((7,'name'),('constant','A')))))),
            ((33,'literal'),('constant','sh_in')),
        ]
|
|
"""
A python class to represent a single comic, be it file or folder of images
"""
"""
Copyright 2012-2014 Anthony Beville
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import zipfile
import os
import struct
import sys
import tempfile
import subprocess
import platform
import locale
from natsort import natsorted
if platform.system() == "Windows":
import _subprocess
import time
import StringIO
try:
import Image
pil_available = True
except ImportError:
pil_available = False
sys.path.insert(0, os.path.abspath(".") )
import UnRAR2
from UnRAR2.rar_exceptions import *
#from settings import ComicTaggerSettings
from comicinfoxml import ComicInfoXml
from comicbookinfo import ComicBookInfo
from comet import CoMet
from genericmetadata import GenericMetadata, PageType
from filenameparser import FileNameParser
class MetaDataStyle:
CBI = 0
CIX = 1
COMET = 2
name = [ 'ComicBookLover', 'ComicRack', 'CoMet' ]
class ZipArchiver:
def __init__( self, path ):
self.path = path
def getArchiveComment( self ):
zf = zipfile.ZipFile( self.path, 'r' )
comment = zf.comment
zf.close()
return comment
def setArchiveComment( self, comment ):
return self.writeZipComment( self.path, comment )
def readArchiveFile( self, archive_file ):
data = ""
zf = zipfile.ZipFile( self.path, 'r' )
try:
data = zf.read( archive_file )
except zipfile.BadZipfile as e:
print >> sys.stderr, u"bad zipfile [{0}]: {1} :: {2}".format(e, self.path, archive_file)
zf.close()
raise IOError
except Exception as e:
zf.close()
print >> sys.stderr, u"bad zipfile [{0}]: {1} :: {2}".format(e, self.path, archive_file)
raise IOError
finally:
zf.close()
return data
def removeArchiveFile( self, archive_file ):
try:
self.rebuildZipFile( [ archive_file ] )
except:
return False
else:
return True
def writeArchiveFile( self, archive_file, data ):
# At the moment, no other option but to rebuild the whole
# zip archive w/o the indicated file. Very sucky, but maybe
# another solution can be found
try:
self.rebuildZipFile( [ archive_file ] )
#now just add the archive file as a new one
zf = zipfile.ZipFile(self.path, mode='a', compression=zipfile.ZIP_DEFLATED )
zf.writestr( archive_file, data )
zf.close()
return True
except:
return False
def getArchiveFilenameList( self ):
try:
zf = zipfile.ZipFile( self.path, 'r' )
namelist = zf.namelist()
zf.close()
return namelist
except Exception as e:
print >> sys.stderr, u"Unable to get zipfile list [{0}]: {1}".format(e, self.path)
return []
# zip helper func
def rebuildZipFile( self, exclude_list ):
# this recompresses the zip archive, without the files in the exclude_list
#print ">> sys.stderr, Rebuilding zip {0} without {1}".format( self.path, exclude_list )
# generate temp file
tmp_fd, tmp_name = tempfile.mkstemp( dir=os.path.dirname(self.path) )
os.close( tmp_fd )
zin = zipfile.ZipFile (self.path, 'r')
zout = zipfile.ZipFile (tmp_name, 'w')
for item in zin.infolist():
buffer = zin.read(item.filename)
if ( item.filename not in exclude_list ):
zout.writestr(item, buffer)
#preserve the old comment
zout.comment = zin.comment
zout.close()
zin.close()
# replace with the new file
os.remove( self.path )
os.rename( tmp_name, self.path )
def writeZipComment( self, filename, comment ):
"""
This is a custom function for writing a comment to a zip file,
since the built-in one doesn't seem to work on Windows and Mac OS/X
Fortunately, the zip comment is at the end of the file, and it's
easy to manipulate. See this website for more info:
see: http://en.wikipedia.org/wiki/Zip_(file_format)#Structure
"""
#get file size
statinfo = os.stat(filename)
file_length = statinfo.st_size
try:
fo = open(filename, "r+b")
#the starting position, relative to EOF
pos = -4
found = False
value = bytearray()
# walk backwards to find the "End of Central Directory" record
while ( not found ) and ( -pos != file_length ):
# seek, relative to EOF
fo.seek( pos, 2)
value = fo.read( 4 )
#look for the end of central directory signature
if bytearray(value) == bytearray([ 0x50, 0x4b, 0x05, 0x06 ]):
found = True
else:
# not found, step back another byte
pos = pos - 1
#print pos,"{1} int: {0:x}".format(bytearray(value)[0], value)
if found:
# now skip forward 20 bytes to the comment length word
pos += 20
fo.seek( pos, 2)
# Pack the length of the comment string
format = "H" # one 2-byte integer
comment_length = struct.pack(format, len(comment)) # pack integer in a binary string
# write out the length
fo.write( comment_length )
fo.seek( pos+2, 2)
# write out the comment itself
fo.write( comment )
fo.truncate()
fo.close()
else:
raise Exception('Failed to write comment to zip file!')
except:
return False
else:
return True
def copyFromArchive( self, otherArchive ):
# Replace the current zip with one copied from another archive
try:
zout = zipfile.ZipFile (self.path, 'w')
for fname in otherArchive.getArchiveFilenameList():
data = otherArchive.readArchiveFile( fname )
if data is not None:
zout.writestr( fname, data )
zout.close()
#preserve the old comment
comment = otherArchive.getArchiveComment()
if comment is not None:
if not self.writeZipComment( self.path, comment ):
return False
except Exception as e:
print >> sys.stderr, u"Error while copying to {0}: {1}".format(self.path, e)
return False
else:
return True
#------------------------------------------
# RAR implementation
class RarArchiver:
devnull = None
def __init__( self, path, rar_exe_path ):
self.path = path
self.rar_exe_path = rar_exe_path
if RarArchiver.devnull is None:
RarArchiver.devnull = open(os.devnull, "w")
# windows only, keeps the cmd.exe from popping up
if platform.system() == "Windows":
self.startupinfo = subprocess.STARTUPINFO()
self.startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
else:
self.startupinfo = None
def __del__(self):
#RarArchiver.devnull.close()
pass
def getArchiveComment( self ):
rarc = self.getRARObj()
return rarc.comment
def setArchiveComment( self, comment ):
if self.rar_exe_path is not None:
try:
# write comment to temp file
tmp_fd, tmp_name = tempfile.mkstemp()
f = os.fdopen(tmp_fd, 'w+b')
f.write( comment )
f.close()
working_dir = os.path.dirname( os.path.abspath( self.path ) )
# use external program to write comment to Rar archive
subprocess.call([self.rar_exe_path, 'c', '-w' + working_dir , '-c-', '-z' + tmp_name, self.path],
startupinfo=self.startupinfo,
stdout=RarArchiver.devnull)
if platform.system() == "Darwin":
time.sleep(1)
os.remove( tmp_name)
except:
return False
else:
return True
else:
return False
def readArchiveFile( self, archive_file ):
# Make sure to escape brackets, since some funky stuff is going on
# underneath with "fnmatch"
archive_file = archive_file.replace("[", '[[]')
entries = []
rarc = self.getRARObj()
tries = 0
while tries < 7:
try:
tries = tries+1
entries = rarc.read_files( archive_file )
if entries[0][0].size != len(entries[0][1]):
print >> sys.stderr, u"readArchiveFile(): [file is not expected size: {0} vs {1}] {2}:{3} [attempt # {4}]".format(
entries[0][0].size,len(entries[0][1]), self.path, archive_file, tries)
continue
except (OSError, IOError) as e:
print >> sys.stderr, u"readArchiveFile(): [{0}] {1}:{2} attempt#{3}".format(str(e), self.path, archive_file, tries)
time.sleep(1)
except Exception as e:
print >> sys.stderr, u"Unexpected exception in readArchiveFile(): [{0}] for {1}:{2} attempt#{3}".format(str(e), self.path, archive_file, tries)
break
else:
                # Success
                # entries is a list of tuples: (rarinfo, filedata)
if tries > 1:
print >> sys.stderr, u"Attempted read_files() {0} times".format(tries)
if (len(entries) == 1):
return entries[0][1]
else:
raise IOError
raise IOError
def writeArchiveFile( self, archive_file, data ):
if self.rar_exe_path is not None:
try:
tmp_folder = tempfile.mkdtemp()
tmp_file = os.path.join( tmp_folder, archive_file )
working_dir = os.path.dirname( os.path.abspath( self.path ) )
# TODO: will this break if 'archive_file' is in a subfolder. i.e. "foo/bar.txt"
# will need to create the subfolder above, I guess...
f = open(tmp_file, 'w')
f.write( data )
f.close()
# use external program to write file to Rar archive
subprocess.call([self.rar_exe_path, 'a', '-w' + working_dir ,'-c-', '-ep', self.path, tmp_file],
startupinfo=self.startupinfo,
stdout=RarArchiver.devnull)
if platform.system() == "Darwin":
time.sleep(1)
os.remove( tmp_file)
os.rmdir( tmp_folder)
except:
return False
else:
return True
else:
return False
def removeArchiveFile( self, archive_file ):
if self.rar_exe_path is not None:
try:
# use external program to remove file from Rar archive
subprocess.call([self.rar_exe_path, 'd','-c-', self.path, archive_file],
startupinfo=self.startupinfo,
stdout=RarArchiver.devnull)
if platform.system() == "Darwin":
time.sleep(1)
except:
return False
else:
return True
else:
return False
def getArchiveFilenameList( self ):
rarc = self.getRARObj()
#namelist = [ item.filename for item in rarc.infolist() ]
#return namelist
tries = 0
while tries < 7:
try:
tries = tries+1
#namelist = [ item.filename for item in rarc.infolist() ]
namelist = []
for item in rarc.infolist():
if item.size != 0:
namelist.append( item.filename )
except (OSError, IOError) as e:
print >> sys.stderr, u"getArchiveFilenameList(): [{0}] {1} attempt#{2}".format(str(e), self.path, tries)
time.sleep(1)
else:
                # Success
return namelist
raise e
def getRARObj( self ):
tries = 0
while tries < 7:
try:
tries = tries+1
rarc = UnRAR2.RarFile( self.path )
except (OSError, IOError) as e:
print >> sys.stderr, u"getRARObj(): [{0}] {1} attempt#{2}".format(str(e), self.path, tries)
time.sleep(1)
else:
                # Success
return rarc
raise e
#------------------------------------------
# Folder implementation
class FolderArchiver:
def __init__( self, path ):
self.path = path
self.comment_file_name = "ComicTaggerFolderComment.txt"
def getArchiveComment( self ):
return self.readArchiveFile( self.comment_file_name )
def setArchiveComment( self, comment ):
return self.writeArchiveFile( self.comment_file_name, comment )
def readArchiveFile( self, archive_file ):
data = ""
fname = os.path.join( self.path, archive_file )
try:
            with open( fname, 'rb' ) as f:
                data = f.read()
except IOError as e:
pass
return data
def writeArchiveFile( self, archive_file, data ):
fname = os.path.join( self.path, archive_file )
try:
            with open(fname, 'w+') as f:
                f.write( data )
except:
return False
else:
return True
def removeArchiveFile( self, archive_file ):
fname = os.path.join( self.path, archive_file )
try:
os.remove( fname )
except:
return False
else:
return True
def getArchiveFilenameList( self ):
return self.listFiles( self.path )
def listFiles( self, folder ):
itemlist = list()
for item in os.listdir( folder ):
itemlist.append( item )
            if os.path.isdir( os.path.join( folder, item ) ):
itemlist.extend( self.listFiles( os.path.join( folder, item ) ))
return itemlist
#------------------------------------------
# Unknown implementation
class UnknownArchiver:
def __init__( self, path ):
self.path = path
def getArchiveComment( self ):
return ""
def setArchiveComment( self, comment ):
return False
    def readArchiveFile( self, archive_file ):
return ""
def writeArchiveFile( self, archive_file, data ):
return False
def removeArchiveFile( self, archive_file ):
return False
def getArchiveFilenameList( self ):
return []
#------------------------------------------------------------------
class ComicArchive:
logo_data = None
class ArchiveType:
Zip, Rar, Folder, Unknown = range(4)
def __init__( self, path, rar_exe_path=None, default_image_path=None ):
self.path = path
self.rar_exe_path = rar_exe_path
self.ci_xml_filename = 'ComicInfo.xml'
self.comet_default_filename = 'CoMet.xml'
self.resetCache()
self.default_image_path = default_image_path
# Use file extension to decide which archive test we do first
ext = os.path.splitext(path)[1].lower()
self.archive_type = self.ArchiveType.Unknown
self.archiver = UnknownArchiver( self.path )
if ext == ".cbr" or ext == ".rar":
if self.rarTest():
self.archive_type = self.ArchiveType.Rar
self.archiver = RarArchiver( self.path, rar_exe_path=self.rar_exe_path )
elif self.zipTest():
self.archive_type = self.ArchiveType.Zip
self.archiver = ZipArchiver( self.path )
else:
if self.zipTest():
self.archive_type = self.ArchiveType.Zip
self.archiver = ZipArchiver( self.path )
elif self.rarTest():
self.archive_type = self.ArchiveType.Rar
self.archiver = RarArchiver( self.path, rar_exe_path=self.rar_exe_path )
if ComicArchive.logo_data is None:
#fname = ComicTaggerSettings.getGraphic('nocover.png')
fname = self.default_image_path
with open(fname, 'rb') as fd:
ComicArchive.logo_data = fd.read()
# Clears the cached data
def resetCache( self ):
self.has_cix = None
self.has_cbi = None
self.has_comet = None
self.comet_filename = None
self.page_count = None
self.page_list = None
self.cix_md = None
self.cbi_md = None
self.comet_md = None
def loadCache( self, style_list ):
for style in style_list:
self.readMetadata(style)
def rename( self, path ):
self.path = path
self.archiver.path = path
def zipTest( self ):
return zipfile.is_zipfile( self.path )
def rarTest( self ):
try:
rarc = UnRAR2.RarFile( self.path )
except: # InvalidRARArchive:
return False
else:
return True
def isZip( self ):
return self.archive_type == self.ArchiveType.Zip
def isRar( self ):
return self.archive_type == self.ArchiveType.Rar
def isFolder( self ):
return self.archive_type == self.ArchiveType.Folder
def isWritable( self, check_rar_status=True ):
if self.archive_type == self.ArchiveType.Unknown :
return False
elif check_rar_status and self.isRar() and self.rar_exe_path is None:
return False
elif not os.access(self.path, os.W_OK):
return False
elif ((self.archive_type != self.ArchiveType.Folder) and
(not os.access( os.path.dirname( os.path.abspath(self.path)), os.W_OK ))):
return False
return True
def isWritableForStyle( self, data_style ):
if self.isRar() and data_style == MetaDataStyle.CBI:
return False
return self.isWritable()
def seemsToBeAComicArchive( self ):
# Do we even care about extensions??
ext = os.path.splitext(self.path)[1].lower()
if (
( self.isZip() or self.isRar() ) #or self.isFolder() )
and
( self.getNumberOfPages() > 0)
):
return True
else:
return False
def readMetadata( self, style ):
if style == MetaDataStyle.CIX:
return self.readCIX()
elif style == MetaDataStyle.CBI:
return self.readCBI()
elif style == MetaDataStyle.COMET:
return self.readCoMet()
else:
return GenericMetadata()
def writeMetadata( self, metadata, style ):
retcode = None
if style == MetaDataStyle.CIX:
retcode = self.writeCIX( metadata )
elif style == MetaDataStyle.CBI:
retcode = self.writeCBI( metadata )
elif style == MetaDataStyle.COMET:
retcode = self.writeCoMet( metadata )
return retcode
def hasMetadata( self, style ):
if style == MetaDataStyle.CIX:
return self.hasCIX()
elif style == MetaDataStyle.CBI:
return self.hasCBI()
elif style == MetaDataStyle.COMET:
return self.hasCoMet()
else:
return False
def removeMetadata( self, style ):
retcode = True
if style == MetaDataStyle.CIX:
retcode = self.removeCIX()
elif style == MetaDataStyle.CBI:
retcode = self.removeCBI()
elif style == MetaDataStyle.COMET:
retcode = self.removeCoMet()
return retcode
def getPage( self, index ):
image_data = None
filename = self.getPageName( index )
if filename is not None:
try:
image_data = self.archiver.readArchiveFile( filename )
except IOError:
print >> sys.stderr, u"Error reading in page. Substituting logo page."
image_data = ComicArchive.logo_data
return image_data
def getPageName( self, index ):
if index is None:
return None
page_list = self.getPageNameList()
num_pages = len( page_list )
if num_pages == 0 or index >= num_pages:
return None
return page_list[index]
def getScannerPageIndex( self ):
scanner_page_index = None
#make a guess at the scanner page
name_list = self.getPageNameList()
count = self.getNumberOfPages()
#too few pages to really know
if count < 5:
return None
# count the length of every filename, and count occurences
length_buckets = dict()
for name in name_list:
fname = os.path.split(name)[1]
length = len(fname)
if length_buckets.has_key( length ):
length_buckets[ length ] += 1
else:
length_buckets[ length ] = 1
# sort by most common
sorted_buckets = sorted(length_buckets.iteritems(), key=lambda (k,v): (v,k), reverse=True)
# statistical mode occurence is first
mode_length = sorted_buckets[0][0]
# we are only going to consider the final image file:
final_name = os.path.split(name_list[count-1])[1]
common_length_list = list()
for name in name_list:
if len(os.path.split(name)[1]) == mode_length:
common_length_list.append( os.path.split(name)[1] )
prefix = os.path.commonprefix(common_length_list)
if mode_length <= 7 and prefix == "":
#probably all numbers
if len(final_name) > mode_length:
scanner_page_index = count-1
# see if the last page doesn't start with the same prefix as most others
elif not final_name.startswith(prefix):
scanner_page_index = count-1
return scanner_page_index
def getPageNameList( self , sort_list=True):
if self.page_list is None:
# get the list file names in the archive, and sort
files = self.archiver.getArchiveFilenameList()
# seems like some archive creators are on Windows, and don't know about case-sensitivity!
if sort_list:
def keyfunc(k):
#hack to account for some weird scanner ID pages
#basename=os.path.split(k)[1]
#if basename < '0':
# k = os.path.join(os.path.split(k)[0], "z" + basename)
return k.lower()
files = natsorted(files, key=keyfunc,signed=False)
# make a sub-list of image files
self.page_list = []
for name in files:
if ( name[-4:].lower() in [ ".jpg", "jpeg", ".png", ".gif", "webp" ] and os.path.basename(name)[0] != "." ):
self.page_list.append(name)
return self.page_list
def getNumberOfPages( self ):
if self.page_count is None:
self.page_count = len( self.getPageNameList( ) )
return self.page_count
def readCBI( self ):
if self.cbi_md is None:
raw_cbi = self.readRawCBI()
if raw_cbi is None:
self.cbi_md = GenericMetadata()
else:
self.cbi_md = ComicBookInfo().metadataFromString( raw_cbi )
self.cbi_md.setDefaultPageList( self.getNumberOfPages() )
return self.cbi_md
def readRawCBI( self ):
if ( not self.hasCBI() ):
return None
return self.archiver.getArchiveComment()
def hasCBI(self):
if self.has_cbi is None:
#if ( not ( self.isZip() or self.isRar()) or not self.seemsToBeAComicArchive() ):
if not self.seemsToBeAComicArchive():
self.has_cbi = False
else:
comment = self.archiver.getArchiveComment()
self.has_cbi = ComicBookInfo().validateString( comment )
return self.has_cbi
def writeCBI( self, metadata ):
if metadata is not None:
self.applyArchiveInfoToMetadata( metadata )
cbi_string = ComicBookInfo().stringFromMetadata( metadata )
write_success = self.archiver.setArchiveComment( cbi_string )
if write_success:
self.has_cbi = True
self.cbi_md = metadata
self.resetCache()
return write_success
else:
return False
def removeCBI( self ):
if self.hasCBI():
write_success = self.archiver.setArchiveComment( "" )
if write_success:
self.has_cbi = False
self.cbi_md = None
self.resetCache()
return write_success
return True
def readCIX( self ):
if self.cix_md is None:
raw_cix = self.readRawCIX()
if raw_cix is None or raw_cix == "":
self.cix_md = GenericMetadata()
else:
self.cix_md = ComicInfoXml().metadataFromString( raw_cix )
#validate the existing page list (make sure count is correct)
if len ( self.cix_md.pages ) != 0 :
if len ( self.cix_md.pages ) != self.getNumberOfPages():
# pages array doesn't match the actual number of images we're seeing
# in the archive, so discard the data
self.cix_md.pages = []
if len( self.cix_md.pages ) == 0:
self.cix_md.setDefaultPageList( self.getNumberOfPages() )
return self.cix_md
def readRawCIX( self ):
if not self.hasCIX():
return None
try:
raw_cix = self.archiver.readArchiveFile( self.ci_xml_filename )
except IOError:
            print >> sys.stderr, u"Error reading in raw CIX!"
raw_cix = ""
return raw_cix
def writeCIX(self, metadata):
if metadata is not None:
self.applyArchiveInfoToMetadata( metadata, calc_page_sizes=True )
cix_string = ComicInfoXml().stringFromMetadata( metadata )
write_success = self.archiver.writeArchiveFile( self.ci_xml_filename, cix_string )
if write_success:
self.has_cix = True
self.cix_md = metadata
self.resetCache()
return write_success
else:
return False
def removeCIX( self ):
if self.hasCIX():
write_success = self.archiver.removeArchiveFile( self.ci_xml_filename )
if write_success:
self.has_cix = False
self.cix_md = None
self.resetCache()
return write_success
return True
def hasCIX(self):
if self.has_cix is None:
if not self.seemsToBeAComicArchive():
self.has_cix = False
elif self.ci_xml_filename in self.archiver.getArchiveFilenameList():
self.has_cix = True
else:
self.has_cix = False
return self.has_cix
def readCoMet( self ):
if self.comet_md is None:
raw_comet = self.readRawCoMet()
if raw_comet is None or raw_comet == "":
self.comet_md = GenericMetadata()
else:
self.comet_md = CoMet().metadataFromString( raw_comet )
self.comet_md.setDefaultPageList( self.getNumberOfPages() )
#use the coverImage value from the comet_data to mark the cover in this struct
# walk through list of images in file, and find the matching one for md.coverImage
# need to remove the existing one in the default
if self.comet_md.coverImage is not None:
cover_idx = 0
for idx,f in enumerate(self.getPageNameList()):
if self.comet_md.coverImage == f:
cover_idx = idx
break
if cover_idx != 0:
del (self.comet_md.pages[0]['Type'] )
self.comet_md.pages[ cover_idx ]['Type'] = PageType.FrontCover
return self.comet_md
def readRawCoMet( self ):
if not self.hasCoMet():
print >> sys.stderr, self.path, "doesn't have CoMet data!"
return None
try:
raw_comet = self.archiver.readArchiveFile( self.comet_filename )
except IOError:
print >> sys.stderr, u"Error reading in raw CoMet!"
raw_comet = ""
return raw_comet
def writeCoMet(self, metadata):
if metadata is not None:
if not self.hasCoMet():
self.comet_filename = self.comet_default_filename
self.applyArchiveInfoToMetadata( metadata )
# Set the coverImage value, if it's not the first page
cover_idx = int(metadata.getCoverPageIndexList()[0])
if cover_idx != 0:
metadata.coverImage = self.getPageName( cover_idx )
comet_string = CoMet().stringFromMetadata( metadata )
write_success = self.archiver.writeArchiveFile( self.comet_filename, comet_string )
if write_success:
self.has_comet = True
self.comet_md = metadata
self.resetCache()
return write_success
else:
return False
def removeCoMet( self ):
if self.hasCoMet():
write_success = self.archiver.removeArchiveFile( self.comet_filename )
if write_success:
self.has_comet = False
self.comet_md = None
self.resetCache()
return write_success
return True
def hasCoMet(self):
if self.has_comet is None:
self.has_comet = False
if not self.seemsToBeAComicArchive():
return self.has_comet
#look at all xml files in root, and search for CoMet data, get first
for n in self.archiver.getArchiveFilenameList():
if ( os.path.dirname(n) == "" and
os.path.splitext(n)[1].lower() == '.xml'):
# read in XML file, and validate it
try:
data = self.archiver.readArchiveFile( n )
except:
data = ""
print >> sys.stderr, u"Error reading in Comet XML for validation!"
if CoMet().validateString( data ):
# since we found it, save it!
self.comet_filename = n
self.has_comet = True
break
return self.has_comet
def applyArchiveInfoToMetadata( self, md, calc_page_sizes=False):
md.pageCount = self.getNumberOfPages()
if calc_page_sizes:
for p in md.pages:
idx = int( p['Image'] )
if pil_available:
if 'ImageSize' not in p or 'ImageHeight' not in p or 'ImageWidth' not in p:
data = self.getPage( idx )
if data is not None:
try:
im = Image.open(StringIO.StringIO(data))
w,h = im.size
p['ImageSize'] = str(len(data))
p['ImageHeight'] = str(h)
p['ImageWidth'] = str(w)
except IOError:
p['ImageSize'] = str(len(data))
else:
if 'ImageSize' not in p:
data = self.getPage( idx )
p['ImageSize'] = str(len(data))
def metadataFromFilename( self , parse_scan_info=True):
metadata = GenericMetadata()
fnp = FileNameParser()
fnp.parseFilename( self.path )
if fnp.issue != "":
metadata.issue = fnp.issue
if fnp.series != "":
metadata.series = fnp.series
if fnp.volume != "":
metadata.volume = fnp.volume
if fnp.year != "":
metadata.year = fnp.year
if fnp.issue_count != "":
metadata.issueCount = fnp.issue_count
if parse_scan_info:
if fnp.remainder != "":
metadata.scanInfo = fnp.remainder
metadata.isEmpty = False
return metadata
def exportAsZip( self, zipfilename ):
if self.archive_type == self.ArchiveType.Zip:
# nothing to do, we're already a zip
return True
zip_archiver = ZipArchiver( zipfilename )
return zip_archiver.copyFromArchive( self.archiver )
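#------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): one way a caller might open an archive and pull out its ComicRack
# (CIX) metadata. "example.cbz" and "nocover.png" are placeholder paths that
# must point at real files before running, and the GenericMetadata attributes
# used below (series, issue) are assumed from fields referenced elsewhere in
# this file.
def _example_read_cix_metadata():
    ca = ComicArchive( "example.cbz", rar_exe_path=None,
                       default_image_path="nocover.png" )
    if ca.seemsToBeAComicArchive() and ca.hasMetadata( MetaDataStyle.CIX ):
        md = ca.readMetadata( MetaDataStyle.CIX )
        return md.series, md.issue, ca.getNumberOfPages()
    return None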
|
|
# -*- coding: utf-8 -*-
import cherrypy
import datetime
import dateutil.parser
import errno
import json
import os
import pytz
import re
import string
import six
import girder
import girder.events
try:
from random import SystemRandom
random = SystemRandom()
random.random() # potentially raises NotImplementedError
except NotImplementedError:
girder.logprint.warning(
'WARNING: using non-cryptographically secure PRNG.')
import random
def parseTimestamp(x, naive=True):
"""
Parse a datetime string using the python-dateutil package.
If no timezone information is included, assume UTC. If timezone information
is included, convert to UTC.
If naive is True (the default), drop the timezone information such that a
naive datetime is returned.
"""
dt = dateutil.parser.parse(x)
if dt.tzinfo:
dt = dt.astimezone(pytz.utc).replace(tzinfo=None)
if naive:
return dt
else:
return pytz.utc.localize(dt)
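# Illustrative sketch (added for documentation, not part of Girder's API):
# shows the difference between the naive (default) and timezone-aware values
# returned by parseTimestamp for the same timestamp string.
def _exampleParseTimestamp():
    naive = parseTimestamp('2017-06-01T12:00:00-04:00')
    aware = parseTimestamp('2017-06-01T12:00:00-04:00', naive=False)
    # Both represent 16:00 UTC; only `aware` carries tzinfo.
    assert naive == datetime.datetime(2017, 6, 1, 16, 0, 0)
    assert naive.tzinfo is None and aware.tzinfo is not None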
def genToken(length=64):
"""
Use this utility function to generate a random string of a desired length.
"""
return ''.join(random.choice(string.ascii_letters + string.digits)
for _ in range(length))
def camelcase(value):
"""
Convert a module name or string with underscores and periods to camel case.
:param value: the string to convert
:type value: str
:returns: the value converted to camel case.
"""
return ''.join(x.capitalize() if x else '_' for x in
re.split('[._]+', value))
def mkdir(path, mode=0o777, recurse=True, existOk=True):
"""
Create a new directory or ensure a directory already exists.
:param path: The directory to create.
:type path: str
:param mode: The mode (permission mask) prior to applying umask.
:type mode: int
:param recurse: Whether intermediate missing dirs should be created.
:type recurse: bool
:param existOk: Set to True to suppress the error if the dir exists.
:type existOk: bool
"""
method = os.makedirs if recurse else os.mkdir
try:
method(path, mode)
except OSError as exc:
if existOk and exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def toBool(val):
"""
Coerce a string value to a bool. Meant to be used to parse HTTP
parameters, which are always sent as strings. The following string
values will be interpreted as True:
- ``'true'``
- ``'on'``
- ``'1'``
- ``'yes'``
All other strings will be interpreted as False. If the given param
is not passed at all, returns the value specified by the default arg.
This function is case-insensitive.
:param val: The value to coerce to a bool.
:type val: str
"""
if isinstance(val, bool):
return val
return val.lower().strip() in ('true', 'on', '1', 'yes')
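# Brief illustration (added, not original Girder code): toBool accepts the
# documented truthy strings case-insensitively and passes booleans through
# unchanged.
def _exampleToBool():
    assert toBool('Yes') is True
    assert toBool(' ON ') is True
    assert toBool('0') is False
    assert toBool(False) is False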
class JsonEncoder(json.JSONEncoder):
"""
This extends the standard json.JSONEncoder to allow for more types to be
sensibly serialized. This is used in Girder's REST layer to serialize
route return values when JSON is requested.
"""
def default(self, obj):
event = girder.events.trigger('rest.json_encode', obj)
if len(event.responses):
return event.responses[-1]
if isinstance(obj, set):
return tuple(obj)
elif isinstance(obj, datetime.datetime):
return obj.replace(tzinfo=pytz.UTC).isoformat()
return str(obj)
class RequestBodyStream(object):
"""
Wraps a cherrypy request body into a more abstract file-like object.
"""
_ITER_CHUNK_LEN = 65536
def __init__(self, stream, size=None):
self.stream = stream
self.size = size
def read(self, *args, **kwargs):
return self.stream.read(*args, **kwargs)
def close(self, *args, **kwargs):
pass
def __iter__(self):
return self
def __len__(self):
return self.getSize()
def __next__(self):
data = self.read(self._ITER_CHUNK_LEN)
if not data:
raise StopIteration
return data
def next(self):
return self.__next__()
def getSize(self):
"""
Returns the size of the body data wrapped by this class. For
multipart encoding, this is the size of the part. For sending
as the body, this is the Content-Length header.
"""
if self.size is not None:
return self.size
return int(cherrypy.request.headers['Content-Length'])
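# Hedged sketch (added): RequestBodyStream only requires a file-like object
# with a read() method, so an in-memory stream can stand in for the cherrypy
# request body when experimenting. Passing `size` avoids the Content-Length
# header lookup in getSize().
def _exampleRequestBodyStream():
    raw = six.BytesIO(b'x' * 100000)
    body = RequestBodyStream(raw, size=100000)
    chunks = [chunk for chunk in body]  # read in _ITER_CHUNK_LEN pieces
    assert len(body) == 100000
    assert sum(len(c) for c in chunks) == 100000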
def optionalArgumentDecorator(baseDecorator):
"""
This decorator can be applied to other decorators, allowing the target decorator to be used
either with or without arguments.
The target decorator is expected to take at least 1 argument: the function to be wrapped. If
additional arguments are provided by the final implementer of the target decorator, they will
be passed to the target decorator as additional arguments.
For example, this may be used as:
.. code-block:: python
@optionalArgumentDecorator
def myDec(fun, someArg=None):
...
@myDec
def a(...):
...
@myDec()
def a(...):
...
@myDec(5)
def a(...):
...
@myDec(someArg=5)
def a(...):
...
:param baseDecorator: The target decorator.
:type baseDecorator: callable
"""
@six.wraps(baseDecorator)
def normalizedArgumentDecorator(*args, **kwargs):
if len(args) == 1 and callable(args[0]): # Applied as a raw decorator
decoratedFunction = args[0]
# baseDecorator must wrap and return decoratedFunction
return baseDecorator(decoratedFunction)
else: # Applied as a argument-containing decorator
# Decoration will occur in two passes:
# * Now, the decorator arguments are passed, and a new decorator should be returned
# * Afterwards, the new decorator will be called to decorate the decorated function
decoratorArgs = args
decoratorKwargs = kwargs
def partiallyAppliedDecorator(decoratedFunction):
return baseDecorator(decoratedFunction, *decoratorArgs, **decoratorKwargs)
return partiallyAppliedDecorator
return normalizedArgumentDecorator
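# Hedged worked example (added for illustration): a tiny decorator wrapped by
# optionalArgumentDecorator so it can be applied either bare or with keyword
# arguments. The names `_tag`, `_exampleTaggedBare` and `_exampleTaggedWithArgs`
# are hypothetical and not part of Girder.
@optionalArgumentDecorator
def _tag(fun, label='default'):
    fun.label = label
    return fun

@_tag
def _exampleTaggedBare():
    pass  # _exampleTaggedBare.label == 'default'

@_tag(label='custom')
def _exampleTaggedWithArgs():
    pass  # _exampleTaggedWithArgs.label == 'custom'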
|
|
#!/usr/bin/env python
###############################################################################
# A log search utility. #
# Includes some advanced options, including ranges, ORs, less than #
# and greater than. #
# #
# options: i (ignore case) #
# w (ignore whitespace) #
# #
# #
# ranges: A single number for an exact number of matches ex: 1 #
# A dash in between numbers for a range of matches ex: 2-4 #
# A pipe in between numbers for a match of either ex: 1|3 #
# A plus sign for at least that many matches ex: 5+ #
# A minus sign for at most that many matches ex: 5- #
# A zero indicates the line should not appear ex: 0 #
# #
# A test case file should be in the form: #
# <options>:<occurrences>:<logfile>:<string> #
# However, <options> may be empty if none are used. Still need a : #
# #
# Intended for use with python 2.6 and 2.7 #
# #
# Author: Seth Hoenig 2013 #
###############################################################################
from __future__ import print_function
from __future__ import with_statement
import optparse
import sys
import os
import re
TRACE=None
def log(msg):
if TRACE:
print(msg, file=TRACE)
TEST_RE = re.compile('''
([iw]*) # Options
:
([\d]+ | [\d]+[+-] | [\d]+[\|-][\d]+) # Value, Min/Max, Range/Or
:
([^:\s]+) # Filename
:
(.+) # Text
'''
, re.DOTALL | re.VERBOSE)
def parse_test(line):
'''
Parse a single test case out of a string using the TEST_RE regex
up above to do the dirty work.
line -- the raw string which contains the test case
Return a dictionary of the form:
        { 'options' : <options>,
          'occurs'  : <occurrences>,
          'logfile' : <the log file to search>,
          'string'  : <the string to search for> }
'''
m = TEST_RE.match(line.strip())
if m is None:
log('Invalid Test %s' % line)
raise Exception('Invalid Test %s' % line)
test = {'options': m.group(1),
'occurs': m.group(2),
'logfile': m.group(3),
'string': m.group(4)}
return test
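# Illustrative sketch (added, not part of the original utility): parse_test()
# applied to a sample line. 'server.log' and the search text are placeholders.
def _example_parse_test():
    test = parse_test('iw:2-4:server.log:connection refused')
    assert test == {'options': 'iw',
                    'occurs': '2-4',
                    'logfile': 'server.log',
                    'string': 'connection refused'}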
def parse_comparetest_file(ifname):
'''
Parse the .comparetest file which should look something like,
<options>:<occurrences>:<logfile>:<string>
    Each option is a single character; multiple options are concatenated together.
    Occurrences can be a single number, a range, or a set of numbers divided by
    '|' to indicate an OR.
    This function parses the .comparetest file and returns a list of dicts,
    each describing a test. For example, [ {'options':'iw',
                                            'occurs':'1|3',
                                            'logfile':'filename.txt',
                                            'string':'foo bar baz!'}, ]
ifname -- The input file name containing test case definitions
'''
tests = []
try:
with open(ifname, 'r') as infile:
for line in infile.readlines():
line = line.strip()
if line=='' or line[0]=='#':
continue
gs = parse_test(line)
tests += [gs]
except Exception as e:
log('Error reading comparetest file')
log(str(e))
sys.exit(1)
return tests
def build_re(options, string):
'''
Build the final regex to search for, modifying according to
options.
options -- '', 'i', 'w', 'iw' are currently supported.
i - ignore case
w - ignore whitespace
Returns the compiled regex ready to use (find, search, etc.)
'''
if 'w' in options:
string = string.replace(' ', '[\s]*')
string = string.replace('\t', '[\s]*')
string = string.replace('\n', '[\s]*')
string = string.replace('\r\n', '[\s]*')
if 'i' in options:
return re.compile(string, re.IGNORECASE)
else:
return re.compile(string)
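# Illustrative sketch (added): with the 'w' option the spaces in the search
# string become flexible whitespace, and 'i' drops case sensitivity.
def _example_build_re():
    r = build_re('iw', 'Connection refused')
    assert r.search('CONNECTION\t\trefused') is not None
    assert r.search('connectionrefused') is not None  # [\s]* also matches nothing
    assert build_re('', 'Connection refused').search('connection refused') is None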
def parse_range(test):
'''
Parse the range condition of a test case.
test -- the test case which contains a string description of the
range validation.
Return ('range', low, high) for a range between low and high
('or', a, b) for one of A or B
('value', n) for exactly N matches
('atleast', n) for at least N matches
('atmost', n) for at most N matches
'''
x = test['occurs']
if '-' in x:
sp = x.split('-')
if sp[1]:
a, b = int(sp[0]), int(sp[1])
if a > b:
a, b = b, a
return ('range', a, b)
else:
return ('atmost', int(sp[0]))
elif '|' in x:
sp = x.split('|')
return ('or', int(sp[0]), int(sp[1]))
elif '+' in x:
sp = x.split('+')
return ('atleast', int(sp[0]))
else:
return ('value', int(x))
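# Illustrative sketch (added): the tuple produced for each supported
# <occurrences> syntax described in the header comment.
def _example_parse_range():
    assert parse_range({'occurs': '3'}) == ('value', 3)
    assert parse_range({'occurs': '2-4'}) == ('range', 2, 4)
    assert parse_range({'occurs': '1|3'}) == ('or', 1, 3)
    assert parse_range({'occurs': '5+'}) == ('atleast', 5)
    assert parse_range({'occurs': '5-'}) == ('atmost', 5)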
def verify_in_range(n, allowed):
'''
Checks to see if n is accepted by the range of the test case.
    n -- the actual number of occurrences of a string found
allowed -- a tuple of two or three arguments, first is one of,
'atmost', 'atleast', 'or', 'value', 'range'. The remaining arguments
are values or bounds to which n is compared.
'''
t = allowed[0]
a = int(allowed[1])
if len(allowed) > 2:
b = int(allowed[2])
if t == 'atmost':
return n <= a
elif t == 'range':
return n >= a and n <= b
elif t == 'or':
return n == a or n == b
elif t == 'atleast':
return n >= a
else: # t == 'value'
return n == a
def run_test(test):
'''
Runs a single test case.
test -- A dict containing options, occurs, logfile, and string
Returns a tuple of two arguments, True or False if the test passed
or failed, followed by an error string if any.
'''
with open(test['logfile'], 'r') as f:
content = f.read()
r = build_re(test['options'], test['string'])
n = len(r.findall(content))
rng = parse_range(test)
if verify_in_range(n, rng):
return (True, '')
else:
return (False, 'not in range') # todo give better error
##############
# Start Here #
##############
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('-i', '--input', dest='inputfilename',
default=None, action='store', type='string',
help='The .comparetest file containing lines to search for')
parser.add_option('-o', '--output', dest='logfilename',
default=None, action='store', type='string',
help='Send output of searchLogs.py to specified file')
(options, args) = parser.parse_args()
if options.logfilename:
if options.logfilename == 'stdout':
TRACE=sys.stdout
elif options.logfilename == 'stderr':
TRACE = sys.stderr
else:
TRACE = open(options.logfilename, 'w')
else:
TRACE=sys.stdout
log('-- SearchLogs.py --')
if not options.inputfilename:
log('compare test file must be specified with -i <file>')
sys.exit(1)
tests = parse_comparetest_file(options.inputfilename)
overall = True
for test in tests:
result = run_test(test)
if not result[0]:
overall = False
log('Failed test, %r' % test)
if overall:
log('PASSED')
|
|
"""Implement SeshetBot as subclass of ircutils3.bot.SimpleBot."""
import logging
import os
from io import StringIO
from datetime import datetime
from ircutils3 import bot, client
from .utils import KVStore, Storage, IRCstr
class SeshetUser(object):
"""Represent one IRC user."""
def __init__(self, nick, user, host):
logging.debug("Building new SeshetUser, %s", nick)
self.nick = IRCstr(nick)
self.user = user
self.host = host
self.channels = []
def join(self, channel):
"""Add this user to the channel's user list and add the channel to this
user's list of joined channels.
"""
if channel not in self.channels:
channel.users.add(self.nick)
self.channels.append(channel)
def part(self, channel):
"""Remove this user from the channel's user list and remove the channel
from this user's list of joined channels.
"""
if channel in self.channels:
channel.users.remove(self.nick)
self.channels.remove(channel)
def quit(self):
"""Remove this user from all channels and reinitialize the user's list
of joined channels.
"""
for c in self.channels:
c.users.remove(self.nick)
self.channels = []
def change_nick(self, nick):
"""Update this user's nick in all joined channels."""
old_nick = self.nick
self.nick = IRCstr(nick)
for c in self.channels:
c.users.remove(old_nick)
c.users.add(self.nick)
def __str__(self):
return "{}!{}@{}".format(self.nick, self.user, self.host)
def __repr__(self):
temp = "<SeshetUser {}!{}@{} in channels {}>"
return temp.format(self.nick, self.user, self.host, self.channels)
class SeshetChannel(object):
"""Represent one IRC channel."""
def __init__(self, name, users, log_size=100):
self.name = IRCstr(name)
self.users = users
self.message_log = []
self._log_size = log_size
def log_message(self, user, message):
"""Log a channel message.
This log acts as a sort of cache so that recent activity can be searched
by the bot and command modules without querying the database.
"""
if isinstance(user, SeshetUser):
user = user.nick
elif not isinstance(user, IRCstr):
user = IRCstr(user)
time = datetime.utcnow()
self.message_log.append((time, user, message))
while len(self.message_log) > self._log_size:
del self.message_log[0]
def __str__(self):
return str(self.name)
def __repr__(self):
temp = "<SeshetChannel {} with {} users>"
return temp.format(self.name, len(self.users))
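# Hedged sketch (added for illustration, not part of seshet): the in-memory
# message log behaves like a bounded ring buffer, discarding the oldest
# entries once log_size is exceeded.
def _example_channel_log():
    chan = SeshetChannel("#example", set(), log_size=2)
    chan.log_message("alice", "first")
    chan.log_message("bob", "second")
    chan.log_message("carol", "third")
    # Only the two most recent messages remain.
    assert [m[2] for m in chan.message_log] == ["second", "third"]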
class SeshetBot(bot.SimpleBot):
"""Extend `ircutils3.bot.SimpleBot`.
Each instance represents one bot, connected to one IRC network.
Each instance should have its own database, but can make use of
any shared command modules. The modules may have to be added to
the bot's database if the bot wasn't created using the
`seshet --config` or `seshet --new` commands.
"""
def __init__(self, nick='Seshet', db=None, debug_file=None, verbosity=99):
"""Extend `ircutils3.bot.SimpleBot.__init__()`.
Keyword argument `db` is required for running commands other
than core commands and should be an instance of pydal.DAL.
"""
# initialize debug logging
if debug_file is None:
logging.basicConfig(level=verbosity)
else:
logging.basicConfig(filename=os.path.expanduser(debug_file),
level=verbosity
)
logging.debug("Running `SimpleBot.__init__`...")
bot.SimpleBot.__init__(self, nick, auto_handle=False)
# define defaults
self.session = Storage()
self.log_file = 'seshet.log'
self.log_formats = {}
self.locale = {}
self.channels = {}
self.users = {}
if db is None:
# no database connection, only log to file and run
# core command modules
logging.info("No db, IRC logging will be done to file")
self.log = self._log_to_file
self.run_modules = self._run_only_core
# dummy KV store since no db
self.storage = Storage()
else:
logging.info("Using database %s", db)
self.db = db
self.storage = KVStore(db)
# Add default handlers
logging.debug("Adding default handlers...")
self.events["any"].add_handler(client._update_client_info)
self.events["ctcp_version"].add_handler(client._reply_to_ctcp_version)
self.events["name_reply"].add_handler(_add_channel_names)
def log(self, etype, source, msg='', target='', hostmask='', params=''):
"""Log an event in the database.
Required:
`etype` - event type. One of 'PRIVMSG', 'QUIT', 'PART', 'ACTION',
'NICK', 'JOIN', 'MODE', 'KICK', 'CTCP', or 'ERROR'. Enforced
by database model.
`source` - source of the event. Usually a user. For NICK events,
the user's original nickname. For ERROR events, this should be
the exception name, the module name, and the line number. The
full traceback will be logged in `msg`.
Optional:
`msg` - a message associated with the event.
`target` - the target the message was directed to. For MODE and KICK
events, this will be the user the event was performed on. For
            NICK events, this will be the channel the event was seen in (an event
will be created for each channel the user is seen by the bot in).
`hostmask` - a hostmask associated with the event.
        `params` - any additional parameters associated with the event, such as
a new nickname (for NICK events), mode switches (for MODE events),
or a dump of local variables (for ERROR events).
"""
self.db.event_log.insert(event_type=etype,
event_time=datetime.utcnow(),
source=source,
target=target,
message=msg,
host=hostmask,
params=params,
)
self.db.commit()
def run_modules(self, e):
# grab local pointer to self.db for faster lookup
db = self.db
# get initial list of modules handling this event type
event_types = db.modules.event_types
mod_enabled = db.modules.enabled
init_mods = db(event_types.contains(e.command) & mod_enabled).select()
logging.debug(("Running modules for {} command. "
"Initial module list:\n{}").format(e.command, init_mods)
)
if e.command in ('PRIVMSG', 'CTCP_ACTION', 'NOTICE'):
# narrow down list of modules to run based on event parameters
# lowercase for non-caps comparisons
m_low = e.message.lower()
bot_n = self.nickname.lower()
bot_u = self.user.lower()
bot_r = self.real_name.lower()
# indicates whether or not name has already been stripped from
# original message
for_us = False
if e.target.startswith('#'):
chan_msg = True
chan_nicks = self.channels[e.target].users
else:
chan_msg = False
fin_mods = list() # final list of modules to run
for mod in init_mods:
if e.source in mod.whitelist:
fin_mods.append(mod)
elif e.source in mod.blacklist:
pass
if self.nickname in mod.enicks:
if e.target == self.nickname or for_us:
fin_mods.append(mod)
elif m_low.startswith(bot_n):
# strip nickname from original message so modules can
# process it correctly
e.message = e.message[len(bot_n):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_u):
e.message = e.message[len(bot_u):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_r):
e.message = e.message[len(bot_r):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
if chan_msg:
if e.target in mod.dchannels:
pass
elif set(mod.dnicks) & chan_nicks:
pass
elif e.target in mod.echannels:
fin_mods.append(mod)
elif set(mod.enicks) & chan_nicks:
fin_mods.append(mod)
argv = m_low.split()
for mod in fin_mods:
# run each module
m = __import__(mod.name) # TODO: use importlib
# TODO: add authentication and rate limiting
for cmd, fun in m.commands.items():
if (mod.cmd_prefix + cmd) == argv[0]:
fun(self, e)
break
def get_unique_users(self, chan):
"""Get the set of users that are unique to the given channel (i.e. not
present in any other channel the bot is in).
"""
chan = IRCstr(chan)
these_users = self.channels[chan].users
other_users = set()
for c in self.channels.values():
if c.name != chan:
other_users |= c.users
return these_users - other_users
def on_message(self, e):
self.log('privmsg',
source=e.source,
msg=e.message,
target=e.target,
)
if e.target in self.channels:
# TODO: move this to self.log() so we don't have to get time twice?
self.channels[e.target].log_message(e.source, e.message)
self.run_modules(e)
def on_join(self, e):
self.log('join',
source=e.source,
target=e.target,
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
if e.source != self.nickname:
if nick not in self.users:
self.users[nick] = SeshetUser(nick, e.user, e.host)
self.users[nick].join(self.channels[chan])
self.run_modules(e)
def on_part(self, e):
self.log('part',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params[1:]),
target=e.target,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
            for u in list(self.users.values()):  # copy, since entries may be deleted
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_quit(self, e):
nick = IRCstr(e.source)
for chan in self.channels.values():
if nick in chan.users:
self.log('quit',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params),
target=chan.name,
)
self.users[nick].quit()
del self.users[nick]
def on_disconnect(self, e):
pass
def on_kick(self, e):
self.log('kick',
source=e.source,
target=e.target,
params=e.params[0],
msg=' '.join(e.params[1:]),
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
            for u in list(self.users.values()):  # copy, since entries may be deleted
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_nick_change(self, e):
new_nick = IRCstr(e.target)
old_nick = IRCstr(e.source)
for chan in self.channels.values():
            if old_nick in chan.users:
self.log('nick',
source=e.source,
hostmask=e.user+'@'+e.host,
target=chan.name,
params=e.target,
)
self.users[old_nick].change_nick(new_nick)
self.users[new_nick] = self.users[old_nick]
del self.users[old_nick]
def on_ctcp_action(self, e):
self.log('action',
source=e.source,
target=e.target,
msg=' '.join(e.params),
)
def on_welcome(self, e):
pass
def on_mode(self, e):
self.log('mode',
source=e.source,
msg=' '.join(e.params),
target=e.target,
)
def before_poll(self):
"""Called each loop before polling sockets for I/O."""
pass
def after_poll(self):
"""Called each loop after polling sockets for I/O and
handling any queued events.
"""
pass
def connect(self, *args, **kwargs):
"""Extend `client.SimpleClient.connect()` with defaults"""
defaults = {}
for i, k in enumerate(('host', 'port', 'channel', 'use_ssl', 'password')):
if i < len(args):
defaults[k] = args[i]
elif k in kwargs:
defaults[k] = kwargs[k]
else:
def_k = 'default_' + k
defaults[k] = getattr(self, def_k, None)
if defaults['use_ssl'] is None:
defaults['use_ssl'] = False
if defaults['host'] is None:
raise TypeError("missing 1 required positional argument: 'host'")
logging.info("Connecting to %s:%s and joining channels %s",
defaults['host'],
defaults['port'],
defaults['channel'],
)
client.SimpleClient.connect(self, **defaults)
def start(self):
logging.debug("Beginning poll loop")
self._loop(self.conn._map)
def _log_to_file(self, etype, source, msg='', target='', hostmask='', params=''):
"""Override `log()` if bot is not initialized with a database
connection. Do not call this method directly.
"""
today = datetime.utcnow()
# TODO: Use self.locale['timezone'] for changing time
date = today.strftime(self.locale['date_fmt'])
time = today.strftime(self.locale['time_fmt'])
datetime_s = today.strftime(self.locale['short_datetime_fmt'])
datetime_l = today.strftime(self.locale['long_datetime_fmt'])
if target == self.nickname and etype in ('privmsg', 'action'):
target = source
if etype in self.log_formats:
file_path = os.path.expanduser(self.log_file.format(**locals()))
file_dir = os.path.dirname(file_path)
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
line = self.log_formats[etype].format(**locals())
with open(file_path, 'a') as log:
log.write(line+'\n')
# else do nothing
def _run_only_core(self, *args, **kwargs):
"""Override `_run_commands()` if bot is not initialized with a
database connection. Do not call this method directly.
Rather than getting a list of enabled modules from the database,
Seshet will only run the commands defined by `core` in this package.
The bot will only run commands given in private message ("query")
by either an authenticated user defined in the instance's config file,
or by any user with the same hostmask if authentication isn't set up
in the instance's config file.
The `core` command module from this package can only be overridden if
the bot is initialized with a database connection and a new `core`
module is entered into the database.
"""
pass
def _loop(self, map):
"""The main loop. Poll sockets for I/O and run any other functions
that need to be run every loop.
"""
try:
from asyncore import poll
except ImportError:
raise Exception("Couldn't find poll function. Cannot start bot.")
while map:
self.before_poll()
poll(timeout=30.0, map=map)
self.after_poll()
def _add_channel_names(client, e):
"""Add a new channel to self.channels and initialize its user list.
Called as event handler for RPL_NAMES events. Do not call directly.
"""
chan = IRCstr(e.channel)
names = set([IRCstr(n) for n in e.name_list])
client.channels[chan] = SeshetChannel(chan, names)
|
|
import os.path
import numpy
from scipy import optimize, interpolate
from . import path as appath
from . import download as download
try:
import fitsio
fitsread = fitsio.read
except ImportError:
import astropy.io.fits as pyfits
fitsread= pyfits.getdata
import warnings
from periodictable import elements
try:
# Need to have allStar, use lite version when available
filePath= appath.allStarPath(lite=True)
if not os.path.exists(filePath):
download.allStar(lite=True)
indexArrays= fitsread(appath.allStarPath(lite=True),3)
except ValueError:
_INDEX_ARRAYS_LOADED= False
else:
_INDEX_ARRAYS_LOADED= True
if type(indexArrays['PARAM_SYMBOL'][0,0]) == numpy.dtype(bytes):
_PARAM_SYMBOL= [index.strip().lower().decode("utf-8")
for index in indexArrays['PARAM_SYMBOL'].flatten()]
_ELEM_SYMBOL= [index.strip().lower().decode("utf-8")
for index in indexArrays['ELEM_SYMBOL'].flatten()]
else:
_PARAM_SYMBOL= [index.strip().lower()
for index in indexArrays['PARAM_SYMBOL'].flatten()]
_ELEM_SYMBOL= [index.strip().lower()
for index in indexArrays['ELEM_SYMBOL'].flatten()]
_ELEM_NUMBER_DICT= dict((elem,
elements.__dict__[elem.capitalize()].number)
for elem in _ELEM_SYMBOL
if elem != 'ci' and elem != 'tiii' and elem != 'c13')
_ELEM_NUMBER_DICT['CI']= elements.__dict__['C'].number
_ELEM_NUMBER_DICT['TiII']= elements.__dict__['Ti'].number
_ELEM_NUMBER_DICT['C13'] = elements.__dict__['C'].number
# DR12 abundance uncertainty coefficients as a function of Teff, [M/H], SNR
# from http://www.sdss.org/dr12/irspec/abundances/
# see also Holtzman et al 2015
_ch_12coeff=[-3.350,0.769,-0.919,-0.066]
_nh_12coeff=[-2.704,0.291,-0.591,-0.078]
_oh_12coeff=[-3.649,0.670,-0.614,-0.093]
_nah_12coeff=[-2.352,-0.002,-0.915,-0.263]
_mgh_12coeff=[-3.537,0.263,-0.825,-0.297]
_alh_12coeff=[-2.764,0.471,-0.868,-0.162]
_sih_12coeff=[-3.150,0.383,-0.224,-0.105]
_sh_12coeff=[-3.037,0.507,-0.625,-0.299]
_kh_12coeff=[-2.770,0.216,-0.667,-0.275]
_cah_12coeff=[-3.226,0.284,-0.879,-0.429]
_tih_12coeff=[-3.186,0.657,-0.819,-0.068]
_vh_12coeff=[-1.608,0.900,-0.400,-0.418]
_mnh_12coeff=[-3.031,0.639,-0.661,-0.326]
_feh_12coeff=[-3.357,0.098,-0.303,-0.071]
_nih_12coeff=[-3.153,0.135,-0.493,-0.185]
_mh_12coeff=[-3.603,0.109,-0.433,0.039]
_alpha_12coeff=[-4.360,0.060,-0.848,-0.096]
DR12_XH_coeff = {'C_H':_ch_12coeff,'N_H':_nh_12coeff,'O_H':_oh_12coeff,
'NA_H':_nah_12coeff,'MG_H':_mgh_12coeff,'AL_H':_alh_12coeff,
'SI_H':_sih_12coeff,'S_H':_sh_12coeff,'K_H':_kh_12coeff,
'CA_H':_cah_12coeff,'TI_H':_tih_12coeff,'V_H':_vh_12coeff,
'MN_H':_mnh_12coeff,'FE_H':_feh_12coeff,'NI_H':_nih_12coeff,
'METALS':_mh_12coeff,'ALPHAFE':_alpha_12coeff}
# DR13 abundance uncertainty coefficients as a function of Teff, [M/H], SNR
# from http://www.sdss.org/dr13/irspec/abundances/
_cfe_13coeff=[-3.243,0.608,-0.757,-0.257]
_cIfe_13coeff=[-2.804,0.403,-0.743,-0.319]
_nfe_13coeff=[-2.671,0.373,-0.407,-0.192]
_ofe_13coeff=[-3.410,1.471,-0.778,-0.182]
_nafe_13coeff=[-2.389,0.140,-0.926,-0.323]
_mgfe_13coeff=[-3.980,0.284,-0.949,-0.115]
_alfe_13coeff=[-2.616,-0.192,-0.628,-0.399]
_sife_13coeff=[-3.464,0.548,-0.482,-0.212]
_pfe_13coeff=[-1.988,0.384,-0.568,-0.369]
_sfe_13coeff=[-2.199,-0.030,-0.402,-0.295]
_kfe_13coeff=[-3.098,0.208,-0.583,-0.496]
_cafe_13coeff=[-3.520,0.153,-0.895,-0.405]
_tife_13coeff=[-3.108,0.295,-0.741,-0.185]
_tiIIfe_13coeff=[-2.192,0.328,-0.538,-0.267]
_vfe_13coeff=[-2.447,1.030,-1.096,-0.519]
_crfe_13coeff=[-3.191,0.290,-0.775,-0.455]
_mnfe_13coeff=[-3.523,0.235,-0.614,-0.488]
_feh_13coeff=[-5.316,0.202,-0.874,0.019]
_cofe_13coeff=[-2.062,1.064,-0.656,-0.523]
_nife_13coeff=[-4.067,0.442,-0.816,-0.395]
_cufe_13coeff=[-2.140,-0.096,-0.559,-0.426]
_gefe_13coeff=[-1.893,0.258,-0.665,-0.395]
_rbfe_13coeff=[-2.325,0.466,-1.117,-0.360]
_mh_13coeff=[-3.730,0.232,-0.524,0.013]
_alpha_13coeff=[-4.219,0.053,-0.794,-0.127]
DR13_XH_coeff={'C_FE':_cfe_13coeff,'CI_FE':_cIfe_13coeff,'N_FE':_nfe_13coeff,
'O_FE':_ofe_13coeff,'NA_FE':_nafe_13coeff,'MG_FE':_mgfe_13coeff,
'AL_FE':_alfe_13coeff,'SI_FE':_sife_13coeff,'P_FE':_pfe_13coeff,
'S_FE':_sfe_13coeff,'K_FE':_kfe_13coeff,'CA_FE':_cafe_13coeff,
'TI_FE':_tife_13coeff,'TIII_FE':_tiIIfe_13coeff,
'V_FE':_vfe_13coeff,'CR_FE':_crfe_13coeff,'MN_FE':_mnfe_13coeff,
'FE_H':_feh_13coeff,'CO_FE':_cofe_13coeff,'NI_FE':_nife_13coeff,
'CU_FE':_cufe_13coeff,'GE_FE':_gefe_13coeff,
'RB_FE':_rbfe_13coeff,'M_H':_mh_13coeff,
'ALPHA_M':_alpha_13coeff}
drcoeffs = {'12':DR12_XH_coeff,'13':DR13_XH_coeff}
# Detector limit by data release
apStarInds = {'10':{'blue':(322,3242),'green':(3648,6048),'red':(6412,8306)},
'11':{'blue':(322,3242),'green':(3648,6048),'red':(6412,8306)},
'12':{'blue':(322,3242),'green':(3648,6048),'red':(6412,8306)},
'13':{'blue':(246,3274),'green':(3585,6080),'red':(6344,8335)},
'14':{'blue':(246,3274),'green':(3585,6080),'red':(6344,8335)},
'16':{'blue':(246,3274),'green':(3585,6080),'red':(6344,8335)},
'17':{'blue':(246,3274),'green':(3585,6080),'red':(6344,8335)},
'current':{'blue':(246,3274),'green':(3585,6080),'red':(6344,8335)}
}
def _apStarPixelLimits(dr=None):
"""
NAME:
_apStarPixelLimits
PURPOSE:
return the apStar pixel bounds for each detector for the chosen data
release by unpacking apStarInds.
    INPUT:
dr - string referring to data release, e.g. '12'
OUTPUT:
bounds of blue, green and red detectors.
HISTORY:
2018-02-05 - Written - Price-Jones (UofT)
"""
if dr is None:
dr=appath._default_dr()
inds = apStarInds[dr]
apStarBlu_lo,apStarBlu_hi = inds['blue']
apStarGre_lo,apStarGre_hi = inds['green']
apStarRed_lo,apStarRed_hi = inds['red']
return apStarBlu_lo,apStarBlu_hi,apStarGre_lo,apStarGre_hi,apStarRed_lo,apStarRed_hi
def _aspcapPixelLimits(dr=None):
"""
NAME:
_aspcapPixelLimits
PURPOSE:
return the ASPCAP pixel bounds for each detector for the chosen data
release by unpacking apStarInds.
    INPUT:
dr - string referring to data release, e.g. '12'
OUTPUT:
starting pixel of the blue, green and red detectors, as well as the
total spectrum length in pixels
HISTORY:
2018-02-05 - Written - Price-Jones (UofT)
"""
if dr is None:
dr=appath._default_dr()
apStarBlu_lo,apStarBlu_hi,apStarGre_lo,apStarGre_hi,apStarRed_lo,apStarRed_hi = _apStarPixelLimits(dr=dr)
aspcapBlu_start = 0
aspcapGre_start = apStarBlu_hi-apStarBlu_lo+aspcapBlu_start
aspcapRed_start = apStarGre_hi-apStarGre_lo+aspcapGre_start
aspcapTotal = apStarRed_hi-apStarRed_lo+aspcapRed_start
return aspcapBlu_start,aspcapGre_start,aspcapRed_start,aspcapTotal
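# Illustrative example (not part of the original module): unpacking the DR12
# entry of apStarInds above gives detector widths of 2920, 2400 and 1894
# pixels, so the two helpers should return values along these lines:
#
#   >>> _apStarPixelLimits(dr='12')
#   (322, 3242, 3648, 6048, 6412, 8306)
#   >>> _aspcapPixelLimits(dr='12')
#   (0, 2920, 5320, 7214)
#
# i.e. an ASPCAP spectrum built from DR12 apStar data is 7214 pixels long.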
# Wavegrid parameters used in apStarWavegrid and pix2wv
_LOG10LAMBDA0= 4.179
_DLOG10LAMBDA= 6.*10.**-6.
_NLAMBDA= 8575
def apStarWavegrid():
return 10.**numpy.arange(_LOG10LAMBDA0,
_LOG10LAMBDA0+_NLAMBDA*_DLOG10LAMBDA,
_DLOG10LAMBDA)
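# Illustrative example (not part of the original module): with the grid
# parameters above, the apStar wavelength grid is log10-spaced with 8575
# pixels, running from about 10**4.179 ~ 15100 A up to roughly 17000 A:
#
#   >>> grid = apStarWavegrid()
#   >>> grid.shape            # (8575,)
#   >>> grid[0], grid[-1]     # approximately (15100., 17000.) Angstrom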
def paramIndx(param):
"""
NAME:
paramIndx
PURPOSE:
return the index into the PARAM/FPARAM arrays corresponding to a given stellar parameter
INPUT:
param - the stellar parameter (one of TEFF,LOGG,LOG10VDOP,METALS,C,N,ALPHA)
OUTPUT:
index into PARAM/FPARAM array
HISTORY:
2014-08-19 - Written - Bovy (IAS)
"""
if not _INDEX_ARRAYS_LOADED: raise ImportError("paramIndx function cannot be used, because the allStar file could not be properly loaded")
if param.lower() == 'alpha': return _PARAM_SYMBOL.index('o mg si s ca ti')
else:
try:
return _PARAM_SYMBOL.index(param.lower())
except ValueError:
raise KeyError("Stellar parameter %s not recognized" % param)
def elemIndx(elem):
"""
NAME:
elemIndx
PURPOSE:
return the index into the ELEM/FELEM arrays corresponding to a given element
INPUT:
elem - the element (string like 'C')
OUTPUT:
index into ELEM/FELEM array
HISTORY:
2014-08-19 - Written - Bovy (IAS)
"""
if not _INDEX_ARRAYS_LOADED: raise ImportError("elemIndx function cannot be used, because the allStar file could not be properly loaded")
try:
return _ELEM_SYMBOL.index(elem.lower())
except ValueError:
raise KeyError("Element %s is not part of the APOGEE elements (can't do everything!) or something went wrong)" % elem)
def atomic_number(elem):
"""
NAME:
atomic_number
PURPOSE:
return the atomic number of a given element
INPUT:
elem - element
OUTPUT:
atomic number
HISTORY:
2015-03-10 - Written - Bovy (IAS)
"""
try:
return _ELEM_NUMBER_DICT[elem.lower()]
except (NameError,KeyError):
return elements.__dict__[elem.lower().capitalize()].number
def sigma_XH(elem,Teff=4500.,M_H=0.,SNR=100.,dr=None):
"""
NAME:
sigma_XH
PURPOSE:
return uncertainty in a given element at specified effective
temperature, metallicity and signal to noise ratio (functional form
taken from Holtzman et al 2015)
INPUT:
elem - string element name following the ASPCAP star naming convention
i.e. for DR12 carbon, string is 'C_H'
Teff - effective temperature or array thereof in K, defaults to 4500 K
M_H - metallicity or array thereof, defaults to 0
SNR - signal to noise ratio or array thereof, defaults to 100
dr - data release
OUTPUT:
float or array depending on shape of Teff, M_H and SNR input
HISTORY:
2017-07-24 - Written - Price-Jones (UofT)
"""
if dr is None: dr=appath._default_dr()
A,B,C,D = drcoeffs[dr][elem]
logsig = A + B*((Teff-4500.)/1000.) + C*M_H + D*(SNR-100)
return numpy.exp(logsig)
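# Illustrative example (not part of the original module): at the default
# Teff=4500 K, [M/H]=0 and SNR=100 every term except A vanishes, so for the
# DR12 iron coefficients the uncertainty is simply exp(A):
#
#   >>> sigma_XH('FE_H', dr='12')     # exp(-3.357) ~ 0.035 dex
#
# Passing arrays for Teff, M_H or SNR returns an array of matching shape.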
def vac2air(wave,sdssweb=False):
"""
NAME:
vac2air
PURPOSE:
Convert from vacuum to air wavelengths (See Allende Prieto technical note: http://hebe.as.utexas.edu/apogee/docs/air_vacuum.pdf)
INPUT:
wave - vacuum wavelength in \AA
sdssweb= (False) if True, use the expression from the SDSS website (http://classic.sdss.org/dr7/products/spectra/vacwavelength.html)
OUTPUT:
air wavelength in \AA
HISTORY:
2014-12-04 - Written - Bovy (IAS)
2015-04-27 - Updated to CAP note expression - Bovy (IAS)
"""
if sdssweb:
return wave/(1.+2.735182*10.**-4.+131.4182/wave**2.+2.76249*10.**8./wave**4.)
else:
return wave/(1.+0.05792105/(238.0185-(10000./wave)**2.)+0.00167917/(57.362-(10000./wave)**2.))
def air2vac(wave,sdssweb=False):
"""
NAME:
air2vac
PURPOSE:
Convert from air to vacuum wavelengths (See Allende Prieto technical note: http://hebe.as.utexas.edu/apogee/docs/air_vacuum.pdf)
INPUT:
wave - air wavelength in \AA
sdssweb= (False) if True, use the expression from the SDSS website (http://classic.sdss.org/dr7/products/spectra/vacwavelength.html)
OUTPUT:
vacuum wavelength in \AA
HISTORY:
2014-12-04 - Written - Bovy (IAS)
2015-04-27 - Updated to CAP note expression - Bovy (IAS)
"""
return optimize.brentq(lambda x: vac2air(x,sdssweb=sdssweb)-wave,
wave-20,wave+20.)
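# Illustrative example (not part of the original module): air2vac inverts
# vac2air numerically with brentq, so a round trip should recover the input
# wavelength to numerical precision:
#
#   >>> w_air = vac2air(16000.)       # slightly below 16000 A
#   >>> air2vac(w_air)                # ~16000. again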
def toAspcapGrid(spec,dr=None):
"""
NAME:
toAspcapGrid
PURPOSE:
convert a spectrum from apStar grid to the ASPCAP grid (w/o the detector gaps)
INPUT:
spec - spectrum (or whatever) on the apStar grid; either (nwave) or (nspec,nwave)
dr - data release of pixel bounds to use
OUTPUT:
spectrum (or whatever) on the ASPCAP grid
HISTORY:
2015-02-17 - Written - Bovy (IAS)
2018-02-05 - Updated to account for changing detector ranges - Price-Jones (UofT)
"""
apStarBlu_lo,apStarBlu_hi,apStarGre_lo,apStarGre_hi,apStarRed_lo,apStarRed_hi = _apStarPixelLimits(dr=dr)
aspcapBlu_start,aspcapGre_start,aspcapRed_start,aspcapTotal = _aspcapPixelLimits(dr=dr)
if len(spec.shape) == 2: # (nspec,nwave)
out= numpy.zeros((spec.shape[0],aspcapTotal),dtype=spec.dtype)
oneSpec= False
else:
oneSpec= True
out= numpy.zeros((1,aspcapTotal),dtype=spec.dtype)
spec= numpy.reshape(spec,(1,len(spec)))
out[:,:aspcapGre_start]= spec[:,apStarBlu_lo:apStarBlu_hi]
out[:,aspcapGre_start:aspcapRed_start]= spec[:,apStarGre_lo:apStarGre_hi]
out[:,aspcapRed_start:]= spec[:,apStarRed_lo:apStarRed_hi]
if oneSpec:
return out[0]
else:
return out
def toApStarGrid(spec,dr=None):
"""
NAME:
toApStarGrid
PURPOSE:
convert a spectrum from the ASPCAP grid (w/o the detector gaps) to the apStar grid
INPUT:
spec - spectrum (or whatever) on the ASPCAP grid; either (nwave) or (nspec,nwave)
dr - data release of pixel bounds to use
OUTPUT:
spectrum (or whatever) on the apStar grid
HISTORY:
2015-02-17 - Written - Bovy (IAS)
2018-02-05 - Updated to account for changing detector ranges - Price-Jones (UofT)
"""
apStarBlu_lo,apStarBlu_hi,apStarGre_lo,apStarGre_hi,apStarRed_lo,apStarRed_hi = _apStarPixelLimits(dr=dr)
aspcapBlu_start,aspcapGre_start,aspcapRed_start,aspcapTotal = _aspcapPixelLimits(dr=dr)
if len(spec.shape) == 2: # (nspec,nwave)
out= numpy.zeros((spec.shape[0],8575),dtype=spec.dtype)
oneSpec= False
else:
oneSpec= True
out= numpy.zeros((1,8575),dtype=spec.dtype)
spec= numpy.reshape(spec,(1,len(spec)))
out[:,apStarBlu_lo:apStarBlu_hi]= spec[:,:aspcapGre_start]
out[:,apStarGre_lo:apStarGre_hi]= spec[:,aspcapGre_start:aspcapRed_start]
out[:,apStarRed_lo:apStarRed_hi]= spec[:,aspcapRed_start:]
if oneSpec:
return out[0]
else:
return out
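# Illustrative example (not part of the original module): toAspcapGrid drops
# the inter-detector gaps (8575 apStar pixels -> 7214 ASPCAP pixels for the
# DR12 bounds, 7514 for DR13 and later), and toApStarGrid pads the gaps back
# with zeros, so shapes behave as follows:
#
#   >>> spec = numpy.ones(8575)
#   >>> toAspcapGrid(spec, dr='12').shape         # (7214,)
#   >>> toApStarGrid(toAspcapGrid(spec)).shape    # (8575,), gap pixels are zero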
wvs = apStarWavegrid()
aspcapwvs = toAspcapGrid(wvs)
pixels = numpy.arange(0,_NLAMBDA)
apStar_pixel_interp = interpolate.interp1d(wvs,pixels,kind='linear',
bounds_error=False)
def pix2wv(pix,apStarWavegrid=False,dr=None):
"""
NAME:
pix2wv
PURPOSE:
convert pixel to wavelength
INPUT:
pix - pixel (int), range of pixels (tuple) or list of pixels (list/numpy array)
float input will be converted to integers
apStarWavegrid = (False) uses aspcapStarWavegrid by default
dr - data release of pixel bounds to use
OUTPUT:
wavelength(s) in Angstroms corresponding to input pixel(s)
HISTORY:
2016-10-18 - Written - Price-Jones
2018-02-05 - Updated to account for changing detector ranges - Price-Jones (UofT)
"""
# choose wavelength array to source from
if apStarWavegrid:
wvlist = wvs
maxpix = _NLAMBDA
elif not apStarWavegrid:
aspcapBlu_start,aspcapGre_start,aspcapRed_start,aspcapTotal = _aspcapPixelLimits(dr=dr)
wvlist = aspcapwvs
maxpix = aspcapTotal
# Check input cases
if isinstance(pix,float):
pix = int(pix)
if isinstance(pix,int):
if pix >= 0 and pix < maxpix:
return wvlist[pix]
else:
warnings.warn("pixel outside allowed pixel range",RuntimeWarning)
return numpy.nan
elif isinstance(pix,tuple):
if pix[0] >= 0 and pix[1] < maxpix:
return wvlist[int(pix[0]):int(pix[1]):int(pix[2])]
else:
warnings.warn("pixel bounds outside allowed pixel range",RuntimeWarning)
return numpy.nan
elif isinstance(pix,(list,numpy.ndarray)):
if isinstance(pix,list):
pix = numpy.array(pix)
wavelengths = numpy.zeros(len(pix))
valid = (pix>=0) & (pix<maxpix)
        invalid = (pix<0) | (pix>=maxpix)
wavelengths[valid] = wvlist[pix[valid].astype(int)]
wavelengths[invalid] = numpy.nan
if sum(invalid)!=0:
warnings.warn("pixel outside allowed pixel range",RuntimeWarning)
return wavelengths
# If input not recognized inform the user
    elif not isinstance(pix,(int,float,tuple,list,numpy.ndarray)):
warnings.warn("unrecognized pixel input",RuntimeWarning)
return None
def wv2pix(wv,apStarWavegrid=False,dr=None):
"""
NAME:
wv2pix
PURPOSE:
convert wavelength to pixel using interpolated function
INPUT:
wv - wavelength (int), range of wavelengths (tuple) or list of wavelengths
(list/numpy array) in Angstroms
apStarWavegrid = (False) uses aspcapStarWavegrid by default
dr - data release of pixel bounds to use
OUTPUT:
array of pixel(s) corresponding to input wavelength(s)
nan - indicates input wavelength(s) outside the range
None - indicates input wavelength type not recognized
        0 - indicates the wavelength can be found but is outside the bounds of
            the aspcapStarWavegrid
HISTORY:
2016-10-18 - Written - Price-Jones
2018-02-05 - Updated to account for changing detector ranges - Price-Jones
"""
# Check input cases
if isinstance(wv,(int,float)):
if wv >= wvs[0] and wv <= wvs[-1]:
pixels = apStar_pixel_interp(wv)
else:
warnings.warn("wavelength outside allowed wavelength range",RuntimeWarning)
return numpy.nan
elif isinstance(wv,tuple):
if wv[0] >= wvs[0] and wv[1] <= wvs[-1]:
wvlist = numpy.arange(wv[0],wv[1],wv[2])
pixels = apStar_pixel_interp(wvlist)
else:
warnings.warn("wavelength bounds outside allowed wavelength range",RuntimeWarning)
return numpy.nan
elif isinstance(wv,(list,numpy.ndarray)):
if isinstance(wv,list):
wv = numpy.array(wv)
pixels = numpy.zeros(len(wv))
        valid = (wv>=wvs[0]) & (wv<=wvs[-1])
invalid = (wv<wvs[0]) | (wv>wvs[-1])
pixels[valid] = apStar_pixel_interp(wv[valid])
pixels[invalid] = numpy.nan
if sum(invalid)!=0:
warnings.warn("wavelength outside allowed wavelength range",RuntimeWarning)
# If input not recognized inform the user
elif not isinstance(wv,(int,float,tuple,list,numpy.ndarray)):
warnings.warn("unrecognized wavelength input",RuntimeWarning)
return None
if apStarWavegrid:
return pixels.astype(int)
# If on aspcapStarWavegrid, convert appropriately
elif not apStarWavegrid:
apStarBlu_lo,apStarBlu_hi,apStarGre_lo,apStarGre_hi,apStarRed_lo,apStarRed_hi = _apStarPixelLimits(dr=dr)
aspcapBlu_start,aspcapGre_start,aspcapRed_start,aspcapTotal = _aspcapPixelLimits(dr=dr)
# find where pixel list matches detectors
blue = numpy.where((pixels >= apStarBlu_lo) & (pixels < apStarBlu_hi))
green = numpy.where((pixels >= apStarGre_lo) & (pixels < apStarGre_hi))
red = numpy.where((pixels >= apStarRed_lo) & (pixels < apStarRed_hi))
# find where pixel list does not match detectors
if pixels.size > 1:
nomatch = (numpy.array([i for i in range(len(pixels)) if i not in blue[0] and i not in green[0] and i not in red[0]]),)
# adjust pixel values to match aspcap wavegrid
pixels[blue] -= (apStarBlu_lo-aspcapBlu_start)
pixels[green] -= (apStarGre_lo-aspcapGre_start)
pixels[red] -= (apStarRed_lo-aspcapRed_start)
# Case of single wavelength
elif pixels.size == 1:
if blue[0].size==1:
pixels -= (apStarBlu_lo-aspcapBlu_start)
elif green[0].size==1:
pixels -= (apStarGre_lo-aspcapGre_start)
elif red[0].size==1:
pixels -= (apStarRed_lo-aspcapRed_start)
elif blue[0].size==0 and green[0].size==0 and red[0].size==0:
nomatch = ([0],)
pixels = 0
return numpy.floor(pixels).astype(int)
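# Illustrative example (not part of the original module): pix2wv and wv2pix
# are approximate inverses on the apStar grid, up to the floor taken when
# converting back to integer pixels:
#
#   >>> w = pix2wv(1000, apStarWavegrid=True)
#   >>> wv2pix(w, apStarWavegrid=True)    # 1000 (or 999 from rounding)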
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import logging
from typing import Any, Callable, cast, Dict, List, Optional, Set, Tuple, Type, Union
from apispec import APISpec
from apispec.exceptions import DuplicateComponentNameError
from flask import Blueprint, g, Response
from flask_appbuilder import AppBuilder, ModelRestApi
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.models.filters import BaseFilter, Filters
from flask_appbuilder.models.sqla.filters import FilterStartsWith
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import lazy_gettext as _
from marshmallow import fields, Schema
from sqlalchemy import and_, distinct, func
from sqlalchemy.orm.query import Query
from superset.extensions import db, security_manager
from superset.models.core import FavStar
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.sql_lab import Query as SqllabQuery
from superset.stats_logger import BaseStatsLogger
from superset.typing import FlaskResponse
from superset.utils.core import time_function
logger = logging.getLogger(__name__)
get_related_schema = {
"type": "object",
"properties": {
"page_size": {"type": "integer"},
"page": {"type": "integer"},
"filter": {"type": "string"},
},
}
class RelatedResultResponseSchema(Schema):
value = fields.Integer(description="The related item identifier")
text = fields.String(description="The related item string representation")
class RelatedResponseSchema(Schema):
count = fields.Integer(description="The total number of related values")
result = fields.List(fields.Nested(RelatedResultResponseSchema))
class DistinctResultResponseSchema(Schema):
text = fields.String(description="The distinct item")
class DistincResponseSchema(Schema):
count = fields.Integer(description="The total number of distinct values")
result = fields.List(fields.Nested(DistinctResultResponseSchema))
def statsd_metrics(f: Callable[..., Any]) -> Callable[..., Any]:
"""
Handle sending all statsd metrics from the REST API
"""
def wraps(self: "BaseSupersetModelRestApi", *args: Any, **kwargs: Any) -> Response:
duration, response = time_function(f, self, *args, **kwargs)
self.send_stats_metrics(response, f.__name__, duration)
return response
return functools.update_wrapper(wraps, f)
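# Illustrative sketch (not from the original file): statsd_metrics is intended
# to wrap REST handler methods on a BaseSupersetModelRestApi subclass, timing
# the call and reporting success/error counters via send_stats_metrics. A
# hypothetical endpoint could be decorated like this:
#
#   class MyModelRestApi(BaseSupersetModelRestApi):   # hypothetical subclass
#       @expose("/ping", methods=["GET"])
#       @protect()
#       @safe
#       @statsd_metrics
#       def ping(self) -> Response:
#           return self.response(200, message="pong")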
class RelatedFieldFilter:
# data class to specify what filter to use on a /related endpoint
# pylint: disable=too-few-public-methods
def __init__(self, field_name: str, filter_class: Type[BaseFilter]):
self.field_name = field_name
self.filter_class = filter_class
class BaseFavoriteFilter(BaseFilter): # pylint: disable=too-few-public-methods
"""
    Base custom filter for the GET list endpoints that filters dashboards and
    slices by whether the current user has marked them as favorite
"""
name = _("Is favorite")
arg_name = ""
class_name = ""
""" The FavStar class_name to user """
model: Type[Union[Dashboard, Slice, SqllabQuery]] = Dashboard
""" The SQLAlchemy model """
def apply(self, query: Query, value: Any) -> Query:
# If anonymous user filter nothing
if security_manager.current_user is None:
return query
users_favorite_query = db.session.query(FavStar.obj_id).filter(
and_(FavStar.user_id == g.user.id, FavStar.class_name == self.class_name)
)
if value:
return query.filter(and_(self.model.id.in_(users_favorite_query)))
return query.filter(and_(~self.model.id.in_(users_favorite_query)))
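# Illustrative sketch (not from the original file): a concrete favorite filter
# only needs to bind the FavStar class_name and the model; a hypothetical
# dashboard variant could look like:
#
#   class DashboardFavoriteFilter(BaseFavoriteFilter):   # hypothetical
#       arg_name = "dashboard_is_favorite"
#       class_name = "Dashboard"
#       model = Dashboard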
class BaseSupersetModelRestApi(ModelRestApi):
"""
    Extends FAB's ModelRestApi to implement Superset-specific generic functionality
"""
csrf_exempt = False
method_permission_name = {
"bulk_delete": "delete",
"data": "list",
"delete": "delete",
"distinct": "list",
"export": "mulexport",
"get": "show",
"get_list": "list",
"info": "list",
"post": "add",
"put": "edit",
"refresh": "edit",
"related": "list",
"related_objects": "list",
"schemas": "list",
"select_star": "list",
"table_metadata": "list",
"test_connection": "post",
"thumbnail": "list",
"viz_types": "list",
}
order_rel_fields: Dict[str, Tuple[str, str]] = {}
"""
Impose ordering on related fields query::
order_rel_fields = {
"<RELATED_FIELD>": ("<RELATED_FIELD_FIELD>", "<asc|desc>"),
...
}
""" # pylint: disable=pointless-string-statement
related_field_filters: Dict[str, Union[RelatedFieldFilter, str]] = {}
"""
Declare the filters for related fields::
        related_field_filters = {
            "<RELATED_FIELD>": <RelatedFieldFilter>
        }
""" # pylint: disable=pointless-string-statement
filter_rel_fields: Dict[str, BaseFilter] = {}
"""
Declare the related field base filter::
        filter_rel_fields = {
            "<RELATED_FIELD>": "<FILTER>"
        }
""" # pylint: disable=pointless-string-statement
allowed_rel_fields: Set[str] = set()
allowed_distinct_fields: Set[str] = set()
openapi_spec_component_schemas: Tuple[Type[Schema], ...] = tuple()
"""
Add extra schemas to the OpenAPI component schemas section
""" # pylint: disable=pointless-string-statement
add_columns: List[str]
edit_columns: List[str]
list_columns: List[str]
show_columns: List[str]
def __init__(self) -> None:
# Setup statsd
self.stats_logger = BaseStatsLogger()
# Add base API spec base query parameter schemas
if self.apispec_parameter_schemas is None: # type: ignore
self.apispec_parameter_schemas = {}
self.apispec_parameter_schemas["get_related_schema"] = get_related_schema
if self.openapi_spec_component_schemas is None:
self.openapi_spec_component_schemas = ()
self.openapi_spec_component_schemas = self.openapi_spec_component_schemas + (
RelatedResponseSchema,
DistincResponseSchema,
)
super().__init__()
def add_apispec_components(self, api_spec: APISpec) -> None:
for schema in self.openapi_spec_component_schemas:
try:
api_spec.components.schema(
schema.__name__, schema=schema,
)
except DuplicateComponentNameError:
pass
super().add_apispec_components(api_spec)
def create_blueprint(
self, appbuilder: AppBuilder, *args: Any, **kwargs: Any
) -> Blueprint:
self.stats_logger = self.appbuilder.get_app.config["STATS_LOGGER"]
return super().create_blueprint(appbuilder, *args, **kwargs)
def _init_properties(self) -> None:
model_id = self.datamodel.get_pk_name()
if self.list_columns is None and not self.list_model_schema:
self.list_columns = [model_id]
if self.show_columns is None and not self.show_model_schema:
self.show_columns = [model_id]
if self.edit_columns is None and not self.edit_model_schema:
self.edit_columns = [model_id]
if self.add_columns is None and not self.add_model_schema:
self.add_columns = [model_id]
super()._init_properties()
def _get_related_filter(
self, datamodel: SQLAInterface, column_name: str, value: str
) -> Filters:
filter_field = self.related_field_filters.get(column_name)
if isinstance(filter_field, str):
filter_field = RelatedFieldFilter(cast(str, filter_field), FilterStartsWith)
filter_field = cast(RelatedFieldFilter, filter_field)
search_columns = [filter_field.field_name] if filter_field else None
filters = datamodel.get_filters(search_columns)
base_filters = self.filter_rel_fields.get(column_name)
if base_filters:
filters.add_filter_list(base_filters)
if value and filter_field:
filters.add_filter(
filter_field.field_name, filter_field.filter_class, value
)
return filters
def _get_distinct_filter(self, column_name: str, value: str) -> Filters:
filter_field = RelatedFieldFilter(column_name, FilterStartsWith)
filter_field = cast(RelatedFieldFilter, filter_field)
search_columns = [filter_field.field_name] if filter_field else None
filters = self.datamodel.get_filters(search_columns)
filters.add_filter_list(self.base_filters)
if value and filter_field:
filters.add_filter(
filter_field.field_name, filter_field.filter_class, value
)
return filters
def incr_stats(self, action: str, func_name: str) -> None:
"""
        Proxy function for statsd.incr to impose a key structure for REST APIs
:param action: String with an action name eg: error, success
:param func_name: The function name
"""
self.stats_logger.incr(f"{self.__class__.__name__}.{func_name}.{action}")
def timing_stats(self, action: str, func_name: str, value: float) -> None:
"""
        Proxy function for statsd.timing to impose a key structure for REST APIs
:param action: String with an action name eg: error, success
:param func_name: The function name
:param value: A float with the time it took for the endpoint to execute
"""
self.stats_logger.timing(
f"{self.__class__.__name__}.{func_name}.{action}", value
)
def send_stats_metrics(
self, response: Response, key: str, time_delta: Optional[float] = None
) -> None:
"""
Helper function to handle sending statsd metrics
:param response: flask response object, will evaluate if it was an error
:param key: The function name
:param time_delta: Optional time it took for the endpoint to execute
"""
if 200 <= response.status_code < 400:
self.incr_stats("success", key)
else:
self.incr_stats("error", key)
if time_delta:
self.timing_stats("time", key, time_delta)
def info_headless(self, **kwargs: Any) -> Response:
"""
Add statsd metrics to builtin FAB _info endpoint
"""
duration, response = time_function(super().info_headless, **kwargs)
self.send_stats_metrics(response, self.info.__name__, duration)
return response
def get_headless(self, pk: int, **kwargs: Any) -> Response:
"""
Add statsd metrics to builtin FAB GET endpoint
"""
duration, response = time_function(super().get_headless, pk, **kwargs)
self.send_stats_metrics(response, self.get.__name__, duration)
return response
def get_list_headless(self, **kwargs: Any) -> Response:
"""
Add statsd metrics to builtin FAB GET list endpoint
"""
duration, response = time_function(super().get_list_headless, **kwargs)
self.send_stats_metrics(response, self.get_list.__name__, duration)
return response
@expose("/related/<column_name>", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_related_schema)
def related(self, column_name: str, **kwargs: Any) -> FlaskResponse:
"""Get related fields data
---
get:
parameters:
- in: path
schema:
type: string
name: column_name
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_related_schema'
responses:
200:
description: Related column data
content:
application/json:
                  schema:
                    $ref: "#/components/schemas/RelatedResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if column_name not in self.allowed_rel_fields:
self.incr_stats("error", self.related.__name__)
return self.response_404()
args = kwargs.get("rison", {})
# handle pagination
page, page_size = self._handle_page_args(args)
try:
datamodel = self.datamodel.get_related_interface(column_name)
except KeyError:
return self.response_404()
page, page_size = self._sanitize_page_args(page, page_size)
# handle ordering
order_field = self.order_rel_fields.get(column_name)
if order_field:
order_column, order_direction = order_field
else:
order_column, order_direction = "", ""
# handle filters
filters = self._get_related_filter(datamodel, column_name, args.get("filter"))
# Make the query
count, values = datamodel.query(
filters, order_column, order_direction, page=page, page_size=page_size
)
# produce response
result = [
{"value": datamodel.get_pk_value(value), "text": str(value)}
for value in values
]
return self.response(200, count=count, result=result)
@expose("/distinct/<column_name>", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_related_schema)
def distinct(self, column_name: str, **kwargs: Any) -> FlaskResponse:
"""Get distinct values from field data
---
get:
parameters:
- in: path
schema:
type: string
name: column_name
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_related_schema'
responses:
200:
description: Distinct field data
content:
application/json:
                  schema:
                    $ref: "#/components/schemas/DistincResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if column_name not in self.allowed_distinct_fields:
self.incr_stats("error", self.related.__name__)
return self.response_404()
args = kwargs.get("rison", {})
# handle pagination
page, page_size = self._sanitize_page_args(*self._handle_page_args(args))
# Create generic base filters with added request filter
filters = self._get_distinct_filter(column_name, args.get("filter"))
# Make the query
query_count = self.appbuilder.get_session.query(
func.count(distinct(getattr(self.datamodel.obj, column_name)))
)
count = self.datamodel.apply_filters(query_count, filters).scalar()
if count == 0:
return self.response(200, count=count, result=[])
query = self.appbuilder.get_session.query(
distinct(getattr(self.datamodel.obj, column_name))
)
# Apply generic base filters with added request filter
query = self.datamodel.apply_filters(query, filters)
# Apply sort
query = self.datamodel.apply_order_by(query, column_name, "asc")
# Apply pagination
result = self.datamodel.apply_pagination(query, page, page_size).all()
# produce response
result = [
{"text": item[0], "value": item[0]}
for item in result
if item[0] is not None
]
return self.response(200, count=count, result=result)
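# Illustrative sketch (not from the original file): a subclass typically wires
# the /related and /distinct endpoints through the class attributes declared
# above; the names below are hypothetical:
#
#   class ChartRestApi(BaseSupersetModelRestApi):     # hypothetical subclass
#       datamodel = SQLAInterface(Slice)
#       allowed_rel_fields = {"owners"}
#       allowed_distinct_fields = {"viz_type"}
#       order_rel_fields = {"owners": ("first_name", "asc")}
#       related_field_filters = {
#           "owners": RelatedFieldFilter("first_name", FilterStartsWith),
#       }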
|
|
import gzip
import json
from CommonServerPython import *
# IMPORTS
import urllib3
import csv
import requests
import traceback
import urllib.parse
from typing import Any, Dict, List, Optional, Set, Tuple
# Disable insecure warnings
urllib3.disable_warnings()
BATCH_SIZE = 2000
INTEGRATION_NAME = 'Recorded Future'
# taken from recorded future docs
RF_CRITICALITY_LABELS = {
'Very_Malicious': 90,
'Malicious': 65,
'Suspicious': 25,
'Unusual': 5
}
RF_INDICATOR_TYPES = {
'ip': 'ip',
'domain': 'domain',
'url': 'url',
'CVE(vulnerability)': 'vulnerability',
'hash': 'hash'
}
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
SOURCE_NAME = 'recordedfuture.masterrisklist'
BASE_URL = 'https://api.recordedfuture.com/v2/'
PARAMS = {'output_format': 'csv/splunk',
'download': 1} # for faster download
headers = {'X-RF-User-Agent': 'Demisto',
'content-type': 'application/json'}
def __init__(self, indicator_type: str, api_token: str, services: list, risk_rule: str = None,
fusion_file_path: str = None, insecure: bool = False,
polling_timeout: int = 20, proxy: bool = False, threshold: int = 65, risk_score_threshold: int = 0,
tags: Optional[list] = None, tlp_color: Optional[str] = None):
"""
Attributes:
indicator_type: string, the indicator type of the feed.
api_token: string, the api token for RecordedFuture.
services: list, the services from RecordedFuture.
risk_rule: string, an optional argument to the 'ConnectApi' service request.
fusion_file_path: string, an optional argument to the 'Fusion' service request.
insecure: boolean, if *false* feed HTTPS server certificate is verified. Default: *false*
polling_timeout: timeout of the polling request in seconds. Default: 20
proxy: Sets whether use proxy when sending requests
            threshold: The minimum score from the feed in order to determine whether the indicator is malicious.
risk_score_threshold: The minimum score to filter out the ingested indicators.
tags: A list of tags to add to indicators
:param tlp_color: Traffic Light Protocol color
"""
if tags is None:
tags = []
try:
self.polling_timeout = int(polling_timeout)
except (ValueError, TypeError):
return_error('Please provide an integer value for "Request Timeout"')
self.risk_rule = argToList(risk_rule)
self.fusion_file_path = fusion_file_path if fusion_file_path != "" else None
self.api_token = self.headers['X-RFToken'] = api_token
self.services = services
self.indicator_type = indicator_type
self.threshold = int(threshold)
self.risk_score_threshold = int(risk_score_threshold)
self.tags = tags
self.tlp_color = tlp_color
super().__init__(self.BASE_URL, proxy=proxy, verify=not insecure)
def _build_request(self, service, indicator_type, risk_rule: Optional[str] = None) -> requests.PreparedRequest:
"""Builds the request for the Recorded Future feed.
Args:
service (str): The service from recorded future. Can be 'connectApi' or 'fusion'
indicator_type (str) The indicator type. Can be 'domain', 'ip', 'hash' or 'url'
risk_rule(str): A risk rule that limits the fetched indicators
Returns:
requests.PreparedRequest: The prepared request which will be sent to the server
"""
if service == 'connectApi':
if risk_rule:
url = self.BASE_URL + indicator_type + '/risklist?list=' + risk_rule
else:
url = self.BASE_URL + indicator_type + '/risklist'
params = self.PARAMS
params['gzip'] = True
response = requests.Request(
'GET',
url,
headers=self.headers,
params=params
)
elif service == 'fusion':
url = self.BASE_URL + 'fusion/files/?path='
if self.fusion_file_path is None:
fusion_path = '/public/risklists/default_' + indicator_type + '_risklist.csv'
else:
fusion_path = self.fusion_file_path
fusion_path = urllib.parse.quote_plus(fusion_path)
response = requests.Request('GET',
url + fusion_path,
headers=self.headers,
params=self.PARAMS)
else:
raise DemistoException(f'Service unknown: {service}')
return response.prepare()
def build_iterator(self, service, indicator_type, risk_rule: Optional[str] = None):
"""Retrieves all entries from the feed.
Args:
service (str): The service from recorded future. Can be 'connectApi' or 'fusion'
indicator_type (str): The indicator type. Can be 'domain', 'ip', 'hash' or 'url'
risk_rule (str): A risk rule that limits the fetched indicators
Returns:
list of feed dictionaries.
"""
_session = requests.Session()
prepared_request = self._build_request(service, indicator_type, risk_rule)
# this is to honour the proxy environment variables
rkwargs = _session.merge_environment_settings(
prepared_request.url,
{}, None, None, None # defaults
)
rkwargs['stream'] = True
rkwargs['verify'] = self._verify
rkwargs['timeout'] = self.polling_timeout
try:
response = _session.send(prepared_request, **rkwargs)
except requests.ConnectionError as e:
raise requests.ConnectionError(f'Failed to establish a new connection: {str(e)}')
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
if "Insufficient credits" in response.text:
return_error("'Insufficient Credits' error was returned from Recorded Future. \n"
"Try increasing the integration's fetch interval in order to decrease the amount of API"
" requests made to Recorded Future. ")
else:
return_error(
'{} - exception in request: {} {}'.format(self.SOURCE_NAME, response.status_code, response.content))
if service == 'connectApi':
response_content = gzip.decompress(response.content)
response_content = response_content.decode('utf-8')
with open("response.txt", "w") as f:
f.write(response_content)
else:
with open("response.txt", "w") as f:
f.write(response.text)
def get_batches_from_file(self, limit):
file_stream = open("response.txt", 'rt')
columns = file_stream.readline() # get the headers from the csv file.
columns = columns.replace("\"", "").strip().split(",") # '"a","b"\n' -> ["a", "b"]
batch_size = limit if limit else BATCH_SIZE
while True:
feed_batch = [feed for _, feed in zip(range(batch_size + 1), file_stream) if feed]
if not feed_batch:
file_stream.close()
os.remove("response.txt")
return
yield csv.DictReader(feed_batch, fieldnames=columns)
def calculate_indicator_score(self, risk_from_feed):
"""Calculates the Dbot score of an indicator based on its Risk value from the feed.
Args:
risk_from_feed (str): The indicator's risk value from the feed
Returns:
int. The indicator's Dbot score
"""
dbot_score = 0
risk_from_feed = int(risk_from_feed)
if risk_from_feed >= self.threshold or risk_from_feed >= 65:
dbot_score = 3
elif risk_from_feed >= 5:
dbot_score = 2
return dbot_score
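    # Illustrative example (not part of the original integration): with the
    # default threshold of 65 the mapping is
    #   Risk >= 65        -> 3 (malicious)
    #   5 <= Risk < 65    -> 2 (suspicious)
    #   Risk < 5          -> 0 (unknown)
    # so calculate_indicator_score('90') returns 3 and
    # calculate_indicator_score('30') returns 2.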
def check_indicator_risk_score(self, risk_score):
"""Checks if the indicator risk score is above risk_score_threshold
Args:
risk_score (str): The indicator's risk score from the feed
Returns:
True if the indicator risk score is above risk_score_threshold, False otherwise.
"""
return int(risk_score) >= self.risk_score_threshold
def run_parameters_validations(self):
"""Checks validation of the risk_rule and fusion_file_path parameters
Returns:
None in success, Error otherwise
"""
if self.risk_rule:
if 'connectApi' not in self.services:
return_error("You entered a risk rule but the 'connectApi' service is not chosen. "
"Add the 'connectApi' service to the list or remove the risk rule.")
else:
for risk_rule in self.risk_rule:
if not is_valid_risk_rule(self, risk_rule):
return_error(f"The given risk rule: {risk_rule} does not exist,"
f"please make sure you entered it correctly. \n"
f"To see all available risk rules run the '!rf-get-risk-rules' command.")
if self.fusion_file_path is not None:
if 'fusion' not in self.services:
return_error("You entered a fusion file path but the 'fusion' service is not chosen. "
"Add the 'fusion' service to the list or remove the fusion file path.")
def get_risk_rules(self, indicator_type: Optional[str] = None) -> dict:
if indicator_type is None:
indicator_type = self.indicator_type
return self._http_request(
method='GET',
url_suffix=indicator_type + '/riskrules',
params=self.PARAMS,
headers=self.headers
)
def is_valid_risk_rule(client: Client, risk_rule):
"""Checks if the risk rule is valid by requesting from RF a list of all available rules.
Returns:
bool. Whether the risk rule is valid or not
"""
risk_rule_response: dict = client.get_risk_rules()
risk_rules_list = [single_risk_rule['name'] for single_risk_rule in risk_rule_response['data']['results']]
if risk_rule in risk_rules_list:
return True
else:
return False
def test_module(client: Client, *args) -> Tuple[str, dict, dict]:
"""Builds the iterator to check that the feed is accessible.
Args:
client(Client): Recorded Future Feed client.
args(dict): demisto.args()
Returns:
'ok' if test passed, anything else will fail the test.
"""
client.run_parameters_validations()
for service in client.services:
# if there are risk rules, select the first one for test
risk_rule = client.risk_rule[0] if client.risk_rule else None
client.build_iterator(service, client.indicator_type, risk_rule)
client.get_batches_from_file(limit=1)
return 'ok', {}, {}
def get_indicator_type(indicator_type, item):
"""Returns the indicator type in Demisto
Args:
indicator_type (str): ip, url, domain or hash
item (dict): the indicator row from the csv response
Returns:
str. The indicator type per the indicators defined in Demisto
"""
if indicator_type == 'ip':
return ip_to_indicator_type(item.get('Name'))
elif indicator_type == 'hash':
return FeedIndicatorType.File
elif indicator_type == 'domain':
# If * is in the domain it is of type DomainGlob
if '*' in item.get('Name', ''):
return FeedIndicatorType.DomainGlob
return FeedIndicatorType.Domain
elif indicator_type == 'url':
return FeedIndicatorType.URL
elif indicator_type == 'vulnerability':
return FeedIndicatorType.CVE
def ip_to_indicator_type(ip):
"""Returns the indicator type of the input IP.
:type ip: ``str``
:param ip: IP address to get it's indicator type.
:rtype: ``str``
    :return: Indicator type from FeedIndicatorType, or None if invalid IP address.
"""
ip = str(ip)
if re.match(ipv4cidrRegex, ip):
return FeedIndicatorType.CIDR
elif re.match(ipv4Regex, ip):
return FeedIndicatorType.IP
elif re.match(ipv6cidrRegex, ip):
return FeedIndicatorType.IPv6CIDR
elif re.match(ipv6Regex, ip):
return FeedIndicatorType.IPv6
else:
return None
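# Illustrative example (not part of the original integration): the regexes
# imported from CommonServerPython distinguish plain addresses from CIDR
# ranges, e.g. ip_to_indicator_type('1.2.3.4') -> FeedIndicatorType.IP and
# ip_to_indicator_type('1.2.3.0/24') -> FeedIndicatorType.CIDR, while a
# non-IP string returns None.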
def calculate_recorded_future_criticality_label(risk_from_feed):
risk_from_feed = int(risk_from_feed)
if risk_from_feed >= RF_CRITICALITY_LABELS['Very_Malicious']:
return 'Very Malicious'
elif risk_from_feed >= RF_CRITICALITY_LABELS['Malicious']:
return 'Malicious'
elif risk_from_feed >= RF_CRITICALITY_LABELS['Suspicious']:
return 'Suspicious'
elif risk_from_feed >= RF_CRITICALITY_LABELS['Unusual']:
return 'Unusual'
else:
return 'No current evidence of risk'
def format_risk_string(risk_string):
"""Formats the risk string returned from the feed
Args:
risk_string(str): The risk string from the feed, in 'X/X' format
Returns:
str. The formatted string
"""
splitted_risk_string = risk_string.split('/')
return f'{splitted_risk_string[0]} of {splitted_risk_string[1]} Risk Rules Triggered'
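# Illustrative example (not part of the original integration):
#   format_risk_string('5/53') -> '5 of 53 Risk Rules Triggered'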
def fetch_and_create_indicators(client, risk_rule: Optional[str] = None):
"""Fetches indicators from the Recorded Future feeds,
and from each fetched indicator creates an indicator in XSOAR.
Args:
client(Client): Recorded Future Feed client.
risk_rule(str): A risk rule that limits the fetched indicators
Returns: None.
"""
for indicators in fetch_indicators_command(client, client.indicator_type, risk_rule):
demisto.createIndicators(indicators)
def fetch_indicators_command(client, indicator_type, risk_rule: Optional[str] = None, limit: Optional[int] = None):
"""Fetches indicators from the Recorded Future feeds.
Args:
client(Client): Recorded Future Feed client
indicator_type(str): The indicator type
risk_rule(str): A risk rule that limits the fetched indicators
limit(int): Optional. The number of the indicators to fetch
Returns:
list. List of indicators from the feed
"""
indicators_value_set: Set[str] = set()
for service in client.services:
client.build_iterator(service, indicator_type, risk_rule)
feed_batches = client.get_batches_from_file(limit)
for feed_dicts in feed_batches:
indicators = []
for item in feed_dicts:
raw_json = dict(item)
raw_json['value'] = value = item.get('Name')
if value in indicators_value_set:
continue
indicators_value_set.add(value)
raw_json['type'] = get_indicator_type(indicator_type, item)
score = 0
risk = item.get('Risk')
if isinstance(risk, str) and risk.isdigit():
raw_json['score'] = score = client.calculate_indicator_score(risk)
raw_json['Criticality Label'] = calculate_recorded_future_criticality_label(risk)
# If the indicator risk score is lower than the risk score threshold we shouldn't create it.
if not client.check_indicator_risk_score(risk):
continue
lower_case_evidence_details_keys = []
evidence_details_value = item.get('EvidenceDetails', '{}')
if evidence_details_value:
evidence_details = json.loads(evidence_details_value).get('EvidenceDetails', [])
if evidence_details:
raw_json['EvidenceDetails'] = evidence_details
for rule in evidence_details:
rule = dict((key.lower(), value) for key, value in rule.items())
lower_case_evidence_details_keys.append(rule)
risk_string = item.get('RiskString')
if isinstance(risk_string, str):
raw_json['RiskString'] = format_risk_string(risk_string)
indicator_obj = {
'value': value,
'type': raw_json['type'],
'rawJSON': raw_json,
'fields': {
'recordedfutureevidencedetails': lower_case_evidence_details_keys,
'tags': client.tags,
},
'score': score
}
if client.tlp_color:
indicator_obj['fields']['trafficlightprotocol'] = client.tlp_color
indicators.append(indicator_obj)
yield indicators
def get_indicators_command(client, args) -> Tuple[str, Dict[Any, Any], List[Dict]]:
"""Retrieves indicators from the Recorded Future feed to the war-room.
Args:
client(Client): Recorded Future Feed client.
args(dict): demisto.args()
Returns:
str, dict, list. the markdown table, context JSON and list of indicators
"""
indicator_type = args.get('indicator_type', demisto.params().get('indicator_type'))
limit = int(args.get('limit'))
human_readable: str = ''
entry_results: List[Dict]
indicators_list: List[Dict]
if client.risk_rule:
entry_results = []
for risk_rule in client.risk_rule:
indicators_list = []
for indicators in fetch_indicators_command(client, indicator_type, risk_rule, limit):
indicators_list.extend(indicators)
if limit and len(indicators_list) >= limit:
break
entry_result = camelize(indicators_list)
entry_results.extend(entry_result)
hr = tableToMarkdown(f'Indicators from RecordedFuture Feed for {risk_rule} risk rule:', entry_result,
headers=['Value', 'Type'], removeNull=True)
human_readable += f'\n{hr}'
else: # there are no risk rules
indicators_list = []
risk_rule = None
for indicators in fetch_indicators_command(client, indicator_type, risk_rule, limit):
indicators_list.extend(indicators)
if limit and len(indicators_list) >= limit:
break
entry_results = camelize(indicators_list)
human_readable = tableToMarkdown('Indicators from RecordedFuture Feed:', entry_results,
headers=['Value', 'Type'], removeNull=True)
return human_readable, {}, entry_results
def get_risk_rules_command(client: Client, args) -> Tuple[str, dict, dict]:
"""Retrieves all risk rules available from Recorded Future to the war-room.
Args:
client(Client): Recorded Future Feed client.
args(dict): demisto.args()
Returns:
str, dict, list. the markdown table, context JSON and list of risk rules
"""
indicator_type = args.get('indicator_type', demisto.params().get('indicator_type'))
result = client.get_risk_rules(indicator_type)
entry_result = []
for entry in result['data']['results']:
entry_result.append({
'Name': entry.get('name'),
'Description': entry.get('description'),
'Criticality': entry.get('criticalityLabel')
})
headers = ['Name', 'Description', 'Criticality']
hr = tableToMarkdown(f'Available risk rules for {indicator_type}:', entry_result, headers)
return hr, {'RecordedFutureFeed.RiskRule(val.Name == obj.Name)': entry_result}, result
def main():
params = demisto.params()
client = Client(RF_INDICATOR_TYPES[params.get('indicator_type')], params.get('api_token'), params.get('services'),
params.get('risk_rule'), params.get('fusion_file_path'), params.get('insecure'),
params.get('polling_timeout'), params.get('proxy'), params.get('threshold'),
                    params.get('risk_score_threshold'), argToList(params.get('feedTags')), params.get('tlp_color')
)
command = demisto.command()
demisto.info('Command being called is {}'.format(command))
# Switch case
commands = {
'test-module': test_module,
'rf-feed-get-indicators': get_indicators_command,
'rf-feed-get-risk-rules': get_risk_rules_command
}
try:
if demisto.command() == 'fetch-indicators':
if client.risk_rule:
for risk_rule in client.risk_rule:
fetch_and_create_indicators(client, risk_rule)
else: # there are no risk rules
fetch_and_create_indicators(client)
else:
readable_output, outputs, raw_response = commands[command](client, demisto.args()) # type:ignore
return_outputs(readable_output, outputs, raw_response)
except Exception as e:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}] \n Traceback: {traceback.format_exc()}'
return_error(err_msg)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
|
from __future__ import unicode_literals
import datetime
import re
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.backends.utils import truncate_name
from django.utils import six, timezone
from django.utils.encoding import force_bytes, force_text
from .base import Database
from .utils import InsertIdVar, Oracle_datetime, convert_unicode
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.oracle.compiler"
# Oracle uses NUMBER(11) and NUMBER(19) for integer fields.
integer_field_ranges = {
'SmallIntegerField': (-99999999999, 99999999999),
'IntegerField': (-99999999999, 99999999999),
'BigIntegerField': (-9999999999999999999, 9999999999999999999),
'PositiveSmallIntegerField': (0, 99999999999),
'PositiveIntegerField': (0, 99999999999),
}
# TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
_sequence_reset_sql = """
DECLARE
table_value integer;
seq_value integer;
BEGIN
SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
WHERE sequence_name = '%(sequence)s';
WHILE table_value > seq_value LOOP
SELECT "%(sequence)s".nextval INTO seq_value FROM dual;
END LOOP;
END;
/"""
def autoinc_sql(self, table, column):
# To simulate auto-incrementing primary keys in Oracle, we have to
# create a sequence and a trigger.
sq_name = self._get_sequence_name(table)
tr_name = self._get_trigger_name(table)
tbl_name = self.quote_name(table)
col_name = self.quote_name(column)
sequence_sql = """
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(*) INTO i FROM USER_CATALOG
WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
IF i = 0 THEN
EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"';
END IF;
END;
/""" % locals()
trigger_sql = """
CREATE OR REPLACE TRIGGER "%(tr_name)s"
BEFORE INSERT ON %(tbl_name)s
FOR EACH ROW
WHEN (new.%(col_name)s IS NULL)
BEGIN
SELECT "%(sq_name)s".nextval
INTO :new.%(col_name)s FROM dual;
END;
/""" % locals()
return sequence_sql, trigger_sql
def cache_key_culling_sql(self):
return """
SELECT cache_key
FROM (SELECT cache_key, rank() OVER (ORDER BY cache_key) AS rank FROM %s)
WHERE rank = %%s + 1
"""
def date_extract_sql(self, lookup_type, field_name):
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
return "TO_CHAR(%s, 'D')" % field_name
else:
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_interval_sql(self, timedelta):
"""
Implements the interval functionality for expressions
format for Oracle:
INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6)
"""
minutes, seconds = divmod(timedelta.seconds, 60)
hours, minutes = divmod(minutes, 60)
days = str(timedelta.days)
day_precision = len(days)
fmt = "INTERVAL '%s %02d:%02d:%02d.%06d' DAY(%d) TO SECOND(6)"
return fmt % (days, hours, minutes, seconds, timedelta.microseconds,
day_precision), []
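    # Illustrative example (not from the original file): a timedelta of
    # 3 days, 3 minutes and 20 seconds renders exactly as in the docstring
    # above, i.e.
    #   date_interval_sql(datetime.timedelta(days=3, minutes=3, seconds=20))
    #   -> ("INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6)", [])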
def date_trunc_sql(self, lookup_type, field_name):
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
if lookup_type in ('year', 'month'):
return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
else:
return "TRUNC(%s)" % field_name
# Oracle crashes with "ORA-03113: end-of-file on communication channel"
# if the time zone name is passed in parameter. Use interpolation instead.
# https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ
# This regexp matches all time zone names from the zoneinfo database.
_tzname_re = re.compile(r'^[\w/:+-]+$')
def _convert_field_to_tz(self, field_name, tzname):
if not settings.USE_TZ:
return field_name
if not self._tzname_re.match(tzname):
raise ValueError("Invalid time zone name: %s" % tzname)
# Convert from UTC to local time, returning TIMESTAMP WITH TIME ZONE.
result = "(FROM_TZ(%s, '0:00') AT TIME ZONE '%s')" % (field_name, tzname)
        # Extracting from a TIMESTAMP WITH TIME ZONE ignores the time zone.
# Convert to a DATETIME, which is called DATE by Oracle. There's no
# built-in function to do that; the easiest is to go through a string.
result = "TO_CHAR(%s, 'YYYY-MM-DD HH24:MI:SS')" % result
result = "TO_DATE(%s, 'YYYY-MM-DD HH24:MI:SS')" % result
# Re-convert to a TIMESTAMP because EXTRACT only handles the date part
# on DATE values, even though they actually store the time part.
return "CAST(%s AS TIMESTAMP)" % result
def datetime_cast_date_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
sql = 'TRUNC(%s)' % field_name
return sql, []
def datetime_extract_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
sql = self.date_extract_sql(lookup_type, field_name)
return sql, []
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
if lookup_type in ('year', 'month'):
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
elif lookup_type == 'day':
sql = "TRUNC(%s)" % field_name
elif lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
else:
sql = field_name # Cast to DATE removes sub-second precision.
return sql, []
def get_db_converters(self, expression):
converters = super(DatabaseOperations, self).get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type == 'TextField':
converters.append(self.convert_textfield_value)
elif internal_type == 'BinaryField':
converters.append(self.convert_binaryfield_value)
elif internal_type in ['BooleanField', 'NullBooleanField']:
converters.append(self.convert_booleanfield_value)
elif internal_type == 'DateTimeField':
converters.append(self.convert_datetimefield_value)
elif internal_type == 'DateField':
converters.append(self.convert_datefield_value)
elif internal_type == 'TimeField':
converters.append(self.convert_timefield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
converters.append(self.convert_empty_values)
return converters
def convert_textfield_value(self, value, expression, connection, context):
if isinstance(value, Database.LOB):
value = force_text(value.read())
return value
def convert_binaryfield_value(self, value, expression, connection, context):
if isinstance(value, Database.LOB):
value = force_bytes(value.read())
return value
def convert_booleanfield_value(self, value, expression, connection, context):
if value in (0, 1):
value = bool(value)
return value
# cx_Oracle always returns datetime.datetime objects for
# DATE and TIMESTAMP columns, but Django wants to see a
# python datetime.date, .time, or .datetime.
def convert_datetimefield_value(self, value, expression, connection, context):
if value is not None:
if settings.USE_TZ:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_datefield_value(self, value, expression, connection, context):
if isinstance(value, Database.Timestamp):
value = value.date()
return value
def convert_timefield_value(self, value, expression, connection, context):
if isinstance(value, Database.Timestamp):
value = value.time()
return value
def convert_uuidfield_value(self, value, expression, connection, context):
if value is not None:
value = uuid.UUID(value)
return value
def convert_empty_values(self, value, expression, connection, context):
# Oracle stores empty strings as null. We need to undo this in
# order to adhere to the Django convention of using the empty
# string instead of null, but only if the field accepts the
# empty string.
field = expression.output_field
if value is None and field.empty_strings_allowed:
value = ''
if field.get_internal_type() == 'BinaryField':
value = b''
return value
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def drop_sequence_sql(self, table):
return "DROP SEQUENCE %s;" % self.quote_name(self._get_sequence_name(table))
def fetch_returned_insert_id(self, cursor):
return int(cursor._insert_id_var.getvalue())
def field_cast_sql(self, db_type, internal_type):
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s)"
else:
return "%s"
def last_executed_query(self, cursor, sql, params):
# http://cx-oracle.sourceforge.net/html/cursor.html#Cursor.statement
# The DB API definition does not define this attribute.
statement = cursor.statement
if statement and six.PY2 and not isinstance(statement, unicode): # NOQA: unicode undefined on PY3
statement = statement.decode('utf-8')
        # Unlike Psycopg's `query` and MySQLdb's `_last_executed`, cx_Oracle's
# `statement` doesn't contain the query parameters. refs #20010.
return super(DatabaseOperations, self).last_executed_query(cursor, statement, params)
def last_insert_id(self, cursor, table_name, pk_name):
sq_name = self._get_sequence_name(table_name)
cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_in_list_size(self):
return 1000
def max_name_length(self):
return 30
def prep_for_iexact_query(self, x):
return x
def process_clob(self, value):
if value is None:
return ''
return force_text(value.read())
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
# not quoted, Oracle has case-insensitive behavior for identifiers, but
# always defaults to uppercase.
# We simplify things by making Oracle identifiers always uppercase.
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % truncate_name(name.upper(), self.max_name_length())
# Oracle puts the query text into a (query % args) construct, so % signs
# in names need to be escaped. The '%%' will be collapsed back to '%' at
# that stage so we aren't really making the name longer here.
name = name.replace('%', '%%')
return name.upper()
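    # Illustrative example (not from the original file):
    #   quote_name('django_migrations')  -> '"DJANGO_MIGRATIONS"'
    #   quote_name('"already_quoted"')   -> '"ALREADY_QUOTED"'  (only upper-cased)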
def random_function_sql(self):
return "DBMS_RANDOM.RANDOM"
def regex_lookup(self, lookup_type):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
def return_insert_id(self):
return "RETURNING %s INTO %%s", (InsertIdVar(),)
def savepoint_create_sql(self, sid):
return convert_unicode("SAVEPOINT " + self.quote_name(sid))
def savepoint_rollback_sql(self, sid):
return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid))
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
# 'TRUNCATE z;'... style SQL statements
if tables:
# Oracle does support TRUNCATE, but it seems to get us into
# FK referential trouble, whereas DELETE FROM table works.
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Since we've just deleted all the rows, running our sequence
# ALTER code will reset the sequence to 0.
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
sql = []
for sequence_info in sequences:
sequence_name = self._get_sequence_name(sequence_info['table'])
table_name = self.quote_name(sequence_info['table'])
column_name = self.quote_name(sequence_info['column'] or 'id')
query = self._sequence_reset_sql % {
'sequence': sequence_name,
'table': table_name,
'column': column_name,
}
sql.append(query)
return sql
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
query = self._sequence_reset_sql
for model in model_list:
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
table_name = self.quote_name(model._meta.db_table)
sequence_name = self._get_sequence_name(model._meta.db_table)
column_name = self.quote_name(f.column)
output.append(query % {'sequence': sequence_name,
'table': table_name,
'column': column_name})
# Only one AutoField is allowed per model, so don't
# continue to loop
break
for f in model._meta.many_to_many:
if not f.remote_field.through:
table_name = self.quote_name(f.m2m_db_table())
sequence_name = self._get_sequence_name(f.m2m_db_table())
column_name = self.quote_name('id')
output.append(query % {'sequence': sequence_name,
'table': table_name,
'column': column_name})
return output
def start_transaction_sql(self):
return ''
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def adapt_datefield_value(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
The default implementation transforms the date to text, but that is not
necessary for Oracle.
"""
return value
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
If naive datetime is passed assumes that is in UTC. Normally Django
models.DateTimeField makes sure that if USE_TZ is True passed datetime
is timezone aware.
"""
if value is None:
return None
# cx_Oracle doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.")
return Oracle_datetime.from_datetime(value)
def adapt_timefield_value(self, value):
if value is None:
return None
if isinstance(value, six.string_types):
return datetime.datetime.strptime(value, '%H:%M:%S')
# Oracle doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("Oracle backend does not support timezone-aware times.")
return Oracle_datetime(1900, 1, 1, value.hour, value.minute,
value.second, value.microsecond)
def combine_expression(self, connector, sub_expressions):
"Oracle requires special cases for %% and & operators in query expressions"
if connector == '%%':
return 'MOD(%s)' % ','.join(sub_expressions)
elif connector == '&':
return 'BITAND(%s)' % ','.join(sub_expressions)
elif connector == '|':
raise NotImplementedError("Bit-wise or is not supported in Oracle.")
elif connector == '^':
return 'POWER(%s)' % ','.join(sub_expressions)
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
def _get_sequence_name(self, table):
name_length = self.max_name_length() - 3
return '%s_SQ' % truncate_name(table, name_length).upper()
def _get_trigger_name(self, table):
name_length = self.max_name_length() - 3
return '%s_TR' % truncate_name(table, name_length).upper()
def bulk_insert_sql(self, fields, num_values):
items_sql = "SELECT %s FROM DUAL" % ", ".join(["%s"] * len(fields))
return " UNION ALL ".join([items_sql] * num_values)
|
|
"""
Mean log loss from 5-fold CV: 0.488150595136
"""
import copy
import itertools
import numpy as np
import lasagne
import math
import os
import theano
import theano.tensor as T
import time
from lasagne.layers import DenseLayer, DropoutLayer, InputLayer, get_all_params
from lasagne.nonlinearities import rectify, softmax
from lasagne.objectives import categorical_crossentropy, Objective
from lasagne.updates import adagrad
from sklearn.base import BaseEstimator
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.utils import check_random_state
from otto_utils import consts, utils
MODEL_NAME = 'model_17_nn_adagrad_log'
MODE = 'submission' # cv|submission|holdout|tune
class NeuralNetwork(BaseEstimator):
def __init__(self, n_hidden=20, max_epochs=150, batch_size=200,
lr=0.01, rho=0.9, dropout=0.5, valid_ratio=0.0,
use_valid=False, verbose=0, random_state=None):
self.n_hidden = n_hidden
self.max_epochs = max_epochs
self.batch_size = batch_size
self.lr = lr
self.rho = rho
self.dropout = dropout
self.valid_ratio = valid_ratio
self.use_valid = use_valid
self.verbose = verbose
self.random_state = random_state
# State
self.score_ = None
self.classes_ = None
self.n_classes_ = None
self.model = None
def fit(self, data, targets, sample_weight=None):
self.classes_, indices = np.unique(targets, return_inverse=True)
self.n_classes_ = self.classes_.shape[0]
random_state = check_random_state(self.random_state)
# Shuffle data and eventually split on train and validation sets
if self.valid_ratio > 0:
strat_shuffled_split = StratifiedShuffleSplit(targets, test_size=self.valid_ratio,
n_iter=1, random_state=self.random_state)
train_index, valid_index = [s for s in strat_shuffled_split][0]
X_train, y_train = data[train_index], targets[train_index]
X_valid, y_valid = data[valid_index], targets[valid_index]
else:
X_train, y_train = data, targets
X_valid, y_valid = np.array([]), np.array([])
if self.verbose > 5:
print 'X_train: %s, y_train: %s' % (X_train.shape, y_train.shape)
if self.use_valid:
print 'X_valid: %s, y_valid: %s' % (X_valid.shape, y_valid.shape)
# Prepare theano variables
dataset = dict(
X_train=theano.shared(lasagne.utils.floatX(X_train)),
y_train=T.cast(theano.shared(y_train), 'int32'),
X_valid=theano.shared(lasagne.utils.floatX(X_valid)),
y_valid=T.cast(theano.shared(y_valid), 'int32'),
num_examples_train=X_train.shape[0],
num_examples_valid=X_valid.shape[0],
input_dim=X_train.shape[1],
output_dim=self.n_classes_,
)
if self.verbose > 0:
print "Building model and compiling functions..."
output_layer = self.build_model(dataset['input_dim'])
iter_funcs = self.create_iter_functions(dataset, output_layer)
if self.verbose > 0:
print "Starting training..."
now = time.time()
results = []
try:
for epoch in self.train(iter_funcs, dataset, output_layer):
if self.verbose > 1:
print "Epoch {} of {} took {:.3f}s".format(
epoch['number'], self.max_epochs, time.time() - now)
now = time.time()
results.append([epoch['number'], epoch['train_loss'], epoch['valid_loss']])
if self.verbose > 1:
print " training loss:\t\t{:.6f}".format(epoch['train_loss'])
print " validation loss:\t\t{:.6f}".format(epoch['valid_loss'])
print " validation accuracy:\t\t{:.2f} %%".format(
epoch['valid_accuracy'] * 100)
if epoch['number'] >= self.max_epochs:
break
if self.verbose > 0:
print 'Minimum validation error: %f (epoch %d)' % \
(epoch['best_val_error'], epoch['best_val_iter'])
except KeyboardInterrupt:
pass
return self
def predict(self, data):
preds, _ = self.make_predictions(data)
return preds
def predict_proba(self, data):
_, proba = self.make_predictions(data)
return proba
def score(self):
return self.score_
# Private methods
def build_model(self, input_dim):
l_in = InputLayer(shape=(self.batch_size, input_dim))
l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden / 2, nonlinearity=rectify)
l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden / 4, nonlinearity=rectify)
l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
return l_out
def create_iter_functions(self, dataset, output_layer, X_tensor_type=T.matrix):
batch_index = T.iscalar('batch_index')
X_batch = X_tensor_type('x')
y_batch = T.ivector('y')
batch_slice = slice(batch_index * self.batch_size, (batch_index + 1) * self.batch_size)
objective = Objective(output_layer, loss_function=categorical_crossentropy)
loss_train = objective.get_loss(X_batch, target=y_batch)
loss_eval = objective.get_loss(X_batch, target=y_batch, deterministic=True)
pred = T.argmax(output_layer.get_output(X_batch, deterministic=True), axis=1)
proba = output_layer.get_output(X_batch, deterministic=True)
accuracy = T.mean(T.eq(pred, y_batch), dtype=theano.config.floatX)
all_params = get_all_params(output_layer)
updates = adagrad(loss_train, all_params, self.lr, self.rho)
iter_train = theano.function(
[batch_index], loss_train,
updates=updates,
givens={
X_batch: dataset['X_train'][batch_slice],
y_batch: dataset['y_train'][batch_slice],
},
on_unused_input='ignore',
)
iter_valid = None
if self.use_valid:
iter_valid = theano.function(
[batch_index], [loss_eval, accuracy, proba],
givens={
X_batch: dataset['X_valid'][batch_slice],
y_batch: dataset['y_valid'][batch_slice],
},
)
return dict(train=iter_train, valid=iter_valid)
def create_test_function(self, dataset, output_layer, X_tensor_type=T.matrix):
batch_index = T.iscalar('batch_index')
X_batch = X_tensor_type('x')
batch_slice = slice(batch_index * self.batch_size, (batch_index + 1) * self.batch_size)
pred = T.argmax(output_layer.get_output(X_batch, deterministic=True), axis=1)
proba = output_layer.get_output(X_batch, deterministic=True)
iter_test = theano.function(
[batch_index], [pred, proba],
givens={
X_batch: dataset['X_test'][batch_slice],
},
)
return dict(test=iter_test)
def train(self, iter_funcs, dataset, output_layer):
num_batches_train = dataset['num_examples_train'] // self.batch_size
num_batches_valid = int(math.ceil(dataset['num_examples_valid'] / float(self.batch_size)))
best_val_err = 100
best_val_iter = -1
for epoch in itertools.count(1):
batch_train_losses = []
for b in range(num_batches_train):
batch_train_loss = iter_funcs['train'](b)
batch_train_losses.append(batch_train_loss)
avg_train_loss = np.mean(batch_train_losses)
batch_valid_losses = []
batch_valid_accuracies = []
batch_valid_probas = []
if self.use_valid:
for b in range(num_batches_valid):
batch_valid_loss, batch_valid_accuracy, batch_valid_proba = iter_funcs['valid'](b)
batch_valid_losses.append(batch_valid_loss)
batch_valid_accuracies.append(batch_valid_accuracy)
batch_valid_probas.append(batch_valid_proba)
avg_valid_loss = np.mean(batch_valid_losses)
avg_valid_accuracy = np.mean(batch_valid_accuracies)
if (best_val_err > avg_valid_loss and self.use_valid) or\
(epoch == self.max_epochs and not self.use_valid):
best_val_err = avg_valid_loss
best_val_iter = epoch
# Save model
self.score_ = best_val_err
self.model = copy.deepcopy(output_layer)
yield {
'number': epoch,
'train_loss': avg_train_loss,
'valid_loss': avg_valid_loss,
'valid_accuracy': avg_valid_accuracy,
'best_val_error': best_val_err,
'best_val_iter': best_val_iter,
}
def make_predictions(self, data):
dataset = dict(
X_test=theano.shared(lasagne.utils.floatX(data)),
num_examples_test=data.shape[0],
input_dim=data.shape[1],
output_dim=self.n_classes_,
)
iter_funcs = self.create_test_function(dataset, self.model)
num_batches_test = int(math.ceil(dataset['num_examples_test'] / float(self.batch_size)))
test_preds, test_probas = np.array([]), None
for b in range(num_batches_test):
batch_test_pred, batch_test_proba = iter_funcs['test'](b)
test_preds = np.append(test_preds, batch_test_pred)
test_probas = np.append(test_probas, batch_test_proba, axis=0) if test_probas is not None else batch_test_proba
return test_preds, test_probas
if __name__ == '__main__':
train, labels, test, _, _ = utils.load_data()
train = np.log(train + 1.)
test = np.log(test + 1.)
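# The positional hyper-parameters below map onto NeuralNetwork.__init__ as:
# n_hidden=1024, max_epochs=110, batch_size=220, lr~=0.0026, rho~=1.1e-15,
# dropout~=0.264, valid_ratio=0.02, use_valid=True, verbose=10.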
clf = NeuralNetwork(1024, 110, 220, 0.0026294067059507813, 1.1141900388281156e-15, 0.26355646219340834,
.02, True, 10, random_state=23)
if MODE == 'cv':
scores, predictions = utils.make_blender_cv(clf, train, labels, calibrate=False)
print 'CV:', scores, 'Mean log loss:', np.mean(scores)
utils.write_blender_data(consts.BLEND_PATH, MODEL_NAME + '.csv', predictions)
elif MODE == 'submission':
clf.fit(train, labels)
predictions = clf.predict_proba(test)
utils.save_submission(consts.DATA_SAMPLE_SUBMISSION_PATH,
os.path.join(consts.ENSEMBLE_PATH, MODEL_NAME + '.csv'),
predictions)
elif MODE == 'holdout':
score = utils.hold_out_evaluation(clf, train, labels, calibrate=False)
print 'Log loss:', score
else:
print 'Unknown mode'
|
|
#!/usr/bin/env python
import re
# The number of minutes in an hour.
HOUR = 60
# Various patterns for matching time/date information.
REGEX_CLASSTIME = "([a-zA-Z]+)[\s+](\d*\d:\d\d[ap])-(\d*\d:\d\d[ap])"
REGEX_TIME = "(\d*\d):(\d\d)([ap])"
REGEX_DAY = "([A-Z][a-z]*)"
# The number of hours on a 12-hour clock.
MAX_HOURS = 12
# Indices for matched values from the above patterns.
INDEX_DAYS = 1
INDEX_START = 2
INDEX_FINISH = 3
INDEX_HOURS = 1
INDEX_MINS = 2
INDEX_PERIOD = 3
class ClassTime(object):
"""
The ClassTime class is used to hold time and date ranges for classes found
within the Schedule of Classes. This class also provides methods for easy
comparison of various class times.
:ivar days: the days this class occurs on (M, Tu, W, Th, F)
:ivar start: the start time of this class in the hh:mma/p format
:ivar finish: the finish time of this class in the hh:mma/p format
:ivar startTime: the numeric representation of the start time (in minutes)
:ivar finishTime: the numeric representation of the finish time (in minutes)
"""
def __init__(self):
"""
Constructor for the ClassTime object which sets the default values for
the instance variables.
:param self: the ClassTime object
"""
self.days = "MTuWThF"
self.start = "00:00a"
self.finish = "11:59p"
self.startTime = 0
self.finishTime = (23 * HOUR) + 59  # 11:59p expressed in minutes since midnight
@staticmethod
def toMinutes(hour, minutes, period):
"""
Converts hours and minutes into the number of minutes within the day.
:param hour: the hour number
:param minutes: the minute number
:param period: "a" for an AM time or "p" for a PM time
:returns: the number of minutes representing the hour:minutes time if
successfully converted, -1 otherwise
"""
period = period.lower()
# Check that the period is valid.
if period != "a" and period != "p":
return -1
# Return appropriate values for noon and midnight.
if hour == MAX_HOURS:
return HOUR * (MAX_HOURS if period == "p" else 0) + minutes
# If PM, then add 12 hours to convert to a 24 hour time.
if period == "p":
hour += MAX_HOURS
# Convert hours to minutes.
hour *= HOUR
return hour + minutes
@classmethod
def fromString(thisClass, date):
"""
Creates a ClassTime object given a string that is in the format of
weekday followed by a start time, a dash, and a finish time. Note that
None will be returned if the given date string is not in a valid format.
:param thisClass: the ClassTime class
:param date: the string representation of the ClassTime
:returns: a corresponding ClassTime object if valid, None otherwise
"""
match = re.fullmatch(REGEX_CLASSTIME, date.strip())
if not match:
return None
time = thisClass()
time.days = match.group(INDEX_DAYS)
time.setTimes(match.group(INDEX_START), match.group(INDEX_FINISH))
return time
def setTimes(self, newStart, newFinish):
"""
Sets the time range for the class time. The times are formatted as
hh:mma or hh:mmp where hours (hh) range from 00-12 and minutes (mm)
range from 00-59.
:param self: the ClassTime object
:param newStart: the date/time string for when the class starts
:param newFinish: the date/time string for when the class finishes
"""
# Delegate the parsing and setting to setStart and setFinish.
self.setStart(newStart)
self.setFinish(newFinish)
def setStart(self, newStart):
"""
Sets the start time of the class.
:param self: the ClassTime object
:param newStart: the start time date/time string
:returns: True if the start time was set, False otherwise
"""
# Make sure the start time is valid.
newStart = newStart.strip().lower()
match = re.match(REGEX_TIME, newStart)
if not match:
return False
# Get the hours and minutes as numbers.
hours = int(match.group(INDEX_HOURS))
minutes = int(match.group(INDEX_MINS)) % HOUR
period = match.group(INDEX_PERIOD)
# Set the numeric start time.
self.startTime = ClassTime.toMinutes(hours, minutes, period)
self.start = newStart
return (self.startTime > -1)
def setFinish(self, newFinish):
"""
Sets the finish time of the class.
:param self: the ClassTime object
:param newFinish: the finish time date/time string
:returns: True if the finish time was set, False otherwise
"""
# Make sure the finish time is valid.
newFinish = newFinish.strip().lower()
match = re.match(REGEX_TIME, newFinish)
if not match:
return False
# Get the hours and minutes as numbers.
hours = int(match.group(INDEX_HOURS))
minutes = int(match.group(INDEX_MINS))
period = match.group(INDEX_PERIOD).lower()
# Set the numeric finish time.
self.finishTime = ClassTime.toMinutes(hours, minutes, period)
self.finish = newFinish
return (self.finishTime > -1)
def isTimeBefore(self, other):
"""
Returns whether or not the time for this ClassTime is before the time of
the given ClassTime object. This returns False if the other object is
not a ClassTime object.
:param self: the ClassTime object
:param other: the other ClassTime object to compare with
:returns: whether or not this ClassTime occurs before
"""
if type(other) is ClassTime:
return (self.startTime < other.startTime and
self.finishTime < other.startTime and
self.startTime < other.finishTime and
self.finishTime < other.finishTime)
return False
def isTimeAfter(self, other):
"""
Returns whether or not the time for this ClassTime is after the time of
the given ClassTime object. This returns False if the other object is
not a ClassTime object.
:param self: the ClassTime object
:param other: the other ClassTime object to compare with
:returns: whether or not this ClassTime occurs after
"""
if type(other) is ClassTime:
return (self.startTime > other.finishTime and
self.finishTime > other.finishTime and
self.startTime > other.startTime and
self.finishTime > other.startTime)
return False
def conflictsWith(self, other):
"""
Returns whether or not the day or time for this ClassTime overlaps with
a given ClassTime object.
:param self: the ClassTime object
:param other: the other ClassTime object to compare with
:returns: whether or not there is any time overlap (if any days match)
"""
# Check day conflicts before time conflicts.
if not self.isOnDay(other.days):
return False
return (self.startTime <= other.finishTime and
other.startTime <= self.finishTime)
def isOnDay(self, day):
"""
Returns whether or not the days of the ClassTime object contains the
day.
:param self: the ClassTime object
:param day: a day of the week
:returns: whether or not the ClassTime occurs on the given day
"""
for day in re.findall(REGEX_DAY, day):
if self.days.find(day) > -1:
return True
return False
def toString(self):
"""
Returns the string representation of the ClassTime object by combining
the days and the dash separated start and finish times. This
representation is the same as the input for the ClassTime.fromString
function.
:param self: the ClassTime object
:returns: the string representation
"""
return self.days + " " + self.start + "-" + self.finish
def __str__(self):
return self.toString()
def __repr__(self):
return self.toString()
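# Minimal usage sketch (not part of the original module); the sample strings
# are hypothetical but follow the "Days hh:mma-hh:mmp" format expected by
# ClassTime.fromString.
if __name__ == "__main__":
    algebra = ClassTime.fromString("MWF 9:00a-9:50a")
    physics = ClassTime.fromString("MWF 9:30a-10:45a")
    if algebra and physics:
        print(algebra.toString())              # MWF 9:00a-9:50a
        print(algebra.conflictsWith(physics))  # True: both meet on MWF and the times overlap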
|
|
"""Master server for lab-nanny
Collects data from the different nodes and makes it available to the
clients using websockets.
The functionality of the master server is to join the data from the
different nodes and make it available in two forms:
-- clients using websockets
-- store it in a database
To do this, the master uses the MasterServer.tick method, which
-- submits updates to the clients
-- sends requests for data to the nodes
(in this order)
By centralizing the communications (that is, nodes send updates to
MasterServer, which then sends them to the clients), we reduce the
number of connections required from (#clients * #nodes) to
(#clients + #nodes)
The master server handles the connections both with the inside (nodes)
and the outside (clients) using two classes: NodeHandler and
ClientHandler, which are classes derived from the
tornado.websocket.WebSocketHandler class.
The server uses an auxiliary communications handler class (CommsHandler)
which keeps a list of nodes and clients, and the last data from the nodes.
"""
#TODO: have a way to "disconnect" the nodes when they do disconnect.
import tornado.httpserver
import tornado.websocket
import tornado.ioloop as ioloop
import tornado.web
import tornado
from tornado.websocket import WebSocketClosedError
import signal
import argparse
import time
from database.DBHandler import DBHandler as DBHandler
from servers.header import MST_HEADER
import uuid
import socket
import json
from json2html import json2html
SOCKETPORT = 8001
SLAVE_SOCKETNAME = r'/nodes_ws'
CLIENT_SOCKETNAME = r'/client_ws'
STATUS_ADDR = r'/status'
DEFAULTMESSAGE = 'X,50,0'
DEFAULTDBNAME = 'example.db'
PERIODICITY = 100
DB_PERIODICITY = 30000 #Save data to db every...
TFORMAT = '%y/%m/%d %H:%M:%S'
METAKEYWORD = 'meta'
CONNCLOSEDSTR = 'Connection closed'
condition_trap = {'name':'Trap unlock',
'obs_lab':'lab7',
'obs_ch':'ch4',
'obs_range':(1,1.5),
'target_lab':'lab7',
'target_ch':13,
'target_val':1,
'message':'Trap unlocked'}
condition_temp = {'name':'Temperature changes',
'obs_lab':'lab7',
'obs_ch':'ch2',
'obs_range':(19,23),
'target_lab':'lab7',
'target_ch':13,
'target_val':1,
'message':'Temperature outside of bounds'}
class MasterServer(object):
""" Class that runs the Master Server for lab-nanny.
It keeps a NodeHandler and a ClientHandler object to communicate with
the slave nodes and the clients, which in turn use an instance of the
CommsHandler class to do internal communications.
Periodically (every fraction of a second), the Master server polls the
nodes for data, and sends the results to the clients.
Additionally, with a different periodicity (~10s) the Master server
saves a copy of the data to a database.
"""
def __init__(self, slave_socketname = SLAVE_SOCKETNAME,
socketport=SOCKETPORT,
client_socketname=CLIENT_SOCKETNAME,
periodicity=PERIODICITY,
db_periodicity = DB_PERIODICITY,
status_addr = STATUS_ADDR,
verbose = True):
#Init parameters
self.socketport = socketport
self.slave_socketname = slave_socketname
self.client_socketname = client_socketname
self.status_addr = status_addr
self.callback_periodicity = periodicity
self.db_callback_periodicity = db_periodicity
self.verbose = verbose
self.callback = []
self.dbcallback = []
self.HTTPserver = []
self._conditions = [] # list of dictionaries
# Create instance of the CommsHandler to mediate communications between
# node and client handlers
self.comms_handler = CommsHandler()
# Add callback db_metadata_append upon change to the metadata in nodes
self.comms_handler.bind_to_metadata_change(self.db_metadata_append)
# Also, start communication with the database
self.db_handler = DBHandler(db_name=DEFAULTDBNAME)
# Init program
self._conditions.append(condition_trap)
self._conditions.append(condition_temp)
self.run()
def run(self):
""" Main function of the MasterServer class.
It creates a tornado web application with two websocket handlers and
one web RequestHandler: the two websockets are one for the nodes,
and one for the clients, listening on the same port
(self.socketport), but using different names (the defaults are
'/nodes_ws' and '/client_ws' respectively); the web request handler
shows information about the status of the master server using the same
socket and a different address ('/status').
Afterwards, this method initialises two periodic callbacks:
- One that manages the node/client communications, typically with a
sub-second periodicity
- Another one to store long-term traces of the data to a database
(every ~10s)
"""
self.application = tornado.web.Application([(self.slave_socketname,
NodeHandler,
{'comms_handler':self.comms_handler,
'verbose':self.verbose}),
(self.client_socketname,
ClientHandler,
{'comms_handler':self.comms_handler,
'verbose':self.verbose}),
(self.status_addr,
StatusHandler,
{'comms_handler':self.comms_handler})])
try:
self.HTTPserver = self.application.listen(self.socketport)
fqdn = socket.getfqdn()
alias = socket.gethostbyname(socket.gethostname())
print('Setting up connections:\n-----------------------------------')
print('Status page: @ {}:{}{}, ({})'.format(fqdn,
self.socketport,
self.status_addr,
alias))
print('Websockets opened:')
print('-Client WS EST @ {}:{}{}, ({})'.format(fqdn,
self.socketport,
self.client_socketname,
alias))
print('-Nodes WS EST @ {}:{}{}, ({})'.format(fqdn,
self.socketport,
self.slave_socketname,
alias))
print('-----------------------------------')
except socket.error as error:
#Catch the error if the connections are already present:
if error.errno == 10048:
pass
else:
raise
self.callback= ioloop.PeriodicCallback(self.tick,
self.callback_periodicity)
self.callback.start()
print('\nStarting ioloop')
# To save to DB:
self.dbcallback= ioloop.PeriodicCallback(self.db_tick,
self.db_callback_periodicity)
self.dbcallback.start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
ioloop.IOLoop.instance().stop()
print('(MST {}) Exiting gracefully... '.format(time.strftime(TFORMAT)))
finally:
self.on_close()
def tick(self):
""" Function called periodically to manage node/client communication
- First, the function sends the last data (obtained from the nodes)
to the clients
- Then, it requests more data to the nodes.
By first sending the data, and then asking for more, we make sure the
nodes have time to send the data back to the MasterServer before
sending that data to the clients; this comes at the expense of sending
"old data" (with a repetition period), which has no impact unless the
application is time-critical.
"""
# TODO: should we only send data to the matching client connection, instead of relying on the nodes to check whether the message is for them?
try:
# If the NodeHandler decides to write messages to the clients upon
# reception of each message, comment this line
ClientHandler.broadcast(self.comms_handler.last_data)
# Write a command with no side consequences. The 'X' ensures that
# all nodes reply
msg = DEFAULTMESSAGE
broadcast(self.comms_handler.nodes,msg)
self.check_conditions()
except WebSocketClosedError:
print('Websocket closed')
#In case we want to exit, we send a KeyboardInterrupt
except KeyboardInterrupt:
raise
def db_tick(self):
""" Function called periodically to save data to the database
This function generates an entry in the database for each node ID
held in the CommsHandler.last_data instance.
The details of writing to the database are found in the
database.DBHandler module.
"""
# Write values to db (called every N seconds, probably 30-60)
# if self.verbose:
## CHECK HERE IF THE METADATA HAS BEEN ADDED
num_connected_devices = len(self.comms_handler.last_data)
if num_connected_devices>0:
print('(MST {}) Adding {} entries to DB '\
.format(time.strftime(TFORMAT),num_connected_devices))
for id in self.comms_handler.last_data:
datadict = self.comms_handler.last_data[id]
# Add data to observations table
# Check if table with name "id" exists
# Add data to specific table for ID
self.db_handler.add_database_entry(datadict)
self.db_handler.commit()
def db_metadata_append(self,idx):
""" Function called when a new node transmits its metadata
This function generates an entry in the database for each new node
The entry in the database is composed of a timestamp, a username, and the JSON string.
"""
print('(MST {}) Updating metadata'.format(time.strftime(TFORMAT)))
# Metadata can be updated upon (re)connection, or when the connection
# is closing. When (re)connecting, the metadata is a dictionary
# which contains, amongst others, a 'user' key. This is not the
# case upon closing the connection, thus we need the user from
# somewhere else.
if isinstance(self.comms_handler.metadata[idx],dict):
user = self.comms_handler.metadata[idx]['user']
else:
user = self.comms_handler.last_data[idx]['user']
self.db_handler.register_new_metadata(user,self.comms_handler.metadata[idx])
def on_close(self):
self.db_handler.close()
def check_conditions(self):
for condition in self._conditions:
# if obs_lab/obs_ch outside obs_range:
# send target_lab/target_ch the target_val
lab = condition['obs_lab']
observ_channel = condition['obs_ch']
range_boundary = condition['obs_range']
target_lab = condition['target_lab']
target_channel = condition['target_ch']
target_value = condition['target_val']
node_id = self.comms_handler.get_nodeID_by_user(lab)
if len(node_id)>0:
current_observed_val = self.comms_handler.last_data[node_id[0]][observ_channel]
if not range_boundary[0]<= current_observed_val <= range_boundary[1]:
# Add here entry to database when condition is not met
target_id = self.comms_handler.get_nodeID_by_user(target_lab)
msg = target_lab+','+str(target_channel)+','+str(target_value)
broadcast(self.comms_handler.nodes,msg)
print(condition['message'])
print('{} <= {} <= {}'.format(range_boundary[0],current_observed_val,range_boundary[1]))
else:
pass
class NodeHandler(tornado.websocket.WebSocketHandler):
""" Class that handles the communication via websockets with the slave nodes.
"""
node_dict = {}
def initialize(self, comms_handler,verbose=True):
"""Initialisation of an object of the NodeHandler class.
We provide a communications handler object which keeps a list of the nodes
and clients, and a list of the last messages from the nodes.
:param comms_handler:
:type comms_handler: CommsHandler
:param verbose: True for verbose output
:return:
"""
self.__comms_handler = comms_handler
self.verbose = verbose
def open(self):
""" Callback executed upon opening a new slave node connection.
This function adds the new connection to the class "nodes" list and
provides a unique id to the connection using the uuid.uuid4().hex
function.
:return:
"""
# We could do here the configuration of the node, like a dictionary with the channels exposed
#self.write_message('Init')
self.id = uuid.uuid4().hex
NodeHandler.node_dict[self.id] = self
ip = self.request.remote_ip
print('(NDH {}) New NODE {} ({}). (out of {}) ' \
.format(time.strftime(TFORMAT),
socket.getfqdn(ip),
ip,
len(NodeHandler.node_dict)))
print('(NDH) UUID: {}'.format(self.id))
def on_message(self, message):
""" Callback executed upon message reception from the master server.
The message is a JSON string, which is converted to a dictionary.
:param message:
:return:
"""
## TODO: maybe we can code here a case in which we configure
## For example, we can write a "configure" key in the dictionary
message_dict = json.loads(message)
if METAKEYWORD not in message_dict:
if self.verbose:
if not message_dict['error']:
print('(NDH) time: {0:.3f}, user: {1}, error: {2}, ch0: {3}'\
.format(message_dict["x"],
message_dict["user"],
message_dict["error"],
message_dict["ch0"]))
else:
print('(NDH) time: {0:.3f}, user: {1}, error: {2}'\
.format(message_dict["x"],
message_dict["user"],
message_dict["error"]))
#There are two ways in which we can pass the data to the clients:
# - Store the data in the self.__comms_handler.last_data dictionary
# - Send the data to the clients everytime a message is received
# The first one helps with synchronizing sending the data to the clients.
# The second one is more immediate, but it might impact the performance of the network,
# since we communicate with each of the clients on every message received.
# To use the first method, keep the line below and make sure that the "tick()"
# function in the master server calls ClientHandler.broadcast(self.comms_handler.last_data):
self.__comms_handler.last_data[self.id] = message_dict
else:
self.user = message_dict['user']
self.__comms_handler.add_metadata(self.id,message_dict)
# To use the second method, uncomment this other line
#for client in self.__comms_handler.clients:
# client.write_message(message)
def on_close(self):
# Add log to metadata table in database
self.__comms_handler.add_metadata(self.id,CONNCLOSEDSTR)
# Remove nodehandler from the comms_handler instance and the class'
# node_list.
self.__comms_handler.remove_key(self.id)
NodeHandler.node_dict.pop(self.id, None)
ip = self.request.remote_ip
user = self.user
print('(NDH {}) Connection with {} ({}) closed '\
.format(time.strftime(TFORMAT),
ip, user))
def check_origin(self, origin):
#TODO: change this to actually check the origin
return True
@classmethod
def broadcast_to_nodes(cls,msg=DEFAULTMESSAGE):
""" Function to send a message to all the nodes held in the self.__comms_handler nodes list.
:param msg: message to broadcast
:return:
"""
#In case we want to exit, we send a KeyboardInterrupt
try:
broadcast(cls.node_dict, msg)
except KeyboardInterrupt:
raise
class ClientHandler(tornado.websocket.WebSocketHandler):
""" Class that handles the communication via websockets with the
slave nodes.
"""
client_list = []
def initialize(self, comms_handler,verbose=False):
""" Initialisation of an object of the ClientHandler class.
We provide a communications handler object which keeps a list of the
nodes and clients, and a list of the last messages from the nodes.
:param comms_handler:
:type comms_handler: CommsHandler
:param verbose: True for verbose output
:return:
"""
self.__comms_handler = comms_handler
self.verbose = verbose
def open(self):
""" Callback executed upon opening a new client connection.
This function adds the new connection to the class "client" list.
:return:
"""
# We could do here the configuration of the node, like a dictionary with the channels exposed
ClientHandler.client_list.append(self)
print('(CLH {}) New connection from {}. Total of clients: {}'\
.format(time.strftime(TFORMAT),
self.request.remote_ip,
len(ClientHandler.client_list)))
def on_message(self, message):
""" Callback executed upon message reception from the client.
The message is a JSON string, which is then broadcasted to all the
nodes sequentially.
:param message:
:return:
"""
if self.verbose:
print('(CLH {}) Message received from client: {}'\
.format(time.strftime(TFORMAT),
message))
for node in self.__comms_handler.nodes:
self.__comms_handler.nodes[node].write_message(message)
def on_close(self):
print('(CLH {}) Connection closed'\
.format(time.strftime(TFORMAT)))
ClientHandler.client_list.remove(self)
print(ClientHandler.client_list)
def check_origin(self, origin):
#TODO: should actually check the origin
return True
@classmethod
def broadcast(cls, msg):
for client in cls.client_list:
client.write_message(msg)
class StatusHandler(tornado.web.RequestHandler):
def initialize(self, comms_handler):
"""Initialisation of an object of the NodeHandler class.
We provide a communications handler object which keeps a list of the nodes
and clients, and a list of the last messages from the nodes.
:param comms_handler:
:type comms_handler: CommsHandler
:param verbose: True for verbose output
:return:
"""
self.__comms_handler = comms_handler
def get(self):
# Time
fetch_time = time.strftime(TFORMAT)
self.write('<meta http-equiv="refresh" content="10">')
self.write(' <style> .wrapper {display:flex}</style>')
self.write('<p> TIME: {}</p>'.format(fetch_time))
# Nodes
num_nodes = len(self.__comms_handler.nodes)
self.write("<h3>Number of connected nodes: {}</h3><ul>".format(num_nodes))
for node_key in self.__comms_handler.nodes:
node = self.__comms_handler.nodes[node_key]
if 'user' in node.__dict__:
user = node.user
else:
user ='no ID'
self.write('<li>{} ({})</li>'.format(socket.getfqdn(node.request.remote_ip),
user))
# Clients
num_clients = len(self.__comms_handler.clients)
self.write("</ul><h3>Number of connected clients: {}</h3><ul style>".format(num_clients))
for client in self.__comms_handler.clients:
self.write('<li>{}</li>'.format(socket.getfqdn(client.request.remote_ip)))
self.write("</ul><h3>Last data: </h3>")
self.write("<div class=wrapper>")
for node_id in self.__comms_handler.last_data:
last_data = self.__comms_handler.last_data[node_id]
self.write('<p>{} {}</p>'.format(last_data['user'],
json2html.convert(json=last_data)))
self.write("</div>")
class CommsHandler(object):
""" Class that keeps references of the nodes and the clients for
communication purposes
It also keeps a dictionary with a reference to the last data sent
(self.last_data) with the keys being the ids of the NodeHandler
instances, and another one (self.metadata) which stores the metadata
(that is, the "contents" of each channel in the self.last_data
dictionaries).
Whenever the connection between the master and the node is (re)
established, the metadata corresponding to that id needs to be
recorded by an external class. To do this, we use an observer
pattern in which the external class observes changes to a property
(in this case, self.last_metadata_id) from the outside using the
self.bind_to function and perform some callback whenever this
value changes.
"""
def __init__(self):
self.nodes = NodeHandler.node_dict #list
self.clients = ClientHandler.client_list #list
#Data dictionary
self.last_data = {} #dictionary
#Metadata dictionary
self.metadata = {} #dictionary
self._last_metadata_id = []
self._metadata_observers= []
def get_last_metadata_id(self):
return self._last_metadata_id
def set_last_metadata_id(self, value):
#print('setting new metadata id')
self._last_metadata_id = value
for callback in self._metadata_observers:
callback(value)
last_metadata_id = property(get_last_metadata_id,set_last_metadata_id)
def bind_to_metadata_change(self, callback):
''' Binds callbacks to changes in the values of self._last_metadata_id
This function is used to add metadata to the database upon (re)connection
of the server/client link.
:param callback:
:return:
'''
self._metadata_observers.append(callback)
def add_metadata(self, id, contents):
print(contents)
self.metadata[id] = contents
self.last_metadata_id = id # This triggers the callback
def remove_key(self,id):
"""
Removes the node with a given id from the comms_handler.
We need to make sure that both the last_data and the metadata
entries are removed
:param id: the UUID given by the MasterServer to the node
:type id: str
:return:
"""
self.last_data.pop(id,None)
self.metadata.pop(id,None)
def get_nodeID_by_user(self,user):
""" Returns the node.id of the node with a given user name
Since the user is first obtained after getting data, we infer
it from the information in the self.last_data dictionary.
:param user: The laboratory name
:type user: str
:return: Returns the UUID given by the master server to the node
with a given username
"""
return [key for key in self.last_data if self.last_data[key]['user'] == user]
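# A minimal, self-contained sketch (illustrative only, never called by the
# server) of the observer pattern described in the CommsHandler docstring: an
# external object binds a callback to last_metadata_id and is notified whenever
# add_metadata() assigns a new id. The names below are hypothetical.
def _observer_pattern_demo():
    comms = CommsHandler()
    seen = []
    comms.bind_to_metadata_change(seen.append)   # register the observer callback
    comms.add_metadata('node-1234', {'user': 'lab7', 'ch0': 'temperature'})
    return seen                                  # -> ['node-1234']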
########################################
def broadcast(dictionary_of_endpoints, msg):
""" Broadcasts a message to a list of endpoints using the "write_message"
method.
:param dictionary_of_endpoints:
:param msg:
:return:
"""
for endpoint in dictionary_of_endpoints:
dictionary_of_endpoints[endpoint].write_message(msg)
def main1(periodicity=100, verbose=0):
my_master_server = MasterServer(periodicity=periodicity,
verbose=verbose)
return my_master_server
def signal_handler(signum,frame):
tornado.ioloop.IOLoop.instance().stop()
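# A minimal client-side sketch (illustrative only, never called by the server):
# it connects to the client websocket defined above and prints a single update.
# It assumes a master server already running on this host with the default
# SOCKETPORT and CLIENT_SOCKETNAME values.
def _example_read_one_client_update():
    url = 'ws://localhost:{}{}'.format(SOCKETPORT, CLIENT_SOCKETNAME)
    conn = ioloop.IOLoop.current().run_sync(
        lambda: tornado.websocket.websocket_connect(url))
    message = ioloop.IOLoop.current().run_sync(conn.read_message)
    print(message)
    conn.close()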
if __name__ == "__main__":
print(MST_HEADER)
print('')
parser = argparse.ArgumentParser()
parser.add_argument("-pr","--periodicity",
help="periodicity to poll nodes",
type=int,default=PERIODICITY)
parser.add_argument("-dbpr","--database_periodicity",
help="periodicity of saving data to database",
type=int,default=DB_PERIODICITY)
parser.add_argument("-v","--verbose",help="Activate verbose",
type=int,default=0)
args = parser.parse_args()
signal.signal(signal.SIGINT,signal_handler)
main1(periodicity=args.periodicity,
verbose=args.verbose)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import logging
import tempfile
import gzip
import shutil
import time
import csv
import copy
import sys
from mtgraphite import MTGraphiteClient
import json
import multiprocessing
import Queue
from crawler_exceptions import (EmitterUnsupportedFormat,
EmitterUnsupportedProtocol,
EmitterBadURL,
EmitterEmitTimeout)
# External dependencies that must be pip install'ed separately
import kafka as kafka_python
import pykafka
import requests
from features import (OSFeature, FileFeature, ConfigFeature, DiskFeature,
ProcessFeature, MetricFeature, ConnectionFeature,
PackageFeature, MemoryFeature, CpuFeature,
InterfaceFeature, LoadFeature, DockerPSFeature,
DockerHistoryFeature)
from misc import NullHandler
logger = logging.getLogger('crawlutils')
def kafka_send(kurl, temp_fpath, format, topic, queue=None):
try:
kafka_python_client = kafka_python.KafkaClient(kurl)
kafka_python_client.ensure_topic_exists(topic)
kafka = pykafka.KafkaClient(hosts=kurl)
publish_topic_object = kafka.topics[topic]
# the default partitioner is random_partitioner
producer = publish_topic_object.get_producer()
if format == 'csv':
with open(temp_fpath, 'r') as fp:
text = fp.read()
producer.produce([text])
elif format == 'graphite':
with open(temp_fpath, 'r') as fp:
for line in fp.readlines():
producer.produce([line])
else:
raise EmitterUnsupportedFormat('Unsupported format: %s' % format)
queue and queue.put((True, None))
except Exception as e:
if queue:
queue.put((False, e))
else:
raise
finally:
queue and queue.close()
class Emitter:
"""Class that abstracts the outputs supported by the crawler, like
stdout, or kafka.
An object of this class is created for every frame emitted. A frame is
emitted for every container and at every crawling interval.
"""
# We want to use a global to store the MTGraphite client class so it
# persists across metric intervals.
mtgclient = None
kafka_timeout_secs = 30
# Debugging TIP: use url='file://<local-file>' to emit the frame data into
# a local file
def __init__(
self,
urls,
emitter_args={},
format='csv',
max_emit_retries=9,
kafka_timeout_secs=30
):
self.urls = urls
self.emitter_args = emitter_args
self.compress = emitter_args.get('compress', False)
self.format = format
self.max_emit_retries = max_emit_retries
self.mtgclient = None
self.kafka_timeout_secs = kafka_timeout_secs
def __enter__(self):
(self.temp_fd, self.temp_fpath) = \
tempfile.mkstemp(prefix='emit.')
os.close(self.temp_fd) # close temporary file descriptor
# as we open immediately
# need to find a better fix later
if self.compress:
self.emitfile = gzip.open(self.temp_fpath, 'wb')
else:
self.emitfile = open(self.temp_fpath, 'wb')
self.csv_writer = csv.writer(self.emitfile, delimiter='\t',
quotechar="'")
self.begin_time = time.time()
self.num_features = 0
return self
def emit_dict_as_graphite(
self,
sysname,
group,
suffix,
data,
timestamp=None,
):
timestamp = int(timestamp or time.time())
items = data.items()
# this is for issue #343
sysname = sysname.replace('/', '.')
for (metric, value) in items:
try:
value = float(value)
except Exception:
# value was not a float or anything that looks like a float
continue
metric = metric.replace('(', '_').replace(')', '')
metric = metric.replace(' ', '_').replace('-', '_')
metric = metric.replace('/', '_').replace('\\', '_')
suffix = suffix.replace('_', '-')
if 'cpu' in suffix or 'memory' in suffix:
metric = metric.replace('_', '-')
if 'if' in metric:
metric = metric.replace('_tx', '.tx')
metric = metric.replace('_rx', '.rx')
if suffix == 'load':
suffix = 'load.load'
suffix = suffix.replace('/', '$')
tmp_message = '%s.%s.%s %f %d\r\n' % (sysname, suffix,
metric, value, timestamp)
self.emitfile.write(tmp_message)
# Added optional feature_type so that we can bypass feature type discovery
# for FILE crawlmode
def emit(
self,
feature_key,
feature_val,
feature_type=None,
):
# Add metadata as first feature
if self.num_features == 0:
metadata = copy.deepcopy(self.emitter_args)
# Update timestamp to the actual emit time
metadata['timestamp'] = \
time.strftime('%Y-%m-%dT%H:%M:%S%z')
if 'extra' in metadata:
del metadata['extra']
if self.emitter_args['extra']:
metadata.update(json.loads(self.emitter_args['extra'
]))
if 'extra_all_features' in metadata:
del metadata['extra_all_features']
if self.format == 'csv':
self.csv_writer.writerow(
['metadata',
json.dumps('metadata'),
json.dumps(metadata,
separators=(',', ':'))])
self.num_features += 1
if isinstance(feature_val, dict):
feature_val_as_dict = feature_val
else:
feature_val_as_dict = feature_val._asdict()
if 'extra' in self.emitter_args and self.emitter_args['extra'] \
and 'extra_all_features' in self.emitter_args \
and self.emitter_args['extra_all_features'] == True:
feature_val_as_dict.update(json.loads(self.emitter_args['extra'
]))
if self.format == 'csv':
self.csv_writer.writerow(
[feature_type,
json.dumps(feature_key),
json.dumps(feature_val_as_dict,
separators=(',', ':'))])
elif self.format == 'graphite':
if 'namespace' in self.emitter_args:
namespace = self.emitter_args['namespace']
else:
namespace = 'undefined'
self.emit_dict_as_graphite(
namespace,
feature_type,
feature_key,
feature_val_as_dict)
else:
raise EmitterUnsupportedFormat(
'Unsupported format: %s' % self.format)
self.num_features += 1
def _close_file(self):
# close the output file
self.emitfile.close()
def _publish_to_stdout(self):
with open(self.temp_fpath, 'r') as fd:
if self.compress:
print '%s' % fd.read()
else:
for line in fd.readlines():
print line.strip()
sys.stdout.flush()
def _publish_to_broker(self, url, max_emit_retries=5):
for attempt in range(max_emit_retries):
try:
headers = {'content-type': 'text/csv'}
if self.compress:
headers['content-encoding'] = 'gzip'
with open(self.temp_fpath, 'rb') as framefp:
response = requests.post(
url, headers=headers, params=self.emitter_args, data=framefp)
except requests.exceptions.ChunkedEncodingError as e:
logger.exception(e)
logger.error(
"POST to %s resulted in exception (attempt %d of %d), will not re-try" %
(url, attempt + 1, max_emit_retries))
break
except requests.exceptions.RequestException as e:
logger.exception(e)
logger.error(
"POST to %s resulted in exception (attempt %d of %d)" %
(url, attempt + 1, max_emit_retries))
time.sleep(2.0 ** attempt * 0.1)
continue
if response.status_code != requests.codes.ok:
logger.error("POST to %s resulted in status code %s: %s (attempt %d of %d)" % (
url, str(response.status_code), response.text, attempt + 1, max_emit_retries))
time.sleep(2.0 ** attempt * 0.1)
else:
break
def _publish_to_kafka_no_retries(self, url):
list = url[len('kafka://'):].split('/')
if len(list) == 2:
kurl, topic = list
else:
raise EmitterBadURL(
'The kafka url provided does not seem to be valid: %s. '
'It should be something like this: '
'kafka://[ip|hostname]:[port]/[kafka_topic]. '
'For example: kafka://1.1.1.1:1234/metrics' % url)
# Kafka logs too much
h = NullHandler()
logging.getLogger('kafka').addHandler(h)
queue = multiprocessing.Queue()
try:
child_process = multiprocessing.Process(
name='kafka-emitter', target=kafka_send, args=(
kurl, self.temp_fpath, self.format, topic, queue))
child_process.start()
except OSError:
queue.close()
raise
try:
(result, child_exception) = queue.get(
timeout=self.kafka_timeout_secs)
except Queue.Empty:
child_exception = EmitterEmitTimeout()
child_process.join(self.kafka_timeout_secs)
if child_process.is_alive():
errmsg = ('Timed out waiting for process %d to exit.' %
child_process.pid)
queue.close()
os.kill(child_process.pid, 9)
logger.error(errmsg)
raise EmitterEmitTimeout(errmsg)
if child_exception:
raise child_exception
def _publish_to_kafka(self, url, max_emit_retries=8):
broker_alive = False
retries = 0
while not broker_alive and retries <= max_emit_retries:
try:
retries += 1
self._publish_to_kafka_no_retries(url)
broker_alive = True
except Exception as e:
logger.debug('_publish_to_kafka_no_retries {0}: {1}'.format(url,
e))
if retries <= max_emit_retries:
# Wait for (2^retries * 100) milliseconds
wait_time = 2.0 ** retries * 0.1
logger.error(
'Could not connect to the kafka server at %s. Retry '
'in %f seconds.' % (url, wait_time))
time.sleep(wait_time)
else:
raise e
def _publish_to_mtgraphite(self, url):
if not Emitter.mtgclient:
Emitter.mtgclient = MTGraphiteClient(url)
with open(self.temp_fpath, 'r') as fp:
num_pushed_to_queue = \
Emitter.mtgclient.send_messages(fp.readlines())
logger.debug('Pushed %d messages to mtgraphite queue'
% num_pushed_to_queue)
def _write_to_file(self, url):
output_path = url[len('file://'):]
if self.compress:
output_path += '.gz'
shutil.move(self.temp_fpath, output_path)
def __exit__(
self,
typ,
exc,
trc,
):
if exc:
self._close_file()
if os.path.exists(self.temp_fpath):
os.remove(self.temp_fpath)
return False
try:
self._close_file()
for url in self.urls:
logger.debug('Emitting frame to {0}'.format(url))
if url.startswith('stdout://'):
self._publish_to_stdout()
elif url.startswith('http://'):
self._publish_to_broker(url, self.max_emit_retries)
elif url.startswith('file://'):
self._write_to_file(url)
elif url.startswith('kafka://'):
self._publish_to_kafka(url, self.max_emit_retries)
elif url.startswith('mtgraphite://'):
self._publish_to_mtgraphite(url)
else:
if os.path.exists(self.temp_fpath):
os.remove(self.temp_fpath)
raise EmitterUnsupportedProtocol(
'Unsupported URL protocol {0}'.format(url))
finally:
if os.path.exists(self.temp_fpath):
os.remove(self.temp_fpath)
self.end_time = time.time()
elapsed_time = self.end_time - self.begin_time
logger.info(
'Emitted {0} features in {1} seconds'.format(
self.num_features,
elapsed_time))
return True
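# Minimal usage sketch (illustrative only; the URL and feature values below are
# made up). It emits a single CSV frame to stdout through the context-manager
# protocol implemented above, assuming the module's dependencies are installed.
if __name__ == '__main__':
    with Emitter(urls=['stdout://'],
                 emitter_args={'namespace': 'demo', 'extra': ''},
                 format='csv') as demo_emitter:
        demo_emitter.emit('cpu-0', {'cpu_used': 12.5}, feature_type='cpu')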
|
|
import aiohttp
from waterbutler.core import streams
from waterbutler.core import provider
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.owncloud import utils
from waterbutler.providers.owncloud.metadata import OwnCloudFileRevisionMetadata
class OwnCloudProvider(provider.BaseProvider):
"""Provider for the ownCloud cloud storage service.
This provider uses WebDAV for communication.
API docs::
* WebDAV: http://www.webdav.org/specs/rfc4918.html
* OCSv1.7: https://www.freedesktop.org/wiki/Specifications/open-collaboration-services-1.7/
Required settings fields::
* folder
* verify_ssl
Required credentials fields::
* host
* username
* password
Quirks:
* User credentials are stored in an aiohttp.BasicAuth object. At the moment, there isn't a
better way to do this.
"""
NAME = 'owncloud'
def __init__(self, auth, credentials, settings):
super().__init__(auth, credentials, settings)
self.folder = settings['folder']
if not self.folder.endswith('/'):
self.folder += '/'
self.verify_ssl = settings['verify_ssl']
self.url = credentials['host']
self._auth = aiohttp.BasicAuth(credentials['username'], credentials['password'])
self.metrics.add('host', self.url)
def connector(self):
return aiohttp.TCPConnector(verify_ssl=self.verify_ssl)
@property
def _webdav_url_(self):
"""Formats the outgoing url appropriately. This accounts for some differences in oc server
software.
"""
if self.url[-1] != '/':
return self.url + '/remote.php/webdav/'
return self.url + 'remote.php/webdav/'
def shares_storage_root(self, other):
"""Owncloud settings only include the root folder. If a cross-resource move occurs
between two owncloud providers that are on different accounts but have the same folder
base name, the parent method could incorrectly think the action is a self-overwrite.
Comparing credentials means that this is unique per connected account.
:param waterbutler.core.provider.BaseProvider other: another provider to test
:return: `True` if both providers share the same storage root
:rtype: `bool`
"""
return super().shares_storage_root(other) and self.credentials == other.credentials
async def validate_v1_path(self, path, **kwargs):
"""Verifies that ``path`` exists and if so, returns a WaterButlerPath object that
represents it. WebDAV returns 200 for a single file, 207 (Multi-Status) for a folder, and 404
for Does Not Exist.
:param str path: user-supplied path to validate
:return: WaterButlerPath object representing ``path``
:rtype: `waterbutler.core.path.WaterButlerPath`
:raises `waterbutler.core.exceptions.NotFoundError`: if the path doesn't exist
"""
if path == '/':
return WaterButlerPath(path, prepend=self.folder)
full_path = WaterButlerPath(path, prepend=self.folder)
response = await self.make_request('PROPFIND',
self._webdav_url_ + full_path.full_path,
expects=(200, 207, 404),
throws=exceptions.MetadataError,
auth=self._auth,
connector=self.connector(),
)
content = await response.content.read()
await response.release()
if response.status == 404:
raise exceptions.NotFoundError(str(full_path.full_path))
try:
item = await utils.parse_dav_response(content, '/')
except exceptions.NotFoundError:
# Re-raise with the proper path
raise exceptions.NotFoundError(str(full_path.full_path))
if full_path.kind != item[0].kind:
raise exceptions.NotFoundError(full_path.full_path)
return full_path
async def validate_path(self, path, **kwargs):
"""Similar to `validate_v1_path`, but will not throw a 404 if the path doesn't yet exist.
Instead, returns a WaterButlerPath object for the potential path (such as before uploads).
:param str path: user-supplied path to validate
:return: WaterButlerPath object representing ``path``
:rtype: :class:`waterbutler.core.path.WaterButlerPath`
"""
if path == '/':
return WaterButlerPath(path, prepend=self.folder)
full_path = WaterButlerPath(path, prepend=self.folder)
response = await self.make_request('PROPFIND',
self._webdav_url_ + full_path.full_path,
expects=(200, 207, 404),
throws=exceptions.MetadataError,
auth=self._auth,
connector=self.connector(),
)
content = await response.content.read()
await response.release()
try:
await utils.parse_dav_response(content, '/')
except exceptions.NotFoundError:
pass
return full_path
async def download(self, path, accept_url=False, range=None, **kwargs):
"""Creates a stream for downloading files from the remote host. If the metadata query for
the file has no size metadata, downloads to memory.
:param waterbutler.core.path.WaterButlerPath path: user-supplied path to download
:raises: `waterbutler.core.exceptions.DownloadError`
"""
self.metrics.add('download', {
'got_accept_url': accept_url is False,
'got_range': range is not None,
})
download_resp = await self.make_request(
'GET',
self._webdav_url_ + path.full_path,
range=range,
expects=(200, 206,),
throws=exceptions.DownloadError,
auth=self._auth,
connector=self.connector(),
)
return streams.ResponseStreamReader(download_resp)
async def upload(self, stream, path, conflict='replace', **kwargs):
"""Utilizes default name conflict handling behavior then adds the appropriate headers and
creates the upload request.
:param waterbutler.core.streams.RequestStreamReader stream: stream containing file contents
:param waterbutler.core.path.WaterButlerPath path: user-supplied path to upload to
:raises: `waterbutler.core.exceptions.UploadError`
"""
if path.identifier and conflict == 'keep':
path, _ = await self.handle_name_conflict(path, conflict=conflict, kind='folder')
path._parts[-1]._id = None
response = await self.make_request(
'PUT',
self._webdav_url_ + path.full_path,
data=stream,
headers={'Content-Length': str(stream.size)},
expects=(201, 204,),
throws=exceptions.UploadError,
auth=self._auth,
connector=self.connector(),
)
await response.release()
meta = await self.metadata(path)
return meta, response.status == 201
async def delete(self, path, **kwargs):
"""Deletes ``path`` on remote host
:param waterbutler.core.path.WaterButlerPath path: user-supplied path to delete
:raises: `waterbutler.core.exceptions.DeleteError`
"""
delete_resp = await self.make_request(
'DELETE',
self._webdav_url_ + path.full_path,
expects=(204,),
throws=exceptions.DeleteError,
auth=self._auth,
connector=self.connector(),
)
await delete_resp.release()
return
async def metadata(self, path, **kwargs):
"""Queries the remote host for metadata and returns metadata objects based on the return
value.
:param waterbutler.core.path.WaterButlerPath path: user-supplied path to query
:raises: `waterbutler.core.exceptions.MetadataError`
"""
if path.is_dir:
return (await self._metadata_folder(path, **kwargs))
else:
return (await self._metadata_file(path, **kwargs))
async def _metadata_file(self, path, **kwargs):
items = await self._metadata_folder(path, skip_first=False, **kwargs)
return items[0]
async def _metadata_folder(self, path, skip_first=True, **kwargs):
"""Performs the actual query against ownCloud. In this case the return code depends on the
content::
* 204: Empty response
* 207: Multi-Status response (folder listing)
"""
response = await self.make_request('PROPFIND',
self._webdav_url_ + path.full_path,
expects=(204, 207),
throws=exceptions.MetadataError,
auth=self._auth,
connector=self.connector(),
)
items = []
if response.status == 207:
content = await response.content.read()
items = await utils.parse_dav_response(content, self.folder, skip_first)
await response.release()
return items
async def create_folder(self, path, **kwargs):
"""Create a folder in the current provider at ``path``. Returns an
`.metadata.OwnCloudFolderMetadata` object if successful.
:param waterbutler.core.path.WaterButlerPath path: user-supplied directory path to create
:param boolean precheck_folder: flag to check for folder before attempting create
:rtype: `.metadata.OwnCloudFolderMetadata`
:raises: `waterbutler.core.exceptions.CreateFolderError`
"""
resp = await self.make_request(
'MKCOL',
self._webdav_url_ + path.full_path,
expects=(201, 405),
throws=exceptions.CreateFolderError,
auth=self._auth,
connector=self.connector()
)
await resp.release()
if resp.status == 405:
raise exceptions.FolderNamingConflict(path.name)
# get the folder metadata
meta = await self.metadata(path.parent)
return [m for m in meta if m.path == path.materialized_path][0]
def can_duplicate_names(self):
return True
def can_intra_copy(self, dest_provider, path=None):
return self == dest_provider
def can_intra_move(self, dest_provider, path=None):
return self == dest_provider
async def intra_copy(self, dest_provider, src_path, dest_path):
return await self._do_dav_move_copy(src_path, dest_path, 'COPY')
async def intra_move(self, dest_provider, src_path, dest_path):
return await self._do_dav_move_copy(src_path, dest_path, 'MOVE')
async def _do_dav_move_copy(self, src_path, dest_path, operation):
"""Performs a quick copy or move operation on the remote host.
:param waterbutler.core.path.WaterButlerPath src_path: path for the source object
:param waterbutler.core.path.WaterButlerPath dest_path: path for the destination object
:param str operation: Either `COPY` or `MOVE`
:rtype: `.metadata.OwnCloudFileMetadata`
:rtype: `.metadata.OwnCloudFolderMetadata`
:raises: `waterbutler.core.exceptions.IntraCopyError`
"""
if operation != 'MOVE' and operation != 'COPY':
raise NotImplementedError("ownCloud move/copy only supports MOVE and COPY endpoints")
resp = await self.make_request(
operation,
self._webdav_url_ + src_path.full_path,
expects=(201, 204), # WebDAV MOVE/COPY: 201 = Created, 204 = Updated existing
throws=exceptions.IntraCopyError,
auth=self._auth,
connector=self.connector(),
headers={'Destination': '/remote.php/webdav' + dest_path.full_path}
)
await resp.release()
file_meta = await self.metadata(dest_path)
if dest_path.is_folder:
parent_meta = await self.metadata(dest_path.parent)
meta = [m for m in parent_meta if m.materialized_path == dest_path.materialized_path][0]
meta.children = file_meta
else:
meta = file_meta
return meta, resp.status == 201
async def revisions(self, path, **kwargs):
metadata = await self.metadata(path)
return [OwnCloudFileRevisionMetadata.from_metadata(metadata)]
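# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the provider): how the WebDAV MOVE helper
# above would be driven for an intra-provider rename.  ``provider`` is an
# assumed, already-configured instance of this class and the paths are
# placeholders.
#
#   src = await provider.validate_path('/old/name.txt')
#   dst = await provider.validate_path('/new/name.txt')
#   metadata, created = await provider.intra_move(provider, src, dst)
#
# ``created`` mirrors the 201/204 distinction noted in _do_dav_move_copy:
# True when the destination did not exist before the MOVE.
# ---------------------------------------------------------------------------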
|
|
"""
Support to interface with the Emby API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.emby/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL, MEDIA_TYPE_MOVIE, MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW,
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK, SUPPORT_STOP)
from homeassistant.const import (
CONF_API_KEY, CONF_HOST, CONF_PORT, CONF_SSL, DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, STATE_IDLE, STATE_OFF,
STATE_PAUSED, STATE_PLAYING)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
REQUIREMENTS = ['pyemby==1.6']
_LOGGER = logging.getLogger(__name__)
CONF_AUTO_HIDE = 'auto_hide'
MEDIA_TYPE_TRAILER = 'trailer'
MEDIA_TYPE_GENERIC_VIDEO = 'video'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8096
DEFAULT_SSL_PORT = 8920
DEFAULT_SSL = False
DEFAULT_AUTO_HIDE = False
SUPPORT_EMBY = SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_STOP | SUPPORT_SEEK | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_AUTO_HIDE, default=DEFAULT_AUTO_HIDE): cv.boolean,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
})
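# Hedged configuration sketch matching the schema above; the values are
# placeholders, not defaults from any real install:
#
#   # configuration.yaml
#   media_player:
#     - platform: emby
#       api_key: YOUR_EMBY_API_KEY
#       host: 192.168.1.10
#       port: 8096
#       ssl: false
#       auto_hide: true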
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the Emby platform."""
from pyemby import EmbyServer
host = config.get(CONF_HOST)
key = config.get(CONF_API_KEY)
port = config.get(CONF_PORT)
ssl = config.get(CONF_SSL)
auto_hide = config.get(CONF_AUTO_HIDE)
if port is None:
port = DEFAULT_SSL_PORT if ssl else DEFAULT_PORT
_LOGGER.debug("Setting up Emby server at: %s:%s", host, port)
emby = EmbyServer(host, key, port, ssl, hass.loop)
active_emby_devices = {}
inactive_emby_devices = {}
@callback
def device_update_callback(data):
"""Handle devices which are added to Emby."""
new_devices = []
active_devices = []
for dev_id in emby.devices:
active_devices.append(dev_id)
if dev_id not in active_emby_devices and \
dev_id not in inactive_emby_devices:
new = EmbyDevice(emby, dev_id)
active_emby_devices[dev_id] = new
new_devices.append(new)
elif dev_id in inactive_emby_devices:
if emby.devices[dev_id].state != 'Off':
add = inactive_emby_devices.pop(dev_id)
active_emby_devices[dev_id] = add
_LOGGER.debug("Showing %s, item: %s", dev_id, add)
add.set_available(True)
add.set_hidden(False)
if new_devices:
_LOGGER.debug("Adding new devices: %s", new_devices)
async_add_entities(new_devices, True)
@callback
def device_removal_callback(data):
"""Handle the removal of devices from Emby."""
if data in active_emby_devices:
rem = active_emby_devices.pop(data)
inactive_emby_devices[data] = rem
_LOGGER.debug("Inactive %s, item: %s", data, rem)
rem.set_available(False)
if auto_hide:
rem.set_hidden(True)
@callback
def start_emby(event):
"""Start Emby connection."""
emby.start()
async def stop_emby(event):
"""Stop Emby connection."""
await emby.stop()
emby.add_new_devices_callback(device_update_callback)
emby.add_stale_devices_callback(device_removal_callback)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_emby)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_emby)
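# Hedged summary of the lifecycle wired up above: pyemby invokes
# device_update_callback when sessions appear (entities are created or
# re-shown) and device_removal_callback when sessions go stale (entities are
# marked unavailable and, when auto_hide is set, hidden).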
class EmbyDevice(MediaPlayerDevice):
"""Representation of an Emby device."""
def __init__(self, emby, device_id):
"""Initialize the Emby device."""
_LOGGER.debug("New Emby Device initialized with ID: %s", device_id)
self.emby = emby
self.device_id = device_id
self.device = self.emby.devices[self.device_id]
self._hidden = False
self._available = True
self.media_status_last_position = None
self.media_status_received = None
async def async_added_to_hass(self):
"""Register callback."""
self.emby.add_update_callback(
self.async_update_callback, self.device_id)
@callback
def async_update_callback(self, msg):
"""Handle device updates."""
# Check if we should update progress
if self.device.media_position:
if self.device.media_position != self.media_status_last_position:
self.media_status_last_position = self.device.media_position
self.media_status_received = dt_util.utcnow()
elif not self.device.is_nowplaying:
            # Media is no longer playing, so drop the cached position values.
self.media_status_last_position = None
self.media_status_received = None
self.async_schedule_update_ha_state()
@property
def hidden(self):
"""Return True if entity should be hidden from UI."""
return self._hidden
def set_hidden(self, value):
"""Set hidden property."""
self._hidden = value
@property
def available(self):
"""Return True if entity is available."""
return self._available
def set_available(self, value):
"""Set available property."""
self._available = value
@property
def unique_id(self):
"""Return the id of this emby client."""
return self.device_id
@property
def supports_remote_control(self):
"""Return control ability."""
return self.device.supports_remote_control
@property
def name(self):
"""Return the name of the device."""
        return ('Emby - {} - {}'.format(self.device.client, self.device.name)
                if self.device.name else DEVICE_DEFAULT_NAME)
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return False
@property
def state(self):
"""Return the state of the device."""
state = self.device.state
if state == 'Paused':
return STATE_PAUSED
if state == 'Playing':
return STATE_PLAYING
if state == 'Idle':
return STATE_IDLE
if state == 'Off':
return STATE_OFF
@property
def app_name(self):
"""Return current user as app_name."""
# Ideally the media_player object would have a user property.
return self.device.username
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self.device.media_id
@property
def media_content_type(self):
"""Content type of current playing media."""
media_type = self.device.media_type
if media_type == 'Episode':
return MEDIA_TYPE_TVSHOW
if media_type == 'Movie':
return MEDIA_TYPE_MOVIE
if media_type == 'Trailer':
return MEDIA_TYPE_TRAILER
if media_type == 'Music':
return MEDIA_TYPE_MUSIC
if media_type == 'Video':
return MEDIA_TYPE_GENERIC_VIDEO
if media_type == 'Audio':
return MEDIA_TYPE_MUSIC
if media_type == 'TvChannel':
return MEDIA_TYPE_CHANNEL
return None
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return self.device.media_runtime
@property
def media_position(self):
"""Return the position of current playing media in seconds."""
return self.media_status_last_position
@property
def media_position_updated_at(self):
"""
When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
return self.media_status_received
@property
def media_image_url(self):
"""Return the image URL of current playing media."""
return self.device.media_image_url
@property
def media_title(self):
"""Return the title of current playing media."""
return self.device.media_title
@property
def media_season(self):
"""Season of current playing media (TV Show only)."""
return self.device.media_season
@property
def media_series_title(self):
"""Return the title of the series of current playing media (TV)."""
return self.device.media_series_title
@property
def media_episode(self):
"""Return the episode of current playing media (TV only)."""
return self.device.media_episode
@property
def media_album_name(self):
"""Return the album name of current playing media (Music only)."""
return self.device.media_album_name
@property
def media_artist(self):
"""Return the artist of current playing media (Music track only)."""
return self.device.media_artist
@property
def media_album_artist(self):
"""Return the album artist of current playing media (Music only)."""
return self.device.media_album_artist
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self.supports_remote_control:
return SUPPORT_EMBY
return None
def async_media_play(self):
"""Play media.
This method must be run in the event loop and returns a coroutine.
"""
return self.device.media_play()
def async_media_pause(self):
"""Pause the media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.device.media_pause()
def async_media_stop(self):
"""Stop the media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.device.media_stop()
def async_media_next_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
return self.device.media_next()
def async_media_previous_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
return self.device.media_previous()
def async_media_seek(self, position):
"""Send seek command.
This method must be run in the event loop and returns a coroutine.
"""
return self.device.media_seek(position)
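# ---------------------------------------------------------------------------
# Hedged note (illustration only): the async_media_* methods above return the
# coroutines produced by pyemby instead of awaiting them, so Home Assistant's
# media_player core can await them on the event loop, roughly:
#
#   await entity.async_media_pause()   # awaits device.media_pause() internally
#
# ``entity`` here is an assumed EmbyDevice instance managed by Home Assistant.
# ---------------------------------------------------------------------------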
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import List, Optional
import msrest.serialization
class ResourceSku(msrest.serialization.Model):
"""Describes an available Compute SKU.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar resource_type: The type of resource the SKU applies to.
:vartype resource_type: str
:ivar name: The name of SKU.
:vartype name: str
:ivar tier: Specifies the tier of virtual machines in a scale set.:code:`<br />`:code:`<br />`
Possible Values::code:`<br />`:code:`<br />` **Standard**\ :code:`<br />`:code:`<br />`
**Basic**.
:vartype tier: str
:ivar size: The Size of the SKU.
:vartype size: str
:ivar family: The Family of this particular SKU.
:vartype family: str
:ivar kind: The Kind of resources that are supported in this SKU.
:vartype kind: str
:ivar capacity: Specifies the number of virtual machines in the scale set.
:vartype capacity: ~azure.mgmt.compute.v2017_09_01.models.ResourceSkuCapacity
:ivar locations: The set of locations that the SKU is available.
:vartype locations: list[str]
:ivar location_info: A list of locations and availability zones in those locations where the
SKU is available.
:vartype location_info: list[~azure.mgmt.compute.v2017_09_01.models.ResourceSkuLocationInfo]
:ivar api_versions: The api versions that support this SKU.
:vartype api_versions: list[str]
:ivar costs: Metadata for retrieving price info.
:vartype costs: list[~azure.mgmt.compute.v2017_09_01.models.ResourceSkuCosts]
:ivar capabilities: A name value pair to describe the capability.
:vartype capabilities: list[~azure.mgmt.compute.v2017_09_01.models.ResourceSkuCapabilities]
:ivar restrictions: The restrictions because of which SKU cannot be used. This is empty if
there are no restrictions.
:vartype restrictions: list[~azure.mgmt.compute.v2017_09_01.models.ResourceSkuRestrictions]
"""
_validation = {
'resource_type': {'readonly': True},
'name': {'readonly': True},
'tier': {'readonly': True},
'size': {'readonly': True},
'family': {'readonly': True},
'kind': {'readonly': True},
'capacity': {'readonly': True},
'locations': {'readonly': True},
'location_info': {'readonly': True},
'api_versions': {'readonly': True},
'costs': {'readonly': True},
'capabilities': {'readonly': True},
'restrictions': {'readonly': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'size': {'key': 'size', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'ResourceSkuCapacity'},
'locations': {'key': 'locations', 'type': '[str]'},
'location_info': {'key': 'locationInfo', 'type': '[ResourceSkuLocationInfo]'},
'api_versions': {'key': 'apiVersions', 'type': '[str]'},
'costs': {'key': 'costs', 'type': '[ResourceSkuCosts]'},
'capabilities': {'key': 'capabilities', 'type': '[ResourceSkuCapabilities]'},
'restrictions': {'key': 'restrictions', 'type': '[ResourceSkuRestrictions]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ResourceSku, self).__init__(**kwargs)
self.resource_type = None
self.name = None
self.tier = None
self.size = None
self.family = None
self.kind = None
self.capacity = None
self.locations = None
self.location_info = None
self.api_versions = None
self.costs = None
self.capabilities = None
self.restrictions = None
class ResourceSkuCapabilities(msrest.serialization.Model):
"""Describes The SKU capabilities object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: An invariant to describe the feature.
:vartype name: str
:ivar value: An invariant if the feature is measured by quantity.
:vartype value: str
"""
_validation = {
'name': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ResourceSkuCapabilities, self).__init__(**kwargs)
self.name = None
self.value = None
class ResourceSkuCapacity(msrest.serialization.Model):
"""Describes scaling information of a SKU.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar minimum: The minimum capacity.
:vartype minimum: long
:ivar maximum: The maximum capacity that can be set.
:vartype maximum: long
:ivar default: The default capacity.
:vartype default: long
:ivar scale_type: The scale type applicable to the sku. Possible values include: "Automatic",
"Manual", "None".
:vartype scale_type: str or ~azure.mgmt.compute.v2017_09_01.models.ResourceSkuCapacityScaleType
"""
_validation = {
'minimum': {'readonly': True},
'maximum': {'readonly': True},
'default': {'readonly': True},
'scale_type': {'readonly': True},
}
_attribute_map = {
'minimum': {'key': 'minimum', 'type': 'long'},
'maximum': {'key': 'maximum', 'type': 'long'},
'default': {'key': 'default', 'type': 'long'},
'scale_type': {'key': 'scaleType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ResourceSkuCapacity, self).__init__(**kwargs)
self.minimum = None
self.maximum = None
self.default = None
self.scale_type = None
class ResourceSkuCosts(msrest.serialization.Model):
"""Describes metadata for retrieving price info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar meter_id: Used for querying price from commerce.
:vartype meter_id: str
    :ivar quantity: The multiplier needed to extend the base metered cost.
:vartype quantity: long
:ivar extended_unit: An invariant to show the extended unit.
:vartype extended_unit: str
"""
_validation = {
'meter_id': {'readonly': True},
'quantity': {'readonly': True},
'extended_unit': {'readonly': True},
}
_attribute_map = {
'meter_id': {'key': 'meterID', 'type': 'str'},
'quantity': {'key': 'quantity', 'type': 'long'},
'extended_unit': {'key': 'extendedUnit', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ResourceSkuCosts, self).__init__(**kwargs)
self.meter_id = None
self.quantity = None
self.extended_unit = None
class ResourceSkuLocationInfo(msrest.serialization.Model):
"""ResourceSkuLocationInfo.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar location: Location of the SKU.
:vartype location: str
:ivar zones: List of availability zones where the SKU is supported.
:vartype zones: list[str]
"""
_validation = {
'location': {'readonly': True},
'zones': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'zones': {'key': 'zones', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ResourceSkuLocationInfo, self).__init__(**kwargs)
self.location = None
self.zones = None
class ResourceSkuRestrictionInfo(msrest.serialization.Model):
"""ResourceSkuRestrictionInfo.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar locations: Locations where the SKU is restricted.
:vartype locations: list[str]
:ivar zones: List of availability zones where the SKU is restricted.
:vartype zones: list[str]
"""
_validation = {
'locations': {'readonly': True},
'zones': {'readonly': True},
}
_attribute_map = {
'locations': {'key': 'locations', 'type': '[str]'},
'zones': {'key': 'zones', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ResourceSkuRestrictionInfo, self).__init__(**kwargs)
self.locations = None
self.zones = None
class ResourceSkuRestrictions(msrest.serialization.Model):
"""Describes scaling information of a SKU.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The type of restrictions. Possible values include: "Location", "Zone".
:vartype type: str or ~azure.mgmt.compute.v2017_09_01.models.ResourceSkuRestrictionsType
    :ivar values: The value of restrictions. If the restriction type is set to location, this would
     be the different locations where the SKU is restricted.
:vartype values: list[str]
:ivar restriction_info: The information about the restriction where the SKU cannot be used.
:vartype restriction_info: ~azure.mgmt.compute.v2017_09_01.models.ResourceSkuRestrictionInfo
:ivar reason_code: The reason for restriction. Possible values include: "QuotaId",
"NotAvailableForSubscription".
:vartype reason_code: str or
~azure.mgmt.compute.v2017_09_01.models.ResourceSkuRestrictionsReasonCode
"""
_validation = {
'type': {'readonly': True},
'values': {'readonly': True},
'restriction_info': {'readonly': True},
'reason_code': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'values': {'key': 'values', 'type': '[str]'},
'restriction_info': {'key': 'restrictionInfo', 'type': 'ResourceSkuRestrictionInfo'},
'reason_code': {'key': 'reasonCode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ResourceSkuRestrictions, self).__init__(**kwargs)
self.type = None
self.values = None
self.restriction_info = None
self.reason_code = None
class ResourceSkusResult(msrest.serialization.Model):
"""The List Resource Skus operation response.
All required parameters must be populated in order to send to Azure.
:ivar value: Required. The list of skus available for the subscription.
:vartype value: list[~azure.mgmt.compute.v2017_09_01.models.ResourceSku]
:ivar next_link: The URI to fetch the next page of Resource Skus. Call ListNext() with this URI
to fetch the next page of Resource Skus.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceSku]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["ResourceSku"],
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: Required. The list of skus available for the subscription.
:paramtype value: list[~azure.mgmt.compute.v2017_09_01.models.ResourceSku]
:keyword next_link: The URI to fetch the next page of Resource Skus. Call ListNext() with this
URI to fetch the next page of Resource Skus.
:paramtype next_link: str
"""
super(ResourceSkusResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
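# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the generated client):
# the classes above are plain msrest models, so a raw service payload can be
# loaded with the generic msrest helpers.  The payload below is a made-up,
# minimal example.
#
#   payload = {
#       'value': [
#           {'resourceType': 'virtualMachines', 'name': 'Standard_D2_v3',
#            'tier': 'Standard', 'locations': ['westus']},
#       ],
#       'nextLink': None,
#   }
#   result = ResourceSkusResult.from_dict(payload)
#   first_sku = result.value[0]   # a ResourceSku with server-populated fields set
# ---------------------------------------------------------------------------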
|
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2010 Barry Schwartz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import fontforge
from sortsmill import font_db
from sortsmill.glyphbuild import *
from sortsmill.spacing_by_anchors import *
emsize = 1000
spacesize = 200
def build_glyphs(bitbucket, f):
from sortsmill import cap_spacing
figures = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
def base(letter):
if letter == 'i':
base = 'dotlessi'
elif letter == 'j':
base = 'uni0237'
else:
base = letter
return base
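    # For reference, ``base`` swaps in dotless forms so accents do not collide
    # with the dots, e.g. base('i') -> 'dotlessi', base('j') -> 'uni0237',
    # while every other letter maps to itself.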
db = font_db.db_create(f)
db['spacing_anchor_heights'] = { 'hi2' : 670, # caps and ascenders,
'hi' : 610, # caps and ascenders,
'x' : 385, # ex-height
'o' : 200, # like the letter o
'bl' : 10, # baseline
'lo' : -200, # descenders
}
# all_glyphs = set(f) - set(['.notdef'])
# (smallcaps, capssmall, uppercase, lowercase, fraction_bar, numerators, denominators, remaining) = \
# tuple(separate_strings(all_glyphs, [
# (lambda s: s[-3:] == '.sc'),
# (lambda s: s[-3:] == '.c2'),
# (lambda s: is_uppercase(s, last_name)),
# (lambda s: is_lowercase(s, last_name)),
# (lambda s: s == 'fraction'),
# (lambda s: s[-6:] == '.numer'),
# (lambda s: s[-6:] == '.denom'),
# ]))
# db["kerning_sets"] = [
# (remaining, uppercase | lowercase | smallcaps | capssmall | remaining),
# (uppercase, uppercase | lowercase | smallcaps | remaining),
# (smallcaps, uppercase | smallcaps | capssmall | remaining),
# (lowercase, uppercase | lowercase | remaining),
# (numerators, fraction_bar),
# (fraction_bar, denominators),
# ]
# db['kerning_rounding'] = '(lambda x: int(round(x/5.0)) * 5)'
## db['kerning_rounding'] = '(lambda x: x if abs(x) < 10 else int(round(x/5.0))*5)'
build_several_space_glyphs(f, emsize = emsize, spacesize = spacesize,
thinspacesize = emsize / 6,
hairspacesize = emsize / 10,
# tabwidth = f['zero.l'].width)
tabwidth = 300) # <-- FIX: SET THIS CORRECTLY
propagate_hyphens(f)
# propagate_hyphens(f, '.uppercase')
build_spacing_marks(f, width = 2 * 230)
# for fig in figures:
# make_glyph_reference(fig, f[fig + '.h'])
make_glyph_reference('asciitilde', f['uni2053']) # Swung dash.
make_glyph_reference('i.TRK', f['i'])
make_glyph_reference('Dcroat', f['Eth'])
# make_glyph_reference('dcroat.sc', f['eth.sc'])
build_multigraph('ellipsis', [f['period'], f['period'], f['period']])
# for fig in figures + ['dollar']:
# make_glyph_reference(fig, f[fig + '.hanging'])
#
# make_glyph_reference('uni00B9', f['one.sup'])
# make_glyph_reference('uni00B2', f['two.sup'])
# make_glyph_reference('uni00B3', f['three.sup'])
## for extension in [('.numer', 244), ('.sub', -98), ('.sup', 293)]:
# for extension in [('.sub', -98), ('.sup', 293)]:
# for fig in figures:
# make_glyph_reference(fig + extension[0],
# f[fig + '.denom'],
# transformation = (1, 0, 0, 1, 0, extension[1]),
# copy_spacing_anchors = False)
# build_multigraph('onequarter', [f['one.numer'], f['fraction'], f['four.denom']])
# build_multigraph('onehalf', [f['one.numer'], f['fraction'], f['two.denom']])
# build_multigraph('threequarters', [f['three.numer'], f['fraction'], f['four.denom']])
# for g in f:
# if g[-3:] == '.sc':
# if g == 'periodcentered.sc':
# make_glyph_reference(g[:-3] + '.c2', f[g])
# elif g == 'uni0163.sc':
# make_glyph_reference('uni0162.c2', f[g])
# elif g == 'uni0219.sc':
# make_glyph_reference('uni0218.c2', f[g])
# elif g == 'uni021B.sc':
# make_glyph_reference('uni021A.c2', f[g])
# elif g in ('ae.sc', 'oe.sc'):
# make_glyph_reference(g[:-3].upper() + '.c2', f[g])
# else:
# make_glyph_reference(g[:-3].capitalize() + '.c2', f[g])
#--------------------------------------------------------------------------
for letter in 'GKkLlNnRr':
build_accented_glyph(letter + 'commaaccent', f[base(letter)], f['uni0326'])
build_accented_glyph('uni0218', f['S'], f['uni0326'])
build_accented_glyph('uni0219', f['s'], f['uni0326'])
build_accented_glyph('uni021A', f['T'], f['uni0326'])
build_accented_glyph('uni021B', f['t'], f['uni0326'])
build_accented_glyph('gcommaaccent', f['g'], f['uni0312'])
build_accented_glyph('gcommaaccent.ld', f['g.ld'], f['uni0312'])
# for letter in 'gklnr':
# build_accented_glyph(letter + 'commaaccent.sc', f[letter + '.sc'], f['uni0326'])
# build_accented_glyph('uni0219.sc', f['s.sc'], f['uni0326'])
# build_accented_glyph('uni021B.sc', f['t.sc'], f['uni0326'])
#--------------------------------------------------------------------------
for letter in 'CcSs':
build_accented_glyph(letter + 'cedilla', f[base(letter)], f['uni0327'])
remove_overlap(f[letter + 'cedilla'])
build_accented_glyph('uni0162', f['T'], f['uni0327'])
remove_overlap(f['uni0162'])
# build_accented_glyph('uni0163', f['t'], f['uni0327']) <-- don't rebuild; it's manually hinted
# remove_overlap(f['uni0163']) <-- don't rebuild; it's manually hinted
# for letter in 'cs':
# build_accented_glyph(letter + 'cedilla.sc', f[letter + '.sc'], f['uni0327'])
# build_accented_glyph('uni0163.sc', f['t.sc'], f['uni0327'])
#--------------------------------------------------------------------------
for letter in 'aeiou':
build_accented_glyph(letter + 'grave', f[base(letter)], f['gravecomb'])
for letter in 'AEIOU':
build_accented_glyph(letter + 'grave', f[base(letter)], f['gravecomb.cap'])
# for letter in 'aeiou':
# build_accented_glyph(letter + 'grave.sc', f[letter + '.sc'], f['gravecomb'])
#
#--------------------------------------------------------------------------
for letter in 'aceinorsuyz':
build_accented_glyph(letter + 'acute', f[base(letter)], f['acutecomb'])
for letter in 'ACEILNORSUYZ':
build_accented_glyph(letter + 'acute', f[base(letter)], f['acutecomb.cap'])
build_accented_glyph('lacute', f['l'], f['acutecomb.cap'])
for letter in 'y':
build_accented_glyph(letter + 'acute.ld', f[base(letter) + '.ld'], f['acutecomb'])
for letter in 'y':
build_accented_glyph(letter + 'acute.001', f[base(letter) + '.001'], f['acutecomb'])
# for letter in 'aceilnorsuyz':
# build_accented_glyph(letter + 'acute.sc', f[letter + '.sc'], f['acutecomb'])
#--------------------------------------------------------------------------
for letter in 'ainou':
build_accented_glyph(letter + 'tilde', f[base(letter)], f['tildecomb'])
for letter in 'AINOU':
build_accented_glyph(letter + 'tilde', f[base(letter)], f['tildecomb.cap'])
# for letter in 'ainou':
# build_accented_glyph(letter + 'tilde.sc', f[letter + '.sc'], f['tildecomb'])
#
# #--------------------------------------------------------------------------
#
for letter in 'aeouy':
build_accented_glyph(letter + 'dieresis', f[base(letter)], f['uni0308'])
for letter in 'AEIOUY':
build_accented_glyph(letter + 'dieresis', f[base(letter)], f['uni0308.cap'])
for letter in 'i':
build_accented_glyph(letter + 'dieresis', f[base(letter)], f['uni0308.narrow'])
for letter in 'y':
build_accented_glyph(letter + 'dieresis.ld', f[base(letter) + '.ld'], f['uni0308'])
for letter in 'y':
build_accented_glyph(letter + 'dieresis.001', f[base(letter) + '.001'], f['uni0308'])
# for letter in 'aeiouy':
# build_accented_glyph(letter + 'dieresis.sc', f[letter + '.sc'], f['uni0308'])
#--------------------------------------------------------------------------
for letter in 'au':
build_accented_glyph(letter + 'ring', f[base(letter)], f['uni030A'])
for letter in 'AU':
build_accented_glyph(letter + 'ring', f[base(letter)], f['uni030A.cap'])
# for letter in 'au':
# build_accented_glyph(letter + 'ring.sc', f[letter + '.sc'], f['uni030A'])
#--------------------------------------------------------------------------
for letter in 'acegijosuwy':
build_accented_glyph(letter + 'circumflex', f[base(letter)], f['uni0302'])
for letter in 'hACEGHIJOSUWY':
build_accented_glyph(letter + 'circumflex', f[base(letter)], f['uni0302.cap'])
for letter in 'gjy':
build_accented_glyph(letter + 'circumflex.ld', f[base(letter) + '.ld'], f['uni0302'])
for letter in 'y':
build_accented_glyph(letter + 'circumflex.001', f[base(letter) + '.001'], f['uni0302'])
build_accented_glyph('jcircumflex.001', f['uni0237.001'], f['uni0302'])
build_accented_glyph('jcircumflex.002', f['uni0237.002'], f['uni0302'])
# for letter in ['f_h', 'f_f_h']:
# build_accented_glyph(letter + 'circumflex', f[base(letter)], f['uni0302.cap'])
# for letter in 'aceghijosuwy':
# build_accented_glyph(letter + 'circumflex.sc', f[letter + '.sc'], f['uni0302'])
#--------------------------------------------------------------------------
for letter in 'aegiou':
build_accented_glyph(letter + 'breve', f[base(letter)], f['uni0306'])
for letter in 'AEGIOU':
build_accented_glyph(letter + 'breve', f[base(letter)], f['uni0306.cap'])
for letter in 'g':
build_accented_glyph(letter + 'breve.ld', f[base(letter) + '.ld'], f['uni0306'])
# for letter in 'aegiou':
# build_accented_glyph(letter + 'breve.sc', f[letter + '.sc'], f['uni0306'])
#--------------------------------------------------------------------------
for letter in 'cegz':
build_accented_glyph(letter + 'dotaccent', f[base(letter)], f['uni0307'])
for letter in 'CEGIZ':
build_accented_glyph(letter + 'dotaccent', f[base(letter)], f['uni0307.cap'])
for letter in 'g':
build_accented_glyph(letter + 'dotaccent.ld', f[base(letter) + '.ld'], f['uni0307'])
# for letter in 'cegz':
# build_accented_glyph(letter + 'dotaccent.sc', f[letter + '.sc'], f['uni0307'])
# build_accented_glyph('i.TRK.sc', f['i.sc'], f['uni0307'])
build_accented_glyph('i', f['dotlessi'], f['uni0307'])
build_accented_glyph('iogonek', f['iogonek.dotless'], f['uni0307'])
build_accented_glyph('j', f['uni0237'], f['uni0307'])
build_accented_glyph('j.ld', f['uni0237.ld'], f['uni0307'])
build_accented_glyph('j.001', f['uni0237.001'], f['uni0307'])
build_accented_glyph('j.002', f['uni0237.002'], f['uni0307'])
# Extra dot accents for Old Irish.
build_accented_glyph('uni1E02', f['B'], f['uni0307.cap'])
build_accented_glyph('uni1E03', f['b'], f['uni0307.cap'])
build_accented_glyph('uni1E0A', f['D'], f['uni0307.cap'])
build_accented_glyph('uni1E0B', f['d'], f['uni0307.cap'])
build_accented_glyph('uni1E1E', f['F'], f['uni0307.cap'])
build_accented_glyph('uni1E1F', f['f'], f['uni0307.cap'])
build_accented_glyph('uni1E1F.ld', f['f.ld'], f['uni0307.cap'])
build_accented_glyph('uni1E1F.001', f['f.001'], f['uni0307.cap'])
build_accented_glyph('uni1E22', f['H'], f['uni0307.cap'])
build_accented_glyph('uni1E23', f['h'], f['uni0307.cap'])
build_accented_glyph('uni1E40', f['M'], f['uni0307.cap'])
build_accented_glyph('uni1E41', f['m'], f['uni0307'])
build_accented_glyph('uni1E56', f['P'], f['uni0307.cap'])
build_accented_glyph('uni1E57', f['p'], f['uni0307'])
build_accented_glyph('uni1E57.ld', f['p.ld'], f['uni0307'])
build_accented_glyph('uni1E57.001', f['p.001'], f['uni0307'])
build_accented_glyph('uni1E57.002', f['p.002'], f['uni0307'])
build_accented_glyph('uni1E60', f['S'], f['uni0307.cap'])
build_accented_glyph('uni1E61', f['s'], f['uni0307'])
build_accented_glyph('uni1E6A', f['T'], f['uni0307.cap'])
build_accented_glyph('uni1E6B', f['t'], f['uni0307'])
#--------------------------------------------------------------------------
for letter in 'cenrsz':
build_accented_glyph(letter + 'caron', f[base(letter)], f['uni030C'])
for letter in 'CDENRTSZ':
build_accented_glyph(letter + 'caron', f[base(letter)], f['uni030C.cap'])
for letter in 'dLlt':
build_accented_glyph(letter + 'caron', f[base(letter)], f['uni0315'])
# for letter in 'cdenrstz':
# build_accented_glyph(letter + 'caron.sc', f[letter + '.sc'], f['uni030C'])
# build_accented_glyph('lcaron.sc', f['l.sc'], f['uni0315'])
#--------------------------------------------------------------------------
for letter in 'aeiou':
build_accented_glyph(letter + 'macron', f[base(letter)], f['uni0304'])
for letter in 'AEIOU':
build_accented_glyph(letter + 'macron', f[base(letter)], f['uni0304.cap'])
# for letter in 'aeiou':
# build_accented_glyph(letter + 'macron.sc', f[letter + '.sc'], f['uni0304'])
#--------------------------------------------------------------------------
for letter in 'ou':
build_accented_glyph(letter + 'hungarumlaut', f[base(letter)], f['uni030B'])
for letter in 'OU':
build_accented_glyph(letter + 'hungarumlaut', f[base(letter)], f['uni030B.cap'])
# for letter in 'ou':
# build_accented_glyph(letter + 'hungarumlaut.sc', f[letter + '.sc'], f['uni030B'])
#--------------------------------------------------------------------------
build_multigraph('napostrophe', [f['quoteright'], f['n']])
build_multigraph('IJ', [f['I'], f['J']])
build_multigraph('ij', [f['i'], f['j']])
build_multigraph('f_b', [f['f'], f['b']])
build_multigraph('f_h', [f['f'], f['h']])
build_multigraph('f_hcircumflex', [f['f'], f['hcircumflex']])
build_multigraph('f_k', [f['f'], f['k']])
build_multigraph('f_l', [f['f'], f['l']])
build_multigraph('f_f_b', [f['f_f'], f['b']])
build_multigraph('f_f_h', [f['f_f'], f['h']])
build_multigraph('f_f_hcircumflex', [f['f_f'], f['hcircumflex']])
build_multigraph('f_f_k', [f['f_f'], f['k']])
build_multigraph('f_f_l', [f['f_f'], f['l']])
build_multigraph('f_b.ld', [f['f.ld'], f['b']])
build_multigraph('f_h.ld', [f['f.ld'], f['h']])
build_multigraph('f_hcircumflex.ld', [f['f.ld'], f['hcircumflex']])
build_multigraph('f_k.ld', [f['f.ld'], f['k']])
build_multigraph('f_l.ld', [f['f.ld'], f['l']])
build_multigraph('f_f_b.ld', [f['f_f.ld'], f['b']])
build_multigraph('f_f_h.ld', [f['f_f.ld'], f['h']])
build_multigraph('f_f_hcircumflex.ld', [f['f_f.ld'], f['hcircumflex']])
build_multigraph('f_f_k.ld', [f['f_f.ld'], f['k']])
build_multigraph('f_f_l.ld', [f['f_f.ld'], f['l']])
#--------------------------------------------------------------------------
f.selection.all()
space_selected_by_anchors(f)
f.selection.none()
generate_kerning_and_read_features(None, f)
#--------------------------------------------------------------------------
font_db.db_close(f)
#--------------------------------------------------------------------------
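# ---------------------------------------------------------------------------
# Hedged usage sketch: build_glyphs is normally run from FontForge's Python
# with an open Sorts Mill font; the file name below is a placeholder.
#
#   import fontforge
#   f = fontforge.open('SortsMillFont.sfd')
#   build_glyphs(None, f)      # first argument ("bitbucket") is unused
#   f.save('SortsMillFont.sfd')
# ---------------------------------------------------------------------------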
|
|
import sys
from json import loads
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from unittest import mock
except ImportError:
import mock
from django.conf import settings
from django.utils.translation import activate
from django.template.base import Template, TemplateSyntaxError
from django.template.context import Context
from django.test.utils import override_settings
from django.test import TestCase
from django.contrib.auth.models import Permission
from django.contrib.admin.sites import site
from django.core.management import call_command
from django.core.exceptions import ImproperlyConfigured
from django.conf.urls import patterns, url
from sitetree.models import Tree, TreeItem
from sitetree.forms import TreeItemForm
from sitetree.admin import TreeAdmin, TreeItemAdmin, redirects_handler
from sitetree.utils import (
tree, item, get_app_n_model, import_app_sitetree_module, import_project_sitetree_modules, get_model_class
)
from sitetree.sitetreeapp import (
SiteTree, SiteTreeError, register_items_hook, register_i18n_trees, register_dynamic_trees, compose_dynamic_tree
)
urlpatterns = patterns(
'',
url(r'articles/', lambda r: None, name='articles_list'),
url(r'articles/(\d+)/', lambda r: None, name='articles_detailed'),
url(r'articles/(?P<id>\d+)_(?P<slug>[\w-]+)/', lambda r: None, name='url'),
)
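# For reference: with these patterns, reverse('articles_list') yields '/articles/'
# (as asserted in the breadcrumbs test below), and reverse('articles_detailed',
# args=[10]) would yield '/articles/10/'.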
class MockRequest(object):
def __init__(self, path=None, user_authorized=None, meta=None):
if path is None:
path = '/'
if user_authorized is None:
user_authorized = True
self.path = path
self.user = MockUser(user_authorized)
self.META = meta
class MockUser(object):
def __init__(self, authorized):
self.authorized = authorized
def is_authenticated(self):
return self.authorized
def get_mock_context(app=None, path=None, user_authorized=False, tree_item=None, put_var=None):
ctx = Context(
{
'request': MockRequest(path, user_authorized),
't2_root2_title': 'my_real_title', 'art_id': 10, 'tree_item': tree_item,
'somevar_str': 'articles_list', 'somevar_list': ['a', 'b'], 'put_var': put_var
},
current_app=app
)
ctx.template = mock.MagicMock()
ctx.template.engine.string_if_invalid = ''
return ctx
def render_string(string, context=None, context_put_var=None, context_path=None):
return Template(string).render(Context(context or get_mock_context(path=context_path, put_var=context_put_var)))
class SitetreeTest(TestCase):
@classmethod
def setUpClass(cls):
cls.init_trees()
@classmethod
def init_trees(cls):
cls.sitetree = SiteTree()
###########################################################
t1 = Tree(alias='tree1')
t1.save()
cls.t1 = t1
t1_root = TreeItem(title='root', tree=t1, url='/')
t1_root.save()
cls.tree_ttags_root = t1_root
t1_root_child1 = TreeItem(title='child1', tree=t1, parent=t1_root, url='/about/')
t1_root_child1.save()
cls.tree_ttags_root_child1 = t1_root_child1
t1_root_child2 = TreeItem(title='child2', tree=t1, parent=t1_root, url='articles_list', urlaspattern=True,
description='items_descr')
t1_root_child2.save()
cls.t1_root_child2 = t1_root_child2
t1_root_child2_sub1 = TreeItem(title='subchild1', tree=t1, parent=t1_root_child2,
url='articles_detailed art_id', urlaspattern=True)
t1_root_child2_sub1.save()
cls.t1_root_child2_sub1 = t1_root_child2_sub1
t1_root_child2_sub2 = TreeItem(title='subchild2', tree=t1, parent=t1_root_child2, url='/not_articles/10/')
t1_root_child2_sub2.save()
cls.t1_root_child2_sub2 = t1_root_child2_sub2
t1_root_child3 = TreeItem(title='child_with_var_str', tree=t1, parent=t1_root, url='somevar_str',
urlaspattern=True)
t1_root_child3.save()
cls.t1_root_child3 = t1_root_child3
t1_root_child4 = TreeItem(title='child_with_var_list', tree=t1, parent=t1_root, url='somevar_list',
urlaspattern=True)
t1_root_child4.save()
t2 = Tree(alias='tree2')
t2.save()
cls.t2 = t2
t2_root1 = TreeItem(title='{{ t2_root1_title }}', tree=t2, url='/')
t2_root1.save()
cls.t2_root1 = t2_root1
t2_root2 = TreeItem(title='put {{ t2_root2_title }} inside', tree=t2, url='/sub/')
t2_root2.save()
cls.t2_root2 = t2_root2
t2_root3 = TreeItem(title='for logged in only', tree=t2, url='/some/', access_loggedin=True)
t2_root3.save()
cls.t2_root3 = t2_root3
t2_root4 = TreeItem(title='url quoting', tree=t2, url='url 2 put_var', urlaspattern=True)
t2_root4.save()
cls.t2_root4 = t2_root4
t2_root5 = TreeItem(title='url quoting 1.5 style', tree=t2, url="'url' 2 put_var", urlaspattern=True)
t2_root5.save()
cls.t2_root5 = t2_root5
t2_root6 = TreeItem(title='url quoting 1.5 style', tree=t2, url='"url" 2 put_var', urlaspattern=True)
t2_root6.save()
cls.t2_root6 = t2_root6
t2_root7 = TreeItem(title='for guests only', tree=t2, url='/some_other/', access_guest=True)
t2_root7.save()
cls.t2_root7 = t2_root7
###########################################################
t3 = Tree(alias='tree3')
t3.save()
cls.t3 = t3
t3_en_root = TreeItem(title='root', tree=t3, url='/', hidden=True)
t3_en_root.save()
cls.t3_root = t3_en_root
t3_root_child1 = TreeItem(title='child1', tree=t3, parent=t3_en_root, url='/0/', access_loggedin=True)
t3_root_child1.save()
cls.t3_root_child1 = t3_root_child1
t3_root_child2 = TreeItem(title='child2', tree=t3, parent=t3_en_root, url='/1/', inmenu=True, hidden=True)
t3_root_child2.save()
cls.t3_root_child2 = t3_root_child2
t3_root_child3 = TreeItem(title='child3', tree=t3, parent=t3_en_root, url='/the_same_url/', inmenu=False)
t3_root_child3.save()
cls.t3_root_child3 = t3_root_child3
t3_root_child4 = TreeItem(title='child4', tree=t3, parent=t3_en_root, url='/3/', hidden=True)
t3_root_child4.save()
cls.t3_root_child4 = t3_root_child4
t3_root_child5 = TreeItem(title='child5', tree=t3, parent=t3_en_root, url='/4/', inmenu=True, hidden=True)
t3_root_child5.save()
cls.t3_root_child5 = t3_root_child5
t3_en = Tree(alias='tree3_en', title='tree3en_title')
t3_en.save()
cls.t3_en = t3_en
t3_en_root = TreeItem(title='root_en', tree=t3_en, url='/')
t3_en_root.save()
cls.t3_en_root = t3_en_root
t3_en_root_child1 = TreeItem(title='child1_en', tree=t3_en, parent=t3_en_root, url='/0_en/')
t3_en_root_child1.save()
t3_en_root_child2 = TreeItem(title='child2_en', tree=t3_en, parent=t3_en_root, url='/the_same_url/')
t3_en_root_child2.save()
###########################################################
tree_main = Tree(alias='main')
tree_main.save()
cls.tree_main = tree_main
tree_main_root = TreeItem(title='root', tree=tree_main, url='/', alias='for_dynamic')
tree_main_root.save()
cls.tree_main_root = tree_main_root
@classmethod
def tearDownClass(cls):
Tree.objects.all().delete()
TreeItem.objects.all().delete()
class TreeModelTest(SitetreeTest):
def test_create_rename_delete(self):
tree = Tree(alias='mytree')
tree.save()
self.assertIsNotNone(tree.id)
self.assertEqual(tree.alias, 'mytree')
tree.alias = 'not_mytree'
tree.save(force_update=True)
self.assertEqual(tree.alias, 'not_mytree')
tree.delete()
self.assertIsNone(tree.id)
def test_unique_aliases(self):
tree1 = Tree(alias='mytree')
tree1.save()
tree2 = Tree(alias='mytree')
self.assertRaises(Exception, tree2.save)
class TreeItemModelTest(SitetreeTest):
def test_url_resolve(self):
self.sitetree.menu('tree1', 'trunk', get_mock_context(path='/', put_var='abrakadabra'))
url = self.sitetree.url(self.t2_root4, get_mock_context(path='/articles/2_slugged/'))
self.assertTrue(url.find('abrakadabra') > -1)
self.sitetree.menu('tree1', 'trunk', get_mock_context(path='/', put_var='booo'))
url = self.sitetree.url(self.t2_root4, get_mock_context(path='/articles/2_slugged-mugged/'))
self.assertTrue(url.find('booo') > -1)
self.sitetree.menu('tree1', 'trunk', get_mock_context(path='/', put_var='rolling'))
url = self.sitetree.url(self.t2_root5, get_mock_context(path='/articles/2_quoted/'))
self.assertTrue(url.find('rolling') > -1)
self.sitetree.menu('tree1', 'trunk', get_mock_context(path='/', put_var='spoon'))
url = self.sitetree.url(self.t2_root6, get_mock_context(path='/articles/2_quoted/'))
self.assertTrue(url.find('spoon') > -1)
def test_no_tree(self):
ti = TreeItem(title='notree_item')
self.assertRaises(Exception, ti.save)
def test_create_rename_delete(self):
ti1 = TreeItem(title='new_root_item', tree=self.t1)
ti1.save()
self.assertIsNotNone(ti1.id)
self.assertEqual(ti1.title, 'new_root_item')
ti1.title = 'not_new_root_item'
ti1.save(force_update=True)
self.assertEqual(ti1.title, 'not_new_root_item')
ti1.delete()
self.assertIsNone(ti1.id)
def test_context_proc_required(self):
context = Context()
old_debug = settings.DEBUG
settings.DEBUG = True
self.assertRaises(SiteTreeError, self.sitetree.menu, 'tree1', 'trunk', context)
settings.DEBUG = old_debug
def test_menu(self):
menu = self.sitetree.menu('tree1', 'trunk', get_mock_context(path='/about/'))
self.assertEqual(len(menu), 1)
self.assertEqual(menu[0].id, self.tree_ttags_root.id)
self.assertEqual(menu[0].is_current, False)
self.assertEqual(menu[0].depth, 0)
self.assertEqual(menu[0].has_children, True)
self.assertEqual(menu[0].in_current_branch, True)
menu = self.sitetree.menu('tree2', 'trunk', get_mock_context(path='/sub/'))
self.assertEqual(len(menu), 6)
self.assertEqual(menu[0].id, self.t2_root1.id)
self.assertEqual(menu[1].id, self.t2_root2.id)
self.assertEqual(menu[0].is_current, False)
self.assertEqual(menu[0].in_current_branch, False)
self.assertEqual(menu[1].is_current, True)
self.assertEqual(menu[1].in_current_branch, True)
self.assertEqual(menu[0].depth, 0)
self.assertEqual(menu[1].depth, 0)
self.assertEqual(menu[0].has_children, False)
self.assertEqual(menu[1].has_children, False)
def test_breadcrumbs(self):
bc1 = self.sitetree.breadcrumbs('tree1', get_mock_context(path='/not_articles/10/'))
self.assertEqual(len(bc1), 3)
self.assertEqual(bc1[0].id, self.tree_ttags_root.id)
self.assertEqual(bc1[1].id, self.t1_root_child2.id)
self.assertEqual(bc1[1].url_resolved, '/articles/')
self.assertEqual(bc1[2].id, self.t1_root_child2_sub2.id)
self.assertEqual(bc1[0].is_current, False)
self.assertEqual(bc1[1].is_current, False)
self.assertEqual(bc1[2].is_current, True)
self.assertEqual(bc1[0].has_children, True)
self.assertEqual(bc1[1].has_children, True)
self.assertEqual(bc1[2].has_children, False)
self.assertEqual(bc1[0].depth, 0)
self.assertEqual(bc1[1].depth, 1)
self.assertEqual(bc1[2].depth, 2)
def test_page_title(self):
title = self.sitetree.get_current_page_title('tree1', get_mock_context(path='/articles/'))
self.assertEqual(title, self.t1_root_child2.title)
title = self.sitetree.get_current_page_title('tree1', get_mock_context(path='/not_articles/'))
self.assertEqual(title, '')
def test_page_attr(self):
attr = self.sitetree.get_current_page_attr('description', 'tree1', get_mock_context(path='/articles/'))
self.assertEqual(attr, self.t1_root_child2.description)
attr = self.sitetree.get_current_page_attr('description', 'tree1', get_mock_context(path='/not_articles/'))
self.assertEqual(attr, '')
def test_sitetree(self):
st1 = self.sitetree.tree('tree1', get_mock_context(path='/articles/'))
self.assertEqual(len(st1), 1)
self.assertEqual(st1[0].id, self.tree_ttags_root.id)
self.assertEqual(st1[0].is_current, False)
self.assertEqual(st1[0].depth, 0)
self.assertEqual(st1[0].has_children, True)
st2 = self.sitetree.tree('tree2', get_mock_context(path='/'))
self.assertIn(self.t2_root7, st2) # Not every item is visible for non logged in.
self.assertNotIn(self.t2_root3, st2)
self.assertEqual(len(st2), 6)
self.assertEqual(st2[0].id, self.t2_root1.id)
self.assertEqual(st2[1].id, self.t2_root2.id)
self.assertEqual(self.t2_root1.access_loggedin, False)
self.assertEqual(self.t2_root1.access_guest, False)
self.assertEqual(self.t2_root2.access_loggedin, False)
self.assertEqual(self.t2_root2.access_guest, False)
self.assertEqual(self.t2_root3.access_loggedin, True)
self.assertEqual(self.t2_root3.access_guest, False)
self.assertEqual(self.t2_root7.access_loggedin, False)
self.assertEqual(self.t2_root7.access_guest, True)
self.assertEqual(st2[0].title, '{{ t2_root1_title }}')
self.assertEqual(st2[1].title, 'put {{ t2_root2_title }} inside')
self.assertEqual(st2[0].title_resolved, '')
self.assertEqual(st2[1].title_resolved, 'put my_real_title inside')
self.assertEqual(st2[0].is_current, True)
self.assertEqual(st2[1].is_current, False)
self.assertEqual(st2[0].depth, 0)
self.assertEqual(st2[1].depth, 0)
self.assertEqual(st2[0].has_children, False)
self.assertEqual(st2[1].has_children, False)
st2 = self.sitetree.tree('tree2', get_mock_context(path='/', user_authorized=True))
        self.assertNotIn(self.t2_root7, st2)  # Guest-only items are hidden from logged in users.
self.assertIn(self.t2_root3, st2)
self.assertEqual(len(st2), 6)
def test_items_hook_tree(self):
def my_processor(tree_items, tree_sender):
for item in tree_items:
item.title_resolved = 'FakedTreeItem'
return tree_items
register_items_hook(my_processor)
items = self.sitetree.tree('tree1', get_mock_context(path='/'))
register_items_hook(None)
self.assertEqual(items[0].title_resolved, 'FakedTreeItem')
def test_items_hook_menu(self):
def my_processor(tree_items, tree_sender):
for item in tree_items:
item.title_resolved = 'FakedMenuItem'
return tree_items
register_items_hook(my_processor)
items = self.sitetree.menu('tree1', 'trunk', get_mock_context(path='/'))
register_items_hook(None)
self.assertEqual(items[0].title_resolved, 'FakedMenuItem')
def test_items_hook_breadcrumbs(self):
def my_processor(tree_items, tree_sender):
for item in tree_items:
item.title_resolved = 'FakedBreadcrumbsItem'
return tree_items
register_items_hook(my_processor)
items = self.sitetree.breadcrumbs('tree1', get_mock_context(path='/not_articles/10/'))
register_items_hook(None)
self.assertEqual(items[0].title_resolved, 'FakedBreadcrumbsItem')
class TemplateTagsTest(SitetreeTest):
@classmethod
def setUpClass(cls):
cls.sitetree = SiteTree()
tree_ttags = Tree(alias='ttags')
tree_ttags.save()
cls.tree_ttags = tree_ttags
tree_ttags_root = TreeItem(
title='root', tree=tree_ttags, url='/',
insitetree=True, inbreadcrumbs=True, inmenu=True
)
tree_ttags_root.save()
cls.tree_ttags_root = tree_ttags_root
tree_ttags_root_child1 = TreeItem(
title='sometitle', tree=tree_ttags, parent=tree_ttags_root, url='/child1',
insitetree=True, inbreadcrumbs=True, inmenu=True,
hint='somehint', description='somedescr'
)
tree_ttags_root_child1.save()
cls.tree_ttags_root_child1 = tree_ttags_root_child1
def test_sitetree_tree(self):
tpl = '{% load sitetree %}{% sitetree_tree "mytree" %}'
self.assertRaises(TemplateSyntaxError, render_string, tpl)
tpl = '{% load sitetree %}{% sitetree_tree from "mytree" %}'
result = render_string(tpl)
self.assertEqual(result.strip(), '')
tpl = '{% load sitetree %}{% sitetree_tree from "ttags" %}'
result = render_string(tpl)
self.assertIn('href="/"', result)
def test_sitetree_children(self):
context = get_mock_context(put_var=self.tree_ttags_root)
self.sitetree.set_global_context(context)
tpl = '{% load sitetree %}{% sitetree_children %}'
self.assertRaises(TemplateSyntaxError, render_string, tpl)
tpl = '{% load sitetree %}{% sitetree_children of put_var for sitetree template "sitetree/tree.html" %}'
result = render_string(tpl, context=context)
self.assertIn('href="/child1"', result)
def test_sitetree_breadcrumbs(self):
tpl = '{% load sitetree %}{% sitetree_breadcrumbs %}'
self.assertRaises(TemplateSyntaxError, render_string, tpl)
tpl = '{% load sitetree %}{% sitetree_breadcrumbs from "mytree" %}'
result = render_string(tpl)
self.assertEqual(result.strip(), '<ul>\n\t\n</ul>')
tpl = '{% load sitetree %}{% sitetree_breadcrumbs from "ttags" %}'
result = render_string(tpl, context_path='/child1')
self.assertIn('href="/"', result)
self.assertIn('root', result)
self.assertIn('sometitle', result)
def test_sitetree_menu(self):
tpl = '{% load sitetree %}{% sitetree_menu %}'
self.assertRaises(TemplateSyntaxError, render_string, tpl)
tpl = '{% load sitetree %}{% sitetree_menu from "mytree" include "trunk" %}'
result = render_string(tpl)
self.assertEqual(result.strip(), '<ul>\n\t\n</ul>')
tpl = '{% load sitetree %}{% sitetree_menu from "ttags" include "trunk" %}'
result = render_string(tpl, context_path='/child1')
self.assertIn('current_branch">root', result)
self.assertIn('current_item current_branch">sometitle', result)
def test_sitetree_page_title(self):
tpl = '{% load sitetree %}{% sitetree_page_title %}'
self.assertRaises(TemplateSyntaxError, render_string, tpl)
with override_settings(DEBUG=True):
tpl = '{% load sitetree %}{% sitetree_page_title from "ttags" %}'
self.assertRaises(SiteTreeError, render_string, tpl, context_path='/somewhere')
tpl = '{% load sitetree %}{% sitetree_page_title from "ttags" %}'
result = render_string(tpl, context_path='/child1')
self.assertEqual(result, 'sometitle')
def test_sitetree_page_hint(self):
tpl = '{% load sitetree %}{% sitetree_page_hint %}'
self.assertRaises(TemplateSyntaxError, render_string, tpl)
with override_settings(DEBUG=True):
tpl = '{% load sitetree %}{% sitetree_page_hint from "ttags" %}'
self.assertRaises(SiteTreeError, render_string, tpl, context_path='/somewhere')
tpl = '{% load sitetree %}{% sitetree_page_hint from "ttags" %}'
result = render_string(tpl, context_path='/child1')
self.assertEqual(result, 'somehint')
def test_sitetree_page_description(self):
tpl = '{% load sitetree %}{% sitetree_page_description %}'
self.assertRaises(TemplateSyntaxError, render_string, tpl)
with override_settings(DEBUG=True):
tpl = '{% load sitetree %}{% sitetree_page_description from "ttags" %}'
self.assertRaises(SiteTreeError, render_string, tpl, context_path='/somewhere')
tpl = '{% load sitetree %}{% sitetree_page_description from "ttags" %}'
result = render_string(tpl, context_path='/child1')
self.assertEqual(result, 'somedescr')
def test_sitetree_url(self):
tpl = '{% load sitetree %}{% sitetree_url %}'
self.assertRaises(TemplateSyntaxError, render_string, tpl)
context = get_mock_context(path='/child1', put_var=self.tree_ttags_root_child1)
tpl = '{% load sitetree %}{% sitetree_url for put_var %}'
result = render_string(tpl, context)
self.assertEqual(result, '/child1')
tpl = '{% load sitetree %}{% sitetree_url for put_var as res_var %}'
render_string(tpl, context)
self.assertEqual(context.get('res_var'), '/child1')
class TreeTest(SitetreeTest):
def test_str(self):
self.assertEqual(self.t3.alias, str(self.t3))
def test_get_title(self):
self.assertEqual(self.t3.get_title(), 'tree3')
self.assertEqual(self.t3_en.get_title(), 'tree3en_title')
def test_children_filtering(self):
self.sitetree._global_context = get_mock_context(path='/')
self.sitetree.get_sitetree('tree3')
children = self.sitetree.get_children('tree3', self.t3_root)
filtered = self.sitetree.filter_items(children, 'menu')
self.assertEqual(filtered, [])
def test_tree_filtering(self):
tree = self.sitetree.tree('tree3', get_mock_context(path='/'))
self.assertEqual(len(tree), 0)
def test_register_i18n_trees(self):
register_i18n_trees(['tree3'])
self.sitetree._global_context = get_mock_context(path='/the_same_url/')
activate('en')
self.sitetree.get_sitetree('tree3')
children = self.sitetree.get_children('tree3', self.t3_en_root)
self.assertEqual(len(children), 2)
self.assertFalse(children[0].is_current)
self.assertTrue(children[1].is_current)
activate('ru')
self.sitetree.lang_init()
self.sitetree.get_sitetree('tree3')
children = self.sitetree.get_children('tree3', self.t3_root)
self.assertEqual(len(children), 5)
self.assertFalse(children[1].is_current)
self.assertTrue(children[2].is_current)
self.assertFalse(children[3].is_current)
class DynamicTreeTest(SitetreeTest):
def test_basic_old(self):
self.basic_test()
def test_basic_new(self):
self.basic_test(new_style=True)
def basic_test(self, new_style=False, reset_cache=False):
trees = (
compose_dynamic_tree((
tree('dynamic_main_root', items=(
item('dynamic_main_root_1', 'dynamic_main_root_1_url', url_as_pattern=False),
item('dynamic_main_root_2', 'dynamic_main_root_2_url', url_as_pattern=False),
)),
), target_tree_alias='main'),
compose_dynamic_tree((
tree('dynamic_main_sub', items=(
item('dynamic_main_sub_1', 'dynamic_main_sub_1_url', url_as_pattern=False),
item('dynamic_main_sub_2', 'dynamic_main_sub_2_url', url_as_pattern=False),
)),
), target_tree_alias='main', parent_tree_item_alias='for_dynamic'),
compose_dynamic_tree((
tree('dynamic', items=(
item('dynamic_1', 'dynamic_1_url', children=(
item('dynamic_1_sub_1', 'dynamic_1_sub_1_url', url_as_pattern=False),
), url_as_pattern=False),
item('dynamic_2', 'dynamic_2_url', url_as_pattern=False),
)),
)),
)
kwargs = {
'reset_cache': reset_cache
}
if new_style:
register_dynamic_trees(*trees, **kwargs)
else:
register_dynamic_trees(trees, **kwargs)
self.sitetree._global_context = get_mock_context(path='/the_same_url/')
tree_alias, sitetree_items = self.sitetree.get_sitetree('main')
self.assertEqual(len(sitetree_items), 5)
self.assertEqual(sitetree_items[3].title, 'dynamic_main_root_1')
self.assertEqual(sitetree_items[4].title, 'dynamic_main_root_2')
children = self.sitetree.get_children('main', self.tree_main_root)
self.assertEqual(len(children), 2)
tree_alias, sitetree_items = self.sitetree.get_sitetree('dynamic')
self.assertEqual(len(sitetree_items), 3)
children = self.sitetree.get_children('dynamic', sitetree_items[0])
self.assertEqual(len(children), 1)
class UtilsItemTest(SitetreeTest):
def test_permission_any(self):
i1 = item('root', 'url')
self.assertEqual(i1.access_perm_type, i1.PERM_TYPE_ALL)
i2 = item('root', 'url', perms_mode_all=True)
self.assertEqual(i2.access_perm_type, i1.PERM_TYPE_ALL)
i3 = item('root', 'url', perms_mode_all=False)
self.assertEqual(i3.access_perm_type, i1.PERM_TYPE_ANY)
def test_permissions_none(self):
i1 = item('root', 'url')
self.assertEqual(i1.permissions, [])
def test_int_permissions(self):
i1 = item('root', 'url', access_by_perms=[1, 2, 3])
self.assertEqual(i1.permissions, [1, 2, 3])
    def test_get_model_class(self):
        cls = get_model_class('MODEL_TREE')
        self.assertIs(cls, Tree)
    def test_import_project_sitetree_modules(self):
        import_project_sitetree_modules()
def test_import_app_sitetree_module(self):
self.assertRaises(ImportError, import_app_sitetree_module, 'sitetre')
def test_get_app_n_model(self):
app, model = get_app_n_model('MODEL_TREE')
self.assertEqual(app, 'sitetree')
self.assertEqual(model, 'Tree')
self.assertRaises(ImproperlyConfigured, get_app_n_model, 'ALIAS_TRUNK')
def test_valid_string_permissions(self):
perm = Permission.objects.all()[0]
perm_name = '%s.%s' % (perm.content_type.app_label, perm.codename)
i1 = item('root', 'url', access_by_perms=perm_name)
self.assertEqual(i1.permissions, [perm])
def test_perm_obj_permissions(self):
perm = Permission.objects.all()[0]
i1 = item('root', 'url', access_by_perms=perm)
self.assertEqual(i1.permissions, [perm])
def test_bad_string_permissions(self):
self.assertRaises(ValueError, item, 'root', 'url', access_by_perms='bad name')
self.assertRaises(ValueError, item, 'root', 'url', access_by_perms='unknown.name')
self.assertRaises(ValueError, item, 'root', 'url', access_by_perms=42.2)
def test_access_restricted(self):
        # Restriction is implied when permissions are supplied.
        i0 = item('root', 'url', access_by_perms=1)
        self.assertEqual(i0.access_restricted, True)
        # Without permissions, the default is unrestricted.
        i1 = item('root', 'url')
        self.assertEqual(i1.access_restricted, False)
class TestAdmin(SitetreeTest):
def test_redirects_handler(self):
def get_handler(referer, item_id=None):
req = MockRequest(referer, True, {
'HTTP_REFERER': referer
})
args = [req]
kwargs = {}
if item_id is not None:
kwargs['item_id'] = item_id
return redirects_handler(*args, **kwargs)
handler = get_handler('/')
self.assertEqual(handler._headers['location'][1], '/../')
handler = get_handler('/delete/')
self.assertEqual(handler._headers['location'][1], '/delete/../../')
handler = get_handler('/history/')
self.assertEqual(handler._headers['location'][1], '/history/../../')
handler = get_handler('/history/', 42)
self.assertEqual(handler._headers['location'][1], '/history/../')
def test_tree_item_admin(self):
admin = TreeItemAdmin(TreeItem, site)
admin.tree = Tree.objects.get(alias='main')
form = admin.get_form(MockRequest())
self.assertEqual(len(form.known_url_names), 3)
self.assertIn('articles_list', form.known_url_names)
self.assertIn('articles_detailed', form.known_url_names)
self.assertIn('url', form.known_url_names)
def test_tree_item_admin_get_tree(self):
main_tree = Tree.objects.get(alias='main')
main_tree_item = TreeItem.objects.filter(tree__alias='main')[0]
admin = TreeItemAdmin(TreeItem, site)
tree = admin.get_tree(MockRequest(), main_tree.pk)
self.assertEqual(tree.alias, 'main')
tree = admin.get_tree(MockRequest(), None, main_tree_item.pk)
self.assertEqual(tree.alias, 'main')
def test_tree_item_admin_item_move(self):
main_tree = Tree.objects.get(alias='main')
admin = TreeItemAdmin(TreeItem, site)
new_item_1 = TreeItem(title='title_1', sort_order=1, tree_id=main_tree.pk)
new_item_1.save()
new_item_2 = TreeItem(title='title_2', sort_order=2, tree_id=main_tree.pk)
new_item_2.save()
new_item_3 = TreeItem(title='title_3', sort_order=3, tree_id=main_tree.pk)
new_item_3.save()
admin.item_move(None, None, new_item_2.id, 'up')
self.assertEqual(TreeItem.objects.get(pk=new_item_1.id).sort_order, 2)
self.assertEqual(TreeItem.objects.get(pk=new_item_2.id).sort_order, 1)
self.assertEqual(TreeItem.objects.get(pk=new_item_3.id).sort_order, 3)
admin.item_move(None, None, new_item_1.id, 'down')
self.assertEqual(TreeItem.objects.get(pk=new_item_1.id).sort_order, 3)
self.assertEqual(TreeItem.objects.get(pk=new_item_2.id).sort_order, 1)
self.assertEqual(TreeItem.objects.get(pk=new_item_3.id).sort_order, 2)
def test_tree_item_admin_save_model(self):
main_tree = Tree.objects.get(alias='main')
tree_item = TreeItem.objects.filter(tree__alias='main')[0]
admin = TreeItemAdmin(TreeItem, site)
admin.tree = main_tree
admin.save_model(MockRequest(), tree_item, None, change=True)
self.assertIs(tree_item.tree, admin.tree)
def test_tree_admin(self):
admin = TreeAdmin(Tree, site)
urls = admin.get_urls()
self.assertIn('tree_id', urls[1]._regex)
class TestForms(SitetreeTest):
def test_basic(self):
form = TreeItemForm(tree='main', tree_item='root')
self.assertIn('tree_item', form.fields)
self.assertEqual(form.fields['tree_item'].tree, 'main')
self.assertEqual(form.fields['tree_item'].initial, 'root')
self.assertEqual(form.fields['tree_item'].choices[1][1], 'root')
class TestManagementCommands(SitetreeTest):
def setUp(self):
self.file_contents = (
'[{"pk": 2, "fields": {"alias": "/tree1/", "title": "tree one"}, "model": "sitetree.tree"}, '
'{"pk": 3, "fields": {"alias": "/tree2/", "title": "tree two"}, "model": "sitetree.tree"}, '
'{"pk": 7, "fields": {"access_restricted": false, "inmenu": true, "title": "tree item one",'
' "hidden": false, "description": "", "alias": null, "url": "/tree1/item1/", "access_loggedin": false,'
' "urlaspattern": false, "access_perm_type": 1, "tree": 2, "hint": "", "inbreadcrumbs": true,'
' "access_permissions": [], "sort_order": 7, "access_guest": false, "parent": null, "insitetree": true},'
' "model": "sitetree.treeitem"}]')
def test_sitetreedump(self):
stdout = sys.stdout
sys.stdout = StringIO()
call_command('sitetreedump')
output = loads(sys.stdout.getvalue())
sys.stdout = stdout
self.assertEqual(output[0]['model'], 'sitetree.tree')
self.assertEqual(output[5]['model'], 'sitetree.treeitem')
def test_sitetreeload(self):
try:
import __builtin__
patch_val = '__builtin__.open'
except ImportError:
# python3
patch_val = 'builtins.open'
with mock.patch(patch_val) as mock_file:
mock_file.return_value.__enter__ = lambda s: s
mock_file.return_value.__exit__ = mock.Mock()
mock_file.return_value.read.return_value = self.file_contents
call_command('sitetreeload', 'somefile.json')
self.assertTrue(Tree.objects.filter(title='tree one').exists())
self.assertTrue(Tree.objects.filter(title='tree two').exists())
self.assertTrue(TreeItem.objects.filter(title='tree item one').exists())
|
|
import unittest
from test import test_support
import zlib
import random
# print test_support.TESTFN
def getbuf():
# This was in the original. Avoid non-repeatable sources.
# Left here (unused) in case something wants to be done with it.
import imp
try:
t = imp.find_module('test_zlib')
file = t[0]
except ImportError:
file = open(__file__)
buf = file.read() * 8
file.close()
return buf
class ChecksumTestCase(unittest.TestCase):
# checksum test cases
def test_crc32start(self):
self.assertEqual(zlib.crc32(""), zlib.crc32("", 0))
self.assert_(zlib.crc32("abc", 0xffffffff))
def test_crc32empty(self):
self.assertEqual(zlib.crc32("", 0), 0)
self.assertEqual(zlib.crc32("", 1), 1)
self.assertEqual(zlib.crc32("", 432), 432)
def test_adler32start(self):
self.assertEqual(zlib.adler32(""), zlib.adler32("", 1))
self.assert_(zlib.adler32("abc", 0xffffffff))
def test_adler32empty(self):
self.assertEqual(zlib.adler32("", 0), 0)
self.assertEqual(zlib.adler32("", 1), 1)
self.assertEqual(zlib.adler32("", 432), 432)
def assertEqual32(self, seen, expected):
# 32-bit values masked -- checksums on 32- vs 64- bit machines
# This is important if bit 31 (0x08000000L) is set.
self.assertEqual(seen & 0x0FFFFFFFFL, expected & 0x0FFFFFFFFL)
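    # Illustrative note (added): some builds of Python 2 report crc32/adler32 as
    # negative signed ints while others report the same bit pattern as a large
    # positive int.  Masking both sides as assertEqual32() does compares the raw
    # 32-bit patterns, e.g. (-1 & 0x0FFFFFFFFL) == (0xFFFFFFFFL & 0x0FFFFFFFFL).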
def test_penguins(self):
self.assertEqual32(zlib.crc32("penguin", 0), 0x0e5c1a120L)
self.assertEqual32(zlib.crc32("penguin", 1), 0x43b6aa94)
self.assertEqual32(zlib.adler32("penguin", 0), 0x0bcf02f6)
self.assertEqual32(zlib.adler32("penguin", 1), 0x0bd602f7)
self.assertEqual(zlib.crc32("penguin"), zlib.crc32("penguin", 0))
self.assertEqual(zlib.adler32("penguin"),zlib.adler32("penguin",1))
class ExceptionTestCase(unittest.TestCase):
# make sure we generate some expected errors
def test_bigbits(self):
# specifying total bits too large causes an error
self.assertRaises(zlib.error,
zlib.compress, 'ERROR', zlib.MAX_WBITS + 1)
def test_badcompressobj(self):
# verify failure on building compress object with bad params
self.assertRaises(ValueError, zlib.compressobj, 1, zlib.DEFLATED, 0)
def test_baddecompressobj(self):
# verify failure on building decompress object with bad params
self.assertRaises(ValueError, zlib.decompressobj, 0)
class CompressTestCase(unittest.TestCase):
# Test compression in one go (whole message compression)
def test_speech(self):
x = zlib.compress(HAMLET_SCENE)
self.assertEqual(zlib.decompress(x), HAMLET_SCENE)
def test_speech128(self):
# compress more data
data = HAMLET_SCENE * 128
x = zlib.compress(data)
self.assertEqual(zlib.decompress(x), data)
class CompressObjectTestCase(unittest.TestCase):
# Test compression object
def test_pair(self):
# straightforward compress/decompress objects
data = HAMLET_SCENE * 128
co = zlib.compressobj()
x1 = co.compress(data)
x2 = co.flush()
self.assertRaises(zlib.error, co.flush) # second flush should not work
dco = zlib.decompressobj()
y1 = dco.decompress(x1 + x2)
y2 = dco.flush()
self.assertEqual(data, y1 + y2)
def test_compressoptions(self):
# specify lots of options to compressobj()
level = 2
method = zlib.DEFLATED
wbits = -12
memlevel = 9
strategy = zlib.Z_FILTERED
co = zlib.compressobj(level, method, wbits, memlevel, strategy)
x1 = co.compress(HAMLET_SCENE)
x2 = co.flush()
dco = zlib.decompressobj(wbits)
y1 = dco.decompress(x1 + x2)
y2 = dco.flush()
self.assertEqual(HAMLET_SCENE, y1 + y2)
def test_compressincremental(self):
# compress object in steps, decompress object as one-shot
data = HAMLET_SCENE * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), 256):
bufs.append(co.compress(data[i:i+256]))
bufs.append(co.flush())
combuf = ''.join(bufs)
dco = zlib.decompressobj()
        y1 = dco.decompress(combuf)
y2 = dco.flush()
self.assertEqual(data, y1 + y2)
def test_decompinc(self, flush=False, source=None, cx=256, dcx=64):
# compress object in steps, decompress object in steps
source = source or HAMLET_SCENE
data = source * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), cx):
bufs.append(co.compress(data[i:i+cx]))
bufs.append(co.flush())
combuf = ''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf))
dco = zlib.decompressobj()
bufs = []
for i in range(0, len(combuf), dcx):
bufs.append(dco.decompress(combuf[i:i+dcx]))
self.assertEqual('', dco.unconsumed_tail, ########
"(A) uct should be '': not %d long" %
len(dco.unconsumed_tail))
if flush:
bufs.append(dco.flush())
else:
while True:
chunk = dco.decompress('')
if chunk:
bufs.append(chunk)
else:
break
self.assertEqual('', dco.unconsumed_tail, ########
"(B) uct should be '': not %d long" %
len(dco.unconsumed_tail))
self.assertEqual(data, ''.join(bufs))
# Failure means: "decompressobj with init options failed"
def test_decompincflush(self):
self.test_decompinc(flush=True)
def test_decompimax(self, source=None, cx=256, dcx=64):
# compress in steps, decompress in length-restricted steps
source = source or HAMLET_SCENE
# Check a decompression object with max_length specified
data = source * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), cx):
bufs.append(co.compress(data[i:i+cx]))
bufs.append(co.flush())
combuf = ''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf),
'compressed data failure')
dco = zlib.decompressobj()
bufs = []
cb = combuf
while cb:
#max_length = 1 + len(cb)//10
chunk = dco.decompress(cb, dcx)
self.failIf(len(chunk) > dcx,
'chunk too big (%d>%d)' % (len(chunk), dcx))
bufs.append(chunk)
cb = dco.unconsumed_tail
bufs.append(dco.flush())
self.assertEqual(data, ''.join(bufs), 'Wrong data retrieved')
def test_decompressmaxlen(self, flush=False):
# Check a decompression object with max_length specified
data = HAMLET_SCENE * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), 256):
bufs.append(co.compress(data[i:i+256]))
bufs.append(co.flush())
combuf = ''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf),
'compressed data failure')
dco = zlib.decompressobj()
bufs = []
cb = combuf
while cb:
max_length = 1 + len(cb)//10
chunk = dco.decompress(cb, max_length)
self.failIf(len(chunk) > max_length,
'chunk too big (%d>%d)' % (len(chunk),max_length))
bufs.append(chunk)
cb = dco.unconsumed_tail
if flush:
bufs.append(dco.flush())
else:
while chunk:
chunk = dco.decompress('', max_length)
self.failIf(len(chunk) > max_length,
'chunk too big (%d>%d)' % (len(chunk),max_length))
bufs.append(chunk)
self.assertEqual(data, ''.join(bufs), 'Wrong data retrieved')
def test_decompressmaxlenflush(self):
self.test_decompressmaxlen(flush=True)
def test_maxlenmisc(self):
# Misc tests of max_length
dco = zlib.decompressobj()
self.assertRaises(ValueError, dco.decompress, "", -1)
self.assertEqual('', dco.unconsumed_tail)
def test_flushes(self):
# Test flush() with the various options, using all the
# different levels in order to provide more variations.
sync_opt = ['Z_NO_FLUSH', 'Z_SYNC_FLUSH', 'Z_FULL_FLUSH']
sync_opt = [getattr(zlib, opt) for opt in sync_opt
if hasattr(zlib, opt)]
data = HAMLET_SCENE * 8
for sync in sync_opt:
for level in range(10):
obj = zlib.compressobj( level )
a = obj.compress( data[:3000] )
b = obj.flush( sync )
c = obj.compress( data[3000:] )
d = obj.flush()
self.assertEqual(zlib.decompress(''.join([a,b,c,d])),
data, ("Decompress failed: flush "
"mode=%i, level=%i") % (sync, level))
del obj
def test_odd_flush(self):
# Test for odd flushing bugs noted in 2.0, and hopefully fixed in 2.1
import random
if hasattr(zlib, 'Z_SYNC_FLUSH'):
# Testing on 17K of "random" data
# Create compressor and decompressor objects
co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
dco = zlib.decompressobj()
# Try 17K of data
# generate random data stream
try:
# In 2.3 and later, WichmannHill is the RNG of the bug report
gen = random.WichmannHill()
except AttributeError:
try:
# 2.2 called it Random
gen = random.Random()
except AttributeError:
# others might simply have a single RNG
gen = random
gen.seed(1)
data = genblock(1, 17 * 1024, generator=gen)
# compress, sync-flush, and decompress
first = co.compress(data)
second = co.flush(zlib.Z_SYNC_FLUSH)
expanded = dco.decompress(first + second)
# if decompressed data is different from the input data, choke.
self.assertEqual(expanded, data, "17K random source doesn't match")
def test_empty_flush(self):
# Test that calling .flush() on unused objects works.
# (Bug #1083110 -- calling .flush() on decompress objects
# caused a core dump.)
co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
self.failUnless(co.flush()) # Returns a zlib header
dco = zlib.decompressobj()
self.assertEqual(dco.flush(), "") # Returns nothing
if hasattr(zlib.compressobj(), "copy"):
def test_compresscopy(self):
# Test copying a compression object
data0 = HAMLET_SCENE
data1 = HAMLET_SCENE.swapcase()
c0 = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
bufs0 = []
bufs0.append(c0.compress(data0))
c1 = c0.copy()
bufs1 = bufs0[:]
bufs0.append(c0.compress(data0))
bufs0.append(c0.flush())
s0 = ''.join(bufs0)
bufs1.append(c1.compress(data1))
bufs1.append(c1.flush())
s1 = ''.join(bufs1)
self.assertEqual(zlib.decompress(s0),data0+data0)
self.assertEqual(zlib.decompress(s1),data0+data1)
def test_badcompresscopy(self):
# Test copying a compression object in an inconsistent state
c = zlib.compressobj()
c.compress(HAMLET_SCENE)
c.flush()
self.assertRaises(ValueError, c.copy)
if hasattr(zlib.decompressobj(), "copy"):
def test_decompresscopy(self):
# Test copying a decompression object
data = HAMLET_SCENE
comp = zlib.compress(data)
d0 = zlib.decompressobj()
bufs0 = []
bufs0.append(d0.decompress(comp[:32]))
d1 = d0.copy()
bufs1 = bufs0[:]
bufs0.append(d0.decompress(comp[32:]))
s0 = ''.join(bufs0)
bufs1.append(d1.decompress(comp[32:]))
s1 = ''.join(bufs1)
self.assertEqual(s0,s1)
self.assertEqual(s0,data)
def test_baddecompresscopy(self):
# Test copying a compression object in an inconsistent state
data = zlib.compress(HAMLET_SCENE)
d = zlib.decompressobj()
d.decompress(data)
d.flush()
self.assertRaises(ValueError, d.copy)
def genblock(seed, length, step=1024, generator=random):
"""length-byte stream of random data from a seed (in step-byte blocks)."""
if seed is not None:
generator.seed(seed)
randint = generator.randint
if length < step or step < 2:
step = length
blocks = []
for i in range(0, length, step):
blocks.append(''.join([chr(randint(0,255))
for x in range(step)]))
return ''.join(blocks)[:length]
def choose_lines(source, number, seed=None, generator=random):
"""Return a list of number lines randomly chosen from the source"""
if seed is not None:
generator.seed(seed)
sources = source.split('\n')
return [generator.choice(sources) for n in range(number)]
HAMLET_SCENE = """
LAERTES
O, fear me not.
I stay too long: but here my father comes.
Enter POLONIUS
A double blessing is a double grace,
Occasion smiles upon a second leave.
LORD POLONIUS
Yet here, Laertes! aboard, aboard, for shame!
The wind sits in the shoulder of your sail,
And you are stay'd for. There; my blessing with thee!
And these few precepts in thy memory
See thou character. Give thy thoughts no tongue,
Nor any unproportioned thought his act.
Be thou familiar, but by no means vulgar.
Those friends thou hast, and their adoption tried,
Grapple them to thy soul with hoops of steel;
But do not dull thy palm with entertainment
Of each new-hatch'd, unfledged comrade. Beware
Of entrance to a quarrel, but being in,
Bear't that the opposed may beware of thee.
Give every man thy ear, but few thy voice;
Take each man's censure, but reserve thy judgment.
Costly thy habit as thy purse can buy,
But not express'd in fancy; rich, not gaudy;
For the apparel oft proclaims the man,
And they in France of the best rank and station
Are of a most select and generous chief in that.
Neither a borrower nor a lender be;
For loan oft loses both itself and friend,
And borrowing dulls the edge of husbandry.
This above all: to thine ownself be true,
And it must follow, as the night the day,
Thou canst not then be false to any man.
Farewell: my blessing season this in thee!
LAERTES
Most humbly do I take my leave, my lord.
LORD POLONIUS
The time invites you; go; your servants tend.
LAERTES
Farewell, Ophelia; and remember well
What I have said to you.
OPHELIA
'Tis in my memory lock'd,
And you yourself shall keep the key of it.
LAERTES
Farewell.
"""
def test_main():
test_support.run_unittest(
ChecksumTestCase,
ExceptionTestCase,
CompressTestCase,
CompressObjectTestCase
)
if __name__ == "__main__":
test_main()
def test(tests=''):
if not tests: tests = 'o'
testcases = []
if 'k' in tests: testcases.append(ChecksumTestCase)
if 'x' in tests: testcases.append(ExceptionTestCase)
if 'c' in tests: testcases.append(CompressTestCase)
if 'o' in tests: testcases.append(CompressObjectTestCase)
test_support.run_unittest(*testcases)
if False:
import sys
sys.path.insert(1, '/Py23Src/python/dist/src/Lib/test')
import test_zlib as tz
ts, ut = tz.test_support, tz.unittest
su = ut.TestSuite()
su.addTest(ut.makeSuite(tz.CompressTestCase))
ts.run_suite(su)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = """
---
module: kube
short_description: Manage Kubernetes Cluster
description:
- Create, replace, remove, and stop resources within a Kubernetes Cluster
version_added: "2.0"
options:
name:
required: false
default: null
description:
      - The name associated with the resource
filename:
required: false
default: null
description:
- The path and filename of the resource(s) definition file(s).
- To operate on several files this can accept a comma separated list of files or a list of files.
aliases: [ 'files', 'file', 'filenames' ]
kubectl:
required: false
default: null
description:
- The path to the kubectl bin
namespace:
required: false
default: null
description:
- The namespace associated with the resource(s)
resource:
required: false
default: null
description:
- The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
label:
required: false
default: null
description:
- The labels used to filter specific resources.
server:
required: false
default: null
description:
- The url for the API server that commands are executed against.
force:
required: false
default: false
description:
      - Force the delete, replace, or stop operation.
wait:
required: false
default: false
description:
      - Wait for resources to be created before continuing to the next step.
all:
required: false
default: false
description:
      - Apply the action to all resources (delete all, stop all) or, when checking existence, look across all namespaces.
log_level:
required: false
default: 0
description:
- Indicates the level of verbosity of logging by kubectl.
state:
required: false
choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
default: present
description:
- present handles checking existence or creating if definition file provided,
absent handles deleting resource(s) based on other options,
latest handles creating or updating based on existence,
reloaded handles updating resource(s) definition using definition file,
stopped handles stopping resource(s) based on other options.
recursive:
required: false
default: false
description:
- Process the directory used in -f, --filename recursively.
Useful when you want to manage related manifests organized
within the same directory.
requirements:
- kubectl
author: "Kenny Jones (@kenjones-cisco)"
"""
EXAMPLES = """
- name: test nginx is present
kube: name=nginx resource=rc state=present
- name: test nginx is stopped
kube: name=nginx resource=rc state=stopped
- name: test nginx is absent
kube: name=nginx resource=rc state=absent
- name: test nginx is present
kube: filename=/tmp/nginx.yml
- name: test nginx and postgresql are present
kube: files=/tmp/nginx.yml,/tmp/postgresql.yml
- name: test nginx and postgresql are present
kube:
files:
- /tmp/nginx.yml
- /tmp/postgresql.yml
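# An additional illustrative example (not from the original docs), exercising
# the 'latest' state and the 'recursive' option described above:
- name: test manifests in a directory are up to date
  kube:
    files: /tmp/manifests
    state: latest
    recursive: true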
"""
class KubeManager(object):
def __init__(self, module):
self.module = module
self.kubectl = module.params.get('kubectl')
if self.kubectl is None:
self.kubectl = module.get_bin_path('kubectl', True)
self.base_cmd = [self.kubectl]
if module.params.get('server'):
self.base_cmd.append('--server=' + module.params.get('server'))
if module.params.get('log_level'):
self.base_cmd.append('--v=' + str(module.params.get('log_level')))
if module.params.get('namespace'):
self.base_cmd.append('--namespace=' + module.params.get('namespace'))
self.all = module.params.get('all')
self.force = module.params.get('force')
self.wait = module.params.get('wait')
self.name = module.params.get('name')
self.filename = [f.strip() for f in module.params.get('filename') or []]
self.resource = module.params.get('resource')
self.label = module.params.get('label')
self.recursive = module.params.get('recursive')
def _execute(self, cmd):
args = self.base_cmd + cmd
try:
rc, out, err = self.module.run_command(args)
if rc != 0:
self.module.fail_json(
msg='error running kubectl (%s) command (rc=%d), out=\'%s\', err=\'%s\'' % (' '.join(args), rc, out, err))
except Exception as exc:
self.module.fail_json(
msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
return out.splitlines()
def _execute_nofail(self, cmd):
args = self.base_cmd + cmd
rc, out, err = self.module.run_command(args)
if rc != 0:
return None
return out.splitlines()
def create(self, check=True, force=True):
if check and self.exists():
return []
cmd = ['apply']
if force:
cmd.append('--force')
if self.wait:
cmd.append('--wait')
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
if not self.filename:
self.module.fail_json(msg='filename required to create')
cmd.append('--filename=' + ','.join(self.filename))
return self._execute(cmd)
def replace(self, force=True):
cmd = ['apply']
if force:
cmd.append('--force')
if self.wait:
cmd.append('--wait')
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
if not self.filename:
self.module.fail_json(msg='filename required to reload')
cmd.append('--filename=' + ','.join(self.filename))
return self._execute(cmd)
def delete(self):
if not self.force and not self.exists():
return []
cmd = ['delete']
if self.filename:
cmd.append('--filename=' + ','.join(self.filename))
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
else:
if not self.resource:
self.module.fail_json(msg='resource required to delete without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
return self._execute(cmd)
def exists(self):
cmd = ['get']
if self.filename:
cmd.append('--filename=' + ','.join(self.filename))
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
else:
if not self.resource:
self.module.fail_json(msg='resource required without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all-namespaces')
cmd.append('--no-headers')
result = self._execute_nofail(cmd)
if not result:
return False
return True
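    # Illustrative note (added): for e.g. name='nginx', resource='rc' and
    # namespace='default', exists() runs roughly
    #     kubectl --namespace=default get rc nginx --no-headers
    # and treats any non-empty output as "the resource exists".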
# TODO: This is currently unused, perhaps convert to 'scale' with a replicas param?
def stop(self):
if not self.force and not self.exists():
return []
cmd = ['stop']
if self.filename:
cmd.append('--filename=' + ','.join(self.filename))
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
else:
if not self.resource:
self.module.fail_json(msg='resource required to stop without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
return self._execute(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(),
filename=dict(type='list', aliases=['files', 'file', 'filenames']),
namespace=dict(),
resource=dict(),
label=dict(),
server=dict(),
kubectl=dict(),
force=dict(default=False, type='bool'),
wait=dict(default=False, type='bool'),
all=dict(default=False, type='bool'),
log_level=dict(default=0, type='int'),
state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
recursive=dict(default=False, type='bool'),
),
mutually_exclusive=[['filename', 'list']]
)
changed = False
manager = KubeManager(module)
state = module.params.get('state')
if state == 'present':
result = manager.create(check=False)
elif state == 'absent':
result = manager.delete()
elif state == 'reloaded':
result = manager.replace()
elif state == 'stopped':
result = manager.stop()
elif state == 'latest':
result = manager.replace()
else:
module.fail_json(msg='Unrecognized state %s.' % state)
module.exit_json(changed=changed,
msg='success: %s' % (' '.join(result))
)
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()
|
|
"""Tests for binary operators on subtypes of built-in types."""
import unittest
from operator import eq, le, ne
from abc import ABCMeta
def gcd(a, b):
"""Greatest common divisor using Euclid's algorithm."""
while a:
a, b = b%a, a
return b
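# Illustrative examples (added): the loop ends when ``a`` reaches zero and the
# last remainder held in ``b`` is returned, which can be negative, exactly as
# RatTestCase.test_gcd() below asserts:
#     gcd(10, 12) == 2      gcd(-10, 2) == -2
#     gcd(10, -2) == 2      gcd(-10, -2) == -2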
def isint(x):
"""Test whether an object is an instance of int."""
return isinstance(x, int)
def isnum(x):
"""Test whether an object is an instance of a built-in numeric type."""
for T in int, float, complex:
if isinstance(x, T):
return 1
return 0
def isRat(x):
"""Test whether an object is an instance of the Rat class."""
return isinstance(x, Rat)
class Rat(object):
"""Rational number implemented as a normalized pair of ints."""
__slots__ = ['_Rat__num', '_Rat__den']
def __init__(self, num=0, den=1):
"""Constructor: Rat([num[, den]]).
The arguments must be ints, and default to (0, 1)."""
if not isint(num):
raise TypeError("Rat numerator must be int (%r)" % num)
if not isint(den):
raise TypeError("Rat denominator must be int (%r)" % den)
        # A zero denominator cannot be represented.
if den == 0:
raise ZeroDivisionError("zero denominator")
g = gcd(den, num)
self.__num = int(num//g)
self.__den = int(den//g)
def _get_num(self):
"""Accessor function for read-only 'num' attribute of Rat."""
return self.__num
num = property(_get_num, None)
def _get_den(self):
"""Accessor function for read-only 'den' attribute of Rat."""
return self.__den
den = property(_get_den, None)
def __repr__(self):
"""Convert a Rat to a string resembling a Rat constructor call."""
return "Rat(%d, %d)" % (self.__num, self.__den)
def __str__(self):
"""Convert a Rat to a string resembling a decimal numeric value."""
return str(float(self))
def __float__(self):
"""Convert a Rat to a float."""
return self.__num*1.0/self.__den
def __int__(self):
"""Convert a Rat to an int; self.den must be 1."""
if self.__den == 1:
try:
return int(self.__num)
except OverflowError:
raise OverflowError("%s too large to convert to int" %
repr(self))
raise ValueError("can't convert %s to int" % repr(self))
def __add__(self, other):
"""Add two Rats, or a Rat and a number."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(self.__num*other.__den + other.__num*self.__den,
self.__den*other.__den)
if isnum(other):
return float(self) + other
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two Rats, or a Rat and a number."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(self.__num*other.__den - other.__num*self.__den,
self.__den*other.__den)
if isnum(other):
return float(self) - other
return NotImplemented
def __rsub__(self, other):
"""Subtract two Rats, or a Rat and a number (reversed args)."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(other.__num*self.__den - self.__num*other.__den,
self.__den*other.__den)
if isnum(other):
return other - float(self)
return NotImplemented
def __mul__(self, other):
"""Multiply two Rats, or a Rat and a number."""
if isRat(other):
return Rat(self.__num*other.__num, self.__den*other.__den)
if isint(other):
return Rat(self.__num*other, self.__den)
if isnum(other):
return float(self)*other
return NotImplemented
__rmul__ = __mul__
def __truediv__(self, other):
"""Divide two Rats, or a Rat and a number."""
if isRat(other):
return Rat(self.__num*other.__den, self.__den*other.__num)
if isint(other):
return Rat(self.__num, self.__den*other)
if isnum(other):
return float(self) / other
return NotImplemented
def __rtruediv__(self, other):
"""Divide two Rats, or a Rat and a number (reversed args)."""
if isRat(other):
return Rat(other.__num*self.__den, other.__den*self.__num)
if isint(other):
return Rat(other*self.__den, self.__num)
if isnum(other):
return other / float(self)
return NotImplemented
def __floordiv__(self, other):
"""Divide two Rats, returning the floored result."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
x = self/other
return x.__num // x.__den
def __rfloordiv__(self, other):
"""Divide two Rats, returning the floored result (reversed args)."""
x = other/self
return x.__num // x.__den
def __divmod__(self, other):
"""Divide two Rats, returning quotient and remainder."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
x = self//other
return (x, self - other * x)
def __rdivmod__(self, other):
"""Divide two Rats, returning quotient and remainder (reversed args)."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
return divmod(other, self)
def __mod__(self, other):
"""Take one Rat modulo another."""
return divmod(self, other)[1]
def __rmod__(self, other):
"""Take one Rat modulo another (reversed args)."""
return divmod(other, self)[1]
def __eq__(self, other):
"""Compare two Rats for equality."""
if isint(other):
return self.__den == 1 and self.__num == other
if isRat(other):
return self.__num == other.__num and self.__den == other.__den
if isnum(other):
return float(self) == other
return NotImplemented
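# Quick illustration (added; mirrors the tests below): Rat normalizes on
# construction and mixes with builtin numbers by falling back to float:
#     Rat(10, -15)        -> num == -2, den == 3
#     Rat(1, 2) + 1.0     -> 1.5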
class RatTestCase(unittest.TestCase):
"""Unit tests for Rat class and its support utilities."""
def test_gcd(self):
self.assertEqual(gcd(10, 12), 2)
self.assertEqual(gcd(10, 15), 5)
self.assertEqual(gcd(10, 11), 1)
self.assertEqual(gcd(100, 15), 5)
self.assertEqual(gcd(-10, 2), -2)
self.assertEqual(gcd(10, -2), 2)
self.assertEqual(gcd(-10, -2), -2)
for i in range(1, 20):
for j in range(1, 20):
self.assertTrue(gcd(i, j) > 0)
self.assertTrue(gcd(-i, j) < 0)
self.assertTrue(gcd(i, -j) > 0)
self.assertTrue(gcd(-i, -j) < 0)
def test_constructor(self):
a = Rat(10, 15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(10, -15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, 15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, -15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(7)
self.assertEqual(a.num, 7)
self.assertEqual(a.den, 1)
try:
a = Rat(1, 0)
except ZeroDivisionError:
pass
else:
self.fail("Rat(1, 0) didn't raise ZeroDivisionError")
for bad in "0", 0.0, 0j, (), [], {}, None, Rat, unittest:
try:
a = Rat(bad)
except TypeError:
pass
else:
self.fail("Rat(%r) didn't raise TypeError" % bad)
try:
a = Rat(1, bad)
except TypeError:
pass
else:
self.fail("Rat(1, %r) didn't raise TypeError" % bad)
def test_add(self):
self.assertEqual(Rat(2, 3) + Rat(1, 3), 1)
self.assertEqual(Rat(2, 3) + 1, Rat(5, 3))
self.assertEqual(1 + Rat(2, 3), Rat(5, 3))
self.assertEqual(1.0 + Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) + 1.0, 1.5)
def test_sub(self):
self.assertEqual(Rat(7, 2) - Rat(7, 5), Rat(21, 10))
self.assertEqual(Rat(7, 5) - 1, Rat(2, 5))
self.assertEqual(1 - Rat(3, 5), Rat(2, 5))
self.assertEqual(Rat(3, 2) - 1.0, 0.5)
self.assertEqual(1.0 - Rat(1, 2), 0.5)
def test_mul(self):
self.assertEqual(Rat(2, 3) * Rat(5, 7), Rat(10, 21))
self.assertEqual(Rat(10, 3) * 3, 10)
self.assertEqual(3 * Rat(10, 3), 10)
self.assertEqual(Rat(10, 5) * 0.5, 1.0)
self.assertEqual(0.5 * Rat(10, 5), 1.0)
def test_div(self):
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
def test_floordiv(self):
self.assertEqual(Rat(10) // Rat(4), 2)
self.assertEqual(Rat(10, 3) // Rat(4, 3), 2)
self.assertEqual(Rat(10) // 4, 2)
self.assertEqual(10 // Rat(4), 2)
def test_eq(self):
self.assertEqual(Rat(10), Rat(20, 2))
self.assertEqual(Rat(10), 10)
self.assertEqual(10, Rat(10))
self.assertEqual(Rat(10), 10.0)
self.assertEqual(10.0, Rat(10))
def test_true_div(self):
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
self.assertEqual(eval('1/2'), 0.5)
# XXX Ran out of steam; TO DO: divmod, div, future division
class OperationLogger:
"""Base class for classes with operation logging."""
def __init__(self, logger):
self.logger = logger
def log_operation(self, *args):
self.logger(*args)
def op_sequence(op, *classes):
"""Return the sequence of operations that results from applying
the operation `op` to instances of the given classes."""
log = []
instances = []
for c in classes:
instances.append(c(log.append))
try:
op(*instances)
except TypeError:
pass
return log
class A(OperationLogger):
def __eq__(self, other):
self.log_operation('A.__eq__')
return NotImplemented
def __le__(self, other):
self.log_operation('A.__le__')
return NotImplemented
def __ge__(self, other):
self.log_operation('A.__ge__')
return NotImplemented
class B(OperationLogger, metaclass=ABCMeta):
def __eq__(self, other):
self.log_operation('B.__eq__')
return NotImplemented
def __le__(self, other):
self.log_operation('B.__le__')
return NotImplemented
def __ge__(self, other):
self.log_operation('B.__ge__')
return NotImplemented
class C(B):
def __eq__(self, other):
self.log_operation('C.__eq__')
return NotImplemented
def __le__(self, other):
self.log_operation('C.__le__')
return NotImplemented
def __ge__(self, other):
self.log_operation('C.__ge__')
return NotImplemented
class V(OperationLogger):
"""Virtual subclass of B"""
def __eq__(self, other):
self.log_operation('V.__eq__')
return NotImplemented
def __le__(self, other):
self.log_operation('V.__le__')
return NotImplemented
def __ge__(self, other):
self.log_operation('V.__ge__')
return NotImplemented
B.register(V)
class OperationOrderTests(unittest.TestCase):
def test_comparison_orders(self):
self.assertEqual(op_sequence(eq, A, A), ['A.__eq__', 'A.__eq__'])
self.assertEqual(op_sequence(eq, A, B), ['A.__eq__', 'B.__eq__'])
self.assertEqual(op_sequence(eq, B, A), ['B.__eq__', 'A.__eq__'])
# C is a subclass of B, so C.__eq__ is called first
self.assertEqual(op_sequence(eq, B, C), ['C.__eq__', 'B.__eq__'])
self.assertEqual(op_sequence(eq, C, B), ['C.__eq__', 'B.__eq__'])
self.assertEqual(op_sequence(le, A, A), ['A.__le__', 'A.__ge__'])
self.assertEqual(op_sequence(le, A, B), ['A.__le__', 'B.__ge__'])
self.assertEqual(op_sequence(le, B, A), ['B.__le__', 'A.__ge__'])
self.assertEqual(op_sequence(le, B, C), ['C.__ge__', 'B.__le__'])
self.assertEqual(op_sequence(le, C, B), ['C.__le__', 'B.__ge__'])
self.assertTrue(issubclass(V, B))
self.assertEqual(op_sequence(eq, B, V), ['B.__eq__', 'V.__eq__'])
self.assertEqual(op_sequence(le, B, V), ['B.__le__', 'V.__ge__'])
class SupEq(object):
"""Class that can test equality"""
def __eq__(self, other):
return True
class S(SupEq):
"""Subclass of SupEq that should fail"""
__eq__ = None
class F(object):
"""Independent class that should fall back"""
class X(object):
"""Independent class that should fail"""
__eq__ = None
class SN(SupEq):
"""Subclass of SupEq that can test equality, but not non-equality"""
__ne__ = None
class XN:
"""Independent class that can test equality, but not non-equality"""
def __eq__(self, other):
return True
__ne__ = None
class FallbackBlockingTests(unittest.TestCase):
"""Unit tests for None method blocking"""
def test_fallback_rmethod_blocking(self):
e, f, s, x = SupEq(), F(), S(), X()
self.assertEqual(e, e)
self.assertEqual(e, f)
self.assertEqual(f, e)
# left operand is checked first
self.assertEqual(e, x)
self.assertRaises(TypeError, eq, x, e)
# S is a subclass, so it's always checked first
self.assertRaises(TypeError, eq, e, s)
self.assertRaises(TypeError, eq, s, e)
def test_fallback_ne_blocking(self):
e, sn, xn = SupEq(), SN(), XN()
self.assertFalse(e != e)
self.assertRaises(TypeError, ne, e, sn)
self.assertRaises(TypeError, ne, sn, e)
self.assertFalse(e != xn)
self.assertRaises(TypeError, ne, xn, e)
if __name__ == "__main__":
unittest.main()
|
|
"""
Peasauce - interactive disassembler
Copyright (C) 2012-2017 Richard Tew
Licensed using the MIT license.
"""
from dataclasses import dataclass
import io
import logging
import os
import struct
from typing import Any, IO, List, Optional, Tuple
from . import amiga
from . import atarist
from . import binary
from . import human68k
from . import snes
from . import zxspectrum
from . import constants
from .constants import Endian
logger = logging.getLogger("loader")
systems_by_name = {}
def _generate_module_data():
global systems_by_name
for module in (amiga, atarist, human68k, binary, snes, zxspectrum):
system_name = module.__name__
systems_by_name[system_name] = module.System(system_name)
_generate_module_data()
def get_system(system_name):
return systems_by_name[system_name]
def get_system_data_types(system_name: str) -> "DataTypes":
system = systems_by_name[system_name]
return DataTypes(system.endian_id)
def load_file(input_file, file_name, loader_options=None, file_offset=0, file_length=None) -> Optional[Tuple["FileInfo", "DataTypes"]]:
for system_name, system in systems_by_name.items():
file_info = FileInfo(system, file_name, loader_options)
data_types = get_system_data_types(system_name)
if system.load_input_file(input_file, file_info, data_types, f_offset=file_offset, f_length=file_length):
return file_info, data_types
def identify_file(input_file, file_name, file_offset=0, file_length=None):
matches = []
for system_name, system in systems_by_name.items():
file_info = FileInfo(system, file_name)
data_types = get_system_data_types(system_name)
system_matches = system.identify_input_file(input_file, file_info, data_types, f_offset=file_offset, f_length=file_length)
matches.extend(((file_info, match, system) for match in system_matches))
if len(matches):
# For now take the match we are most confident in.
matches.sort(key = lambda v: v[1].confidence)
file_info, match, system = matches[0]
if match.file_format_id != constants.FileFormat.UNKNOWN and match.confidence != constants.MATCH_NONE:
result = {}
result["processor"] = system.get_processor_id()
result["platform"] = match.platform_id
result["filetype"] = match.file_format_id
result["endian"] = system.endian_id
return file_info, result
SEGMENT_TYPE_CODE = 1
SEGMENT_TYPE_DATA = 2
SEGMENT_TYPE_BSS = 3
@dataclass
class Segment:
type: int
file_offset: int
data_length: int
length: int
address: int
cached_data: Any
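# Illustrative example (added): a 512-byte code segment read from file offset
# 0x20 and mapped at address 0x1000 would be represented as
#     Segment(SEGMENT_TYPE_CODE, 0x20, 512, 512, 0x1000, None)
# and is queried through the helper functions below.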
def get_segment_type(segments, segment_id):
return segments[segment_id].type
def get_segment_data_file_offset(segments, segment_id):
return segments[segment_id].file_offset
def get_segment_data_length(segments, segment_id):
return segments[segment_id].data_length
def get_segment_length(segments, segment_id):
return segments[segment_id].length
def get_segment_address(segments, segment_id):
return segments[segment_id].address
def get_segment_data(segments, segment_id):
return segments[segment_id].cached_data
def is_segment_type_code(segments, segment_id):
return segments[segment_id].type == SEGMENT_TYPE_CODE
def is_segment_type_data(segments, segment_id):
return segments[segment_id].type == SEGMENT_TYPE_DATA
def is_segment_type_bss(segments, segment_id):
return segments[segment_id].type == SEGMENT_TYPE_BSS
def cache_segment_data(input_file: io.RawIOBase, segments: List[Any], segment_id: int, base_file_offset: int=0) -> None:
"""
base_file_offset: when the input file is located within a containing file.
"""
data = None
file_offset = get_segment_data_file_offset(segments, segment_id)
    # Segments with no backing data in the file (file_offset == -1, e.g. BSS) are skipped.
if file_offset != -1:
file_length = get_segment_data_length(segments, segment_id)
input_file.seek(base_file_offset + file_offset, os.SEEK_SET)
file_data = bytearray(file_length)
if input_file.readinto(file_data) == file_length:
# NOTE(rmtew): Python 2, type(data[0]) is str. Python 3, type(data[0]) is int
data = memoryview(file_data)
else:
logger.error("Unable to cache segment %d data, got %d bytes, wanted %d", segment_id, len(file_data), file_length)
segments[segment_id].cached_data = data
def relocate_segment_data(segments, data_types, relocations, relocatable_addresses, relocated_addresses):
for segment_id in range(len(segments)):
# Generic longword-based relocation.
data = get_segment_data(segments, segment_id)
local_address = get_segment_address(segments, segment_id)
for target_segment_id, local_offsets in relocations[segment_id]:
target_address = get_segment_address(segments, target_segment_id)
for local_offset in local_offsets:
value = data_types.uint32_value(data[local_offset:local_offset+4])
address = value + target_address
relocated_addresses.setdefault(address, set()).add(local_address + local_offset)
relocatable_addresses.add(local_address + local_offset)
data[local_offset:local_offset+4] = data_types.uint32_value_as_string(address)
def has_segment_headers(system_name):
return get_system(system_name).has_segment_headers()
def get_segment_header(system_name, segment_id, data):
return get_system(system_name).get_segment_header(segment_id, data)
def get_data_instruction_string(system_name, segments, segment_id, data_size, with_file_data):
segment_type = get_segment_type(segments, segment_id)
is_bss_segment = segment_type == SEGMENT_TYPE_BSS
return get_system(system_name).get_data_instruction_string(data_size, is_bss_segment, with_file_data)
def get_load_address(file_info):
return file_info.load_address
def get_entrypoint_address(file_info):
#if file_info.entrypoint_address is not None:
# return file_info.entrypoint_address
return get_segment_address(file_info.segments, file_info.entrypoint_segment_id) + file_info.entrypoint_offset
class DataTypes(object):
def __init__(self, endian_id):
self.endian_id = endian_id
self._endian_char = [ "<", ">" ][endian_id == Endian.BIG]
s = b"12345"
bs = bytearray(s)
mv = memoryview(bs)
if type(mv[0]) is int:
self.uint8_value = self._uint8_value3
else:
self.uint8_value = self._uint8_value2
## Data access related operations.
    def sized_value(self, data_size, bytes, idx=0):
if data_size == constants.DATA_TYPE_DATA32:
return self.uint32_value(bytes, idx)
elif data_size == constants.DATA_TYPE_DATA16:
return self.uint16_value(bytes, idx)
elif data_size == constants.DATA_TYPE_DATA08:
return self.uint8_value(bytes, idx)
raise Exception("unsupported size", data_size)
def _uint8_value2(self, bytes, idx=0):
return self.uint8(bytes[idx])
def _uint8_value3(self, bytes, idx=0):
return bytes[idx]
def uint16_value(self, bytes, idx=0):
return self.uint16(bytes[idx:idx+2])
def uint32_value(self, bytes, idx=0):
try:
return self.uint32(bytes[idx:idx+4])
except:
pass
def uint32_value_as_string(self, v):
if self.endian_id == Endian.BIG:
return struct.pack(">I", v)
else:
return struct.pack("<I", v)
# String to value.
def uint16(self, s):
if self.endian_id == Endian.BIG:
return struct.unpack(">H", s)[0]
else:
return struct.unpack("<H", s)[0]
def int16(self, s):
if self.endian_id == Endian.BIG:
return struct.unpack(">h", s)[0]
else:
return struct.unpack("<h", s)[0]
def uint32(self, s):
if self.endian_id == Endian.BIG:
return struct.unpack(">I", s)[0]
else:
return struct.unpack("<I", s)[0]
def int32(self, s):
if self.endian_id == Endian.BIG:
return struct.unpack(">i", s)[0]
else:
return struct.unpack("<i", s)[0]
def uint8(self, s):
if self.endian_id == Endian.BIG:
return struct.unpack(">B", s)[0]
else:
return struct.unpack("<B", s)[0]
def int8(self, s):
if self.endian_id == Endian.BIG:
return struct.unpack(">b", s)[0]
else:
return struct.unpack("<b", s)[0]
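# Minimal usage sketch (illustrative): decoding big-endian values with DataTypes.
#     dt = DataTypes(Endian.BIG)
#     dt.uint16(b"\x12\x34")                  # -> 0x1234
#     dt.uint32_value(b"\x00\x00\x00\x2a")    # -> 42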
class FileInfo(object):
""" The custom system data for the loaded file. """
internal_data = None # type: Any
savefile_data = None # type: Any
def __init__(self, system, file_name, loader_options=None):
self.system = system
self.file_name = file_name
self.loader_options = loader_options
self.segments = []
self.relocations_by_segment_id = []
self.symbols_by_segment_id = []
if loader_options is not None and loader_options.is_binary_file:
self.load_address = loader_options.load_address
else:
self.load_address = 0
""" The segment id and offset in that segment of the program entrypoint. """
if loader_options is not None:
self.entrypoint_segment_id = loader_options.entrypoint_segment_id
self.entrypoint_offset = loader_options.entrypoint_offset
else:
self.entrypoint_segment_id = 0
self.entrypoint_offset = 0
## Query..
def has_file_name_suffix(self, suffix):
return self.file_name.lower().endswith("."+ suffix.lower())
## Segment registration related operations
def set_internal_data(self, file_data):
self.internal_data = file_data
def get_internal_data(self):
return self.internal_data
def set_savefile_data(self, file_data):
self.savefile_data = file_data
def get_savefile_data(self):
return self.savefile_data
def print_summary(self):
self.system.print_summary(self)
def add_code_segment(self, file_offset, data_length, segment_length, relocations, symbols):
logger.debug("Added code segment %d %d %d #relocs %d", file_offset, data_length, segment_length, len(relocations))
self.add_segment(SEGMENT_TYPE_CODE, file_offset, data_length, segment_length, relocations, symbols)
def add_data_segment(self, file_offset, data_length, segment_length, relocations, symbols):
logger.debug("Added data segment %d %d %d #relocs %d", file_offset, data_length, segment_length, len(relocations))
self.add_segment(SEGMENT_TYPE_DATA, file_offset, data_length, segment_length, relocations, symbols)
def add_bss_segment(self, file_offset, data_length, segment_length, relocations, symbols):
logger.debug("Added bss segment %d %d %d #relocs %d", file_offset, data_length, segment_length, len(relocations))
self.add_segment(SEGMENT_TYPE_BSS, file_offset, data_length, segment_length, relocations, symbols)
def add_segment(self, segment_type, file_offset, data_length, segment_length, relocations, symbols):
segment_id = len(self.segments)
segment_address = self.load_address
if segment_id > 0:
segment_address = get_segment_address(self.segments, segment_id-1) + get_segment_length(self.segments, segment_id-1)
segment = Segment(segment_type, file_offset, data_length, segment_length, segment_address, None)
self.segments.append(segment)
self.relocations_by_segment_id.append(relocations)
self.symbols_by_segment_id.append(symbols)
def set_entrypoint(self, segment_id, offset):
self.entrypoint_segment_id = segment_id
self.entrypoint_offset = offset
## Segment querying related operations
class BinaryFileOptions:
is_binary_file = True
processor_id: Optional[int] = None
load_address: Optional[int] = None
entrypoint_segment_id: int = 0
entrypoint_offset: Optional[int] = None
|
|
#!/usr/bin/env python
# Copyright (c) 2014-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under both the Apache 2.0 license (found in the
# LICENSE file in the root directory of this source tree) and the GPLv2 (found
# in the COPYING file in the root directory of this source tree).
# You may select, at your option, one of the above-listed licenses.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import subprocess
import sys
import time
try:
import argparse
except ImportError:
print("Cannot import argparse.")
exit(1)
# Import the testing utils
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../tests/")
import utils
KB = 1024 * 1024  # NOTE: despite the name, this value is one mebibyte (1024 * 1024).
RANGES = {
"colors": (utils.blue, utils.green, utils.yellow, utils.red),
"utilization": (8, 20, 50),
"cpu_time": (0.4, 1, 10),
"memory": (8 * KB, 12 * KB, 24 * KB),
"fds": (10, 20, 50),
"duration": (0.8, 1, 3),
}
def check_leaks_linux(shell, query, count=1, supp_file=None):
"""Run valgrind using the shell and a query, parse leak reports."""
suppressions = "" if supp_file is None else "--suppressions=%s" % supp_file
cmd = [
"valgrind",
"--tool=memcheck",
suppressions,
shell,
"--profile",
"%d" % count,
query,
"--disable_extensions",
]
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
_, stderr = proc.communicate()
summary = {
"definitely": None,
"indirectly": None,
"possibly": None,
}
if args.verbose:
print(stderr)
for line in stderr.split("\n"):
for key in summary:
if line.find(key) >= 0:
summary[key] = line.split(":")[1].strip()
if summary["definitely"] is None:
raise Exception("Could not execute valgrind correctly")
return summary
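# For reference (illustrative): the valgrind LEAK SUMMARY lines parsed above look
# like "==1234==    definitely lost: 0 bytes in 0 blocks", so each summary value
# ends up holding the text after the colon, e.g. "0 bytes in 0 blocks".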
def check_leaks_darwin(shell, query, count=1):
# Run the shell with a --delay flag such that leaks can attach before exit.
proc = subprocess.Popen(
[shell, "--profile", str(count), "--profile_delay", "1", query],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
leak_checks = None
while proc.poll() is None:
# Continue to run leaks until the monitored shell exits.
leaks = subprocess.Popen(
["leaks", "%s" % proc.pid],
stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, _ = leaks.communicate()
if args.verbose:
print(stdout)
try:
for line in stdout.split("\n"):
if line.find("total leaked bytes") >= 0:
leak_checks = line.split(":")[1].strip()
except:
print("Encountered exception while running leaks:")
print(stdout)
return {"definitely": leak_checks}
def check_leaks(shell, query, count=1, supp_file=None):
if utils.platform() == "darwin":
return check_leaks_darwin(shell, query, count=count)
else:
return check_leaks_linux(shell, query, count=count, supp_file=supp_file)
def profile_leaks(shell, queries, count=1, rounds=1, supp_file=None):
report = {}
for name, query in queries.iteritems():
print("Analyzing leaks in query: %s" % query)
# Apply count (optionally run the query several times).
summary = check_leaks(shell, query, count, supp_file)
display = []
for key in summary:
output = summary[key]
if output is not None and output[0] != "0":
# Add some fun colored output if leaking.
if key == "definitely":
output = utils.red(output)
report[name] = "LEAKING"
if key == "indirectly":
output = utils.yellow(output)
report[name] = "WARNING"
elif name not in report.keys():
report[name] = "SAFE"
display.append("%s: %s" % (key, output))
print(" %s" % "; ".join(display))
return report
def run_query(shell, query, timeout=0, count=1):
"""Execute the osqueryi shell in profile mode with a setup/teardown delay."""
start_time = time.time()
return utils.profile_cmd([
shell,
"--profile",
str(count),
"--profile_delay",
"1",
query,
"--disable_extensions",
], timeout=timeout, count=count)
def summary_line(name, result):
if not args.n:
for key, v in result.iteritems():
print("%s" % (
RANGES["colors"][v[0]]("%s:%s" % (
key[0].upper(), v[0]))),
end="")
print(" ", end="")
print("%s:" % name, end=" ")
for key, v in result.iteritems():
print("%s: %s" % (key, v[1]), end=" ")
print("")
def summary(results, display=False):
"""Map the results to simple thresholds."""
def rank(value, ranges):
for i, r in enumerate(ranges):
if value < r:
return i
return len(ranges)
summary_results = {}
for name, result in results.iteritems():
failed = "exit" in result and result["exit"] > 0
summary_result = {}
for key in RANGES:
if key == "colors":
continue
if key not in result:
continue
if failed:
summary_result[key] = (len(RANGES["colors"]) - 1, -1)
else:
summary_result[key] = (rank(result[key], RANGES[key]),
result[key])
if display and not args.check:
summary_line(name, summary_result)
summary_results[name] = summary_result
return summary_results
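# Worked example (illustrative): with RANGES["utilization"] == (8, 20, 50), a
# utilization reading of 30 ranks as 2 and summary_line() renders it with
# RANGES["colors"][2] (yellow); a run that exited non-zero is forced to the
# worst rank with a reported value of -1.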
def profile(shell, queries, timeout=0, count=1, rounds=1):
report = {}
for name, query in queries.iteritems():
forced = True if name == "force" else False
if not forced:
print("Profiling query: %s" % query)
results = {}
for i in range(rounds):
if forced:
result = utils.profile_cmd(shell, shell=True,
timeout=timeout, count=count)
else:
result = run_query(shell, query, timeout=timeout, count=count)
summary(
{"%s (%d/%d)" % (name, i + 1, rounds): result}, display=True)
# Store each result round to return an average.
for k, v in result.iteritems():
results[k] = results.get(k, [])
results[k].append(v)
average_results = {}
for k in results:
average_results[k] = sum(results[k]) / len(results[k])
report[name] = average_results
if rounds > 1:
summary({"%s avg" % name: report[name]}, display=True)
return report
def compare(profile1, profile2):
    """Compare two JSON profile outputs."""
for table in profile1:
if table not in profile2:
# No comparison possible
continue
summary_line(table, profile1[table])
summary_line(table, profile2[table])
def regress_check(profile1, profile2):
regressed = False
for table in profile1:
if table not in profile2:
continue
for measure in profile1[table]:
if profile2[table][measure][0] > profile1[table][measure][0]:
print("%s %s has regressed (%s->%s)!" % (table, measure,
profile1[table][measure][0], profile2[table][measure][0]))
regressed = True
if not regressed:
print("No regressions!")
return 0
return 1
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=(
"Profile osquery, individual tables, "
"or a set of osqueryd config queries."
))
parser.add_argument(
"-n", action="store_true", default=False,
help="Do not output colored ranks."
)
parser.add_argument(
"--verbose", action="store_true", default=False, help="Be verbose.")
parser.add_argument(
"--leaks", default=False, action="store_true",
help="Check for memory leaks instead of performance."
)
group = parser.add_argument_group("Query Options:")
group.add_argument(
"--restrict", metavar="LIST", default="",
help="Limit to a list of comma-separated tables."
)
group.add_argument(
"--tables", metavar="PATH", default="./specs",
help="Path to the osquery table specs."
)
group.add_argument(
"--config", metavar="FILE", default=None,
help="Use scheduled queries from a config."
)
group.add_argument(
"--query", metavar="STRING", default=None,
help="Profile a single query."
)
group = parser.add_argument_group("Run Options:")
group.add_argument(
"--timeout", metavar="N", default=0, type=int,
help="Max seconds a query may run --count times."
)
group.add_argument(
"--count", metavar="N", default=1, type=int,
help="Run the query N times serially."
)
group.add_argument(
"--rounds", metavar="N", default=1, type=int,
help="Run the profile for N rounds and use the average."
)
group.add_argument(
"--shell", metavar="PATH", default="./build/%s/osquery/osqueryi" % (
utils.platform()),
help="Path to osqueryi shell (./build/<sys>/osquery/osqueryi)."
)
group.add_argument(
"--force", action="store_true", default=False,
help="Force run the target of shell",
)
group = parser.add_argument_group("Performance Options:")
group.add_argument(
"--output", metavar="FILE", default=None,
help="Write JSON performance output to file."
)
group.add_argument(
"--check", metavar="OLD_OUTPUT", nargs=1,
help="Check regressions using an existing output."
)
group.add_argument(
"--compare", metavar="FILE", nargs=2,
help="Compare existing performance outputs (old, new)."
)
group = parser.add_argument_group("Memory Options:")
group.add_argument(
"--suppressions", metavar="SUPP", default="./tools/analysis/valgrind.supp",
help="Add a suppressions files to memory leak checking (linux only)."
)
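# Example invocations (illustrative only; the script name "profile.py" is an
# assumption, and paths depend on the local checkout):
#   python profile.py --query "SELECT * FROM time;" --rounds 3
#   python profile.py --config /etc/osquery/osquery.conf --output new.json
#   python profile.py --check old.json --config /etc/osquery/osquery.conf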
args = parser.parse_args()
if args.compare:
with open(args.compare[0]) as fh:
profile1 = json.loads(fh.read())
with open(args.compare[1]) as fh:
profile2 = json.loads(fh.read())
compare(profile1, profile2)
exit(0)
if args.check:
with open(args.check[0]) as fh:
profile1 = json.loads(fh.read())
if not args.force and not os.path.exists(args.shell):
print("Cannot find --shell: %s" % (args.shell))
exit(1)
if args.config is None and not os.path.exists(args.tables):
print("Cannot find --tables: %s" % (args.tables))
exit(1)
queries = {}
if args.config is not None:
if not os.path.exists(args.config):
print("Cannot find --config: %s" % (args.config))
exit(1)
queries = utils.queries_from_config(args.config)
# Search queries in subdirectory ".d" based on the config filename
if os.path.isdir(args.config + ".d"):
for config_file in os.listdir(args.config + ".d"):
queries.update(utils.queries_from_config(os.path.join(
args.config + ".d", config_file)))
elif args.query is not None:
queries["manual"] = args.query
elif args.force:
queries["force"] = True
else:
queries = utils.queries_from_tables(args.tables, args.restrict)
if args.leaks:
results = profile_leaks(
args.shell, queries, count=args.count,
rounds=args.rounds, supp_file=args.suppressions
)
else:
# Start the profiling!
results = profile(
args.shell, queries,
timeout=args.timeout, count=args.count, rounds=args.rounds
)
# Only apply checking/regressions to performance, not leaks.
if args.check:
exit(regress_check(profile1, summary(results)))
if args.output is not None:
with open(args.output, "w") as fh:
if args.leaks:
# Leaks report does not need a summary view.
fh.write(json.dumps(results, indent=1))
else:
fh.write(json.dumps(summary(results), indent=1))
print("Wrote output summary: %s" % args.output)
if args.leaks:
for name in results.keys():
if results[name] != "SAFE":
sys.exit(1)
sys.exit(0)
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import webob
from nova.api.openstack.compute.contrib import quotas as quotas_v2
from nova.api.openstack.compute.plugins.v3 import quota_sets as quotas_v21
from nova.api.openstack import extensions
from nova import context as context_maker
from nova import exception
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
def quota_set(id, include_server_group_quotas=True):
res = {'quota_set': {'id': id, 'metadata_items': 128,
'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1,
'instances': 10, 'injected_files': 5, 'cores': 20,
'injected_file_content_bytes': 10240,
'security_groups': 10, 'security_group_rules': 20,
'key_pairs': 100, 'injected_file_path_bytes': 255}}
if include_server_group_quotas:
res['quota_set']['server_groups'] = 10
res['quota_set']['server_group_members'] = 10
return res
class BaseQuotaSetsTest(test.TestCase):
def _is_v20_api_test(self):
# NOTE(oomichi): If a test is for v2.0 API, this method returns
        # True. Otherwise (v2.1 API test), returns False.
return (self.plugin == quotas_v2)
def get_update_expected_response(self, base_body):
# NOTE(oomichi): "id" parameter is added to a response of
# "update quota" API since v2.1 API, because it makes the
        # API consistent and it is not a backwards-incompatible change.
# This method adds "id" for an expected body of a response.
if self._is_v20_api_test():
expected_body = base_body
else:
expected_body = copy.deepcopy(base_body)
expected_body['quota_set'].update({'id': 'update_me'})
return expected_body
def setup_mock_for_show(self):
if self._is_v20_api_test():
self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
self.mox.ReplayAll()
def setup_mock_for_update(self):
if self._is_v20_api_test():
self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
self.mox.ReplayAll()
def get_delete_status_int(self, res):
if self._is_v20_api_test():
return res.status_int
else:
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
return self.controller.delete.wsgi_code
class QuotaSetsTestV21(BaseQuotaSetsTest):
plugin = quotas_v21
validation_error = exception.ValidationError
include_server_group_quotas = True
def setUp(self):
super(QuotaSetsTestV21, self).setUp()
self._setup_controller()
self.default_quotas = {
'instances': 10,
'cores': 20,
'ram': 51200,
'floating_ips': 10,
'fixed_ips': -1,
'metadata_items': 128,
'injected_files': 5,
'injected_file_path_bytes': 255,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100,
}
if self.include_server_group_quotas:
self.default_quotas['server_groups'] = 10
self.default_quotas['server_group_members'] = 10
def _setup_controller(self):
self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
def test_format_quota_set(self):
quota_set = self.controller._format_quota_set('1234',
self.default_quotas)
qs = quota_set['quota_set']
self.assertEqual(qs['id'], '1234')
self.assertEqual(qs['instances'], 10)
self.assertEqual(qs['cores'], 20)
self.assertEqual(qs['ram'], 51200)
self.assertEqual(qs['floating_ips'], 10)
self.assertEqual(qs['fixed_ips'], -1)
self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5)
self.assertEqual(qs['injected_file_path_bytes'], 255)
self.assertEqual(qs['injected_file_content_bytes'], 10240)
self.assertEqual(qs['security_groups'], 10)
self.assertEqual(qs['security_group_rules'], 20)
self.assertEqual(qs['key_pairs'], 100)
if self.include_server_group_quotas:
self.assertEqual(qs['server_groups'], 10)
self.assertEqual(qs['server_group_members'], 10)
def test_validate_quota_limit(self):
resource = 'fake'
# Valid - finite values
self.assertIsNone(self.controller._validate_quota_limit(resource,
50, 10, 100))
# Valid - finite limit and infinite maximum
self.assertIsNone(self.controller._validate_quota_limit(resource,
50, 10, -1))
# Valid - infinite limit and infinite maximum
self.assertIsNone(self.controller._validate_quota_limit(resource,
-1, 10, -1))
# Valid - all infinite
self.assertIsNone(self.controller._validate_quota_limit(resource,
-1, -1, -1))
# Invalid - limit is less than -1
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._validate_quota_limit,
resource, -2, 10, 100)
# Invalid - limit is less than minimum
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._validate_quota_limit,
resource, 5, 10, 100)
# Invalid - limit is greater than maximum
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._validate_quota_limit,
resource, 200, 10, 100)
# Invalid - infinite limit is greater than maximum
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._validate_quota_limit,
resource, -1, 10, 100)
# Invalid - limit is less than infinite minimum
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._validate_quota_limit,
resource, 50, -1, -1)
def test_quotas_defaults(self):
uri = '/v2/fake_tenant/os-quota-sets/fake_tenant/defaults'
req = fakes.HTTPRequest.blank(uri)
res_dict = self.controller.defaults(req, 'fake_tenant')
self.default_quotas.update({'id': 'fake_tenant'})
expected = {'quota_set': self.default_quotas}
self.assertEqual(res_dict, expected)
def test_quotas_show_as_admin(self):
self.setup_mock_for_show()
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234',
use_admin_context=True)
res_dict = self.controller.show(req, 1234)
ref_quota_set = quota_set('1234', self.include_server_group_quotas)
self.assertEqual(res_dict, ref_quota_set)
def test_quotas_show_as_unauthorized_user(self):
self.setup_mock_for_show()
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
req, 1234)
def test_quotas_update_as_admin(self):
self.setup_mock_for_update()
self.default_quotas.update({
'instances': 50,
'cores': 50
})
body = {'quota_set': self.default_quotas}
expected_body = self.get_update_expected_response(body)
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
use_admin_context=True)
res_dict = self.controller.update(req, 'update_me', body=body)
self.assertEqual(expected_body, res_dict)
def test_quotas_update_zero_value_as_admin(self):
self.setup_mock_for_update()
body = {'quota_set': {'instances': 0, 'cores': 0,
'ram': 0, 'floating_ips': 0,
'metadata_items': 0,
'injected_files': 0,
'injected_file_content_bytes': 0,
'injected_file_path_bytes': 0,
'security_groups': 0,
'security_group_rules': 0,
'key_pairs': 100, 'fixed_ips': -1}}
if self.include_server_group_quotas:
body['quota_set']['server_groups'] = 10
body['quota_set']['server_group_members'] = 10
expected_body = self.get_update_expected_response(body)
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
use_admin_context=True)
res_dict = self.controller.update(req, 'update_me', body=body)
self.assertEqual(expected_body, res_dict)
def test_quotas_update_as_user(self):
self.setup_mock_for_update()
self.default_quotas.update({
'instances': 50,
'cores': 50
})
body = {'quota_set': self.default_quotas}
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me')
self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
req, 'update_me', body=body)
def _quotas_update_bad_request_case(self, body):
self.setup_mock_for_update()
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
use_admin_context=True)
self.assertRaises(self.validation_error, self.controller.update,
req, 'update_me', body=body)
def test_quotas_update_invalid_key(self):
body = {'quota_set': {'instances2': -2, 'cores': -2,
'ram': -2, 'floating_ips': -2,
'metadata_items': -2, 'injected_files': -2,
'injected_file_content_bytes': -2}}
self._quotas_update_bad_request_case(body)
def test_quotas_update_invalid_limit(self):
body = {'quota_set': {'instances': -2, 'cores': -2,
'ram': -2, 'floating_ips': -2, 'fixed_ips': -2,
'metadata_items': -2, 'injected_files': -2,
'injected_file_content_bytes': -2}}
self._quotas_update_bad_request_case(body)
def test_quotas_update_empty_body(self):
body = {}
self._quotas_update_bad_request_case(body)
def test_quotas_update_invalid_value_non_int(self):
# when PUT non integer value
self.default_quotas.update({
'instances': 'test'
})
body = {'quota_set': self.default_quotas}
self._quotas_update_bad_request_case(body)
def test_quotas_update_invalid_value_with_float(self):
# when PUT non integer value
self.default_quotas.update({
'instances': 50.5
})
body = {'quota_set': self.default_quotas}
self._quotas_update_bad_request_case(body)
def test_quotas_update_invalid_value_with_unicode(self):
# when PUT non integer value
self.default_quotas.update({
'instances': u'\u30aa\u30fc\u30d7\u30f3'
})
body = {'quota_set': self.default_quotas}
self._quotas_update_bad_request_case(body)
def test_quotas_delete_as_unauthorized_user(self):
if self._is_v20_api_test():
self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
self.mox.ReplayAll()
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
req, 1234)
def test_quotas_delete_as_admin(self):
if self._is_v20_api_test():
self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
context = context_maker.get_admin_context()
self.req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
self.req.environ['nova.context'] = context
self.mox.StubOutWithMock(quota.QUOTAS,
"destroy_all_by_project")
quota.QUOTAS.destroy_all_by_project(context, 1234)
self.mox.ReplayAll()
res = self.controller.delete(self.req, 1234)
self.mox.VerifyAll()
self.assertEqual(202, self.get_delete_status_int(res))
class ExtendedQuotasTestV21(BaseQuotaSetsTest):
plugin = quotas_v21
def setUp(self):
super(ExtendedQuotasTestV21, self).setUp()
self._setup_controller()
self.setup_mock_for_update()
fake_quotas = {'ram': {'limit': 51200,
'in_use': 12800,
'reserved': 12800},
'cores': {'limit': 20,
'in_use': 10,
'reserved': 5},
'instances': {'limit': 100,
'in_use': 0,
'reserved': 0}}
def _setup_controller(self):
self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
def fake_get_quotas(self, context, id, user_id=None, usages=False):
if usages:
return self.fake_quotas
else:
return dict((k, v['limit']) for k, v in self.fake_quotas.items())
def fake_get_settable_quotas(self, context, project_id, user_id=None):
return {
'ram': {'minimum': self.fake_quotas['ram']['in_use'] +
self.fake_quotas['ram']['reserved'],
'maximum': -1},
'cores': {'minimum': self.fake_quotas['cores']['in_use'] +
self.fake_quotas['cores']['reserved'],
'maximum': -1},
'instances': {'minimum': self.fake_quotas['instances']['in_use'] +
self.fake_quotas['instances']['reserved'],
'maximum': -1},
}
def test_quotas_update_exceed_in_used(self):
patcher = mock.patch.object(quota.QUOTAS, 'get_settable_quotas')
get_settable_quotas = patcher.start()
body = {'quota_set': {'cores': 10}}
get_settable_quotas.side_effect = self.fake_get_settable_quotas
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 'update_me', body=body)
mock.patch.stopall()
def test_quotas_force_update_exceed_in_used(self):
patcher = mock.patch.object(quota.QUOTAS, 'get_settable_quotas')
get_settable_quotas = patcher.start()
patcher = mock.patch.object(self.plugin.QuotaSetsController,
'_get_quotas')
_get_quotas = patcher.start()
body = {'quota_set': {'cores': 10, 'force': 'True'}}
get_settable_quotas.side_effect = self.fake_get_settable_quotas
_get_quotas.side_effect = self.fake_get_quotas
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
use_admin_context=True)
self.controller.update(req, 'update_me', body=body)
mock.patch.stopall()
class UserQuotasTestV21(BaseQuotaSetsTest):
plugin = quotas_v21
include_server_group_quotas = True
def setUp(self):
super(UserQuotasTestV21, self).setUp()
self._setup_controller()
def _setup_controller(self):
self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
def test_user_quotas_show_as_admin(self):
self.setup_mock_for_show()
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1',
use_admin_context=True)
res_dict = self.controller.show(req, 1234)
ref_quota_set = quota_set('1234', self.include_server_group_quotas)
self.assertEqual(res_dict, ref_quota_set)
def test_user_quotas_show_as_unauthorized_user(self):
self.setup_mock_for_show()
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
req, 1234)
def test_user_quotas_update_as_admin(self):
self.setup_mock_for_update()
body = {'quota_set': {'instances': 10, 'cores': 20,
'ram': 51200, 'floating_ips': 10,
'fixed_ips': -1, 'metadata_items': 128,
'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100}}
if self.include_server_group_quotas:
body['quota_set']['server_groups'] = 10
body['quota_set']['server_group_members'] = 10
expected_body = self.get_update_expected_response(body)
url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
res_dict = self.controller.update(req, 'update_me', body=body)
self.assertEqual(expected_body, res_dict)
def test_user_quotas_update_as_user(self):
self.setup_mock_for_update()
body = {'quota_set': {'instances': 10, 'cores': 20,
'ram': 51200, 'floating_ips': 10,
'fixed_ips': -1, 'metadata_items': 128,
'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100,
'server_groups': 10,
'server_group_members': 10}}
url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
req = fakes.HTTPRequest.blank(url)
self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
req, 'update_me', body=body)
def test_user_quotas_update_exceed_project(self):
self.setup_mock_for_update()
body = {'quota_set': {'instances': 20}}
url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 'update_me', body=body)
def test_user_quotas_delete_as_unauthorized_user(self):
self.setup_mock_for_update()
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
req, 1234)
def test_user_quotas_delete_as_admin(self):
if self._is_v20_api_test():
self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
context = context_maker.get_admin_context()
url = '/v2/fake4/os-quota-sets/1234?user_id=1'
self.req = fakes.HTTPRequest.blank(url)
self.req.environ['nova.context'] = context
self.mox.StubOutWithMock(quota.QUOTAS,
"destroy_all_by_project_and_user")
quota.QUOTAS.destroy_all_by_project_and_user(context, 1234, '1')
self.mox.ReplayAll()
res = self.controller.delete(self.req, 1234)
self.mox.VerifyAll()
self.assertEqual(202, self.get_delete_status_int(res))
class QuotaSetsTestV2(QuotaSetsTestV21):
plugin = quotas_v2
validation_error = webob.exc.HTTPBadRequest
def _setup_controller(self):
self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes().\
AndReturn(self.include_server_group_quotas)
self.mox.ReplayAll()
self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
self.mox.ResetAll()
    # NOTE: The following tests are tricky because the v2.1 API does not allow
    # this kind of input due to its strong input validation. They are kept
    # only for v2.0 test coverage.
def test_quotas_update_invalid_value_json_fromat_empty_string(self):
self.setup_mock_for_update()
self.default_quotas.update({
'instances': 50,
'cores': 50
})
expected_resp = {'quota_set': self.default_quotas}
# when PUT JSON format with empty string for quota
body = copy.deepcopy(expected_resp)
body['quota_set']['ram'] = ''
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
use_admin_context=True)
res_dict = self.controller.update(req, 'update_me', body)
self.assertEqual(res_dict, expected_resp)
def test_quotas_update_invalid_value_xml_fromat_empty_string(self):
self.default_quotas.update({
'instances': 50,
'cores': 50
})
expected_resp = {'quota_set': self.default_quotas}
# when PUT XML format with empty string for quota
body = copy.deepcopy(expected_resp)
body['quota_set']['ram'] = {}
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
use_admin_context=True)
self.setup_mock_for_update()
res_dict = self.controller.update(req, 'update_me', body)
self.assertEqual(res_dict, expected_resp)
# NOTE: os-extended-quotas and os-user-quotas are only for v2.0.
    # On v2.1, these features are always enabled. So we need the following
# tests only for v2.0.
def test_delete_quotas_when_extension_not_loaded(self):
self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(False)
self.mox.ReplayAll()
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, 1234)
def test_delete_user_quotas_when_extension_not_loaded(self):
self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
self.ext_mgr.is_loaded('os-user-quotas').AndReturn(False)
self.mox.ReplayAll()
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, 1234)
class QuotaSetsTestV2WithoutServerGroupQuotas(QuotaSetsTestV2):
include_server_group_quotas = False
# NOTE: os-server-group-quotas is only for v2.0. On v2.1 this feature
# is always enabled, so this test is only needed for v2.0
def test_quotas_update_without_server_group_quotas_extenstion(self):
self.setup_mock_for_update()
self.default_quotas.update({
'server_groups': 50,
            'server_group_members': 50
})
body = {'quota_set': self.default_quotas}
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 'update_me', body=body)
class ExtendedQuotasTestV2(ExtendedQuotasTestV21):
plugin = quotas_v2
def _setup_controller(self):
self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes().\
AndReturn(False)
self.mox.ReplayAll()
self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
self.mox.ResetAll()
class UserQuotasTestV2(UserQuotasTestV21):
plugin = quotas_v2
def _setup_controller(self):
self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes().\
AndReturn(self.include_server_group_quotas)
self.mox.ReplayAll()
self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
self.mox.ResetAll()
class UserQuotasTestV2WithoutServerGroupQuotas(UserQuotasTestV2):
include_server_group_quotas = False
# NOTE: os-server-group-quotas is only for v2.0. On v2.1 this feature
# is always enabled, so this test is only needed for v2.0
def test_user_quotas_update_as_admin_without_sg_quota_extension(self):
self.setup_mock_for_update()
body = {'quota_set': {'instances': 10, 'cores': 20,
'ram': 51200, 'floating_ips': 10,
'fixed_ips': -1, 'metadata_items': 128,
'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100,
'server_groups': 100,
'server_group_members': 200}}
url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 'update_me', body=body)
|
|
#-------------------------------------------------------------------------------
#
# Vector Geometry Manipulations
#
# Project: XML Metadata Handling
# Authors: Martin Paces <[email protected]>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
import re
import sys
import math as m
import numpy as np
from collections import Iterable
from osgeo import ogr ; ogr.UseExceptions()
from osgeo import osr ; osr.UseExceptions()
_gerexURL = re.compile(r"^http://www.opengis.net/def/crs/epsg/\d+\.?\d*/(\d+)$", re.IGNORECASE)
_gerexURN = re.compile(r"^urn:ogc:def:crs:epsg:\d*\.?\d*:(\d+)$", re.IGNORECASE)
_gerexShortCode = re.compile(r"^epsg:(\d+)$", re.IGNORECASE)
#-------------------------------------------------------------------------------
# coordinate transformation
RO = ['readonly']
WO = ['writeonly', 'allocate']
class CTransform(object):
def __init__(self, sr_src, sr_dst):
self._ct = osr.CoordinateTransformation(sr_src, sr_dst)
def __call__(self, xarr, yarr):
if hasattr(np, 'nditer') and isinstance(xarr, np.ndarray) and isinstance(yarr, np.ndarray):
# NumPy array
if xarr.shape != yarr.shape:
raise ValueError("Array shape mismatch!")
itr = np.nditer([xarr, yarr, None, None], [], [RO, RO, WO, WO])
for x, y, u, v in itr:
u[...], v[...], _ = self._ct.TransformPoint(float(x), float(y))
return itr.operands[2], itr.operands[3]
        elif isinstance(xarr, Iterable) and isinstance(yarr, Iterable):
# generic iterables + NumPy prior 'np.nditer'
u, v = [], []
for x, y in zip(xarr, yarr):
_u, _v, _ = self._ct.TransformPoint(float(x), float(y))
u.append(_u)
v.append(_v)
return u, v
else: # assuming scalar values
return self._ct.TransformPoint(float(xarr), float(yarr))[0:2]
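# Illustrative sketch (commented out): transform two lon/lat points from WGS84
# (EPSG:4326) to UTM zone 33N (EPSG:32633). The EPSG codes are assumptions
# chosen for the example; axis order follows the TransformPoint(x, y) calls
# used above.
#   _sr_src = osr.SpatialReference(); _sr_src.ImportFromEPSG(4326)
#   _sr_dst = osr.SpatialReference(); _sr_dst.ImportFromEPSG(32633)
#   _x, _y = CTransform(_sr_src, _sr_dst)([15.0, 15.5], [48.0, 48.5])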
#-------------------------------------------------------------------------------
# spatial references
# the most common spatial references
def createSRFromEPSG(epsg):
""" Create OSR Spatial Reference from EPSG number code"""
sr = osr.SpatialReference()
sr.ImportFromEPSG(epsg)
return sr
OSR_WGS84 = createSRFromEPSG(4326)
OSR_USP_N = createSRFromEPSG(32661)  # WGS 84 / UPS North
OSR_USP_S = createSRFromEPSG(32761)  # WGS 84 / UPS South
OSR_UTM_N = tuple(createSRFromEPSG(32601+i) for i in xrange(60))
OSR_UTM_S = tuple(createSRFromEPSG(32701+i) for i in xrange(60))
def setSR(geom, sr):
"""Assing spatial reference to a geometry and return it."""
geom.AssignSpatialReference(sr)
return geom
def parseSR(srs, debug=False):
if debug:
print >>sys.stderr, "SRS: ", srs
for regex in (_gerexShortCode, _gerexURN, _gerexURL):
match = regex.match(srs)
if match is not None:
return createSRFromEPSG(int(match.group(1)))
if srs[:7] == "PROJCS[":
return osr.SpatialReference(srs)
if srs in (None, "", "NONE"):
return None
raise ValueError("Failed to parse the spatial reference! SRS='%s'"%(srs))
def dumpSR(sr, delimiter="", debug=False):
# check whether geometry has a spatial reference
if sr is not None:
an, ac = (sr.GetAuthorityName(None), sr.GetAuthorityCode(None))
if an == "EPSG" and ac > 0:
#out = "%s:%s%s"%(an, ac, delimiter)
out = "urn:ogc:def:crs:%s:6.3:%s"%(an, ac)
else:
print >>sys.stderr, "WARNING: Unsupported projection! %s"%(sr.ExportToWkt())
out = ""
else:
out = ""
return out
#-------------------------------------------------------------------------------
# File I/O subroutines
def parseGeom(buf, debug=False):
""" parse geometry from a source buffer """
# parse prefix
if buf.startswith("EPSG:") or buf.startswith("PROJCS["):
srs, _, buf = buf.partition(';')
sr = parseSR(srs)
else:
sr = None
# create the geometry
for loader in (ogr.CreateGeometryFromWkb,
ogr.CreateGeometryFromWkt,
ogr.CreateGeometryFromGML,
ogr.CreateGeometryFromJson):
try:
if debug:
print >>sys.stderr, "LOADER: ", loader,
geom = loader(buf)
except Exception as e:
if debug:
print >>sys.stderr, e
continue
if debug:
print >>sys.stderr, "OK"
break
else:
raise ValueError("ERROR: Failed to parse the source geometry!")
if sr is not None:
geom.AssignSpatialReference(sr)
return geom
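# Illustrative usage (commented out): parse a WKT point carrying an SRS prefix;
# the prefix is split off and assigned as the geometry's spatial reference.
#   _geom = parseGeom("EPSG:4326;POINT (15.0 48.0)")
#   _sr = _geom.GetSpatialReference()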
#OUTPUT_FORMATS = ("WKB", "WKT", "JSON", "GML", "KML")
OUTPUT_FORMATS = ("WKB", "WKT", "JSON", "KML")
def dumpGeom(geom, format="WKB", debug=False):
""" dump geometry to a buffer possible formats are: WKB(*)|WKT|JSON|GML|KML """
# dump SRS prefix
prefix = dumpSR(geom.GetSpatialReference(), ";", debug)
if format == "WKB":
data = geom.ExportToWkb()
if prefix:
data = "%s%s"%(prefix, data)
elif format == "WKT":
data = "%s%s\n"%(prefix, geom.ExportToWkt())
elif format == "JSON":
data = geom.ExportToJson()
# the GML needs to be verified
# elif format == "GML":
# data = geom.ExportToGML()
elif format == "KML":
data = geom.ExportToKML()
else:
raise ValueError("Invalid format specification! FORMAT='%s'"%(format))
return data
#-------------------------------------------------------------------------------
def wrapArroundDateLine(geom, (xmin, ymin, xmax, ymax), nstep=200):
"""
    wrap (split) geometry around the date-line
nstep controls the split border segmentation (dy = (ymax-ymin)/nstep)
"""
xdif = xmax - xmin
step = (ymax - ymin) / nstep
x0, x1, _, _ = geom.GetEnvelope()
p_start = int(m.floor((x0-xmin)/xdif))
p_stop = int(m.ceil((x1-xmin)/xdif))
# skip geometries falling to a regular domain
if (p_start == 0) and (p_stop == 1):
return geom
# wrap-arround
lgeom = []
for p in xrange(p_start, p_stop):
offset = p*xdif
clip = getRectangle((xmin+offset, ymin, xmax+offset, ymax), step)
tmp = geom.Intersection(clip)
tmp = shiftGeom(tmp, (-offset, 0.0))
lgeom.extend(extractPolygons(tmp))
return groupPolygons(lgeom)
def wrapArroundWGS84(geom, nstep=200):
"""
    longitude wrap-around of geometry in WGS84
    nstep controls the split border segmentation (dy = (ymax-ymin)/nstep)
    equivalent to:
wrapArroundDateLine(geom, (-180., -90., +180., +90.), nstep)
"""
return wrapArroundDateLine(geom, (-180., -90., +180., +90.), nstep)
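# Illustrative sketch (commented out): a polygon crossing the date-line is
# split into parts on both sides of +/-180 deg; the coordinates below are
# assumptions chosen for the example only.
#   _geom = setSR(parseGeom("POLYGON ((170 -10, 190 -10, 190 10, 170 10, 170 -10))"), OSR_WGS84)
#   _wrapped = wrapArroundWGS84(_geom)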
#-------------------------------------------------------------------------------
def mapToWGS84(geom):
def between(v, (v0, v1)):
if v0 <= v1:
return (v0 <= v)and(v1 >= v)
else: #v1 > v0
return (v1 <= v)and(v0 >= v)
def extent_contains(x0, y0):
return ((x0_min <= x0)and(x0_max >= x0)
and(y0_min <= y0)and(y0_max >= y0))
def generate_polar_section(north, east):
eps = 1e-9
        y00 = 89  # max. opposite-pole latitude distance from the equator
x0 = 0 if east else -180
y0 = (-y00) if north else (+y00)
y1 = (90-eps) if north else (eps-90)
lr = ogr.Geometry(type=ogr.wkbLinearRing)
for i in xrange(31):
lr.AddPoint_2D(i*6+x0, y0)
lr.AddPoint_2D(180+x0, y1)
lr.AddPoint_2D(x0, y1)
lr.AddPoint_2D(x0, y0)
p = ogr.Geometry(type=ogr.wkbPolygon)
p.AddGeometry(lr)
p.AssignSpatialReference(OSR_WGS84)
return p
def fix_dateline(geom, east):
"""fix the +/-180dg ambiguity of the date-line nodes"""
def _dlflip_east((x, y, _)): # date-line point flipper
return (x+360.0 if x < -179.0 else x, y)
def _dlflip_west((x, y, _)): # date-line point flipper
return (x-360.0 if x > (+179.0) else x, y)
return Transfomer(_dlflip_east if east else _dlflip_west)(geom)
def transform_polar(north):
        # generate polygons splitting the polar geometry into halves
s1 = generate_polar_section(north, east=True)
s2 = generate_polar_section(north, east=False)
# transform coordinates
s1.Transform(ct_rev)
s2.Transform(ct_rev)
# split the polar geometry to halves
g1 = geom.Intersection(s1)
g2 = geom.Intersection(s2)
# transform halves to the target projection
g1.Transform(ct_fwd)
g2.Transform(ct_fwd)
# fix the dateline ambiguity
g1 = fix_dateline(g1, east=True)
g2 = fix_dateline(g2, east=False)
# return the unified geometry
return g1.Union(g2)
#--------------------------------------------------------------------------
sr_src = geom.GetSpatialReference()
sr_dst = OSR_WGS84
# coordinate transformation objects
ct_fwd = osr.CoordinateTransformation(sr_src, sr_dst)
ct_rev = osr.CoordinateTransformation(sr_dst, sr_src)
# envelope and centroid in the source coordinates
x0_min, x0_max, y0_min, y0_max = geom.GetEnvelope()
# centroid
x0_cnt, y0_cnt = 0.5*(x0_min+x0_max), 0.5*(y0_min+y0_max)
# try to get coordinates of the north and south pole in the source CRS
try:
xy0_np = ct_rev.TransformPoint(0.0, 90.0)[:2]
except RuntimeError:
xy0_np = None
try:
xy0_sp = ct_rev.TransformPoint(0.0, -90.0)[:2]
except RuntimeError:
xy0_sp = None
# case #1 - extent contains the north pole
if xy0_np and extent_contains(*xy0_np):
return setSR(transform_polar(north=True), OSR_WGS84)
# case #2 - extent contains the south pole
# check whether the extent contains the south pole
elif xy0_sp and extent_contains(*xy0_sp):
return setSR(transform_polar(north=False), OSR_WGS84)
# case #3 proceed with the date-line handling
# perform transformation
geom.Transform(ct_fwd)
# get extent and centroid in the target coordinates
x1_min, _, _ = ct_fwd.TransformPoint(x0_min, y0_cnt)
x1_max, _, _ = ct_fwd.TransformPoint(x0_max, y0_cnt)
x1_cnt, _, _ = ct_fwd.TransformPoint(x0_cnt, y0_cnt)
    # fix the wild easting wrap-around
if not between(x1_cnt, (x1_min, x1_max)):
if x1_max < x1_min: # axis orientation preserved
x_cnt, x_min, x_max = x1_cnt, x1_min, x1_max
else: # (x1_min < x1_max) # flipped axis orientation
x_cnt, x_min, x_max = x1_cnt, x1_max, x1_min
        # point unwrapping functions
if x_cnt < x_max: # EAST to WEST
def _dlflip(p):
return (p[0]-360*(p[0] > x_max), p[1])
elif x_cnt > x_min: # WEST to EAST
def _dlflip(p):
return (p[0]+360*(p[0] < x_min), p[1])
geom = setSR(Transfomer(_dlflip)(geom), OSR_WGS84)
    # perform proper wrap-around
return setSR(wrapArroundDateLine(geom, (-180, -90, 180, 90), 1), OSR_WGS84)
#-------------------------------------------------------------------------------
def groupPolygons(plist):
""" group polygons to a multi-polygon """
mp = ogr.Geometry(ogr.wkbMultiPolygon)
for p in plist:
mp.AddGeometry(p)
return mp
def ungroupMultiPolygon(mpol):
""" un-group multi-polygon to a list of multi-polygons """
return [mpol.GetGeometryRef(i) for i in xrange(mpol.GetGeometryCount())]
def extractPolygons(geom):
if geom.GetGeometryName() == "GEOMETRYCOLLECTION":
l = []
for i in xrange(geom.GetGeometryCount()):
l.extend(extractPolygons(geom.GetGeometryRef(i)))
return l
elif geom.GetGeometryName() == "MULTIPOLYGON":
return ungroupMultiPolygon(geom)
elif geom.GetGeometryName() == "POLYGON":
return [geom]
else:
return []
def getRectangle((x_min, y_min, x_max, y_max), step=1.0):
""" Create rectangle polygon with the edges broken to smaller
line segments. The size of the lenght line segments is approx.
the value of the step parameter
"""
n_x = max(1, int(m.ceil((max(x_min, x_max)-min(x_min, x_max))/float(step))))
n_y = max(1, int(m.ceil((max(y_min, y_max)-min(y_min, y_max))/float(step))))
lx = []
ly = []
# generate polygon
lx.append(np.linspace(x_min, x_max, n_x, False))
ly.append(np.ones(n_x)*y_min)
lx.append(np.ones(n_y)*x_max)
ly.append(np.linspace(y_min, y_max, n_y, False))
lx.append(np.linspace(x_max, x_min, n_x, False))
ly.append(np.ones(n_x)*y_max)
lx.append(np.ones(n_y)*x_min)
ly.append(np.linspace(y_max, y_min, n_y, False))
# close ring
lx.append(np.array([x_min]))
ly.append(np.array([y_min]))
# concatenate arrays
x = np.concatenate(lx)
y = np.concatenate(ly)
# convert to polygon
r = ogr.Geometry(ogr.wkbLinearRing)
for xx, yy in zip(x, y):
r.AddPoint_2D(xx, yy)
p = ogr.Geometry(ogr.wkbPolygon)
p.AddGeometry(r)
return p
def shiftGeom(g, (dx, dy)):
""" shift geometry by a given offset """
def _shift(p, (dx, dy)):
return (p[0]+dx, p[1]+dy)
t = Transfomer(_shift)
return t(g, (dx, dy))
#------------------------------------------------------------------------------
class Transfomer(object):
def __init__(self, f):
self.__f = f
def __call__(self, g0, *prm, **kw):
return self._geometry(g0, *prm, **kw)
def _geometry(self, g0, *prm, **kw):
#print g0.GetGeometryName(), g0.GetGeometryType()
if g0.GetGeometryName() == "MULTIPOLYGON":
return self._multi_polygon(g0, *prm, **kw)
elif g0.GetGeometryName() == "POLYGON":
return self._polygon(g0, *prm, **kw)
elif g0.GetGeometryName() == "LINEARRING":
return self._linear_ring(g0, *prm, **kw)
else:
return None
def _linear_ring(self, r0, *prm, **kw):
#print r0.GetGeometryName(), r0.GetGeometryType()
if r0.GetGeometryName() != "LINEARRING":
return None
r1 = ogr.Geometry(ogr.wkbLinearRing)
for i in xrange(r0.GetPointCount()):
rv = (self.__f)(r0.GetPoint(i), *prm, **kw)
if rv is not None:
r1.AddPoint_2D(*rv)
return r1
def _polygon(self, p0, *prm, **kw):
#print p0.GetGeometryName(), p0.GetGeometryType()
if p0.GetGeometryName() != "POLYGON":
return None
p1 = ogr.Geometry(ogr.wkbPolygon)
for i in xrange(p0.GetGeometryCount()):
rv = self._linear_ring(p0.GetGeometryRef(i), *prm, **kw)
if rv is not None:
p1.AddGeometry(rv)
return p1
def _multi_polygon(self, m0, *prm, **kw):
#print m0.GetGeometryName(), m0.GetGeometryType()
if m0.GetGeometryName() != "MULTIPOLYGON":
return None
m1 = ogr.Geometry(ogr.wkbMultiPolygon)
for i in xrange(m0.GetGeometryCount()):
rv = self._polygon(m0.GetGeometryRef(i), *prm, **kw)
if rv is not None:
m1.AddGeometry(rv)
return m1
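# Illustrative sketch (commented out): apply a per-point shift to a rectangle
# polygon via the transformer helper, which is essentially what shiftGeom()
# does above.
#   _shift_east = Transfomer(lambda p: (p[0] + 10.0, p[1]))
#   _shifted = _shift_east(getRectangle((0.0, 0.0, 1.0, 1.0)))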
|
|
from nose.tools import ok_, eq_, raises
from flask import Flask, request
from flask.views import MethodView
from flask.ext.admin import base
class MockView(base.BaseView):
# Various properties
allow_call = True
allow_access = True
@base.expose('/')
def index(self):
return 'Success!'
@base.expose('/test/')
def test(self):
return self.render('mock.html')
def _handle_view(self, name, **kwargs):
if self.allow_call:
return super(MockView, self)._handle_view(name, **kwargs)
else:
return 'Failure!'
def is_accessible(self):
if self.allow_access:
return super(MockView, self).is_accessible()
else:
return False
class MockMethodView(base.BaseView):
@base.expose('/')
def index(self):
return 'Success!'
@base.expose_plugview('/_api/1')
class API1(MethodView):
def get(self, cls):
return cls.render('method.html', request=request, name='API1')
def post(self, cls):
return cls.render('method.html', request=request, name='API1')
def put(self, cls):
return cls.render('method.html', request=request, name='API1')
def delete(self, cls):
return cls.render('method.html', request=request, name='API1')
@base.expose_plugview('/_api/2')
class API2(MethodView):
def get(self, cls):
return cls.render('method.html', request=request, name='API2')
def post(self, cls):
return cls.render('method.html', request=request, name='API2')
def test_baseview_defaults():
view = MockView()
eq_(view.name, None)
eq_(view.category, None)
eq_(view.endpoint, None)
eq_(view.url, None)
eq_(view.static_folder, None)
eq_(view.admin, None)
eq_(view.blueprint, None)
def test_base_defaults():
admin = base.Admin()
eq_(admin.name, 'Admin')
eq_(admin.url, '/admin')
eq_(admin.endpoint, 'admin')
eq_(admin.app, None)
ok_(admin.index_view is not None)
eq_(admin.index_view._template, 'admin/index.html')
# Check if default view was added
eq_(len(admin._views), 1)
eq_(admin._views[0], admin.index_view)
def test_custom_index_view():
view = base.AdminIndexView(name='a', category='b', endpoint='c',
url='/d', template='e')
admin = base.Admin(index_view=view)
eq_(admin.endpoint, 'c')
eq_(admin.url, '/d')
ok_(admin.index_view is view)
eq_(view.name, 'a')
eq_(view.category, 'b')
eq_(view._template, 'e')
# Check if view was added
eq_(len(admin._views), 1)
eq_(admin._views[0], view)
def test_base_registration():
app = Flask(__name__)
admin = base.Admin(app)
eq_(admin.app, app)
ok_(admin.index_view.blueprint is not None)
def test_admin_customizations():
app = Flask(__name__)
admin = base.Admin(app, name='Test', url='/foobar')
eq_(admin.name, 'Test')
eq_(admin.url, '/foobar')
client = app.test_client()
rv = client.get('/foobar/')
eq_(rv.status_code, 200)
def test_baseview_registration():
admin = base.Admin()
view = MockView()
bp = view.create_blueprint(admin)
# Base properties
eq_(view.admin, admin)
ok_(view.blueprint is not None)
# Calculated properties
eq_(view.endpoint, 'mockview')
eq_(view.url, '/admin/mockview')
eq_(view.name, 'Mock View')
# Verify generated blueprint properties
eq_(bp.name, view.endpoint)
eq_(bp.url_prefix, view.url)
eq_(bp.template_folder, 'templates')
eq_(bp.static_folder, view.static_folder)
# Verify customizations
view = MockView(name='Test', endpoint='foobar')
view.create_blueprint(base.Admin())
eq_(view.name, 'Test')
eq_(view.endpoint, 'foobar')
eq_(view.url, '/admin/foobar')
view = MockView(url='test')
view.create_blueprint(base.Admin())
eq_(view.url, '/admin/test')
view = MockView(url='/test/test')
view.create_blueprint(base.Admin())
eq_(view.url, '/test/test')
def test_baseview_urls():
app = Flask(__name__)
admin = base.Admin(app)
view = MockView()
admin.add_view(view)
eq_(len(view._urls), 2)
@raises(Exception)
def test_no_default():
app = Flask(__name__)
admin = base.Admin(app)
admin.add_view(base.BaseView())
def test_call():
app = Flask(__name__)
admin = base.Admin(app)
view = MockView()
admin.add_view(view)
client = app.test_client()
rv = client.get('/admin/')
eq_(rv.status_code, 200)
rv = client.get('/admin/mockview/')
eq_(rv.data, 'Success!')
rv = client.get('/admin/mockview/test/')
eq_(rv.data, 'Success!')
# Check authentication failure
view.allow_call = False
rv = client.get('/admin/mockview/')
eq_(rv.data, 'Failure!')
def test_permissions():
app = Flask(__name__)
admin = base.Admin(app)
view = MockView()
admin.add_view(view)
client = app.test_client()
view.allow_access = False
rv = client.get('/admin/mockview/')
eq_(rv.status_code, 404)
def test_submenu():
app = Flask(__name__)
admin = base.Admin(app)
admin.add_view(MockView(name='Test 1', category='Test', endpoint='test1'))
# Second view is not normally accessible
view = MockView(name='Test 2', category='Test', endpoint='test2')
view.allow_access = False
admin.add_view(view)
ok_('Test' in admin._menu_categories)
eq_(len(admin._menu), 2)
eq_(admin._menu[1].name, 'Test')
eq_(len(admin._menu[1]._children), 2)
# Categories don't have URLs and they're not accessible
eq_(admin._menu[1].get_url(), None)
eq_(admin._menu[1].is_accessible(), False)
eq_(len(admin._menu[1].get_children()), 1)
def test_delayed_init():
app = Flask(__name__)
admin = base.Admin()
admin.add_view(MockView())
admin.init_app(app)
client = app.test_client()
rv = client.get('/admin/mockview/')
eq_(rv.data, 'Success!')
def test_multi_instances_init():
app = Flask(__name__)
admin = base.Admin(app)
class ManageIndex(base.AdminIndexView):
pass
manage = base.Admin(app, index_view=ManageIndex(url='/manage', endpoint='manage'))
@raises(Exception)
def test_double_init():
app = Flask(__name__)
admin = base.Admin(app)
admin.init_app(app)
def test_nested_flask_views():
app = Flask(__name__)
admin = base.Admin(app)
view = MockMethodView()
admin.add_view(view)
client = app.test_client()
rv = client.get('/admin/mockmethodview/_api/1')
assert rv.data == 'GET - API1'
rv = client.put('/admin/mockmethodview/_api/1')
assert rv.data == 'PUT - API1'
rv = client.post('/admin/mockmethodview/_api/1')
assert rv.data == 'POST - API1'
rv = client.delete('/admin/mockmethodview/_api/1')
assert rv.data == 'DELETE - API1'
rv = client.get('/admin/mockmethodview/_api/2')
assert rv.data == 'GET - API2'
rv = client.post('/admin/mockmethodview/_api/2')
assert rv.data == 'POST - API2'
rv = client.delete('/admin/mockmethodview/_api/2')
assert rv.status_code == 405
rv = client.put('/admin/mockmethodview/_api/2')
assert rv.status_code == 405
|
|
"""
Reports base classes. This reports module tries to provide an ORM-agnostic reports engine that allows nice reports
to be generated and exported in a variety of formats. It seeks to be easy to use with querysets, raw SQL, or pure
Python. An additional goal is to have reports managed by model instances as well (e.g. a generic SQL-based
report that can be configured in the backend).
"""
from django import forms
from django.db.models.fields.related import RelatedField
from .filtercontrols import *
from .outputformats import *
import datetime
# Pulled from vitalik's Django-reporting
def get_model_field(model, name):
"""
Gets a field from a Django model.
:param model: A Django model, this should be the class itself.
:param name: A Django model's field.
:return: The field from the model, a subclass of django.db.models.Model
"""
return model._meta.get_field(name)
# Based on vitalik's Django-reporting
def get_lookup_field(model, original, lookup):
"""
Gets a lookup field from a django model, this recursively follows relations
that are indicated by Django's __ notation.
If there were a model like Customer -> Address -> Street (where even street is a model),
calling get_lookup_field(Customer, "address__street__line1") would return
(line1 (a CharField), and Street (a subclass of Model))
:param model: A django model, this should be the actual Model class.
:param original: A django model, this should be the initial model class.
It seems this is not used by the function.
:param lookup: The django lookup string, delimited by __
:return: A tuple of (field, model) where model is a subclass of django.db.models.Model and field is a
subclass of django.db.models.fields.Field
"""
parts = lookup.split('__')
field = get_model_field(model, parts[0])
if not isinstance(field, RelatedField) or len(parts) == 1:
return field,model
rel_model = field.rel.to
next_lookup = '__'.join(parts[1:])
return get_lookup_field(rel_model, original, next_lookup)
class Report(object):
"""
An abstract reportengine report. Concrete report types inherit from this. Override get_rows to make this concrete.
For Example::
class MyReport(Report):
def get_rows(self, *args, **kwargs):
return [(x,x*10) for x in range(0,100)], (('total', 100),)
"""
verbose_name="Abstract Report"
namespace = "Default"
slug ="base"
labels = None
per_page=100
can_show_all=True
output_formats=[AdminOutputFormat(),CSVOutputFormat()]
if XLS_AVAILABLE:
output_formats.append(XLSOutputFormat())
allow_unspecified_filters = False
    date_field = None  # If specified, the report will look up this date field. Currently limited to queryset-based lookups.
default_mask = {} # a dict of filter default values. Can be callable
# TODO add charts = [ {'name','type e.g. bar','data':(0,1,3) cols in table}]
# then i can auto embed the charts at the top of the report based upon that data..
def get_default_mask(self):
"""
Builds default mask. The filter is merged with this to create the filter for the report. Items can be
callable and will be resolved when called here (which should be at view time).
:return: a dictionary of filter key/value pairs
"""
m={}
        for k, v in self.default_mask.items():
            # Resolve callables at view time so date-based masks stay current.
            m[k] = v() if callable(v) else v
return m
def get_filter_form(self, data):
"""
Returns a form with data.
:param data: Should be a dictionary, with filter data in it.
:return: A form that is ready for validation.
"""
form = forms.Form(data=data)
return form
# CONSIDER maybe an "update rows"?
# CONSIDER should the resultant rows be a generator instead of a list?
# CONSIDER should paging be dealt with here to more intelligently handle aggregates?
def get_rows(self,filters={},order_by=None):
"""
Given filter parameters and an order by field, this returns the actual rows of the report.
:param filters: The parameters by which this report should be filtered.
:param order_by: The field by which this report should be ordered.
:return: A tuple (resultant rows, metadata)
"""
raise NotImplementedError("Subclass should return ([],('total',0),)")
# CONSIDER do this by day or by month? month seems most efficient in terms of optimizing queries
# CONSIDER - should this be removed from the API? Is it implemented by any subclasses?
def get_monthly_aggregates(self,year,month):
"""Called when assembling a calendar view of reports. This will be queried for every day, so must be quick"""
# CONSIDER worry about timezone? or just assume Django has this covered?
raise NotImplementedError("Still an idea in the works")
class QuerySetReport(Report):
"""
A report that is based on a Django ORM Queryset.
"""
# TODO make labels more addressable. now fixed to fields in model. what happens with relations?
labels = None
queryset = None
"""
list_filter must contain either ModelFields or FilterControls
"""
list_filter = []
def get_filter_form(self, data):
"""
get_filter_form constructs a filter form, with the appropriate filtercontrol fields, based on the data passed.
If the item in list_filter is a FilterControl, then the control will be added to the form filters.
If the item in list_filter is a field lookup string, then a pre-registered filtercontrol corresponding to that field
may be added to the form filters.
This will follow __ relations (see get_lookup_field docs above)
:param data: A dictionary of filter fields.
:return: A form with the filtered fields.
"""
# NOTE - get_lookup_field does follow __ relations, so not sure about the above comment.
# TODO iterate through list filter and create appropriate widget and prefill from request
form = forms.Form(data=data)
for f in self.list_filter:
# Allow specification of custom filter control, or specify field name (and label?)
if isinstance(f,FilterControl):
control=f
else:
mfi,mfm=get_lookup_field(self.queryset.model,self.queryset.model,f)
# TODO allow label as param 2
control = FilterControl.create_from_modelfield(mfi,f)
if control:
fields = control.get_fields()
form.fields.update(fields)
form.full_clean()
return form
def get_queryset(self, filters, order_by, queryset=None):
"""
Given filters, an order_by and an optional query set, this returns a queryset for this report. Override this
to change the querysets in your reports.
:param filters: A dictionary of field/value pairs that the report can be filtered on.
:param order_by: The field or statement by which this queryset should be ordered.
:param queryset: An optional queryset. If None, self.queryset will be used.
:return: A filtered and ordered queryset.
"""
if queryset is None:
queryset = self.queryset
queryset = queryset.filter(**filters)
if order_by:
queryset = queryset.order_by(order_by)
return queryset
def get_rows(self,filters={},order_by=None):
"""
        Given the filters and order_by value, this returns the actual report tuple. This needn't be overridden by
subclasses unless special functionality is needed. Instead, consider overriding `get_queryset.`
:param filters: A dictionary of field/value pairs that the report can be filtered on.
:param order_by: The field or statement by which this queryset should be ordered.
:return: A tuple of rows and metadata.
"""
qs = self.get_queryset(filters, order_by)
return qs.values_list(*self.labels),(("total",qs.count()),)
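# Illustrative sketch of a concrete QuerySetReport (hedged: it assumes a
# hypothetical "Order" Django model with these fields; not part of this module):
#   class OrderReport(QuerySetReport):
#       verbose_name = "Order Report"
#       slug = "orders"
#       labels = ("id", "created", "status", "total")
#       queryset = Order.objects.all()
#       list_filter = ["status", "created"]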
class ModelReport(QuerySetReport):
"""
A report on a specific django model. Subclasses must define `model` on the class.
"""
model = None
def __init__(self):
"""
Instantiate the ModelReport
"""
super(ModelReport, self).__init__()
self.queryset = self.model.objects.all()
def get_queryset(self, filters, order_by, queryset=None):
"""
Gets a report based on the Model's fields, given filters, an order by, and an optional queryset.
:param filters: The dictionary of filters with which to filter this model report.
:param order_by: The field by which this report will be ordered.
:param queryset: An optional queryset. If none, this will use a queryset that gets all instances of
the given model.
:return: A filtered queryset.
"""
if queryset is None and self.queryset is None:
queryset = self.model.objects.all()
return super(ModelReport, self).get_queryset(filters, order_by, queryset)
class SQLReport(Report):
"""
A subclass of Report, used with raw SQL.
"""
row_sql=None # sql statement with named parameters in python syntax (e.g. "%(age)s" )
    aggregate_sql=None  # SQL statement that produces aggregates; column names and values are taken from the first row only
    query_params=[]  # list of tuples (name, label, datatype) where datatype maps to a registered filtercontrol
#TODO this should be _private.
def get_connection(self):
"""
Gets the django database connection.
:return: The database connection.
"""
from django.db import connection
return connection
#TODO this should be _private.
def get_cursor(self):
"""
Gets the cursor for the connection.
:return: Database connection cursor
"""
return self.get_connection().cursor()
#TODO use string formatting instead of older python replacement
def get_row_sql(self, filters, order_by):
"""
This applies filters directly to the SQL string, which should contain python keyed strings.
:param filters: A dictionary of filters to apply to this sql.
:param order_by: This is ignored, but may be used by subclasses.
:return: The text-replaced SQL, or none if self.row_sql doesn't exist.
"""
if self.row_sql:
return self.row_sql % filters
return None
def get_aggregate_sql(self, filters):
"""
This applies filters to the aggregate SQL.
        :param filters: A dictionary of filters to apply to the SQL.
:return: The text-replaced SQL or None if self.aggregate_sql doesn't exist.
"""
if self.aggregate_sql:
return self.aggregate_sql % filters
return None
#TODO make this _private.
#TODO Instead of fetchall, use a generator.
def get_row_data(self, filters, order_by):
"""
Returns the cursor based on a filter dictionary.
:param filters: A dictionary of field->value filters to filter the report.
:param order_by: The field by which this report should be ordered. (Currently ignored by get_row_sql)
:return: A list of all results (from fetchall)
"""
sql = self.get_row_sql(filters, order_by)
if not sql:
return []
cursor = self.get_cursor()
cursor.execute(sql)
return cursor.fetchall()
def get_aggregate_data(self, filters):
"""
Returns the cursor based on a filter dictionary.
        :param filters: A dictionary of parameters by which this report will be filtered.
:return: The aggregates for this report, based on the aggregate sql.
"""
sql = self.get_aggregate_sql(filters)
if not sql:
return []
cursor = self.get_cursor()
cursor.execute(sql)
result = cursor.fetchone()
agg = list()
for i in range(len(result)):
agg.append((cursor.description[i][0],result[i]))
return agg
def get_filter_form(self, data):
"""
Returns the filter form based on filter data.
:param data: A dictionary with filters that should be used.
:return: A filtering form for this report.
"""
form=forms.Form(data=data)
for q in self.query_params:
control = FilterControl.create_from_datatype(q[2],q[0],q[1])
fields = control.get_fields()
form.fields.update(fields)
form.full_clean()
return form
# CONSIDER not ideal in terms paging, would be better to fetch within a range..
# TODO Make this work with order_by
# TODO Use a generator instead of getting a big list of results.
# TODO Make the return from this function match the implied contract from all of the other subclasses of Report.
def get_rows(self,filters={},order_by=None):
"""
This returns all of the rows in the report, ignores order_by
:param filters: A dictionary of filters upon which to filter the report.
:param order_by: The field by which the report should be ordered.
:return: A tuple of rows and aggregate data (no meta data!)
"""
rows = self.get_row_data(filters, order_by)
agg = self.get_aggregate_data(filters)
return rows,agg
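# Illustrative sketch of a concrete SQLReport (hedged: table and column names
# are hypothetical; note the python "%(name)s" placeholders described above):
#   class SignupsReport(SQLReport):
#       verbose_name = "Signups by day"
#       slug = "signups"
#       row_sql = ("SELECT date(date_joined), count(*) FROM auth_user "
#                  "WHERE date_joined >= '%(date__gte)s' "
#                  "AND date_joined < '%(date__lt)s' "
#                  "GROUP BY date(date_joined)")
#       aggregate_sql = "SELECT count(*) AS total FROM auth_user"
#       query_params = [("date", "Date", "datetime")]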
class DateSQLReport(SQLReport):
"""
A date based SQL report. Implies that the row and aggregate SQL should contain date__gte and date__lt variables.
"""
aggregate_sql=None
query_params=[("date","Date","datetime")]
date_field="date"
default_mask={
"date__gte":lambda: (datetime.datetime.today() -datetime.timedelta(days=30)).strftime("%Y-%m-%d"),
"date__lt":lambda: (datetime.datetime.today() + datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
}
# TODO build AnnotatedReport that deals with .annotate functions in ORM
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module implements an interface to enumlib, Gus Hart's excellent Fortran
code for enumerating derivative structures.
This module depends on a compiled enumlib with the executables multienum.x and
makestr.x available in the path. Please download the library at
http://enum.sourceforge.net/ and follow the instructions in the README to
compile these two executables accordingly.
If you use this module, please cite the following:
Gus L. W. Hart and Rodney W. Forcade, "Algorithm for generating derivative
structures," Phys. Rev. B 77 224115 (26 June 2008)
Gus L. W. Hart and Rodney W. Forcade, "Generating derivative structures from
multilattices: Application to hcp alloys," Phys. Rev. B 80 014120 (July 2009)
Gus L. W. Hart, Lance J. Nelson, and Rodney W. Forcade, "Generating
derivative structures at a fixed concentration," Comp. Mat. Sci. 59
101-107 (March 2012)
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Jul 16, 2012"
import re
import math
import subprocess
import itertools
import logging
import numpy as np
from monty.fractions import lcm
from monty.fractions import fractions
from six.moves import reduce
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.periodic_table import DummySpecie
from monty.os.path import which
from monty.dev import requires
from monty.tempfile import ScratchDir
logger = logging.getLogger(__name__)
# Favor the use of the newer "enum.x" by Gus Hart instead of the older
# "multienum.x"
enum_cmd = which('enum.x') or which('multienum.x')
@requires(enum_cmd and which('makestr.x'),
"EnumlibAdaptor requires the executables 'enum.x' or 'multienum.x' "
"and 'makestr.x' to be in the path. Please download the library at"
"http://enum.sourceforge.net/ and follow the instructions in "
"the README to compile these two executables accordingly.")
class EnumlibAdaptor(object):
"""
An adaptor for enumlib.
.. attribute:: structures
List of all enumerated structures.
"""
amount_tol = 1e-5
def __init__(self, structure, min_cell_size=1, max_cell_size=1,
symm_prec=0.1, enum_precision_parameter=0.001,
refine_structure=False, check_ordered_symmetry=True):
"""
Initializes the adapter with a structure and some parameters.
Args:
structure: An input structure.
min_cell_size (int): The minimum cell size wanted. Defaults to 1.
max_cell_size (int): The maximum cell size wanted. Defaults to 1.
symm_prec (float): Symmetry precision. Defaults to 0.1.
enum_precision_parameter (float): Finite precision parameter for
enumlib. Default of 0.001 is usually ok, but you might need to
tweak it for certain cells.
refine_structure (bool): If you are starting from a structure that
has been relaxed via some electronic structure code,
it is usually much better to start with symmetry determination
                and then obtain a refined structure. The refined structure has
                cell parameters and atomic positions shifted to the expected
                symmetry positions, which makes it much less sensitive to
                precision issues in enumlib. If you are already starting from an
experimental cif, refinement should have already been done and
it is not necessary. Defaults to False.
check_ordered_symmetry (bool): Whether to check the symmetry of
the ordered sites. If the symmetry of the ordered sites is
lower, the lowest symmetry ordered sites is included in the
enumeration. This is important if the ordered sites break
                symmetry in a way that is important for getting possible
structures. But sometimes including ordered sites
slows down enumeration to the point that it cannot be
completed. Switch to False in those cases. Defaults to True.
"""
if refine_structure:
finder = SpacegroupAnalyzer(structure, symm_prec)
self.structure = finder.get_refined_structure()
else:
self.structure = structure
self.min_cell_size = min_cell_size
self.max_cell_size = max_cell_size
self.symm_prec = symm_prec
self.enum_precision_parameter = enum_precision_parameter
self.check_ordered_symmetry = check_ordered_symmetry
self.structures = None
def run(self):
"""
Run the enumeration.
"""
# Create a temporary directory for working.
with ScratchDir(".") as d:
logger.debug("Temp dir : {}".format(d))
try:
# Generate input files
self._gen_input_file()
# Perform the actual enumeration
num_structs = self._run_multienum()
# Read in the enumeration output as structures.
if num_structs > 0:
self.structures = self._get_structures(num_structs)
else:
raise ValueError("Unable to enumerate structure.")
except Exception:
import sys
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=10, file=sys.stdout)
def _gen_input_file(self):
"""
Generate the necessary struct_enum.in file for enumlib. See enumlib
documentation for details.
"""
coord_format = "{:.6f} {:.6f} {:.6f}"
# Using symmetry finder, get the symmetrically distinct sites.
fitter = SpacegroupAnalyzer(self.structure, self.symm_prec)
symmetrized_structure = fitter.get_symmetrized_structure()
logger.debug("Spacegroup {} ({}) with {} distinct sites".format(
fitter.get_spacegroup_symbol(),
fitter.get_spacegroup_number(),
len(symmetrized_structure.equivalent_sites))
)
"""
        Enumlib doesn't work when the number of species gets too large. To
simplify matters, we generate the input file only with disordered sites
and exclude the ordered sites from the enumeration. The fact that
different disordered sites with the exact same species may belong to
different equivalent sites is dealt with by having determined the
spacegroup earlier and labelling the species differently.
"""
# index_species and index_amounts store mappings between the indices
# used in the enum input file, and the actual species and amounts.
index_species = []
index_amounts = []
# Stores the ordered sites, which are not enumerated.
ordered_sites = []
disordered_sites = []
coord_str = []
for sites in symmetrized_structure.equivalent_sites:
if sites[0].is_ordered:
ordered_sites.append(sites)
else:
sp_label = []
species = {k: v for k, v in sites[0].species_and_occu.items()}
if sum(species.values()) < 1 - EnumlibAdaptor.amount_tol:
                    # Let us first add a dummy element for every single
# site whose total occupancies don't sum to 1.
species[DummySpecie("X")] = 1 - sum(species.values())
for sp in species.keys():
if sp not in index_species:
index_species.append(sp)
sp_label.append(len(index_species) - 1)
index_amounts.append(species[sp] * len(sites))
else:
ind = index_species.index(sp)
sp_label.append(ind)
index_amounts[ind] += species[sp] * len(sites)
sp_label = "/".join(["{}".format(i) for i in sorted(sp_label)])
for site in sites:
coord_str.append("{} {}".format(
coord_format.format(*site.coords),
sp_label))
disordered_sites.append(sites)
def get_sg_info(ss):
finder = SpacegroupAnalyzer(Structure.from_sites(ss),
self.symm_prec)
return finder.get_spacegroup_number()
curr_sites = list(itertools.chain.from_iterable(disordered_sites))
min_sgnum = get_sg_info(curr_sites)
logger.debug("Disorderd sites has sgnum %d" % (
min_sgnum))
        # It could be that some of the ordered sites have a lower symmetry than
# the disordered sites. So we consider the lowest symmetry sites as
# disordered in our enumeration.
self.ordered_sites = []
to_add = []
if self.check_ordered_symmetry:
for sites in ordered_sites:
temp_sites = list(curr_sites) + sites
sgnum = get_sg_info(temp_sites)
if sgnum < min_sgnum:
logger.debug("Adding {} to sites to be ordered. "
"New sgnum {}"
.format(sites, sgnum))
to_add = sites
min_sgnum = sgnum
for sites in ordered_sites:
if sites == to_add:
index_species.append(sites[0].specie)
index_amounts.append(len(sites))
sp_label = len(index_species) - 1
logger.debug("Lowest symmetry {} sites are included in enum."
.format(sites[0].specie))
for site in sites:
coord_str.append("{} {}".format(
coord_format.format(*site.coords),
sp_label))
disordered_sites.append(sites)
else:
self.ordered_sites.extend(sites)
self.index_species = index_species
lattice = self.structure.lattice
output = [self.structure.formula, "bulk"]
for vec in lattice.matrix:
output.append(coord_format.format(*vec))
output.append("{}".format(len(index_species)))
output.append("{}".format(len(coord_str)))
output.extend(coord_str)
output.append("{} {}".format(self.min_cell_size, self.max_cell_size))
output.append(str(self.enum_precision_parameter))
output.append("partial")
ndisordered = sum([len(s) for s in disordered_sites])
base = int(ndisordered*reduce(lcm,
[f.limit_denominator(
ndisordered *
self.max_cell_size).denominator
for f in map(fractions.Fraction,
index_amounts)]))
# base = ndisordered #10 ** int(math.ceil(math.log10(ndisordered)))
# To get a reasonable number of structures, we fix concentrations to the
# range expected in the original structure.
total_amounts = sum(index_amounts)
for amt in index_amounts:
conc = amt / total_amounts
if abs(conc * base - round(conc * base)) < 1e-5:
output.append("{} {} {}".format(int(round(conc * base)),
int(round(conc * base)),
base))
else:
min_conc = int(math.floor(conc * base))
output.append("{} {} {}".format(min_conc - 1, min_conc + 1,
base))
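        # Worked example of the loop above (illustrative, not from the code):
        # with a single orbit of two equivalent sites, each A:0.5/B:0.5, and
        # max_cell_size=1, index_amounts is [1.0, 1.0]; every Fraction then has
        # denominator 1, so base = ndisordered * 1 = 2 and conc = 0.5 for both
        # species, giving the exact line "1 1 2" for each of them.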
output.append("")
logger.debug("Generated input file:\n{}".format("\n".join(output)))
with open("struct_enum.in", "w") as f:
f.write("\n".join(output))
def _run_multienum(self):
p = subprocess.Popen([enum_cmd],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE, close_fds=True)
output = p.communicate()[0].decode("utf-8")
count = 0
start_count = False
for line in output.strip().split("\n"):
if line.strip().endswith("RunTot"):
start_count = True
            elif start_count and re.match(r"\d+\s+.*", line.strip()):
count = int(line.split()[-1])
logger.debug("Enumeration resulted in {} structures".format(count))
return count
def _get_structures(self, num_structs):
structs = []
rs = subprocess.Popen(["makestr.x",
"struct_enum.out", str(0),
str(num_structs - 1)],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if len(self.ordered_sites) > 0:
original_latt = self.ordered_sites[0].lattice
# Need to strip sites of site_properties, which would otherwise
# result in an index error. Hence Structure is reconstructed in
# the next step.
ordered_structure = Structure(
original_latt,
[site.species_and_occu for site in self.ordered_sites],
[site.frac_coords for site in self.ordered_sites])
inv_org_latt = np.linalg.inv(original_latt.matrix)
for n in range(1, num_structs + 1):
with open("vasp.{:06d}".format(n)) as f:
data = f.read()
data = re.sub("scale factor", "1", data)
data = re.sub("(\d+)-(\d+)", r"\1 -\2", data)
poscar = Poscar.from_string(data, self.index_species)
sub_structure = poscar.structure
# Enumeration may have resulted in a super lattice. We need to
# find the mapping from the new lattice to the old lattice, and
# perform supercell construction if necessary.
new_latt = sub_structure.lattice
sites = []
if len(self.ordered_sites) > 0:
transformation = np.dot(new_latt.matrix, inv_org_latt)
transformation = [[int(round(cell)) for cell in row]
for row in transformation]
logger.debug("Supercell matrix: {}".format(transformation))
s = Structure.from_sites(ordered_structure)
s.make_supercell(transformation)
sites.extend([site.to_unit_cell for site in s])
super_latt = sites[-1].lattice
else:
super_latt = new_latt
for site in sub_structure:
if site.specie.symbol != "X": # We exclude vacancies.
sites.append(PeriodicSite(site.species_and_occu,
site.frac_coords,
super_latt).to_unit_cell)
structs.append(Structure.from_sites(sorted(sites)))
logger.debug("Read in a total of {} structures.".format(num_structs))
return structs
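# A minimal usage sketch (illustrative; assumes a disordered pymatgen
# Structure named `structure` and a working enumlib install on the PATH):
#
#   adaptor = EnumlibAdaptor(structure, min_cell_size=1, max_cell_size=2)
#   adaptor.run()
#   enumerated = adaptor.structures   # list of ordered Structure objects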
|
|
from collections import OrderedDict
from bson import DBRef, ObjectId
from bson.errors import InvalidId
from django.utils.encoding import smart_str
from django.utils.translation import gettext_lazy as _
from mongoengine import Document, EmbeddedDocument
from mongoengine import fields as me_fields
from mongoengine.base import get_document
from mongoengine.base.common import _document_registry
from mongoengine.errors import DoesNotExist, NotRegistered
from mongoengine.errors import ValidationError as MongoValidationError
from mongoengine.queryset import QuerySet, QuerySetManager
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from rest_framework.fields import empty
from rest_framework.utils import html
from rest_framework.settings import api_settings
class ObjectIdField(serializers.Field):
""" Field for ObjectId values """
def to_internal_value(self, value):
try:
return ObjectId(smart_str(value))
except InvalidId:
raise serializers.ValidationError("'%s' is not a valid ObjectId" % value)
def to_representation(self, value):
return smart_str(value)
class DocumentField(serializers.Field):
""" Replacement of DRF ModelField.
    Keeps track of the underlying mongoengine field.
Used by DocumentSerializers to map unknown fields.
NB: This is not DocumentField from previous releases. For previous behaviour see GenericField
"""
def __init__(self, model_field, **kwargs):
self.model_field = model_field
super(DocumentField, self).__init__(**kwargs)
def get_attribute(self, obj):
return obj
def to_internal_value(self, data):
""" convert input to python value.
Uses document field's ``to_python()``.
"""
return self.model_field.to_python(data)
def to_representation(self, obj):
""" convert value to representation.
DRF ModelField uses ``value_to_string`` for this purpose. Mongoengine fields do not have such method.
This implementation uses ``django.utils.encoding.smart_str`` to convert everything to text, while keeping json-safe types intact.
        NB: The argument is the whole object, instead of the attribute value. This is an upstream feature,
        probably because the field can be represented by a complicated method with a nontrivial way to extract data.
"""
value = self.model_field.__get__(obj, None)
return smart_str(value, strings_only=True)
def run_validators(self, value):
""" validate value.
Uses document field's ``validate()``
"""
try:
self.model_field.validate(value)
except MongoValidationError as e:
raise ValidationError(e.message)
super(DocumentField, self).run_validators(value)
class GenericEmbeddedField(serializers.Field):
""" Field for generic embedded documents.
Serializes like DictField with additional item ``_cls``.
"""
default_error_messages = {
'not_a_dict': serializers.DictField.default_error_messages['not_a_dict'],
'not_a_doc': _('Expected an EmbeddedDocument but got type "{input_type}".'),
'undefined_model': _('Document `{doc_cls}` has not been defined.'),
        'missing_class': _('Provided data has no `_cls` item.')
}
def to_internal_value(self, data):
if not isinstance(data, dict):
self.fail('not_a_dict', input_type=type(data).__name__)
try:
doc_name = data['_cls']
doc_cls = get_document(doc_name)
except KeyError:
self.fail('missing_class')
except NotRegistered:
self.fail('undefined_model', doc_cls=doc_name)
return doc_cls(**data)
def to_representation(self, doc):
if not isinstance(doc, EmbeddedDocument):
self.fail('not_a_doc', input_type=type(doc).__name__)
data = OrderedDict()
data['_cls'] = doc.__class__.__name__
for field_name in doc._fields:
if not hasattr(doc, field_name):
continue
data[field_name] = getattr(doc, field_name)
return data
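# A minimal sketch of GenericEmbeddedField in use (illustrative; `Comment` is
# a hypothetical EmbeddedDocument with a single `body` field):
#
#   field = GenericEmbeddedField()
#   field.to_representation(Comment(body="hi"))
#   # -> OrderedDict([('_cls', 'Comment'), ('body', 'hi')])
#
# to_internal_value() does the reverse: it looks up the class named by '_cls'
# via get_document() and instantiates it from the submitted dict.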
class GenericField(serializers.Field):
""" Field for generic values.
Recursively traverses lists and dicts.
    Primitive values are serialized using ``django.utils.encoding.smart_str`` (keeping json-safe types intact).
Embedded documents handled using temporary GenericEmbeddedField.
No validation performed.
Note: it will not work properly if a value contains some complex elements.
"""
def to_representation(self, value):
return self.represent_data(value)
def represent_data(self, data):
if isinstance(data, EmbeddedDocument):
field = GenericEmbeddedField()
return field.to_representation(data)
elif isinstance(data, dict):
return dict([(key, self.represent_data(val)) for key, val in data.items()])
elif isinstance(data, list):
return [self.represent_data(value) for value in data]
elif data is None:
return None
else:
return smart_str(data, strings_only=True)
def to_internal_value(self, value):
return self.parse_data(value)
def parse_data(self, data):
if isinstance(data, dict):
if '_cls' in data:
field = GenericEmbeddedField()
return field.to_internal_value(data)
else:
return dict([(key, self.parse_data(val)) for key, val in data.items()])
elif isinstance(data, list):
return [self.parse_data(value) for value in data]
else:
return data
class AttributedDocumentField(DocumentField):
def get_attribute(self, instance):
return serializers.Field.get_attribute(self, instance)
class GenericEmbeddedDocumentField(GenericEmbeddedField, AttributedDocumentField):
""" Field for GenericEmbeddedDocumentField.
Used internally by ``DocumentSerializer``.
"""
pass
class DynamicField(GenericField, AttributedDocumentField):
""" Field for DynamicDocuments.
Used internally by ``DynamicDocumentSerializer``.
"""
pass
class ReferenceField(serializers.Field):
""" Field for References.
Argument ``model`` or ``queryset`` should be given to specify referencing model.
Internal value: DBRef.
Representation: ``id_value``
Parsing: ``id_value`` or ``{ _id: id_value }``
    Formatting and parsing of the id_value is handled by ``.pk_field_class``. By default this is ObjectIdField, which parses input into ``ObjectId`` and represents it as ``str``.
    Validation checks the existence of the referenced object.
"""
default_error_messages = {
'invalid_input': _('Invalid input. Expected `id_value` or `{ _id: id_value }`.'),
'invalid_id': _('Cannot parse "{pk_value}" as {pk_type}.'),
'not_found': _('Document with id={pk_value} does not exist.'),
}
queryset = None
pk_field_class = ObjectIdField
""" Serializer field class used to handle object ids.
ObjectIdField is the default. This attribute is dynamically overridden to
manage referenced models with a custom primary key.
"""
def __init__(self, model=None, queryset=None, **kwargs):
if model is not None:
self.queryset = model.objects
elif queryset is not None:
self.queryset = queryset
else:
self.queryset = None
self.pk_field = self.pk_field_class()
assert self.queryset is not None or kwargs.get('read_only', None), (
'Reference field must provide a `queryset` or `model` argument, '
'or set read_only=`True`.'
)
super(ReferenceField, self).__init__(**kwargs)
def run_validation(self, data=empty):
# We force empty strings to None values for relational fields.
if data == '':
data = None
return super(ReferenceField, self).run_validation(data)
def get_queryset(self):
queryset = self.queryset
if isinstance(queryset, (QuerySet, QuerySetManager)):
queryset = queryset.all()
return queryset
@property
def choices(self):
queryset = self.get_queryset()
if queryset is None:
# Ensure that field.choices returns something sensible
# even when accessed with a read-only field.
return {}
return OrderedDict([
(
str(self.to_representation(item)),
self.display_value(item)
)
for item in queryset
])
@property
def grouped_choices(self):
return self.choices
def display_value(self, instance):
return str(instance)
def parse_id(self, value):
try:
return self.pk_field.to_internal_value(value)
        except Exception:
self.fail('invalid_id', pk_value=value, pk_type=self.pk_field_class.__name__)
def to_internal_value(self, value):
if isinstance(value, dict):
try:
doc_id = self.parse_id(value['_id'])
except KeyError:
self.fail('invalid_input')
else:
doc_id = self.parse_id(value)
try:
            # Use the 'pk' attribute instead of 'id', as the latter does not
            # exist when the model has a custom primary key
return self.get_queryset().only('pk').get(pk=doc_id).to_dbref()
except DoesNotExist:
self.fail('not_found', pk_value=doc_id)
def to_representation(self, value):
assert isinstance(value, (Document, DBRef))
doc_id = value.id
return self.pk_field.to_representation(doc_id)
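# A minimal usage sketch (illustrative; `Author` is a hypothetical mongoengine
# Document):
#
#   field = ReferenceField(model=Author)
#   field.to_internal_value("5f0c...")         # -> DBRef, if the Author exists
#   field.to_representation(author_instance)   # -> "5f0c..." (str of the id)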
class ComboReferenceField(ReferenceField):
""" Field for References.
Can parse either reference or nested document data.
"""
default_error_messages = {
'invalid_input': _('Invalid input. Expected `id_value` or `{ _id: id_value }`, or `{ data }`.'),
}
def __init__(self, serializer, **kwargs):
self.serializer = serializer
self.model = serializer.Meta.model
if 'model' not in kwargs:
kwargs['model'] = self.model
super(ComboReferenceField, self).__init__(**kwargs)
def to_internal_value(self, value):
if not isinstance(value, dict) or list(value.keys()) == ['_id']:
return super(ComboReferenceField, self).to_internal_value(value)
if '_id' in value:
self.fail('invalid_input')
if 'id' in value:
return super(ComboReferenceField, self).to_internal_value(value['id'])
ser = self.serializer(data=value)
ser.is_valid(raise_exception=True)
obj = self.model(**ser.validated_data)
return obj
@classmethod
def get_depth(cls, obj):
if obj.parent is None:
return 0
if hasattr(obj.parent, 'Meta'):
return getattr(obj.parent.Meta, 'depth', 0)
return cls.get_depth(obj.parent)
def to_representation(self, value):
if self.get_depth(self) == 0:
return super(ComboReferenceField, self).to_representation(value)
assert isinstance(value, (Document, DBRef))
if isinstance(value, DBRef):
value = self.model._get_db().dereference(value)
ser = self.serializer(instance=value)
return ser.data
class GenericReferenceField(serializers.Field):
""" Field for GenericReferences.
    Internal value: Document, retrieved with only the id field. Mongoengine does not support DBRef here.
Representation: ``{ _cls: str, _id: str }``.
    Validation checks the existence of the given class and of the referenced document.
"""
pk_field_class = ObjectIdField
"The same as for ReferenceField"
default_error_messages = {
'not_a_dict': serializers.DictField.default_error_messages['not_a_dict'],
'missing_items': _('Expected a dict with `_cls` and `_id` items.'),
'invalid_id': _('Cannot parse "{pk_value}" as {pk_type}.'),
'undefined_model': _('Document `{doc_cls}` has not been defined.'),
        'undefined_collection': _('No document defined for collection `{collection}`.'),
'not_found': _('Document with id={pk_value} does not exist.'),
}
def __init__(self, **kwargs):
self.pk_field = self.pk_field_class()
super(GenericReferenceField, self).__init__(**kwargs)
def parse_id(self, value):
try:
return self.pk_field.to_internal_value(value)
        except Exception:
self.fail('invalid_id', pk_value=repr(value), pk_type=self.pk_field_class.__name__)
def to_internal_value(self, value):
if not isinstance(value, dict):
self.fail('not_a_dict', input_type=type(value).__name__)
try:
doc_name = value['_cls']
doc_id = value['_id']
except KeyError:
self.fail('missing_items')
try:
doc_cls = get_document(doc_name)
except NotRegistered:
self.fail('undefined_model', doc_cls=doc_name)
try:
doc_id = self.pk_field.to_internal_value(doc_id)
        except Exception:
self.fail('invalid_id', pk_value=repr(doc_id), pk_type=self.pk_field_class.__name__)
try:
return doc_cls.objects.only('id').get(id=doc_id)
except DoesNotExist:
self.fail('not_found', pk_value=doc_id)
def to_representation(self, value):
assert isinstance(value, (Document, DBRef))
if isinstance(value, Document):
doc_id = value.id
doc_cls = value.__class__.__name__
if isinstance(value, DBRef): # hard case
doc_id = value.id
doc_collection = value.collection
class_match = [k for k, v in _document_registry.items() if v._get_collection_name() == doc_collection]
if len(class_match) != 1:
                self.fail('undefined_collection', collection=doc_collection)
doc_cls = class_match[0]
return {'_cls': doc_cls, '_id': self.pk_field.to_representation(doc_id)}
class MongoValidatingField(object):
mongo_field = me_fields.BaseField
"mongoengine field class used to validate value"
def run_validators(self, value):
try:
self.mongo_field().validate(value)
except MongoValidationError as e:
raise ValidationError(e.message)
super(MongoValidatingField, self).run_validators(value)
class GeoPointField(MongoValidatingField, serializers.Field):
""" Field for 2D point values.
Internal value and representation: ``[ x, y ]``
Validation is delegated to mongoengine field.
"""
default_error_messages = {
'not_a_list': _("Points must be a list of coordinates, instead got {input_value}."),
        'not_2d': _("Point value must be two-dimensional coordinates, instead got {input_value}."),
'not_float': _("Point coordinates must be float or int values, instead got {input_value}."),
}
mongo_field = me_fields.GeoPointField
def to_internal_value(self, value):
if not isinstance(value, list):
self.fail('not_a_list', input_value=repr(value))
if len(value) != 2:
self.fail('not_2d', input_value=repr(value))
if value == [None, None]:
return value
try:
return [float(value[0]), float(value[1])]
except ValueError:
self.fail('not_float', input_value=repr(value))
def to_representation(self, value):
return list(value)
class GeoJSONField(MongoValidatingField, serializers.Field):
""" Field for GeoJSON values.
    Should be specified with argument ``geo_type`` referencing a GeoJSON geometry type ('Point', 'LineString', etc.)
Internal value: ``[ coordinates ]`` (as required by mongoengine fields).
Representation: ``{ 'type': str, 'coordinates': [ coords ] }`` (GeoJSON geometry format).
Validation: delegated to corresponding mongoengine field.
"""
default_error_messages = {
'invalid_type': _("Geometry must be a geojson geometry or a geojson coordinates, got {input_value}."),
'invalid_geotype': _("Geometry expected to be '{exp_type}', got {geo_type}."),
}
valid_geo_types = {
'Point': me_fields.PointField,
'LineString': me_fields.LineStringField,
'Polygon': me_fields.PolygonField,
'MultiPoint': me_fields.MultiPointField,
'MultiLineString': me_fields.MultiLineStringField,
'MultiPolygon': me_fields.MultiPolygonField
}
def __init__(self, geo_type, *args, **kwargs):
assert geo_type in self.valid_geo_types
self.mongo_field = self.valid_geo_types[geo_type]
super(GeoJSONField, self).__init__(*args, **kwargs)
def to_internal_value(self, value):
if isinstance(value, list):
return value
if not isinstance(value, dict) or 'coordinates' not in value or 'type' not in value:
self.fail('invalid_type', input_value=repr(value))
if value['type'] != self.mongo_field._type:
self.fail('invalid_geotype', geo_type=repr(value['type']), exp_type=self.mongo_field._type)
return value['coordinates']
def to_representation(self, value):
if isinstance(value, dict):
val = value['coordinates']
else:
val = value
# return value
return {'type': self.mongo_field._type, 'coordinates': val}
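# A minimal sketch of GeoJSONField value handling for a 'Point' (illustrative):
#
#   field = GeoJSONField(geo_type='Point')
#   field.to_internal_value({'type': 'Point', 'coordinates': [1.0, 2.0]})
#   # -> [1.0, 2.0]   (bare coordinates, as expected by mongoengine PointField)
#   field.to_representation([1.0, 2.0])
#   # -> {'type': 'Point', 'coordinates': [1.0, 2.0]}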
class DictField(serializers.DictField):
default_error_messages = {
'not_a_dict': _('Expected a dictionary of items but got type "{input_type}".'),
'empty': _('This dict may not be empty.')
}
def __init__(self, *args, **kwargs):
self.allow_empty = kwargs.pop('allow_empty', True)
super(DictField, self).__init__(*args, **kwargs)
def to_internal_value(self, data):
"""
Dicts of native values <- Dicts of primitive datatypes.
"""
if html.is_html_input(data):
data = html.parse_html_dict(data)
if not isinstance(data, dict):
self.fail('not_a_dict', input_type=type(data).__name__)
if not self.allow_empty and len(data.keys()) == 0:
message = self.error_messages['empty']
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
})
return {
str(key): self.child.run_validation(value)
for key, value in data.items()
}
class FileField(serializers.FileField):
""" Field for files, stored in gridfs.
Corresponds to ``DRF.serializers.FileField``
Internal value: a file-like object.
For uploaded files it is a ``django.core.files.UploadedFile`` (provided by django and DRF parsers).
For gridfs files it is ``mongoengine.fields.GridFSProxy`` (provided by mongoengine).
Representation: None or str(grid_id)
"""
def to_representation(self, value):
return smart_str(value.grid_id) if hasattr(value, 'grid_id') else None
class ImageField(FileField):
""" Field for images, stored in gridfs.
Corresponds to ``DRF.serializers.ImageField``, the same way as ``FileField``
"""
|
|
import sys
import os
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from argparse import SUPPRESS
try:
from apex import amp
except ImportError:
amp = None
from dataset import LMDBDataset
from pixelsnail import PixelSNAIL
from scheduler import CycleScheduler
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..'))
sys.path.append(lib_path)
lib_path2 = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path2)
import candle
additional_definitions = [
{'name': 'sched_mode',
'type': str,
'default': None,
'help': 'Mode of learning rate scheduler'},
{'name': 'lmdb_filename',
'type': str,
'default': SUPPRESS,
'help': 'lmdb dataset path'},
{'name': 'amp',
'type': str,
'default': 'O0',
'help': ''},
{'name': 'hier',
'type': str,
'default': 'top',
'help': ''},
{'name': 'channel',
'type': int,
'default': 256,
'help': ''},
{'name': 'n_res_block',
'type': int,
'default': 4,
'help': ''},
{'name': 'n_res_channel',
'type': int,
'default': 256,
'help': ''},
{'name': 'n_out_res_block',
'type': int,
'default': 0,
'help': ''},
{'name': 'n_cond_res_block',
'type': int,
'default': 3,
'help': ''},
{'name': 'ckpt_restart',
'type': str,
'default': None,
'help': 'Checkpoint to restart from'},
]
required = [
'batch_size',
'epochs',
'hier',
'learning_rate',
'channel',
'n_res_block',
'n_res_channel',
'n_out_res_block',
'n_cond_res_block',
'dropout',
'amp',
'sched_mode',
'lmdb_filename',
]
class TrPxSnBk(candle.Benchmark):
def set_locals(self):
"""Functionality to set variables specific for the benchmark
- required: set of required parameters for the benchmark.
- additional_definitions: list of dictionaries describing the additional parameters for the
benchmark.
"""
if required is not None:
self.required = set(required)
if additional_definitions is not None:
self.additional_definitions = additional_definitions
def initialize_parameters(default_model='train_pixelsnail_default_model.txt'):
# Build benchmark object
trpsn = TrPxSnBk(file_path, default_model, 'pytorch',
prog='train_pixelsnail_baseline',
desc='Histology train pixelsnail - Examples')
print("Created sample benchmark")
# Initialize parameters
gParameters = candle.finalize_parameters(trpsn)
print("Parameters initialized")
return gParameters
def train(args, epoch, loader, model, optimizer, scheduler, device):
loader = tqdm(loader)
criterion = nn.CrossEntropyLoss()
for i, (top, bottom, label) in enumerate(loader):
model.zero_grad()
top = top.to(device)
if args.hier == 'top':
target = top
out, _ = model(top)
elif args.hier == 'bottom':
bottom = bottom.to(device)
target = bottom
out, _ = model(bottom, condition=top)
loss = criterion(out, target)
loss.backward()
if scheduler is not None:
scheduler.step()
optimizer.step()
_, pred = out.max(1)
correct = (pred == target).float()
accuracy = correct.sum() / target.numel()
lr = optimizer.param_groups[0]['lr']
loader.set_description(
(
f'epoch: {epoch + 1}; loss: {loss.item():.5f}; '
f'acc: {accuracy:.5f}; lr: {lr:.5f}'
)
)
class PixelTransform:
def __init__(self):
pass
def __call__(self, input):
ar = np.array(input)
return torch.from_numpy(ar).long()
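# A minimal sketch of PixelTransform (illustrative): it turns array-like input
# into a LongTensor.
#
#   t = PixelTransform()
#   t([[1, 2], [3, 4]])   # -> tensor([[1, 2], [3, 4]]) of dtype torch.int64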
def run(params):
args = candle.ArgumentStruct(**params)
# Configure GPUs
ndevices = torch.cuda.device_count()
if ndevices < 1:
raise Exception('No CUDA gpus available')
device = 'cuda'
dataset = LMDBDataset(args.lmdb_filename)
loader = DataLoader(
dataset, batch_size=args.batch_size, shuffle=True, num_workers=4, drop_last=True
)
ckpt = {}
if args.ckpt_restart is not None:
ckpt = torch.load(args.ckpt_restart)
args = ckpt['args']
if args.hier == 'top':
model = PixelSNAIL(
[32, 32],
512,
args.channel,
5,
4,
args.n_res_block,
args.n_res_channel,
dropout=args.dropout,
n_out_res_block=args.n_out_res_block,
)
elif args.hier == 'bottom':
model = PixelSNAIL(
[64, 64],
512,
args.channel,
5,
4,
args.n_res_block,
args.n_res_channel,
attention=False,
dropout=args.dropout,
n_cond_res_block=args.n_cond_res_block,
cond_res_channel=args.n_res_channel,
)
if 'model' in ckpt:
model.load_state_dict(ckpt['model'])
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
if amp is not None:
model, optimizer = amp.initialize(model, optimizer, opt_level=args.amp)
model = nn.DataParallel(model)
model = model.to(device)
scheduler = None
if args.sched_mode == 'cycle':
scheduler = CycleScheduler(
optimizer, args.learning_rate, n_iter=len(loader) * args.epochs, momentum=None
)
for i in range(args.epochs):
train(args, i, loader, model, optimizer, scheduler, device)
torch.save(
{'model': model.module.state_dict(), 'args': args},
f'{args.ckpt_directory}/checkpoint/pixelsnail_{args.hier}_{str(i + 1).zfill(3)}.pt',
)
def main():
params = initialize_parameters()
run(params)
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2011-2012 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2012 Barnstormer Softworks, Ltd.
import xmlrpclib
import jsonrpc
import logging
import datetime
import time
from foam.core.log import KeyAdapter
from foam.openflow.types import Port
import foam.core.allocation
MAXAGE = datetime.timedelta(hours=6)
class CachedSlice(object):
def __init__ (self, name):
self.name = name
self._time = datetime.datetime.now()
self._info = Connection.getSliceInfo(self.name)
def exists (self):
now = datetime.datetime.now()
if (now - self._time) > MAXAGE:
self._time = now
self._info = Connection.getSliceInfo(self.name)
if self._info is not None:
return True
else:
return False
class SliceCache(object):
def __init__ (self):
self._cache = {}
def exists (self, slice_name):
try:
return self._cache[slice_name].exists()
    except KeyError:
sl = CachedSlice(slice_name)
self._cache[slice_name] = sl
return sl.exists()
def delete (self, slice_name):
try:
del self._cache[slice_name]
    except KeyError:
return
def add (self, slice_name):
self._cache[slice_name] = CachedSlice(slice_name)
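# A minimal sketch of the slice cache behaviour (illustrative): an entry is
# re-checked against FlowVisor via Connection.getSliceInfo() once it is older
# than MAXAGE (6 hours).
#
#   cache = SliceCache()
#   cache.add("my-slice")       # prime the cache
#   cache.exists("my-slice")    # True while FlowVisor still knows the slice
#   cache.delete("my-slice")    # forget the local entry only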
class _Connection(object):
def __init__ (self):
self.xmlcall = self._xmlcall_time
self.log = KeyAdapter("svc:FV", logging.getLogger('foam'))
self.plog = logging.getLogger("perf")
self.__host = ConfigDB.getConfigItemByKey("flowvisor.hostname").getValue()
self.__passwd = ConfigDB.getConfigItemByKey("flowvisor.passwd").getValue()
self.__jsonport = ConfigDB.getConfigItemByKey("flowvisor.json-port").getValue()
self.__xmlport = ConfigDB.getConfigItemByKey("flowvisor.xmlrpc-port").getValue()
self.__sliceCache = SliceCache()
self.buildConnections()
def updateConfig (self, info):
self.info = info
self.buildConnections()
self.rebuildCache()
def buildConnections (self):
self.xmlconn = xmlrpclib.ServerProxy("https://fvadmin:%s@%s:%d/xmlrpc" % (
self.__passwd, self.__host, self.__xmlport))
self.jsonconn = jsonrpc.ServiceProxy("https://fvadmin:%s@%s:%d" % (
self.__passwd, self.__host, self.__jsonport))
def _xmlcall_time (self, method, *args):
m = getattr(self.xmlconn, "api.%s" % (method))
t1 = time.time()
ret = m(*args)
dur = time.time() - t1
self.plog.info("FlowVisor:%s %.2f" % (method, dur * 1000))
return ret
def _xmlcall_fast (self, method, *args):
m = getattr(self.xmlconn, "api.%s" % (method))
return m(*args)
def rebuildCache (self):
self.__sliceCache = SliceCache()
def getDeviceList (self):
self.log.debug("XMLRPC:ListDevices")
dl = self.xmlcall("listDevices")
dl.sort()
return dl
def getLinkList (self):
self.log.debug("XMLRPC:getLinks")
return self.xmlcall("getLinks")
def getFVVersion (self):
self.log.debug("XMLRPC:getFVVersion")
self.fvversion = self.xmlcall("ping", "")
return self.fvversion
def getDevicePorts (self, dpid):
self.log.debug("XMLRPC:getDeviceInfo (%s)" % (dpid))
pinfoall = []
if "vertigo" in self.getFVVersion():
pinfoall = self.xmlcall("getVTPlannerPortInfo", dpid, "all")
portlist = []
dinfo = self.xmlcall("getDeviceInfo", dpid)
for portstr in dinfo["portNames"].split(","):
p = Port()
elems = portstr.split("(")
p.name = elems[0]
p.num = int(elems[1][:-1])
for pinfo in pinfoall:
pelems = pinfo.split(",")
if int(pelems[0]) == p.num:
p.features = pelems[2]
break
p.dpid = dpid
portlist.append(p)
return portlist
def getCombinedStats (self, slice_name):
return None
def deleteSlice (self, slice_name):
self.log.debug("XMLRPC:deleteSlice (%s)" % (slice_name))
self.xmlcall("deleteSlice", slice_name)
self.__sliceCache.delete(slice_name)
def sliceExists (self, slice_name):
return self.__sliceCache.exists(slice_name)
def getSliceInfo (self, slice_name):
self.log.debug("XMLRPC:getSliceInfo (%s)" % (slice_name))
try:
sinfo = self.xmlcall("getSliceInfo", slice_name)
return sinfo
    except Exception:
return None
def createSlice (self, slice_name, controller, email):
self.log.debug("XMLRPC:createSlice (%s, %s, %s)" % (slice_name, controller, email))
self.xmlcall("createSlice", slice_name, self.__passwd, controller, email)
self.__sliceCache.add(slice_name)
def changeFlowspace (self, opslist):
self.log.debug("XMLRPC:changeFlowSpace")
self.xmlcall("changeFlowSpace", opslist)
def addVirtualLink (self, slice_name,action):
self.log.debug("XMLRPC:addVirtualLink")
self.xmlcall("addLink", slice_name,action)
def updateInfo (self, key, value):
if key == "flowvisor.hostname":
self.__host = value
elif key == "flowvisor.passwd":
self.__passwd = value
elif key == "flowvisor.json-port":
self.__jsonport = value
elif key == "flowvisor.xmlrpc-port":
self.__xmlport = value
elif key == "flowvisor.record-rpc-timing":
v = coerceBool(key, value)
if v:
self.xmlcall = self._xmlcall_time
else:
self.xmlcall = self._xmlcall_fast
self.buildConnections()
return value
def updateInfo (key, value):
return Connection.updateInfo(key, value)
class FSAllocation(foam.core.allocation.Allocation):
def __init__ (self):
super(FSAllocation, self).__init__()
self._groups = {}
self._flowspecs = []
self._virtuallinks = []
self._controllers = []
def __str__ (self):
x = super(FSAllocation, self).__str__()
return "<FV:Allocation:\n Controllers:\n%s\n Groups:\n%s\n Flowspace:\n%s\n VirtualLinks:\n%s>\n" % (
"\n ".join([str(x) for x in self._controllers]),
"\n ".join(["%s: %s" % (k, "\n ".join(str(x) for x in v)) for k,v in self._groups.iteritems()]),
"\n ".join([str(x) for x in self._flowspecs]),
"\n ".join([str(x) for x in self._virtuallinks]))
def getDataDict (self, detail = True):
obj = super(FSAllocation, self).getDataDict(detail)
if detail:
dobj = {"controllers" : [x.__json__() for x in self._controllers],
"flowspace rules" : len(self.generateFlowEntries())}
obj.update(dobj)
return obj
def getGroups (self):
return self._groups
def getFlowspecs (self):
return self._flowspecs
def getVirtualLinks (self):
return self._virtuallinks
def getControllers (self):
return self._controllers
def addGroup (self, name, dplist = []):
if name is None:
raise NoGroupName()
if self._groups.has_key(name):
raise GroupNameAlreadyUsed(name)
self._groups[name] = dplist
def addDatapathToGroup (self, gname, dp):
self._groups.setdefault(gname, []).append(dp)
def addController (self, controller):
self._controllers.append(controller)
def addFlowSpec (self, fs):
self._flowspecs.append(fs)
def addVirtualLink (self, vl):
self._virtuallinks.append(vl)
def getGroupDatapaths (self, gname):
return self._groups[gname]
def validate (self):
cs = [x for x in self._controllers if x.type == "primary"]
if len(cs) == 0:
raise NoPrimaryController()
elif len(cs) > 1:
raise TooManyPrimaryControllers()
def createSlice (self):
cs = [x for x in self._controllers if x.type == "primary"]
if len(cs) == 0:
raise NoPrimaryController()
Connection.createSlice(str(self.getUUID()), cs[0].url, self.getEmail())
def generateFlowEntries (self, priority=100):
entries = []
for fs in self._flowspecs:
entries.extend(fs.generateFlowEntries(priority, self._groups.get(None, [])))
return entries
def insertFlowspace (self, priority):
flowspace = self.generateFlowEntries(priority)
action = "Slice:%s=4" % (self.getUUID())
ops = []
for entry in flowspace:
match = entry[2]
if match == "any":
match = "OFMatch[]"
ops.append({"operation" : "ADD", "dpid" : entry[0], "priority" : str(entry[1]),
"match" : match, "actions" : action})
Connection.changeFlowspace(ops)
def generateVLinkEntries (self):
entries = []
for fs in self._virtuallinks:
entries.extend(fs.generateVLinkEntries())
return entries
def insertVirtualLink (self):
vlinks = self.generateVLinkEntries()
slicename = "%s" % (self.getUUID())
for action in vlinks:
Connection.addVirtualLink(slicename,action)
from foam.core.configdb import ConfigDB, ConfigItem, coerceBool
citems = []
citems.append(ConfigItem().setKey("flowvisor.hostname").setValue(None)
.setDesc("Flowvisor hostname or IP address")
.setUpdateFuncName("foam.flowvisor.updateInfo"))
citems.append(ConfigItem().setKey("flowvisor.json-port").setValue(8081)
.setDesc("Flowvisor JSON RPC port")
.setUpdateFuncName("foam.flowvisor.updateInfo"))
citems.append(ConfigItem().setKey("flowvisor.xmlrpc-port").setValue(8080)
.setDesc("Flowvisor XMLRPC port")
.setUpdateFuncName("foam.flowvisor.updateInfo"))
citems.append(ConfigItem().setKey("flowvisor.passwd").setValue(None)
.setDesc("Flowvisor fvadmin password")
.setUpdateFuncName("foam.flowvisor.updateInfo"))
citems.append(ConfigItem().setKey("flowvisor.record-rpc-timing").setValue(True)
.setDesc("Record timing info for FlowVisor RPC calls")
.setUpdateFuncName("foam.flowvisor.updateInfo"))
[ConfigDB.installConfigItem(x) for x in citems]
del citems
Connection = _Connection()
|
|
import pickle
from flatdict import FlatDict
from openpnm.utils import NestedDict, sanitize_dict, Workspace
from openpnm.utils import logging
from openpnm.io import GenericIO
logger = logging.getLogger(__name__)
ws = Workspace()
class Dict(GenericIO):
r"""
Generates hierarchical ``dicts`` with a high degree of control over the
structure.
This is the most important class in the ``io`` module, since many other
classes use this to manipulate and format the data structures.
Also, it is possible to use Python's ``pickle`` module to save ``dicts``
to file.
"""
@classmethod
def from_dict(cls, dct, project=None, delim=' | '):
r"""
This method converts a correctly formatted dictionary into OpenPNM
objects, and returns a handle to the *project* containing them.
Parameters
----------
dct : dictionary
The Python dictionary containing the data. The nesting and
labeling of the dictionary is used to create the appropriate
OpenPNM objects.
project : OpenPNM Project Object
The project with which the created objects should be associated.
If not supplied, one will be created.
Returns
-------
An OpenPNM Project containing the objects created to store the given
data.
Notes
-----
The requirement of a *correctly formed* dictionary is rather strict,
and essentially means a dictionary produced by the ``to_dict`` method
of this class.
"""
if project is None:
project = ws.new_project()
# Uncategorize pore/throat and labels/properties, if present
fd = FlatDict(dct, delimiter=delim)
# If . is the delimiter, replace with | otherwise things break
if delim == '.':
delim = ' | '
for key in list(fd.keys()):
new_key = key.replace('.', delim)
fd[new_key] = fd.pop(key)
d = FlatDict(delimiter=delim)
for key in list(fd.keys()):
new_key = key.replace('pore' + delim, 'pore.')
new_key = new_key.replace('throat' + delim, 'throat.')
new_key = new_key.replace('labels' + delim, '')
new_key = new_key.replace('properties' + delim, '')
d[new_key] = fd.pop(key)
        # Place data into correctly categorized dicts, for later handling
objs = {'network': NestedDict(),
'geometry': NestedDict(),
'physics': NestedDict(),
'phase': NestedDict(),
'algorithm': NestedDict(),
'base': NestedDict()}
for item in d.keys():
path = item.split(delim)
if len(path) > 2:
if path[-3] in objs.keys():
# Item is categorized by type, so note it
objs[path[-3]][path[-2]][path[-1]] = d[item]
else:
# item is nested, not categorized; make it a base
objs['base'][path[-2]][path[-1]] = d[item]
else:
# If not categorized by type, make it a base
objs['base'][path[-2]][path[-1]] = d[item]
# Convert to OpenPNM Objects, attempting to infer type
for objtype in objs.keys():
for name in objs[objtype].keys():
# Create empty object, using dummy name to avoid error
obj = project._new_object(objtype=objtype, name='')
# Overwrite name
obj._set_name(name=name, validate=False)
# Update new object with data from dict
obj.update(objs[objtype][name])
return project
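    # A minimal usage sketch (illustrative; assumes an existing network `pn`
    # and phase `water`): round-trip a project through a plain dictionary.
    #
    #   dct = Dict.to_dict(network=pn, phases=[water])
    #   proj = Dict.from_dict(dct)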
@classmethod
def to_dict(cls, network=None, phases=[], element=['pore', 'throat'],
interleave=True, flatten=True, categorize_by=[]):
r"""
Returns a single dictionary object containing data from the given
OpenPNM objects, with the keys organized differently depending on
optional arguments.
Parameters
----------
network : OpenPNM Network Object (optional)
The network containing the desired data
phases : list of OpenPNM Phase Objects (optional, default is none)
A list of phase objects whose data are to be included
element : string or list of strings
An indication of whether 'pore' and/or 'throat' data are desired.
The default is both.
interleave : boolean (default is ``True``)
When ``True`` (default) the data from all Geometry objects (and
Physics objects if ``phases`` are given) is interleaved into
a single array and stored as a network property (or Phase
property for Physics data). When ``False``, the data for each
object are stored under their own dictionary key, the structuring
of which depends on the value of the ``flatten`` argument.
flatten : boolean (default is ``True``)
When ``True``, all objects are accessible from the top level
of the dictionary. When ``False`` objects are nested under their
parent object. If ``interleave`` is ``True`` this argument is
ignored.
categorize_by : string or list of strings
Indicates how the dictionaries should be organized. The list can
contain any, all or none of the following strings:
**'object'** : If specified the dictionary keys will be stored
under a general level corresponding to their type (e.g.
'network/net_01/pore.all'). If ``interleave`` is ``True`` then
            the only categories are *network* and *phase*, since
*geometry* and *physics* data get stored under their respective
*network* and *phase*.
**'data'** : If specified the data arrays are additionally
categorized by ``label`` and ``property`` to separate *boolean*
from *numeric* data.
**'element'** : If specified the data arrays are
additionally categorized by ``pore`` and ``throat``, meaning
that the propnames are no longer prepended by a 'pore.' or
'throat.'
Returns
-------
A dictionary with the data stored in a hierarchical data structure, the
actual format of which depends on the arguments to the function.
Notes
-----
There is a handy package called *flatdict* that can be used to
access this dictionary using a single key such that:
``d[level_1][level_2] == d[level_1/level_2]``
        Importantly, converting to a *flatdict* allows it to be converted to an
*HDF5* file directly, since the hierarchy is dictated by the placement
of '/' characters.
"""
project, network, phases = cls._parse_args(network=network,
phases=phases)
delim = ' | '
d = NestedDict(delimiter=delim)
def build_path(obj, key):
propname = delim + key
prefix = 'root'
datatype = ''
arr = obj[key]
if 'object' in categorize_by:
prefix = obj._isa()
if 'element' in categorize_by:
propname = delim + key.replace('.', delim)
if 'data' in categorize_by:
if arr.dtype == bool:
datatype = delim + 'labels'
else:
datatype = delim + 'properties'
path = prefix + delim + obj.name + datatype + propname
return path
for net in network:
for key in net.keys(element=element, mode='all'):
path = build_path(obj=net, key=key)
d[path] = net[key]
for geo in project.geometries().values():
for key in geo.keys(element=element, mode='all'):
if interleave:
path = build_path(obj=net, key=key)
d[path] = net[key]
else:
path = build_path(obj=geo, key=key)
if flatten:
d[path] = geo[key]
elif 'object' in categorize_by:
path = path.split(delim)
path.insert(0, 'network')
path.insert(1, net.name)
path = delim.join(path)
else:
path = path.split(delim)
path.insert(1, net.name)
path = delim.join(path)
d[path] = geo[key]
for phase in phases:
for key in phase.keys(element=element, mode='all'):
path = build_path(obj=phase, key=key)
d[path] = phase[key]
for phys in project.find_physics(phase=phase):
if phys:
for key in phys.keys(element=element, mode='all'):
if interleave:
path = build_path(obj=phase, key=key)
d[path] = phase[key]
else:
path = build_path(obj=phys, key=key)
if flatten:
d[path] = phys[key]
elif 'object' in categorize_by:
path = path.split(delim)
path.insert(0, 'phase')
path.insert(1, phase.name)
path = delim.join(path)
else:
path = path.split(delim)
path.insert(1, phase.name)
path = delim.join(path)
d[path] = phys[key]
if 'root' in d.keys():
d = d['root']
if 'project' in categorize_by:
new_d = NestedDict()
new_d[project.name] = d
d = new_d
return d
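    # A minimal sketch of how ``categorize_by`` shapes the keys (illustrative;
    # 'net_01' stands in for the network's name):
    #
    #   Dict.to_dict(network=pn)
    #   # -> {'net_01': {'pore.all': <array>, ...}}
    #   Dict.to_dict(network=pn, categorize_by=['object', 'data'])
    #   # -> {'network': {'net_01': {'labels': {...}, 'properties': {...}}}}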
@classmethod
def save(cls, dct, filename):
r"""
Saves data from the given dictionary into the specified file.
Parameters
----------
dct : dictionary
A dictionary to save to file, presumably obtained from the
``to_dict`` method of this class.
filename : string or path object
The filename to store the dictionary.
"""
fname = cls._parse_filename(filename=filename, ext='dct')
dct = sanitize_dict(dct)
with open(fname, 'wb') as f:
pickle.dump(dct, f)
@classmethod
def load(cls, filename):
r"""
Load data from the specified file into a Python dictionary
Parameters
----------
filename : string
The path to the file to be opened
Notes
-----
This returns a Python dictionary which can be converted into OpenPNM
objects using the ``from_dict`` method of this class.
"""
fname = cls._parse_filename(filename)
with open(fname, 'rb') as f:
dct = pickle.load(f)
return dct
|
|
#!/usr/bin/env python
#
# esp-idf serial output monitor tool. Does some helpful things:
# - Looks up hex addresses in ELF file with addr2line
# - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R)
# - Run "make flash" (Ctrl-T Ctrl-F)
# - Run "make app-flash" (Ctrl-T Ctrl-A)
# - If gdbstub output is detected, gdb is automatically loaded
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains elements taken from miniterm "Very simple serial terminal" which
# is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <[email protected]>
#
# Originally released under BSD-3-Clause license.
#
from __future__ import print_function, division
import subprocess
import argparse
import codecs
import re
import os
try:
import queue
except ImportError:
import Queue as queue
import time
import sys
import serial
import serial.tools.miniterm as miniterm
import threading
import ctypes
import types
from distutils.version import StrictVersion
key_description = miniterm.key_description
# Control-key characters
CTRL_A = '\x01'
CTRL_B = '\x02'
CTRL_F = '\x06'
CTRL_H = '\x08'
CTRL_R = '\x12'
CTRL_T = '\x14'
CTRL_RBRACKET = '\x1d' # Ctrl+]
# ANSI terminal codes
ANSI_RED = '\033[1;31m'
ANSI_YELLOW = '\033[0;33m'
ANSI_NORMAL = '\033[0m'
def color_print(message, color):
""" Print a message to stderr with colored highlighting """
sys.stderr.write("%s%s%s\n" % (color, message, ANSI_NORMAL))
def yellow_print(message):
color_print(message, ANSI_YELLOW)
def red_print(message):
color_print(message, ANSI_RED)
__version__ = "1.0"
# Tags for tuples in queues
TAG_KEY = 0
TAG_SERIAL = 1
# regex matches a potential PC value (0x4xxxxxxx)
MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)
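# e.g. '0x400d1c2a' would match, '0x3ffb0000' would not (illustrative addresses)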
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
class StoppableThread(object):
"""
Provide a Thread-like class which can be 'cancelled' via a subclass-provided
cancellation method.
Can be started and stopped multiple times.
Isn't an instance of type Thread because Python Thread objects can only be run once
"""
def __init__(self):
self._thread = None
@property
def alive(self):
"""
Is 'alive' whenever the internal thread object exists
"""
return self._thread is not None
def start(self):
if self._thread is None:
self._thread = threading.Thread(target=self._run_outer)
self._thread.start()
def _cancel(self):
pass # override to provide cancellation functionality
def run(self):
pass # override for the main thread behaviour
def _run_outer(self):
try:
self.run()
finally:
self._thread = None
def stop(self):
if self._thread is not None:
old_thread = self._thread
self._thread = None
self._cancel()
old_thread.join()
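# A minimal sketch of a StoppableThread subclass (illustrative):
#
#   class Ticker(StoppableThread):
#       def run(self):
#           while self.alive:            # cleared by stop() before join()
#               time.sleep(1)
#       def _cancel(self):
#           pass   # nothing blocking to interrupt; the alive flag is enough
#
#   t = Ticker()
#   t.start()   # spawns the worker thread
#   t.stop()    # waits (here up to ~1s) for run() to notice and return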
class ConsoleReader(StoppableThread):
""" Read input keys from the console and push them to the queue,
until stopped.
"""
def __init__(self, console, event_queue):
super(ConsoleReader, self).__init__()
self.console = console
self.event_queue = event_queue
def run(self):
self.console.setup()
try:
while self.alive:
try:
if os.name == 'nt':
# Windows kludge: because the console.cancel() method doesn't
# seem to work to unblock getkey() on the Windows implementation.
#
# So we only call getkey() if we know there's a key waiting for us.
import msvcrt
while not msvcrt.kbhit() and self.alive:
time.sleep(0.1)
if not self.alive:
break
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if c is not None:
self.event_queue.put((TAG_KEY, c), False)
finally:
self.console.cleanup()
def _cancel(self):
if os.name == 'posix':
# this is the way cancel() is implemented in pyserial 3.3 or newer,
# older pyserial (3.1+) has cancellation implemented via 'select',
# which does not work when console sends an escape sequence response
#
# even older pyserial (<3.1) does not have this method
#
# on Windows there is a different (also hacky) fix, applied above.
#
# note that TIOCSTI is not implemented in WSL / bash-on-Windows.
# TODO: introduce some workaround to make it work there.
import fcntl, termios
fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0')
class SerialReader(StoppableThread):
""" Read serial data from the serial port and push to the
event queue, until stopped.
"""
def __init__(self, serial, event_queue):
super(SerialReader, self).__init__()
self.baud = serial.baudrate
self.serial = serial
self.event_queue = event_queue
if not hasattr(self.serial, 'cancel_read'):
# enable timeout for checking alive flag,
# if cancel_read not available
self.serial.timeout = 0.25
def run(self):
if not self.serial.is_open:
self.serial.baudrate = self.baud
self.serial.rts = True # Force an RTS reset on open
self.serial.open()
self.serial.rts = False
try:
while self.alive:
data = self.serial.read(self.serial.in_waiting or 1)
if len(data):
self.event_queue.put((TAG_SERIAL, data), False)
finally:
self.serial.close()
def _cancel(self):
if hasattr(self.serial, 'cancel_read'):
try:
self.serial.cancel_read()
except:
pass
class Monitor(object):
"""
Monitor application main class.
This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this
purpose.
Main difference is that all event processing happens in the main thread, not the worker threads.
"""
def __init__(self, serial_instance, elf_file, make="make", toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol="CRLF"):
super(Monitor, self).__init__()
self.event_queue = queue.Queue()
self.console = miniterm.Console()
if os.name == 'nt':
sys.stderr = ANSIColorConverter(sys.stderr)
self.console.output = ANSIColorConverter(self.console.output)
self.console.byte_output = ANSIColorConverter(self.console.byte_output)
if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'):
# Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above)
def getkey_patched(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
self.console.getkey = types.MethodType(getkey_patched, self.console)
self.serial = serial_instance
self.console_reader = ConsoleReader(self.console, self.event_queue)
self.serial_reader = SerialReader(self.serial, self.event_queue)
self.elf_file = elf_file
self.make = make
self.toolchain_prefix = toolchain_prefix
self.menu_key = CTRL_T
self.exit_key = CTRL_RBRACKET
self.translate_eol = {
"CRLF": lambda c: c.replace(b"\n", b"\r\n"),
"CR": lambda c: c.replace(b"\n", b"\r"),
"LF": lambda c: c.replace(b"\r", b"\n"),
}[eol]
# internal state
self._pressed_menu_key = False
self._read_line = b""
self._gdb_buffer = b""
def main_loop(self):
self.console_reader.start()
self.serial_reader.start()
try:
while self.console_reader.alive and self.serial_reader.alive:
(event_tag, data) = self.event_queue.get()
if event_tag == TAG_KEY:
self.handle_key(data)
elif event_tag == TAG_SERIAL:
self.handle_serial_input(data)
else:
raise RuntimeError("Bad event data %r" % ((event_tag,data),))
finally:
try:
self.console_reader.stop()
self.serial_reader.stop()
except:
pass
sys.stderr.write(ANSI_NORMAL + "\n")
def handle_key(self, key):
if self._pressed_menu_key:
self.handle_menu_key(key)
self._pressed_menu_key = False
elif key == self.menu_key:
self._pressed_menu_key = True
elif key == self.exit_key:
self.console_reader.stop()
self.serial_reader.stop()
else:
try:
key = self.translate_eol(key)
self.serial.write(codecs.encode(key))
except serial.SerialException:
pass # this shouldn't happen, but sometimes port has closed in serial thread
def handle_serial_input(self, data):
# this may need to be made more efficient, as it pushes out a byte
# at a time to the console
for b in data:
self.console.write_bytes(b)
if b == b'\n': # end of line
self.handle_serial_input_line(self._read_line.strip())
self._read_line = b""
else:
self._read_line += b
self.check_gdbstub_trigger(b)
def handle_serial_input_line(self, line):
for m in re.finditer(MATCH_PCADDR, line):
self.lookup_pc_address(m.group())
def handle_menu_key(self, c):
if c == self.exit_key or c == self.menu_key: # send verbatim
self.serial.write(codecs.encode(c))
elif c in [ CTRL_H, 'h', 'H', '?' ]:
red_print(self.get_help_text())
elif c == CTRL_R: # Reset device via RTS
self.serial.setRTS(True)
time.sleep(0.2)
self.serial.setRTS(False)
elif c == CTRL_F: # Recompile & upload
self.run_make("flash")
elif c == CTRL_A: # Recompile & upload app only
self.run_make("app-flash")
else:
red_print('--- unknown menu character {} --'.format(key_description(c)))
def get_help_text(self):
return """
--- idf_monitor ({version}) - ESP-IDF monitor tool
--- based on miniterm from pySerial
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {reset:7} Reset target board via RTS line
--- {make:7} Run 'make flash' to build & flash
--- {appmake:7} Run 'make app-flash' to build & flash app
""".format(version=__version__,
exit=key_description(self.exit_key),
menu=key_description(self.menu_key),
reset=key_description(CTRL_R),
make=key_description(CTRL_F),
appmake=key_description(CTRL_A),
)
def __enter__(self):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.serial_reader.stop()
self.console_reader.stop()
def __exit__(self, *args, **kwargs):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.console_reader.start()
self.serial_reader.start()
def prompt_next_action(self, reason):
self.console.setup() # set up console to trap input characters
try:
red_print("""
--- {}
--- Press {} to exit monitor.
--- Press {} to run 'make flash'.
--- Press {} to run 'make app-flash'.
--- Press any other key to resume monitor (resets target).""".format(reason,
key_description(self.exit_key),
key_description(CTRL_F),
key_description(CTRL_A)))
k = CTRL_T # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.
while k == CTRL_T:
k = self.console.getkey()
finally:
self.console.cleanup()
if k == self.exit_key:
self.event_queue.put((TAG_KEY, k))
elif k in [ CTRL_F, CTRL_A ]:
self.event_queue.put((TAG_KEY, self.menu_key))
self.event_queue.put((TAG_KEY, k))
def run_make(self, target):
with self:
yellow_print("Running make %s..." % target)
p = subprocess.Popen([self.make,
target ])
try:
p.wait()
except KeyboardInterrupt:
p.wait()
if p.returncode != 0:
self.prompt_next_action("Build failed")
def lookup_pc_address(self, pc_addr):
translation = subprocess.check_output(
["%saddr2line" % self.toolchain_prefix,
"-pfia", "-e", self.elf_file, pc_addr],
cwd=".")
if not "?? ??:0" in translation:
yellow_print(translation)
def check_gdbstub_trigger(self, c):
self._gdb_buffer = self._gdb_buffer[-6:] + c # keep the last 7 characters seen
m = re.match(b"\\$(T..)#(..)", self._gdb_buffer) # look for a gdb "reason" for a break
if m is not None:
try:
chsum = sum(ord(p) for p in m.group(1)) & 0xFF
calc_chsum = int(m.group(2), 16)
except ValueError:
return # payload wasn't valid hex digits
if chsum == calc_chsum:
self.run_gdb()
else:
red_print("Malformed gdb message... calculated checksum %02x received %02x" % (chsum, calc_chsum))
def run_gdb(self):
with self: # disable console control
sys.stderr.write(ANSI_NORMAL)
try:
subprocess.call(["%sgdb" % self.toolchain_prefix,
"-ex", "set serial baud %d" % self.serial.baudrate,
"-ex", "target remote %s" % self.serial.port,
"-ex", "interrupt", # monitor has already parsed the first 'reason' command, need a second
self.elf_file], cwd=".")
except KeyboardInterrupt:
pass # happens on Windows, maybe other OSes
self.prompt_next_action("gdb exited")
def main():
parser = argparse.ArgumentParser("idf_monitor - a serial output monitor for esp-idf")
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', '/dev/ttyUSB0')
)
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=int,
default=os.environ.get('MONITOR_BAUD', 115200))
parser.add_argument(
'--make', '-m',
help='Command to run make',
type=str, default='make')
parser.add_argument(
'--toolchain-prefix',
help="Triplet prefix to add before cross-toolchain names",
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="End of line to use when sending to the serial port",
default='CR')
parser.add_argument(
'elf_file', help='ELF file of application',
type=argparse.FileType('rb'))
args = parser.parse_args()
if args.port.startswith("/dev/tty."):
args.port = args.port.replace("/dev/tty.", "/dev/cu.")
yellow_print("--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.")
yellow_print("--- Using %s instead..." % args.port)
serial_instance = serial.serial_for_url(args.port, args.baud,
do_not_open=True)
serial_instance.dtr = False
serial_instance.rts = False
args.elf_file.close() # don't need this as a file
# remove the parallel jobserver arguments from MAKEFLAGS, as any
# parent make is only running 1 job (monitor), so we can re-spawn
# all of the child makes we need (the -j argument remains part of
# MAKEFLAGS)
try:
makeflags = os.environ["MAKEFLAGS"]
makeflags = re.sub(r"--jobserver[^ =]*=[0-9,]+ ?", "", makeflags)
os.environ["MAKEFLAGS"] = makeflags
except KeyError:
pass # not running a make jobserver
monitor = Monitor(serial_instance, args.elf_file.name, args.make, args.toolchain_prefix, args.eol)
yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(
p=serial_instance))
yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
key_description(monitor.exit_key),
key_description(monitor.menu_key),
key_description(monitor.menu_key),
key_description(CTRL_H)))
monitor.main_loop()
if os.name == 'nt':
# Windows console stuff
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h values
FOREGROUND_INTENSITY = 8
FOREGROUND_GREY = 7
# matches the ANSI color change sequences that IDF sends
RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m')
# list mapping the 8 ANSI colors (the indexes) to Windows Console colors
ANSI_TO_WINDOWS_COLOR = [ 0, 4, 2, 6, 1, 5, 3, 7 ]
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
class ANSIColorConverter(object):
"""Class to wrap a file-like output stream, intercept ANSI color codes,
and convert them into calls to Windows SetConsoleTextAttribute.
Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses.
Ironically, in Windows this console output is normally wrapped by winpty which will then detect the console text
color changes and convert these back to ANSI color codes for MSYS' terminal to display. However this is the
least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output.
"""
def __init__(self, output):
self.output = output
self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE)
self.matched = b''
def write(self, data):
for b in data:
l = len(self.matched)
if b == '\033': # ESC
self.matched = b
elif (l == 1 and b == '[') or (1 < l < 7):
self.matched += b
if self.matched == ANSI_NORMAL: # reset console
SetConsoleTextAttribute(self.handle, FOREGROUND_GREY)
self.matched = b''
elif len(self.matched) == 7: # could be an ANSI sequence
m = re.match(RE_ANSI_COLOR, self.matched)
if m is not None:
color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))]
if m.group(1) == b'1':
color |= FOREGROUND_INTENSITY
SetConsoleTextAttribute(self.handle, color)
else:
self.output.write(self.matched) # not an ANSI color code, display verbatim
self.matched = b''
else:
self.output.write(b)
self.matched = b''
def flush(self):
self.output.flush()
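# A standalone sketch of the translation ANSIColorConverter.write performs above:
# pull the bold flag and the color index out of one ESC[<0|1>;3<0-7>m sequence and
# map it to a Windows console attribute. The helper name and local constants are
# illustrative; they mirror RE_ANSI_COLOR, ANSI_TO_WINDOWS_COLOR and
# FOREGROUND_INTENSITY defined in the Windows-only block above.
def _ansi_to_windows_attribute_sketch(sequence):
    import re  # already imported at the top of this file; repeated so the sketch stands alone
    ansi_color = re.compile(b'\033\\[([01]);3([0-7])m')  # same pattern as RE_ANSI_COLOR
    to_windows = [0, 4, 2, 6, 1, 5, 3, 7]                # same table as ANSI_TO_WINDOWS_COLOR
    intensity = 8                                        # FOREGROUND_INTENSITY
    m = ansi_color.match(sequence)
    if m is None:
        return None  # not a sequence the converter recognises
    attr = to_windows[int(m.group(2))]
    if m.group(1) == b'1':  # "bold" maps to a brighter foreground color
        attr |= intensity
    return attr

# e.g. _ansi_to_windows_attribute_sketch(b'\033[1;31m') -> 12 (red | intensity)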
if __name__ == "__main__":
main()
|
|
import matplotlib.pyplot as plt
from matplotlib import dates
import numpy as np
import os
import sys
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import copy
import calendar
import mysql.connector
timezone = -8
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
#select data (spikes and fire times already removed)
SP2_data_query = ('SELECT UNIX_UTC_6h_midtime, meas_mean_mass_conc, meas_rel_err, GC_v10_default, GC_default_rel_err, cluster,cluster_number FROM whi_gc_and_sp2_6h_mass_concs WHERE RH_threshold = 90 ORDER BY UNIX_UTC_6h_midtime')
cursor.execute(SP2_data_query)
raw_data = cursor.fetchall()
SP2_6h_NPac = []
SP2_6h_SPac = []
SP2_6h_Cont = []
SP2_6h_LRT = []
SP2_6h_BB = []
GC2009_BC_concs_d = {}
GC2010_BC_concs_d = {}
GC2012_BC_concs_d = {}
for row in raw_data:
UTC_ts = row[0]
PST_date_time = datetime.utcfromtimestamp(UTC_ts) + timedelta(hours = timezone)
meas_mass_conc = float(row[1])
meas_rel_err = float(row[2])
meas_abs_err = meas_rel_err*meas_mass_conc
GC_mass_conc = row[3]
GC_rel_err = 0#row[4]
GC_abs_err = GC_rel_err*GC_mass_conc
cluster = row[5]
ratio = GC_mass_conc/meas_mass_conc
ratio_abs_err = (meas_rel_err + GC_rel_err)*ratio
cluster_number = row[6]
if cluster == 'NPac':# and cluster_number ==3:
SP2_6h_NPac.append([PST_date_time,meas_mass_conc,meas_abs_err,ratio,ratio_abs_err])
if cluster == 'SPac':
SP2_6h_SPac.append([PST_date_time,meas_mass_conc,meas_abs_err,ratio,ratio_abs_err])
if cluster == 'Cont':
SP2_6h_Cont.append([PST_date_time,meas_mass_conc,meas_abs_err,ratio,ratio_abs_err])
if cluster == 'GBPS':
SP2_6h_SPac.append([PST_date_time,meas_mass_conc,meas_abs_err,ratio,ratio_abs_err])
if cluster == 'LRT':
SP2_6h_LRT.append([PST_date_time,meas_mass_conc,meas_abs_err,ratio,ratio_abs_err])
if cluster == 'BB':# and cluster_number ==3:
SP2_6h_BB.append([PST_date_time,meas_mass_conc,meas_abs_err,ratio,ratio_abs_err])
if PST_date_time.year == 2009:
GC2009_BC_concs_d[PST_date_time]=[GC_mass_conc,GC_abs_err]
if PST_date_time.year == 2010:
GC2010_BC_concs_d[PST_date_time]=[GC_mass_conc,GC_abs_err]
if PST_date_time.year == 2012:
GC2012_BC_concs_d[PST_date_time]=[GC_mass_conc,GC_abs_err]
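# The loop above propagates uncertainty into the GEOS-Chem/measurement ratio by
# adding the two relative errors and scaling by the ratio itself (a first-order,
# worst-case estimate for a quotient). A minimal standalone sketch of that step;
# the function name and the numbers in the example comment are made up for
# illustration only.
def ratio_with_error(gc_conc, meas_conc, gc_rel_err, meas_rel_err):
    ratio = gc_conc / meas_conc
    abs_err = (meas_rel_err + gc_rel_err) * ratio  # relative errors add for a quotient
    return ratio, abs_err

# e.g. ratio_with_error(50.0, 100.0, 0.0, 0.1) -> (0.5, 0.05)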
for dict in [GC2009_BC_concs_d,GC2010_BC_concs_d,GC2012_BC_concs_d]:
if dict == GC2009_BC_concs_d:
working_date = datetime.strptime('20090628', '%Y%m%d')
end_date = datetime.strptime('20090816', '%Y%m%d')
if dict == GC2010_BC_concs_d:
working_date = datetime.strptime('20100610', '%Y%m%d')
end_date = datetime.strptime('20100726', '%Y%m%d')
if dict == GC2012_BC_concs_d:
working_date = datetime.strptime('20120405', '%Y%m%d')
end_date = datetime.strptime('20120531', '%Y%m%d')
while working_date <= end_date:
date5 = datetime(working_date.year, working_date.month, working_date.day, 5)
date23 = datetime(working_date.year, working_date.month, working_date.day, 23)
if date5 not in dict:
dict[date5] = [np.nan,np.nan]
if date23 not in dict:
dict[date23] = [np.nan,np.nan]
working_date = working_date + timedelta(days=1)
GC2009_BC_concs = []
GC2010_BC_concs = []
GC2012_BC_concs = []
for date, mass_data in GC2009_BC_concs_d.iteritems():
mass_conc = mass_data[0]
neg_yerr = mass_data[1]
GC2009_BC_concs.append([date,mass_conc, neg_yerr])
for date, mass_data in GC2010_BC_concs_d.iteritems():
mass_conc = mass_data[0]
neg_yerr = mass_data[1]
GC2010_BC_concs.append([date,mass_conc, neg_yerr])
for date, mass_data in GC2012_BC_concs_d.iteritems():
mass_conc = mass_data[0]
neg_yerr = mass_data[1]
GC2012_BC_concs.append([date,mass_conc, neg_yerr])
GC2009_BC_concs.sort()
GC2010_BC_concs.sort()
GC2012_BC_concs.sort()
####################plotting
SP2_6h_NPac_date = [dates.date2num(row[0]) for row in SP2_6h_NPac]
SP2_6h_NPac_mass_conc = [row[1] for row in SP2_6h_NPac]
SP2_6h_NPac_abs_err = [row[2] for row in SP2_6h_NPac]
SP2_6h_SPac_date = [dates.date2num(row[0]) for row in SP2_6h_SPac]
SP2_6h_SPac_mass_conc = [row[1] for row in SP2_6h_SPac]
SP2_6h_SPac_abs_err = [row[2] for row in SP2_6h_SPac]
SP2_6h_Cont_date = [dates.date2num(row[0]) for row in SP2_6h_Cont]
SP2_6h_Cont_mass_conc = [row[1] for row in SP2_6h_Cont]
SP2_6h_Cont_abs_err = [row[2] for row in SP2_6h_Cont]
SP2_6h_LRT_date = [dates.date2num(row[0]) for row in SP2_6h_LRT]
SP2_6h_LRT_mass_conc = [row[1] for row in SP2_6h_LRT]
SP2_6h_LRT_abs_err = [row[2] for row in SP2_6h_LRT]
SP2_6h_BB_date = [dates.date2num(row[0]) for row in SP2_6h_BB]
SP2_6h_BB_mass_conc = [row[1] for row in SP2_6h_BB]
SP2_6h_BB_abs_err = [row[2] for row in SP2_6h_BB]
ratio_dates_NPac = [dates.date2num(row[0]) for row in SP2_6h_NPac]
ratio_mass_conc_NPac = [row[3] for row in SP2_6h_NPac]
ratio_err_NPac = [row[4] for row in SP2_6h_NPac]
ratio_dates_SPac = [dates.date2num(row[0]) for row in SP2_6h_SPac]
ratio_mass_conc_SPac = [row[3] for row in SP2_6h_SPac]
ratio_err_SPac = [row[4] for row in SP2_6h_SPac]
ratio_dates_Cont = [dates.date2num(row[0]) for row in SP2_6h_Cont]
ratio_mass_conc_Cont = [row[3] for row in SP2_6h_Cont]
ratio_err_Cont = [row[4] for row in SP2_6h_Cont]
ratio_dates_LRT = [dates.date2num(row[0]) for row in SP2_6h_LRT]
ratio_mass_conc_LRT = [row[3] for row in SP2_6h_LRT]
ratio_err_LRT = [row[4] for row in SP2_6h_LRT]
ratio_dates_BB = [dates.date2num(row[0]) for row in SP2_6h_BB]
ratio_mass_conc_BB = [row[3] for row in SP2_6h_BB]
ratio_err_BB = [row[4] for row in SP2_6h_BB]
newlist = []
ratio_dates_all = ratio_dates_NPac+ratio_dates_SPac+ratio_dates_Cont+ratio_dates_LRT
ratio_mass_conc_all = ratio_mass_conc_NPac+ratio_mass_conc_SPac+ratio_mass_conc_Cont+ratio_mass_conc_LRT
i = 0
for date in ratio_dates_all:
newlist.append([date,ratio_mass_conc_all[i]])
i+=1
newlist.sort()
all_dates = [row[0] for row in newlist ]
all_masses = [row[1] for row in newlist]
GC_6h_2009_date = [dates.date2num(row[0]) for row in GC2009_BC_concs]
GC_6h_2009_mass_conc = [row[1] for row in GC2009_BC_concs]
GC_6h_2009_neg_err = [row[2] for row in GC2009_BC_concs]
GC_6h_2009_pos_err = [row[2] for row in GC2009_BC_concs]
GC_6h_2010_date = [dates.date2num(row[0]) for row in GC2010_BC_concs]
GC_6h_2010_mass_conc = [row[1] for row in GC2010_BC_concs]
GC_6h_2010_neg_err = [row[2] for row in GC2010_BC_concs]
GC_6h_2010_pos_err = [row[2] for row in GC2010_BC_concs]
GC_6h_2012_date = [dates.date2num(row[0]) for row in GC2012_BC_concs]
GC_6h_2012_mass_conc = [row[1] for row in GC2012_BC_concs]
GC_6h_2012_neg_err = [row[2] for row in GC2012_BC_concs]
GC_6h_2012_pos_err = [row[2] for row in GC2012_BC_concs]
#fire times for plotting shaded areas
fire_span2_09s=datetime.strptime('2009/07/27', '%Y/%m/%d') #dates following Takahama et al (2011) doi:10.5194/acp-11-6367-2011
fire_span2_09f=datetime.strptime('2009/08/08', '%Y/%m/%d')
fire_span1_10s=datetime.strptime('2010/07/26 09:00', '%Y/%m/%d %H:%M') #jason's BC clear report
fire_span1_10f=datetime.strptime('2010/07/28 09:30', '%Y/%m/%d %H:%M')
fire_alpha = 0.15
fire_color = '#990000'
###################plotting#####################
fig = plt.figure(figsize=(12,12))
hfmt = dates.DateFormatter('%b')
#hfmt = dates.DateFormatter('%m-%d')
display_month_interval = 1
max_display_conc = 301
startdate_2009 = '2009/06/25'
enddate_2009 = '2009/08/20'
startdate_2010 = '2010/06/05'
enddate_2010 = '2010/08/04'
startdate_2012 = '2012/03/29'
enddate_2012 = '2012/06/05'
ax7 = plt.subplot2grid((6,3), (0,0), colspan=1,rowspan = 2)
ax8 = plt.subplot2grid((6,3), (0,1), colspan=1,rowspan = 2)
ax9 = plt.subplot2grid((6,3), (0,2), colspan=1,rowspan = 2)
ax13 = plt.subplot2grid((6,3), (2,0), colspan=1,rowspan = 2)
ax14 = plt.subplot2grid((6,3), (2,1), colspan=1,rowspan = 2)
ax15 = plt.subplot2grid((6,3), (2,2), colspan=1,rowspan = 2)
ax10 = plt.subplot2grid((6,3), (4,0), colspan=1,rowspan = 2)
ax11 = plt.subplot2grid((6,3), (4,1), colspan=1,rowspan = 2, sharey=ax10)
ax12 = plt.subplot2grid((6,3), (4,2), colspan=1,rowspan = 2, sharey=ax10)
#combo
ax7.plot(GC_6h_2009_date,GC_6h_2009_mass_conc,color = 'darkgrey', alpha = 1, marker = 'o', markeredgecolor='darkgrey')
ax7.errorbar(SP2_6h_NPac_date,SP2_6h_NPac_mass_conc,yerr = SP2_6h_NPac_abs_err, color='cyan', alpha = 1, fmt = '*')
ax7.errorbar(SP2_6h_SPac_date,SP2_6h_SPac_mass_conc,yerr = SP2_6h_SPac_abs_err, color='green', alpha = 1, fmt = 'o')
ax7.errorbar(SP2_6h_Cont_date,SP2_6h_Cont_mass_conc,yerr = SP2_6h_Cont_abs_err, color='magenta', alpha = 1, fmt = '>')
ax7.errorbar(SP2_6h_LRT_date,SP2_6h_LRT_mass_conc,yerr = SP2_6h_LRT_abs_err, color='blue', alpha = 1, fmt = 's')
ax7.errorbar(SP2_6h_BB_date,SP2_6h_BB_mass_conc,yerr = SP2_6h_BB_abs_err, color='grey', alpha = 1, fmt = '<')
ax7.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax7.xaxis.set_visible(False)
ax7.yaxis.set_visible(True)
ax7.set_ylabel('rBC mass concentration (ng/m3 - STP)')
ax7.set_ylim(0, 700)
ax7.set_xlim(dates.date2num(datetime.strptime(startdate_2009, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2009, '%Y/%m/%d')))
ax7.axvspan(dates.date2num(fire_span2_09s),dates.date2num(fire_span2_09f), facecolor=fire_color, alpha=fire_alpha)
ax7.text(0.1, 0.9,'2009', transform=ax7.transAxes)
ax8.plot(GC_6h_2010_date,GC_6h_2010_mass_conc, color = 'darkgrey', alpha = 1, marker = 'o', markeredgecolor='darkgrey')
ax8.errorbar(SP2_6h_NPac_date,SP2_6h_NPac_mass_conc,yerr = SP2_6h_NPac_abs_err, color='cyan', alpha = 1, fmt = '*', label = 'N. Pacific')
ax8.errorbar(SP2_6h_SPac_date,SP2_6h_SPac_mass_conc,yerr = SP2_6h_SPac_abs_err, color='green', alpha = 1, fmt = 'o', label = 'S. Pacific')
ax8.errorbar(SP2_6h_Cont_date,SP2_6h_Cont_mass_conc,yerr = SP2_6h_Cont_abs_err, color='magenta', alpha = 1, fmt = '>', label = 'N. Canada')
ax8.errorbar(SP2_6h_LRT_date,SP2_6h_LRT_mass_conc,yerr = SP2_6h_LRT_abs_err, color='blue', alpha = 1, fmt = 's', label = 'W. Pacific/Asia')
ax8.errorbar(SP2_6h_BB_date,SP2_6h_BB_mass_conc,yerr = SP2_6h_BB_abs_err, color='grey', alpha = 1, fmt = '<', label = 'local BB')
ax8.xaxis.set_major_formatter(hfmt)
ax8.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax8.xaxis.set_visible(False)
ax8.yaxis.set_visible(True)
ax8.yaxis.set_ticks(np.arange(0, max_display_conc, 100))
ax8.set_yticklabels([])
ax8.set_xlabel('month')
ax8.set_ylim(0, max_display_conc)
ax8.set_xlim(dates.date2num(datetime.strptime(startdate_2010, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2010, '%Y/%m/%d')))
ax8.axvspan(dates.date2num(fire_span1_10s),dates.date2num(fire_span1_10f), facecolor=fire_color, alpha=fire_alpha)
ax8.text(0.1, 0.9,'2010', transform=ax8.transAxes)
ax9.plot(GC_6h_2012_date,GC_6h_2012_mass_conc, color = 'darkgrey', alpha = 1, marker = 'o', markeredgecolor='darkgrey')
ax9.errorbar(SP2_6h_NPac_date,SP2_6h_NPac_mass_conc,yerr = SP2_6h_NPac_abs_err, color='cyan', alpha = 1, fmt = '*', label = 'NPac')
ax9.errorbar(SP2_6h_SPac_date,SP2_6h_SPac_mass_conc,yerr = SP2_6h_SPac_abs_err, color='green', alpha = 1, fmt = 'o', label = 'SPac')
ax9.errorbar(SP2_6h_Cont_date,SP2_6h_Cont_mass_conc,yerr = SP2_6h_Cont_abs_err, color='magenta', alpha = 1, fmt = '>', label = 'Cont')
ax9.errorbar(SP2_6h_LRT_date,SP2_6h_LRT_mass_conc,yerr = SP2_6h_LRT_abs_err, color='blue', alpha = 1, fmt = 's', label = 'LRT')
ax9.errorbar(SP2_6h_BB_date,SP2_6h_BB_mass_conc,yerr = SP2_6h_BB_abs_err, color='grey', alpha = 1, fmt = 's', label = 'BB')
ax9.xaxis.set_major_formatter(hfmt)
ax9.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax9.xaxis.set_visible(False)
ax9.yaxis.set_visible(True)
ax9.yaxis.set_ticks(np.arange(0, max_display_conc, 100))
ax9.yaxis.tick_right()
ax9.set_ylim(0, max_display_conc)
ax9.set_xlim(dates.date2num(datetime.strptime(startdate_2012, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2012, '%Y/%m/%d')))
ax9.text(0.1, 0.9,'2012', transform=ax9.transAxes)
legend = ax8.legend(loc='upper center', bbox_to_anchor=(0.5, 1.275), ncol=3, numpoints=1)
#ratios
ax10.errorbar(ratio_dates_SPac,ratio_mass_conc_SPac,yerr = ratio_err_SPac, color='green', alpha = 1, fmt = 'o')
ax10.errorbar(ratio_dates_NPac,ratio_mass_conc_NPac,yerr = ratio_err_NPac, color='cyan', alpha = 1, fmt = '*')
ax10.errorbar(ratio_dates_Cont,ratio_mass_conc_Cont,yerr = ratio_err_Cont, color='magenta', alpha = 1, fmt = '>')
ax10.errorbar(ratio_dates_LRT,ratio_mass_conc_LRT,yerr = ratio_err_LRT, color='blue', alpha = 1, fmt = 's')
ax10.errorbar(ratio_dates_BB,ratio_mass_conc_BB,yerr = ratio_err_BB, color='grey', alpha = 1, fmt = '<')
#ax10.plot(all_dates,all_masses,color='grey')
ax10.xaxis.set_major_formatter(hfmt)
ax10.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax10.xaxis.set_minor_locator(dates.DayLocator(interval = 2))
ax10.xaxis.set_visible(True)
ax10.yaxis.set_visible(True)
ax10.set_ylabel('GEOS-Chem/Measurements')
#ax10.set_ylim(0, 70)
ax10.set_xlim(dates.date2num(datetime.strptime(startdate_2009, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2009, '%Y/%m/%d')))
ax10.axhline(y=1,color = 'grey', linestyle = '--')
ax10.axvspan(dates.date2num(fire_span2_09s),dates.date2num(fire_span2_09f), facecolor=fire_color, alpha=fire_alpha)
ax10.set_yscale('log')
ax11.errorbar(ratio_dates_SPac,ratio_mass_conc_SPac,yerr = ratio_err_SPac, color='green', alpha = 1, fmt = 'o')
ax11.errorbar(ratio_dates_NPac,ratio_mass_conc_NPac,yerr = ratio_err_NPac, color='cyan', alpha = 1, fmt = '*')
ax11.errorbar(ratio_dates_Cont,ratio_mass_conc_Cont,yerr = ratio_err_Cont, color='magenta', alpha = 1, fmt = '>')
ax11.errorbar(ratio_dates_LRT,ratio_mass_conc_LRT,yerr = ratio_err_LRT, color='blue', alpha = 1, fmt = 's')
ax11.errorbar(ratio_dates_BB,ratio_mass_conc_BB,yerr = ratio_err_BB, color='grey', alpha = 1, fmt = '<')
#ax11.plot(all_dates,all_masses,color='grey')
ax11.xaxis.set_major_formatter(hfmt)
ax11.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax11.xaxis.set_minor_locator(dates.DayLocator(interval = 2))
ax11.xaxis.set_visible(True)
ax11.yaxis.set_visible(False)
ax11.set_xlabel('month')
ax11.set_xlim(dates.date2num(datetime.strptime(startdate_2010, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2010, '%Y/%m/%d')))
ax11.axhline(y=1,color = 'grey', linestyle = '--')
ax11.axvspan(dates.date2num(fire_span1_10s),dates.date2num(fire_span1_10f), facecolor=fire_color, alpha=fire_alpha)
ax11.set_yscale('log')
ax12.errorbar(ratio_dates_SPac,ratio_mass_conc_SPac,yerr = ratio_err_SPac, color='green', alpha = 1, fmt = 'o')
ax12.errorbar(ratio_dates_NPac,ratio_mass_conc_NPac,yerr = ratio_err_NPac, color='cyan', alpha = 1, fmt = '*')
ax12.errorbar(ratio_dates_Cont,ratio_mass_conc_Cont,yerr = ratio_err_Cont, color='magenta', alpha = 1, fmt = '>')
ax12.errorbar(ratio_dates_LRT,ratio_mass_conc_LRT,yerr = ratio_err_LRT, color='blue', alpha = 1, fmt = 's')
ax12.errorbar(ratio_dates_BB,ratio_mass_conc_BB,yerr = ratio_err_BB, color='grey', alpha = 1, fmt = '<')
#ax12.plot(all_dates,all_masses,color='grey')
ax12.xaxis.set_major_formatter(hfmt)
ax12.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax12.xaxis.set_minor_locator(dates.DayLocator(interval = 2))
ax12.xaxis.set_visible(True)
ax12.yaxis.set_visible(True)
ax12.yaxis.tick_right()
#ax12.spines['top'].set_visible(False)
#ax12.xaxis.tick_bottom()
ax12.set_xlim(dates.date2num(datetime.strptime(startdate_2012, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2012, '%Y/%m/%d')))
ax12.axhline(y=1,color = 'grey', linestyle = '--')
ax12.set_yscale('log')
#legend = ax12.legend(loc='upper right', shadow=False)
plt.subplots_adjust(hspace=0.08)
plt.subplots_adjust(wspace=0.05)
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/GOES-Chem/')
plt.savefig('timeseries - FT only GEOS-Chem v10 v measurements - db - default 6h - RH90 - three row.png', bbox_extra_artists=(legend,), bbox_inches='tight',dpi=600)
plt.show()
|
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os.path
from robotide.lib.robot.errors import DataError
from robotide.lib.robot.utils import (get_error_details, is_string, is_list_like,
is_dict_like, split_args_from_name_or_path,
type_name, Importer)
from .loggerhelper import AbstractLoggerProxy
from .logger import LOGGER
class _RecursionAvoidingMetaclass(type):
"""Metaclass to wrap listener methods so that they cannot cause recursion.
Recursion would otherwise happen if one listener logs something and that
message is received and logged again by the log_message or message method.
"""
def __new__(cls, name, bases, dct):
for attr, value in dct.items():
if not attr.startswith('_') and inspect.isroutine(value):
dct[attr] = cls._wrap_listener_method(value)
dct['_calling_method'] = False
return type.__new__(cls, name, bases, dct)
@staticmethod
def _wrap_listener_method(method):
def wrapped(self, *args):
if not self._calling_method:
self._calling_method = True
method(self, *args)
self._calling_method = False
return wrapped
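# A standalone sketch of the guard _RecursionAvoidingMetaclass installs above:
# the wrapper skips any call made while one of the object's own methods is
# already running, which is what stops a listener that logs from inside
# log_message from recursing forever. The toy names below are illustrative only.
def _recursion_guard_sketch(method):
    def wrapped(self, *args):
        if not self._calling_method:  # already inside a wrapped method? drop the call
            self._calling_method = True
            method(self, *args)
            self._calling_method = False
    return wrapped

class _NoisyListenerSketch(object):
    _calling_method = False

    @_recursion_guard_sketch
    def log_message(self, msg):
        self.seen = getattr(self, 'seen', []) + [msg]
        # A real listener might emit a log line here, which would come straight
        # back into log_message; the guard turns that nested call into a no-op.
        self.log_message('echo of ' + msg)

# _NoisyListenerSketch().log_message('hello') records one message and returns
# instead of recursing.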
class Listeners(object):
__metaclass__ = _RecursionAvoidingMetaclass
_start_attrs = ('id', 'doc', 'starttime', 'longname')
_end_attrs = _start_attrs + ('endtime', 'elapsedtime', 'status', 'message')
_kw_extra_attrs = ('args', 'assign', 'kwname', 'libname',
'-id', '-longname', '-message')
def __init__(self, listeners):
self._listeners = self._import_listeners(listeners)
self._running_test = False
self._setup_or_teardown_type = None
def __nonzero__(self):
return bool(self._listeners)
def _import_listeners(self, listener_data):
listeners = []
for listener in listener_data:
try:
listeners.append(ListenerProxy(listener))
except DataError as err:
if not is_string(listener):
listener = type_name(listener)
LOGGER.error("Taking listener '%s' into use failed: %s"
% (listener, unicode(err)))
return listeners
def start_suite(self, suite):
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.start_suite, suite.name, suite.doc)
else:
attrs = self._get_start_attrs(suite, 'metadata')
attrs.update(self._get_suite_attrs(suite))
listener.call_method(listener.start_suite, suite.name, attrs)
def _get_suite_attrs(self, suite):
return {
'tests' : [t.name for t in suite.tests],
'suites': [s.name for s in suite.suites],
'totaltests': suite.test_count,
'source': suite.source or ''
}
def end_suite(self, suite):
for listener in self._listeners:
self._notify_end_suite(listener, suite)
def _notify_end_suite(self, listener, suite):
if listener.version == 1:
listener.call_method(listener.end_suite, suite.status,
suite.full_message)
else:
attrs = self._get_end_attrs(suite, 'metadata')
attrs['statistics'] = suite.stat_message
attrs.update(self._get_suite_attrs(suite))
listener.call_method(listener.end_suite, suite.name, attrs)
def start_test(self, test):
self._running_test = True
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.start_test, test.name, test.doc,
list(test.tags))
else:
attrs = self._get_start_attrs(test, 'tags')
attrs['critical'] = 'yes' if test.critical else 'no'
attrs['template'] = test.template or ''
listener.call_method(listener.start_test, test.name, attrs)
def end_test(self, test):
self._running_test = False
for listener in self._listeners:
self._notify_end_test(listener, test)
def _notify_end_test(self, listener, test):
if listener.version == 1:
listener.call_method(listener.end_test, test.status, test.message)
else:
attrs = self._get_end_attrs(test, 'tags')
attrs['critical'] = 'yes' if test.critical else 'no'
attrs['template'] = test.template or ''
listener.call_method(listener.end_test, test.name, attrs)
def start_keyword(self, kw):
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.start_keyword, kw.name, kw.args)
else:
attrs = self._get_start_attrs(kw, *self._kw_extra_attrs)
attrs['type'] = self._get_keyword_type(kw, start=True)
listener.call_method(listener.start_keyword, kw.name, attrs)
def end_keyword(self, kw):
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.end_keyword, kw.status)
else:
attrs = self._get_end_attrs(kw, *self._kw_extra_attrs)
attrs['type'] = self._get_keyword_type(kw, start=False)
listener.call_method(listener.end_keyword, kw.name, attrs)
def _get_keyword_type(self, kw, start=True):
# When running setup or teardown, only the top level keyword has type
# set to setup/teardown but we want to pass that type also to all
# start/end_keyword listener methods called below that keyword.
if kw.type == 'kw':
return self._setup_or_teardown_type or 'Keyword'
kw_type = self._get_setup_or_teardown_type(kw)
self._setup_or_teardown_type = kw_type if start else None
return kw_type
def _get_setup_or_teardown_type(self, kw):
return '%s %s' % (('Test' if self._running_test else 'Suite'),
kw.type.title())
def imported(self, import_type, name, attrs):
for listener in self._listeners:
method = getattr(listener, '%s_import' % import_type.lower())
listener.call_method(method, name, attrs)
def log_message(self, msg):
for listener in self._listeners:
if listener.version == 2:
listener.call_method(listener.log_message, self._create_msg_dict(msg))
def message(self, msg):
for listener in self._listeners:
if listener.version == 2:
listener.call_method(listener.message, self._create_msg_dict(msg))
def _create_msg_dict(self, msg):
return {'timestamp': msg.timestamp, 'message': msg.message,
'level': msg.level, 'html': 'yes' if msg.html else 'no'}
def output_file(self, file_type, path):
for listener in self._listeners:
method = getattr(listener, '%s_file' % file_type.lower())
listener.call_method(method, path)
def close(self):
for listener in self._listeners:
listener.call_method(listener.close)
def _get_start_attrs(self, item, *extra):
return self._get_attrs(item, self._start_attrs, extra)
def _get_end_attrs(self, item, *extra):
return self._get_attrs(item, self._end_attrs, extra)
def _get_attrs(self, item, default, extra):
names = self._get_attr_names(default, extra)
return dict((n, self._get_attr_value(item, n)) for n in names)
def _get_attr_names(self, default, extra):
names = list(default)
for name in extra:
if not name.startswith('-'):
names.append(name)
elif name[1:] in names:
names.remove(name[1:])
return names
def _get_attr_value(self, item, name):
value = getattr(item, name)
return self._take_copy_of_mutable_value(value)
def _take_copy_of_mutable_value(self, value):
if is_dict_like(value):
return dict(value)
if is_list_like(value):
return list(value)
return value
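# The '-name' convention used by _kw_extra_attrs above: plain extra names are
# appended to the default attribute list, while names prefixed with '-' are
# removed from it. A standalone sketch of what _get_attr_names produces; the
# helper name is illustrative and the sample tuples simply mirror the class
# attributes defined above.
def _resolve_attr_names_sketch(default, extra):
    names = list(default)
    for name in extra:
        if not name.startswith('-'):
            names.append(name)
        elif name[1:] in names:
            names.remove(name[1:])
    return names

# e.g. _resolve_attr_names_sketch(('id', 'doc', 'starttime', 'longname'),
#                                 ('args', '-id', '-longname'))
# -> ['doc', 'starttime', 'args']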
class ListenerProxy(AbstractLoggerProxy):
_methods = ['start_suite', 'end_suite', 'start_test', 'end_test',
'start_keyword', 'end_keyword', 'log_message', 'message',
'output_file', 'report_file', 'log_file', 'debug_file',
'xunit_file', 'close', 'library_import', 'resource_import',
'variables_import']
def __init__(self, listener):
if is_string(listener):
name, args = split_args_from_name_or_path(listener)
listener = self._import_listener(name, args)
else:
name = type_name(listener)
AbstractLoggerProxy.__init__(self, listener)
self.name = name
self.version = self._get_version(listener)
if self.version == 1:
LOGGER.warn("Listener '%s' uses deprecated API version 1. "
"Switch to API version 2 instead." % self.name)
def _import_listener(self, name, args):
importer = Importer('listener')
return importer.import_class_or_module(os.path.normpath(name),
instantiate_with_args=args)
def _get_version(self, listener):
try:
return int(getattr(listener, 'ROBOT_LISTENER_API_VERSION', 1))
except ValueError:
return 1
def call_method(self, method, *args):
try:
method(*args)
except:
message, details = get_error_details()
LOGGER.error("Calling listener method '%s' of listener '%s' "
"failed: %s" % (method.__name__, self.name, message))
LOGGER.info("Details:\n%s" % details)
|
|
"""
This module implements the base model class. All model things inherit from this class.
"""
from __future__ import print_function
from builtins import zip
from builtins import str
from builtins import range
from builtins import object
import h2o
import imp, traceback
from ..utils.shared_utils import can_use_pandas
class ModelBase(object):
def __init__(self):
self._id = None
self._model_json = None
self._metrics_class = None
self._is_xvalidated = False
self._xval_keys = None
self._parms = {} # internal, for object recycle
self.parms = {} # external
self._estimator_type = "unsupervised"
self._future = False # used by __repr__/show to query job state
self._job = None # used when _future is True
@property
def model_id(self):
"""
:return: Retrieve this model's identifier.
"""
return self._id
@model_id.setter
def model_id(self, value):
oldname = self.model_id
self._id = value
h2o.rapids("(rename \"{}\" \"{}\")".format(oldname, value))
@property
def params(self):
"""
Get the parameters and the actual/default values only.
:return: A dictionary of parameters used to build this model.
"""
params = {}
for p in self.parms:
params[p] = {"default":self.parms[p]["default_value"], "actual":self.parms[p]["actual_value"]}
return params
@property
def full_parameters(self):
"""
Get the full specification of all parameters.
:return: a dictionary of parameters used to build this model.
"""
return self.parms
@property
def type(self):
"""Get the type of model built as a string.
Returns
-------
"classifier" or "regressor" or "unsupervised"
"""
return self._estimator_type
def __repr__(self):
# PUBDEV-2278: using <method>? from IPython caused everything to dump
stk = traceback.extract_stack()
if not ("IPython" in stk[-2][0] and "info" == stk[-2][2]):
self.show()
return ""
def predict(self, test_data):
"""
Predict on a dataset.
Parameters
----------
test_data: H2OFrame
Data on which to make predictions.
Returns
-------
A new H2OFrame of predictions.
"""
if not isinstance(test_data, h2o.H2OFrame): raise ValueError("test_data must be an instance of H2OFrame")
j = h2o.H2OConnection.post_json("Predictions/models/" + self.model_id + "/frames/" + test_data.frame_id)
# prediction_frame_id = j["predictions_frame"] #j["model_metrics"][0]["predictions"]["frame_id"]["name"]
return h2o.get_frame(j["predictions_frame"]["name"])
def is_cross_validated(self):
"""
:return: True if the model was cross-validated.
"""
return self._is_xvalidated
def xval_keys(self):
"""
:return: The model keys for the cross-validated model.
"""
return self._xval_keys
def get_xval_models(self,key=None):
"""
Return a Model object.
:param key: If None, return all cross-validated models; otherwise return the model that key points to.
:return: A model or list of models.
"""
return h2o.get_model(key) if key is not None else [h2o.get_model(k) for k in self._xval_keys]
@property
def xvals(self):
"""
Return a list of the cross-validated models.
:return: A list of models
"""
return self.get_xval_models()
def deepfeatures(self, test_data, layer):
"""
Return hidden layer details
:param test_data: Data to create a feature space on
:param layer: 0 index hidden layer
"""
if test_data is None: raise ValueError("Must specify test data")
j = h2o.H2OConnection.post_json("Predictions/models/" + self._id + "/frames/" + test_data.frame_id, deep_features_hidden_layer=layer)
return h2o.get_frame(j["predictions_frame"]["name"])
def weights(self, matrix_id=0):
"""
Return the frame for the respective weight matrix
:param matrix_id: an integer, ranging from 0 to the number of layers, that specifies the weight matrix to return.
:return: an H2OFrame which represents the weight matrix identified by matrix_id
"""
num_weight_matrices = len(self._model_json['output']['weights'])
if matrix_id not in list(range(num_weight_matrices)):
raise ValueError("Weight matrix does not exist. Model has {0} weight matrices (0-based indexing), but matrix {1} "
"was requested.".format(num_weight_matrices, matrix_id))
return h2o.get_frame(self._model_json['output']['weights'][matrix_id]['URL'].split('/')[3])
def biases(self, vector_id=0):
"""
Return the frame for the respective bias vector
:param vector_id: an integer, ranging from 0 to the number of layers, that specifies the bias vector to return.
:return: an H2OFrame which represents the bias vector identified by vector_id
"""
num_bias_vectors = len(self._model_json['output']['biases'])
if vector_id not in list(range(num_bias_vectors)):
raise ValueError("Bias vector does not exist. Model has {0} bias vectors (0-based indexing), but vector {1} "
"was requested.".format(num_bias_vectors, vector_id))
return h2o.get_frame(self._model_json['output']['biases'][vector_id]['URL'].split('/')[3])
def normmul(self):
"""
Normalization/Standardization multipliers for numeric predictors
"""
return self._model_json['output']['normmul']
def normsub(self):
"""
Normalization/Standardization offsets for numeric predictors
"""
return self._model_json['output']['normsub']
def respmul(self):
"""
Normalization/Standardization multipliers for numeric response
"""
return self._model_json['output']['normrespmul']
def respsub(self):
"""
Normalization/Standardization offsets for numeric response
"""
return self._model_json['output']['normrespsub']
def catoffsets(self):
"""
Categorical offsets for one-hot encoding
"""
return self._model_json['output']['catoffsets']
def model_performance(self, test_data=None, train=False, valid=False):
"""
Generate model metrics for this model on test_data.
Parameters
----------
test_data: H2OFrame, optional
Data set against which model metrics shall be computed. Both train and valid arguments are ignored if test_data is not None.
train: boolean, optional
Report the training metrics for the model. If the test_data is the training data, the training metrics are returned.
valid: boolean, optional
Report the validation metrics for the model. If both train and valid are True, the training metrics are returned.
Returns
-------
An object of class H2OModelMetrics.
"""
if test_data is None:
if not train and not valid: train = True # default to train
if train: return self._model_json["output"]["training_metrics"]
if valid: return self._model_json["output"]["validation_metrics"]
else: # cases dealing with test_data not None
if not isinstance(test_data, h2o.H2OFrame):
raise ValueError("`test_data` must be of type H2OFrame. Got: " + type(test_data))
res = h2o.H2OConnection.post_json("ModelMetrics/models/" + self.model_id + "/frames/" + test_data.frame_id)
# FIXME need to do the client-side filtering... PUBDEV-874: https://0xdata.atlassian.net/browse/PUBDEV-874
raw_metrics = None
for mm in res["model_metrics"]:
if not mm["frame"] == None and mm["frame"]["name"] == test_data.frame_id:
raw_metrics = mm
break
return self._metrics_class(raw_metrics,algo=self._model_json["algo"])
def score_history(self):
"""
Retrieve Model Score History
Returns
-------
The score history as an H2OTwoDimTable.
"""
model = self._model_json["output"]
if 'scoring_history' in list(model.keys()) and model["scoring_history"] is not None:
s = model["scoring_history"]
if can_use_pandas():
import pandas
pandas.options.display.max_rows = 20
return pandas.DataFrame(s.cell_values,columns=s.col_header)
return s
else: print("No score history for this model")
def summary(self):
"""
Print a detailed summary of the model.
"""
model = self._model_json["output"]
if model["model_summary"]:
model["model_summary"].show() # H2OTwoDimTable object
def show(self):
"""
Print innards of model, without regards to type
"""
if self._future:
self._job.poll_once()
return
if self._model_json is None:
print("No model trained yet")
return
if self.model_id is None:
print("This H2OEstimator has been removed.")
return
model = self._model_json["output"]
print("Model Details")
print("=============")
print(self.__class__.__name__, ": ", self._model_json["algo_full_name"])
print("Model Key: ", self._id)
self.summary()
print()
# training metrics
tm = model["training_metrics"]
if tm: tm.show()
vm = model["validation_metrics"]
if vm: vm.show()
xm = model["cross_validation_metrics"]
if xm: xm.show()
if "scoring_history" in list(model.keys()) and model["scoring_history"]: model["scoring_history"].show()
if "variable_importances" in list(model.keys()) and model["variable_importances"]: model["variable_importances"].show()
def varimp(self, use_pandas=False):
"""
Pretty print the variable importances, or return them in a list
Parameters
----------
use_pandas: boolean, optional
If True, then the variable importances will be returned as a pandas data frame.
Returns
-------
A list or Pandas DataFrame.
"""
model = self._model_json["output"]
if "variable_importances" in list(model.keys()) and model["variable_importances"]:
vals = model["variable_importances"].cell_values
header=model["variable_importances"].col_header
if use_pandas and can_use_pandas():
import pandas
return pandas.DataFrame(vals, columns=header)
else:
return vals
else:
print("Warning: This model doesn't have variable importances")
def residual_deviance(self,train=False,valid=False,xval=False):
"""
Retrieve the residual deviance if this model has the attribute, or None otherwise.
:param train: Get the residual deviance for the training set. If both train and valid are False, then train is selected by default.
:param valid: Get the residual deviance for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the residual deviance, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].residual_deviance() if train else self._model_json["output"]["validation_metrics"].residual_deviance()
def residual_degrees_of_freedom(self,train=False,valid=False,xval=False):
"""
Retrieve the residual degrees of freedom if this model has the attribute, or None otherwise.
:param train: Get the residual dof for the training set. If both train and valid are False, then train is selected by default.
:param valid: Get the residual dof for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the residual dof, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].residual_degrees_of_freedom() if train else self._model_json["output"]["validation_metrics"].residual_degrees_of_freedom()
def null_deviance(self,train=False,valid=False,xval=False):
"""
Retrieve the null deviance if this model has the attribute, or None otherwise.
:param train: Get the null deviance for the training set. If both train and valid are False, then train is selected by default.
:param valid: Get the null deviance for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the null deviance, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].null_deviance() if train else self._model_json["output"]["validation_metrics"].null_deviance()
def null_degrees_of_freedom(self,train=False,valid=False,xval=False):
"""
Retrieve the null degrees of freedom if this model has the attribute, or None otherwise.
:param train: Get the null dof for the training set. If both train and valid are False, then train is selected by default.
:param valid: Get the null dof for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the null dof, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].null_degrees_of_freedom() if train else self._model_json["output"]["validation_metrics"].null_degrees_of_freedom()
def pprint_coef(self):
"""
Pretty print the coefficients table (includes normalized coefficients)
"""
print(self._model_json["output"]["coefficients_table"]) # will return None if no coefs!
def coef(self):
"""
:return: Return the coefficients for this model.
"""
tbl = self._model_json["output"]["coefficients_table"]
if tbl is None: return None
tbl = tbl.cell_values
return {a[0]:a[1] for a in tbl}
def coef_norm(self):
"""
:return: Return the normalized coefficients
"""
tbl = self._model_json["output"]["coefficients_table"]
if tbl is None: return None
tbl = tbl.cell_values
return {a[0]:a[2] for a in tbl}
def r2(self, train=False, valid=False, xval=False):
"""
Return the R^2 for this regression model.
The R^2 value is defined to be 1 - MSE/var,
where var is computed as sigma*sigma.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the R^2 value for the training data.
:param valid: If valid is True, then return the R^2 value for the validation data.
:param xval: If xval is True, then return the R^2 value for the cross validation data.
:return: The R^2 for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(list(tm.keys()),list(tm.values())): m[k] = None if v is None else v.r2()
return list(m.values())[0] if len(m) == 1 else m
def mse(self, train=False, valid=False, xval=False):
"""
Get the MSE(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
Parameters
----------
train : bool, default=False
If train is True, then return the MSE value for the training data.
valid : bool, default=False
If valid is True, then return the MSE value for the validation data.
xval : bool, default=False
If xval is True, then return the MSE value for the cross validation data.
Returns
-------
The MSE for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(list(tm.keys()),list(tm.values())): m[k] = None if v is None else v.mse()
return list(m.values())[0] if len(m) == 1 else m
def logloss(self, train=False, valid=False, xval=False):
"""
Get the Log Loss(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the Log Loss value for the training data.
:param valid: If valid is True, then return the Log Loss value for the validation data.
:param xval: If xval is True, then return the Log Loss value for the cross validation data.
:return: The Log Loss for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(list(tm.keys()),list(tm.values())): m[k] = None if v is None else v.logloss()
return list(m.values())[0] if len(m) == 1 else m
def mean_residual_deviance(self, train=False, valid=False, xval=False):
"""
Get the Mean Residual Deviances(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the Mean Residual Deviance value for the training data.
:param valid: If valid is True, then return the Mean Residual Deviance value for the validation data.
:param xval: If xval is True, then return the Mean Residual Deviance value for the cross validation data.
:return: The Mean Residual Deviance for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(list(tm.keys()),list(tm.values())): m[k] = None if v is None else v.mean_residual_deviance()
return list(m.values())[0] if len(m) == 1 else m
def auc(self, train=False, valid=False, xval=False):
"""
Get the AUC(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the AUC value for the training data.
:param valid: If valid is True, then return the AUC value for the validation data.
:param xval: If xval is True, then return the AUC value for the cross validation data.
:return: The AUC.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(list(tm.keys()),list(tm.values())): m[k] = None if v is None else v.auc()
return list(m.values())[0] if len(m) == 1 else m
def aic(self, train=False, valid=False, xval=False):
"""
Get the AIC(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the AIC value for the training data.
:param valid: If valid is True, then return the AIC value for the validation data.
:param xval: If xval is True, then return the AIC value for the cross validation data.
:return: The AIC.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(list(tm.keys()),list(tm.values())): m[k] = None if v is None else v.aic()
return list(m.values())[0] if len(m) == 1 else m
def giniCoef(self, train=False, valid=False, xval=False):
"""
Get the Gini Coefficient(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the Gini Coefficient value for the training data.
:param valid: If valid is True, then return the Gini Coefficient value for the validation data.
:param xval: If xval is True, then return the Gini Coefficient value for the cross validation data.
:return: The Gini Coefficient for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(list(tm.keys()),list(tm.values())): m[k] = None if v is None else v.giniCoef()
return list(m.values())[0] if len(m) == 1 else m
def download_pojo(self,path=""):
"""
Download the POJO for this model to the directory specified by path (no trailing slash!).
If path is "", then dump to screen.
:param path: An absolute path to the directory where the POJO should be saved.
:return: None
"""
h2o.download_pojo(self,path) # call the "package" function
@staticmethod
def _get_metrics(o, train, valid, xval):
metrics = {}
if train: metrics["train"] = o._model_json["output"]["training_metrics"]
if valid: metrics["valid"] = o._model_json["output"]["validation_metrics"]
if xval : metrics["xval"] = o._model_json["output"]["cross_validation_metrics"]
if len(metrics) == 0: metrics["train"] = o._model_json["output"]["training_metrics"]
return metrics
# Delete from cluster as model goes out of scope
# def __del__(self):
# h2o.remove(self._id)
def _plot(self, timestep, metric, **kwargs):
# check for matplotlib. exit if absent
try:
imp.find_module('matplotlib')
import matplotlib
if 'server' in list(kwargs.keys()) and kwargs['server']: matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
except ImportError:
print("matplotlib is required for this function!")
return
scoring_history = self.score_history()
# Separate functionality for GLM since its output is different from other algos
if self._model_json["algo"] == "glm":
# GLM has only one timestep option, which is `iteration`
timestep = "iteration"
if metric == "AUTO": metric = "log_likelihood"
elif metric not in ("log_likelihood", "objective"):
raise ValueError("for GLM, metric must be one of: log_likelihood, objective")
plt.xlabel(timestep)
plt.ylabel(metric)
plt.title("Validation Scoring History")
plt.plot(scoring_history[timestep], scoring_history[metric])
elif self._model_json["algo"] in ("deeplearning", "drf", "gbm"):
# Set timestep
if self._model_json["algo"] in ("gbm", "drf"):
if timestep == "AUTO": timestep = "number_of_trees"
elif timestep not in ("duration","number_of_trees"):
raise ValueError("timestep for gbm or drf must be one of: duration, number_of_trees")
else: #self._model_json["algo"] == "deeplearning":
# Delete first row of DL scoring history since it contains NAs & NaNs
if scoring_history["samples"][0] == 0:
scoring_history = scoring_history[1:]
if timestep == "AUTO": timestep = "epochs"
elif timestep not in ("epochs","samples","duration"):
raise ValueError("timestep for deeplearning must be one of: epochs, samples, duration")
training_metric = "training_{}".format(metric)
validation_metric = "validation_{}".format(metric)
if timestep == "duration":
dur_colname = "duration_{}".format(scoring_history["duration"][1].split()[1])
scoring_history[dur_colname] = [str(x).split()[0] for x in scoring_history["duration"]]
timestep = dur_colname
if can_use_pandas():
valid = validation_metric in list(scoring_history)
ylim = (scoring_history[[training_metric, validation_metric]].min().min(), scoring_history[[training_metric, validation_metric]].max().max()) if valid \
else (scoring_history[training_metric].min(), scoring_history[training_metric].max())
else:
valid = validation_metric in scoring_history.col_header
ylim = (min(min(scoring_history[[training_metric, validation_metric]])), max(max(scoring_history[[training_metric, validation_metric]]))) if valid \
else (min(scoring_history[training_metric]), max(scoring_history[training_metric]))
if ylim[0] == ylim[1]: ylim = (0,1)
if valid: # Training and validation scoring history
plt.xlabel(timestep)
plt.ylabel(metric)
plt.title("Scoring History")
plt.ylim(ylim)
plt.plot(scoring_history[timestep], scoring_history[training_metric], label="Training")
plt.plot(scoring_history[timestep], scoring_history[validation_metric], color="orange", label="Validation")
plt.legend()
else: # Training scoring history only
plt.xlabel(timestep)
plt.ylabel(training_metric)
plt.title("Training Scoring History")
plt.ylim(ylim)
plt.plot(scoring_history[timestep], scoring_history[training_metric])
else: # algo is not glm, deeplearning, drf, gbm
raise ValueError("Plotting not implemented for this type of model")
if "server" not in list(kwargs.keys()) or not kwargs["server"]: plt.show()
@staticmethod
def _check_targets(y_actual, y_predicted):
"""Check that y_actual and y_predicted have the same length.
:param y_actual: An H2OFrame
:param y_predicted: An H2OFrame
:return: None
"""
if len(y_actual) != len(y_predicted):
raise ValueError("Row mismatch: [{},{}]".format(len(y_actual),len(y_predicted)))
|
|
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import codecs
from collections import OrderedDict
import logging
import os.path
from os.path import dirname
from os.path import join
import re
from lxml import etree
from pymaven import pom
from pymaven import artifact
from commoncode import filetype
from commoncode import fileutils
from packagedcode import models
from typecode import contenttype
from textcode import analysis
logger = logging.getLogger(__name__)
# import sys
# logging.basicConfig(stream=sys.stdout)
# logger.setLevel(logging.DEBUG)
"""
Support Maven2 POMs.
Attempts to resolve Maven properties when possible.
"""
class MavenPomPackage(models.Package):
metafiles = ('.pom', 'pom.xml',)
extensions = ('.pom', '.xml',)
repo_types = (models.repo_maven,)
type = models.StringType(default='Apache Maven POM')
packaging = models.StringType(default=models.as_archive)
primary_language = models.StringType(default='Java')
@classmethod
def recognize(cls, location):
return parse(location)
class ParentPom(artifact.Artifact):
"""
A minimal Artifact subclass used to store parent poms when no POM file is available for these.
"""
def __init__(self, coordinate):
super(ParentPom, self).__init__(coordinate)
# add empty, pom.Pom-class-like empty attributes
self.client = None
self.dependencies = {}
self.dependency_management = {}
self.parent = None
self.properties = {}
# TODO: ????
# self.pom_data/self._xml = None
def to_dict(self):
"""
Return a mapping representing this POM
"""
return OrderedDict([
('group_id', self.group_id),
('artifact_id', self.artifact_id),
('version', str(self.version) if self.version else None),
('classifier', self.classifier),
('type', self.type),
])
STRIP_NAMESPACE_RE = re.compile(r"<project(.|\s)*?>", re.UNICODE)
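# Illustrative sketch (not part of the original module): shows what
# STRIP_NAMESPACE_RE does to the opening <project> tag so that later xpath
# lookups need no namespace prefixes. The sample tag below is made up.
def _demo_strip_namespace():
    sample = ('<project xmlns="http://maven.apache.org/POM/4.0.0" '
              'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">')
    stripped = STRIP_NAMESPACE_RE.sub('<project>', sample, 1)
    assert stripped == '<project>'
    return stripped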
class MavenPom(pom.Pom):
def __init__(self, location):
# NOTE: most of this is copied over from Pom.__init__
try:
with codecs.open(location, 'rb', encoding='UTF-8') as fh:
xml = fh.read()
except UnicodeDecodeError as _a:
xml = analysis.unicode_text(location)
xml = xml[xml.find('<project'):]
xml = STRIP_NAMESPACE_RE.sub('<project>', xml, 1)
parser = etree.XMLParser(
recover=True,
remove_comments=True,
remove_pis=True,
remove_blank_text=True, resolve_entities=False
)
self._xml = etree.fromstring(xml, parser=parser)
        # FIXME: we do not use a client for now; there are pending issues at pymaven to address this
self._client = None
self.model_version = self._get_attribute('modelVersion')
self.group_id = self._get_attribute('groupId')
self.artifact_id = self._get_attribute('artifactId')
self.version = self._get_attribute('version')
self.classifier = self._get_attribute('classifier')
self.packaging = self._get_attribute('packaging') or 'jar'
self.name = self._get_attribute('name')
self.description = self._get_attribute('description')
self.inception_year = self._get_attribute('inceptionYear')
self.url = self._get_attribute('url')
self.organization_name = self._get_attribute('organization/name')
self.organization_url = self._get_attribute('organization/url')
self.licenses = list(self._find_licenses())
self.developers = list(self._find_parties('developers/developer'))
self.contributors = list(self._find_parties('contributors/contributor'))
self.mailing_lists = list(self._find_mailing_lists())
self.scm = self._find_scm()
self.issue_management = self._find_issue_management()
self.ci_management = self._find_ci_management()
self.distribution_management = self._find_distribution_management()
self.repositories = list(self._find_repositories('repositories/repository'))
self.plugin_repositories = list(self._find_repositories('pluginRepositories/pluginRepository'))
self.modules = self._get_attributes_list('modules/module')
# FIXME: this attribute should be collected with the parent but
        # is not retrieved yet by pymaven; it points to the relative path
# where to find the full parent POM
self.parent_relative_path = self._get_attribute('relativePath') # or '../pom.xml'
# FIXME: Other types that are not collected for now (or
# indirectly through dependencies management) include: build,
# reporting, profiles, etc
# dynamic attributes
self._parent = None
self._dep_mgmt = None
self._dependencies = None
self._properties = None
def _extra_properties(self):
"""
Return a mapping of extra properties
"""
properties = {}
properties['classifier'] = self.classifier
properties['project.classifier'] = self.classifier
properties['pom.classifier'] = self.classifier
properties['packaging'] = self.packaging
properties['project.packaging'] = self.packaging
properties['pom.packaging'] = self.packaging
properties['organization.name'] = self.organization_name
properties['project.organization.name'] = self.organization_name
properties['pom.organization.name'] = self.organization_name
properties['organization.url'] = self.organization_url
properties['project.organization.url'] = self.organization_url
properties['pom.organization.url'] = self.organization_url
# TODO: collect props defined in a properties file
# see https://maven.apache.org/shared/maven-archiver/#class_archive
# afaik this only applies for POMs stored inside a JAR
return properties
@classmethod
def _replace_props(cls, text, properties):
def subfunc(matchobj):
"""Return the replacement value for a matched property key."""
key = matchobj.group(1)
            # does this key contain a substring() expression?
real_key, start_end = _get_substring_expression(key)
if not start_end:
value = properties.get(key)
return value
# apply the substring transform
value = properties.get(real_key)
if not value:
return value
start, end = start_end
return substring(value, start, end)
result = pom.PROPERTY_RE.sub(subfunc, text)
while result and pom.PROPERTY_RE.match(result):
result = pom.PROPERTY_RE.sub(subfunc, result)
if not result:
result = text
return result.strip()
def _replace_properties(self, text, properties=None):
        # copied from pymaven.pom.Pom
if properties is None:
properties = self.properties
return MavenPom._replace_props(text, properties)
def resolve(self):
"""
Resolve POM Maven "properties" in attribute values and inherit
from parent. Update the POM attributes in place.
"""
# inherit first to get essential parent properties
self._inherit_from_parent()
# then collect properties + extra
properties = dict(self.properties)
properties.update(self._extra_properties())
# these attributes are plain strings
plain_attributes = [
'group_id',
'version',
'classifier',
'packaging',
'name',
'description',
'inception_year',
'url',
'organization_name',
'organization_url',
]
for attr in plain_attributes:
attr_val = getattr(self, attr, None)
if not attr_val:
continue
resolved = self._replace_properties(attr_val, properties)
setattr(self, attr, resolved)
# these attributes are mappings
mapping_attributes = [
'scm',
'issue_management',
'ci_management',
]
for map_attr in mapping_attributes:
mapping = getattr(self, map_attr, {})
if not mapping:
continue
for key, value in mapping.items():
if not value:
continue
mapping[key] = self._replace_properties(value, properties)
# these attributes are lists of mappings
mappings_list_attributes = [
'repositories',
'plugin_repositories',
]
for lmap_attr in mappings_list_attributes:
lmapping = getattr(self, lmap_attr, [])
if not lmapping:
continue
for mapping in lmapping:
for key, value in mapping.items():
if not value:
continue
mapping[key] = self._replace_properties(value, properties)
# these attributes are complex nested and lists mappings
# TODO: add:
# nest dicts
# 'distribution_management',
# nest lists
# 'mailing_lists',
def _inherit_from_parent(self):
"""
Update attributes using inheritance from parent attributes. For
instance, the parent group_id is used if group_id is not defined.
"""
# TODO: there are more attributes (all) that can be inherited
if not self.parent:
return
if self.group_id is None and self.parent.group_id:
self.group_id = self.parent.group_id
if self.version is None and self.parent.version:
self.version = str(self.parent.version)
        if self.classifier is None and self.parent.classifier:
self.classifier = self.parent.classifier
# special handling for URLs: see
# http://maven.apache.org/ref/3.5.0/maven-model-builder/index.html#Inheritance_Assembly
# Notice that the 5 URLs from the model:
# project.url,
# project.scm.connection, project.scm.developerConnection, project.scm.url
# project.distributionManagement.site.url)
# ... have a special inheritance handling: if not configured in
# current model, the inherited value is the parent's one with
# current artifact id appended.
if (self.url is None
and hasattr(self.parent, 'url')
and getattr(self.parent, 'url', None)
and self.artifact_id):
self.url = self.parent.url + self.artifact_id
parent_scm = getattr(self.parent, 'scm', None)
if self.scm and parent_scm and self.artifact_id:
ps_url = parent_scm.get('url')
if not self.scm.get('url') and ps_url:
self.scm['url'] = ps_url + self.artifact_id
ps_connection = parent_scm.get('connection')
if not self.scm.get('connection') and ps_connection:
self.scm['connection'] = ps_connection + self.artifact_id
ps_devconnection = parent_scm.get('developer_connection')
if not self.scm.get('developer_connection') and ps_devconnection:
self.scm['developer_connection'] = ps_devconnection + self.artifact_id
# TODO: distribution_management.site.url
def _pom_factory(self, group_id, artifact_id, version):
return ParentPom('%s:%s:pom:%s' % (group_id, artifact_id, version))
def _get_attribute(self, xpath, xml=None):
"""Return a single value text attribute for a given xpath or None."""
if xml is None:
xml = self._xml
attr = xml.findtext(xpath)
return attr and attr.strip() or None
def _get_attributes_list(self, xpath, xml=None):
"""Return a list of text attribute values for a given xpath or None."""
if xml is None:
xml = self._xml
attrs = xml.findall(xpath)
attrs = [attr.text for attr in attrs]
return [attr.strip() for attr in attrs if attr and attr.strip()]
def _find_licenses(self):
"""Return an iterable of license mappings."""
for lic in self._xml.findall('licenses/license'):
yield OrderedDict([
('name', self._get_attribute('name', lic)),
('url', self._get_attribute('url', lic)),
('comments', self._get_attribute('comments', lic)),
# arcane and seldom used
('distribution', self._get_attribute('distribution', lic)),
])
def _find_parties(self, key='developers/developer'):
"""Return an iterable of party mappings for a given xpath."""
for party in self._xml.findall(key):
yield OrderedDict([
('id', self._get_attribute('id', party)),
('name', self._get_attribute('name', party)),
('email', self._get_attribute('email', party)),
('url', self._get_attribute('url', party)),
('organization', self._get_attribute('organization', party)),
('organization_url', self._get_attribute('organizationUrl', party)),
('roles', [role.findtext('.') for role in party.findall('roles/role')]),
])
def _find_mailing_lists(self):
"""Return an iterable of mailing lists mappings."""
for ml in self._xml.findall('mailingLists/mailingList'):
archive_url = self._get_attribute('archive', ml)
# TODO: add 'otherArchives/otherArchive' as lists?
yield OrderedDict([
('name', self._get_attribute('name', ml)),
('archive_url', archive_url),
])
def _find_scm(self):
"""Return a version control/scm mapping."""
scm = self._xml.find('scm')
if scm is None:
return {}
return OrderedDict([
('connection', self._get_attribute('connection', scm)),
            ('developer_connection', self._get_attribute('developerConnection', scm)),
('url', self._get_attribute('url', scm)),
('tag', self._get_attribute('tag', scm)),
])
def _find_issue_management(self):
"""Return an issue management mapping."""
imgt = self._xml.find('issueManagement')
if imgt is None:
return {}
return OrderedDict([
('system', self._get_attribute('system', imgt)),
('url', self._get_attribute('url', imgt)),
])
def _find_ci_management(self):
"""Return a CI mapping."""
cimgt = self._xml.find('ciManagement')
if cimgt is None:
return {}
return OrderedDict([
('system', self._get_attribute('system', cimgt)),
('url', self._get_attribute('url', cimgt)),
])
def _find_repository(self, xpath, xml=None):
"""Return a repository mapping for an xpath."""
if xml is None:
xml = self._xml
repo = xml.find(xpath)
if repo is None:
return {}
return OrderedDict([
('id', self._get_attribute('id', repo)),
('name', self._get_attribute('name', repo)),
('url', self._get_attribute('url', repo)),
])
def _find_distribution_management(self):
"""Return a distribution management mapping."""
dmgt = self._xml.find('distributionManagement')
if dmgt is None:
return {}
return OrderedDict([
('download_url', self._get_attribute('distributionManagement/downloadUrl')),
('site', self._find_repository('distributionManagement/site')),
('repository', self._find_repository('distributionManagement/repository')),
('snapshot_repository', self._find_repository('distributionManagement/snapshotRepository'))
])
def _find_repositories(self, key='repositories/repository'):
"""Return an iterable or repository mappings for an xpath."""
for repo in self._xml.findall(key):
rep = self._find_repository('.', repo)
if rep:
yield rep
def to_dict(self):
"""
Return a mapping representing this POM.
"""
dependencies = OrderedDict()
for scope, deps in self.dependencies.items():
dependencies[scope] = [
OrderedDict([
('group_id', gid),
('artifact_id', aid),
('version', version),
('required', required),
])
for ((gid, aid, version), required) in deps]
return OrderedDict([
('model_version', self.model_version),
('group_id', self.group_id),
('artifact_id', self.artifact_id),
('version', self.version),
('classifier', self.classifier),
            ('packaging', self.packaging),
('parent', self.parent.to_dict() if self.parent else {}),
('name', self.name),
('description', self.description),
('inception_year', self.inception_year),
('url', self.url),
('organization_name', self.organization_name),
('organization_url', self.organization_url),
('licenses', self.licenses or []),
('developers', self.developers or []),
('contributors', self.contributors or []),
('modules', self.modules or []),
('mailing_lists', self.mailing_lists),
('scm', self.scm),
('issue_management', self.issue_management),
('ci_management', self.ci_management),
('distribution_management', self.distribution_management),
('repositories', self.repositories),
('plugin_repositories', self.plugin_repositories),
            # FIXME: move to proper place in sequence of attributes
('dependencies', dependencies or {}),
])
def _get_substring_expression(text):
"""
Return a tuple of (text, start/end) such that:
- if there is a substring() expression in text, the returned text
has been stripped from it and start/end is a tuple representing
slice indexes for the substring expression.
- if there is no substring() expression in text, text is returned
as-is and start/end is None.
For example:
>>> assert ('pom.artifactId', (8, None)) == _get_substring_expression('pom.artifactId.substring(8)')
>>> assert ('pom.artifactId', None) == _get_substring_expression('pom.artifactId')
"""
key, _, start_end = text.partition('.substring(')
if not start_end:
return text, None
start_end = start_end.rstrip(')')
start_end = [se.strip() for se in start_end.split(',')]
# we cannot parse less than 1 and more than 2 slice indexes
if len(start_end) not in (1, 2):
return text, None
# we cannot parse slice indexes that are not numbers
if not all(se.isdigit() for se in start_end):
return text, None
start_end = [int(se) for se in start_end]
if len(start_end) == 1:
start = start_end[0]
end = None
else:
start, end = start_end
return key, (start, end)
def substring(s, start, end):
"""
Return a slice of s based on start and end indexes (that can be None).
"""
startless = start is None
endless = end is None
if startless and endless:
return s
if endless:
return s[start:]
if startless:
return s[:end]
return s[start:end]
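# Illustrative sketch (not part of the original module): shows how a Maven
# property key carrying a substring() expression is split by
# _get_substring_expression() and then applied with substring(). The key and
# value used here are made up for demonstration.
def _demo_substring_resolution():
    key, start_end = _get_substring_expression('pom.artifactId.substring(8)')
    # key == 'pom.artifactId', start_end == (8, None)
    value = 'sc-lite-engine'  # hypothetical resolved value for pom.artifactId
    resolved = substring(value, *start_end)
    assert (key, resolved) == ('pom.artifactId', 'engine')
    return resolved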
def is_pom(location):
"""
Return True if the file at location is highly likely to be a POM.
"""
if (not filetype.is_file(location)
or not location.endswith(('.pom', 'pom.xml', 'project.xml',))):
return
T = contenttype.get_type(location)
# logger.debug('location: %(location)r, T: %(T)r)' % locals())
if T.is_text and ('xml' in T.filetype_file.lower()
or 'sgml' in T.filetype_file.lower()
or 'xml' in T.filetype_pygment.lower()
or 'genshi' in T.filetype_pygment.lower()):
# check the POM version in the first 100 lines
with codecs.open(location, encoding='utf-8') as pom:
for n, line in enumerate(pom):
if n > 100:
break
if any(x in line for x in
('http://maven.apache.org/POM/4.0.0', '<modelVersion>',)):
return True
def parse_pom(location, check_is_pom=False):
"""
Return a MavenPom object from the Maven POM file at location.
"""
pom = _get_mavenpom(location, check_is_pom)
if not pom:
return {}
return pom.to_dict()
def _get_mavenpom(location, check_is_pom=False):
if check_is_pom and not is_pom(location):
return
pom = MavenPom(location)
pom.resolve()
if check_is_pom and not (pom.model_version and pom.group_id and pom.artifact_id):
return
return pom
def parse(location, check_is_pom=True):
"""
Parse a pom file at location and return a Package or None.
"""
mavenpom = _get_mavenpom(location, check_is_pom=check_is_pom)
if not mavenpom:
return
pom = mavenpom.to_dict()
licenses = []
for lic in pom['licenses']:
licenses.append(models.AssertedLicense(
license=lic['name'],
url=lic['url'],
notice=lic['comments']
))
# FIXME: we are skipping all the organization related fields, roles and the id
authors = []
for dev in pom['developers']:
authors.append(models.Party(
type=models.party_person,
name=dev['name'],
email=dev['email'],
url=dev['url'],
))
# FIXME: we are skipping all the organization related fields and roles
contributors = []
for cont in pom['contributors']:
contributors.append(models.Party(
type=models.party_person,
name=cont['name'],
email=cont['email'],
url=cont['url'],
))
name = pom['organization_name']
url = pom['organization_url']
if name or url:
owners = [models.Party(type=models.party_org, name=name, url=url)]
else:
owners = []
dependencies = OrderedDict()
for scope, deps in pom['dependencies'].items():
scoped_deps = dependencies[scope] = []
for dep in deps:
scoped_deps.append(models.Dependency(
name='{group_id}:{artifact_id}'.format(**dep),
version_constraint=dep['version'],
))
# FIXME: there are still a lot of other data to map in a Package
package = MavenPomPackage(
# FIXME: what is this location about?
location=location,
name='{group_id}:{artifact_id}'.format(**pom),
version=pom['version'],
summary=pom['name'],
description=pom['description'],
homepage_url=pom['url'],
asserted_licenses=licenses,
authors=authors,
owners=owners,
contributors=contributors,
dependencies=dependencies,
)
return package
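# Illustrative sketch (not part of the original module): the intended call
# pattern for parse() and parse_pom(). The location below is hypothetical;
# with check_is_pom=True a file that does not look like a POM yields None
# from parse() and an empty mapping from parse_pom().
def _demo_parse_usage(location='/tmp/example/pom.xml'):
    package = parse(location, check_is_pom=True)
    if package is not None:
        # a MavenPomPackage named '<group_id>:<artifact_id>' with a resolved version
        return package
    # fall back to the raw POM mapping (empty dict if location is not a POM)
    return parse_pom(location, check_is_pom=True)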
class MavenRecognizer(object):
"""
A package recognizer for Maven-based packages.
"""
def __init__(self):
        raise NotImplementedError()
def recon(self, location):
for f in os.listdir(location):
loc = join(location, f)
if not filetype.is_file(loc):
continue
# a pom is an xml doc
            if not is_pom(loc):
continue
if f == 'pom.xml':
# first case: a maven pom.xml inside a META-INF directory
# such as in META-INF/maven/log4j/log4j/pom.xml
# the directory tree has a fixed depth
# as is: META-INF/maven/groupid/artifactid/pom.xml
# this will typically be inside a binary jar, so we should find
# a typical structure above
try:
gggp = dirname(dirname(dirname(dirname(loc))))
if fileutils.file_name(gggp) == 'META-INF':
# recon here: the root of the component is the parent of
# META-INF, return that, with a type and the POM
# metafile to parse.
pass
                except Exception:
pass
                # second case: a maven pom.xml at the root of a component
                # development tree. We should find a few extra clues in the
                # conventional directory structure below; for now we take this
                # as being the component root. Return that, with a type and
                # the POM metafile to parse.
pass
elif f.endswith('.pom'):
# first case: a maven repo layout
# the jars are side-by-side with the pom
# check if there are side-by-side artifacts
jar = loc.replace('.pom', '.jar')
if os.path.exists(jar):
# return that, with a type and the POM metafile to parse.
pass
# second case: a maven .pom nested in META-INF
|
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
from .base import BaseAction, plan, build_walker
from .base import STACK_POLL_TIME
from ..providers.base import Template
from stacker.hooks import utils
from ..exceptions import (
MissingParameterException,
StackDidNotChange,
StackDoesNotExist,
CancelExecution,
)
from ..status import (
NotSubmittedStatus,
NotUpdatedStatus,
DidNotChangeStatus,
SubmittedStatus,
CompleteStatus,
FailedStatus,
SkippedStatus,
PENDING,
WAITING,
SUBMITTED,
INTERRUPTED
)
logger = logging.getLogger(__name__)
def build_stack_tags(stack):
"""Builds a common set of tags to attach to a stack"""
return [{'Key': t[0], 'Value': t[1]} for t in stack.tags.items()]
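# Illustrative sketch (not part of the original module): build_stack_tags()
# only reads a `tags` mapping, so a tiny stand-in is enough to show the
# CloudFormation tag shape it returns. _FakeStack is hypothetical, not a
# stacker class.
def _demo_build_stack_tags():
    class _FakeStack(object):
        tags = {'environment': 'test'}
    tags = build_stack_tags(_FakeStack())
    assert tags == [{'Key': 'environment', 'Value': 'test'}]
    return tags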
def should_update(stack):
"""Tests whether a stack should be submitted for updates to CF.
Args:
stack (:class:`stacker.stack.Stack`): The stack object to check.
Returns:
bool: If the stack should be updated, return True.
"""
if stack.locked:
if not stack.force:
logger.debug("Stack %s locked and not in --force list. "
"Refusing to update.", stack.name)
return False
else:
logger.debug("Stack %s locked, but is in --force "
"list.", stack.name)
return True
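# Illustrative sketch (not part of the original module): a locked stack is
# only updated when it is also in the --force list. _FakeStack is a
# hypothetical stand-in providing just the attributes should_update() reads.
def _demo_should_update():
    class _FakeStack(object):
        name = 'vpc'
        locked = True
        force = False
    assert should_update(_FakeStack()) is False
    _FakeStack.force = True
    assert should_update(_FakeStack()) is True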
def should_submit(stack):
"""Tests whether a stack should be submitted to CF for update/create
Args:
stack (:class:`stacker.stack.Stack`): The stack object to check.
Returns:
bool: If the stack should be submitted, return True.
"""
if stack.enabled:
return True
logger.debug("Stack %s is not enabled. Skipping.", stack.name)
return False
def should_ensure_cfn_bucket(outline, dump):
"""Test whether access to the cloudformation template bucket is required
Args:
outline (bool): The outline action.
dump (bool): The dump action.
Returns:
bool: If access to CF bucket is needed, return True.
"""
return not outline and not dump
def _resolve_parameters(parameters, blueprint):
"""Resolves CloudFormation Parameters for a given blueprint.
Given a list of parameters, handles:
- discard any parameters that the blueprint does not use
- discard any empty values
- convert booleans to strings suitable for CloudFormation
Args:
parameters (dict): A dictionary of parameters provided by the
stack definition
blueprint (:class:`stacker.blueprint.base.Blueprint`): A Blueprint
object that is having the parameters applied to it.
Returns:
dict: The resolved parameters.
"""
params = {}
param_defs = blueprint.get_parameter_definitions()
for key, value in parameters.items():
if key not in param_defs:
logger.debug("Blueprint %s does not use parameter %s.",
blueprint.name, key)
continue
if value is None:
logger.debug("Got None value for parameter %s, not submitting it "
"to cloudformation, default value should be used.",
key)
continue
if isinstance(value, bool):
logger.debug("Converting parameter %s boolean \"%s\" to string.",
key, value)
value = str(value).lower()
params[key] = value
return params
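# Illustrative sketch (not part of the original module): demonstrates the
# three rules applied by _resolve_parameters() (drop keys the blueprint does
# not define, drop None values, stringify booleans) using a minimal stand-in
# for a Blueprint. _FakeBlueprint is hypothetical and only provides the two
# attributes the function touches.
def _demo_resolve_parameters():
    class _FakeBlueprint(object):
        name = 'demo'
        def get_parameter_definitions(self):
            return {'Enabled': {}, 'InstanceType': {}}
    parameters = {
        'Enabled': True,         # boolean -> "true"
        'InstanceType': None,    # dropped: None means "use the default"
        'UnusedKey': 'ignored',  # dropped: not defined by the blueprint
    }
    resolved = _resolve_parameters(parameters, _FakeBlueprint())
    assert resolved == {'Enabled': 'true'}
    return resolved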
class UsePreviousParameterValue(object):
""" A simple class used to indicate a Parameter should use it's existng
value.
"""
pass
def _handle_missing_parameters(parameter_values, all_params, required_params,
existing_stack=None):
"""Handles any missing parameters.
If an existing_stack is provided, look up missing parameters there.
Args:
parameter_values (dict): key/value dictionary of stack definition
parameters
all_params (list): A list of all the parameters used by the
template/blueprint.
required_params (list): A list of all the parameters required by the
template/blueprint.
existing_stack (dict): A dict representation of the stack. If
provided, will be searched for any missing parameters.
Returns:
list of tuples: The final list of key/value pairs returned as a
list of tuples.
Raises:
MissingParameterException: Raised if a required parameter is
still missing.
"""
missing_params = list(set(all_params) - set(parameter_values.keys()))
if existing_stack and 'Parameters' in existing_stack:
stack_parameters = [
p["ParameterKey"] for p in existing_stack["Parameters"]
]
for p in missing_params:
if p in stack_parameters:
logger.debug(
"Using previous value for parameter %s from existing "
"stack",
p
)
parameter_values[p] = UsePreviousParameterValue
final_missing = list(set(required_params) - set(parameter_values.keys()))
if final_missing:
raise MissingParameterException(final_missing)
return list(parameter_values.items())
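# Illustrative sketch (not part of the original module): a parameter missing
# from the stack definition but present on the existing CloudFormation stack
# is carried forward as UsePreviousParameterValue; a required parameter that
# is missing everywhere raises MissingParameterException. The names below are
# made up for demonstration.
def _demo_handle_missing_parameters():
    existing_stack = {
        'Parameters': [{'ParameterKey': 'VpcId', 'ParameterValue': 'vpc-123'}],
    }
    resolved = _handle_missing_parameters(
        parameter_values={'InstanceType': 't2.micro'},
        all_params=['InstanceType', 'VpcId'],
        required_params=['InstanceType', 'VpcId'],
        existing_stack=existing_stack,
    )
    assert dict(resolved)['VpcId'] is UsePreviousParameterValue
    return resolved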
def handle_hooks(stage, hooks, provider, context, dump, outline):
"""Handle pre/post hooks.
Args:
stage (str): The name of the hook stage - pre_build/post_build.
hooks (list): A list of dictionaries containing the hooks to execute.
provider (:class:`stacker.provider.base.BaseProvider`): The provider
the current stack is using.
context (:class:`stacker.context.Context`): The current stacker
context.
dump (bool): Whether running with dump set or not.
outline (bool): Whether running with outline set or not.
"""
if not outline and not dump and hooks:
utils.handle_hooks(
stage=stage,
hooks=hooks,
provider=provider,
context=context
)
class Action(BaseAction):
"""Responsible for building & coordinating CloudFormation stacks.
Generates the build plan based on stack dependencies (these dependencies
are determined automatically based on output lookups from other stacks).
The plan can then either be printed out as an outline or executed. If
executed, each stack will get launched in order which entails:
- Pushing the generated CloudFormation template to S3 if it has changed
- Submitting either a build or update of the given stack to the
:class:`stacker.provider.base.Provider`.
"""
def build_parameters(self, stack, provider_stack=None):
"""Builds the CloudFormation Parameters for our stack.
Args:
stack (:class:`stacker.stack.Stack`): A stacker stack
provider_stack (dict): An optional Stacker provider object
Returns:
dict: The parameters for the given stack
"""
resolved = _resolve_parameters(stack.parameter_values, stack.blueprint)
required_parameters = list(stack.required_parameter_definitions)
all_parameters = list(stack.all_parameter_definitions)
parameters = _handle_missing_parameters(resolved, all_parameters,
required_parameters,
provider_stack)
param_list = []
for key, value in parameters:
param_dict = {"ParameterKey": key}
if value is UsePreviousParameterValue:
param_dict["UsePreviousValue"] = True
else:
param_dict["ParameterValue"] = str(value)
param_list.append(param_dict)
return param_list
def _launch_stack(self, stack, **kwargs):
"""Handles the creating or updating of a stack in CloudFormation.
Also makes sure that we don't try to create or update a stack while
it is already updating or creating.
"""
old_status = kwargs.get("status")
wait_time = 0 if old_status is PENDING else STACK_POLL_TIME
if self.cancel.wait(wait_time):
return INTERRUPTED
if not should_submit(stack):
return NotSubmittedStatus()
provider = self.build_provider(stack)
try:
provider_stack = provider.get_stack(stack.fqn)
except StackDoesNotExist:
provider_stack = None
if provider_stack and not should_update(stack):
stack.set_outputs(
self.provider.get_output_dict(provider_stack))
return NotUpdatedStatus()
recreate = False
if provider_stack and old_status == SUBMITTED:
logger.debug(
"Stack %s provider status: %s",
stack.fqn,
provider.get_stack_status(provider_stack),
)
if provider.is_stack_rolling_back(provider_stack):
if 'rolling back' in old_status.reason:
return old_status
logger.debug("Stack %s entered a roll back", stack.fqn)
if 'updating' in old_status.reason:
reason = 'rolling back update'
else:
reason = 'rolling back new stack'
return SubmittedStatus(reason)
elif provider.is_stack_in_progress(provider_stack):
logger.debug("Stack %s in progress.", stack.fqn)
return old_status
elif provider.is_stack_destroyed(provider_stack):
logger.debug("Stack %s finished deleting", stack.fqn)
recreate = True
# Continue with creation afterwards
# Failure must be checked *before* completion, as both will be true
# when completing a rollback, and we don't want to consider it as
# a successful update.
elif provider.is_stack_failed(provider_stack):
reason = old_status.reason
if 'rolling' in reason:
reason = reason.replace('rolling', 'rolled')
status_reason = provider.get_rollback_status_reason(stack.fqn)
                logger.info(
                    "%s Stack Roll Back Reason: %s", stack.fqn, status_reason)
return FailedStatus(reason)
elif provider.is_stack_completed(provider_stack):
stack.set_outputs(
provider.get_output_dict(provider_stack))
return CompleteStatus(old_status.reason)
else:
return old_status
logger.debug("Resolving stack %s", stack.fqn)
stack.resolve(self.context, self.provider)
logger.debug("Launching stack %s now.", stack.fqn)
template = self._template(stack.blueprint)
stack_policy = self._stack_policy(stack)
tags = build_stack_tags(stack)
parameters = self.build_parameters(stack, provider_stack)
force_change_set = stack.blueprint.requires_change_set
if recreate:
logger.debug("Re-creating stack: %s", stack.fqn)
provider.create_stack(stack.fqn, template, parameters,
tags, stack_policy=stack_policy)
return SubmittedStatus("re-creating stack")
elif not provider_stack:
logger.debug("Creating new stack: %s", stack.fqn)
provider.create_stack(stack.fqn, template, parameters, tags,
force_change_set,
stack_policy=stack_policy,
notification_arns=stack.notification_arns)
return SubmittedStatus("creating new stack")
try:
wait = stack.in_progress_behavior == "wait"
if wait and provider.is_stack_in_progress(provider_stack):
return WAITING
if provider.prepare_stack_for_update(provider_stack, tags):
existing_params = provider_stack.get('Parameters', [])
provider.update_stack(
stack.fqn,
template,
existing_params,
parameters,
tags,
force_interactive=stack.protected,
force_change_set=force_change_set,
stack_policy=stack_policy,
notification_arns=stack.notification_arns
)
logger.debug("Updating existing stack: %s", stack.fqn)
return SubmittedStatus("updating existing stack")
else:
return SubmittedStatus("destroying stack for re-creation")
except CancelExecution:
stack.set_outputs(provider.get_output_dict(provider_stack))
return SkippedStatus(reason="canceled execution")
except StackDidNotChange:
stack.set_outputs(provider.get_output_dict(provider_stack))
return DidNotChangeStatus()
def _template(self, blueprint):
"""Generates a suitable template based on whether or not an S3 bucket
is set.
If an S3 bucket is set, then the template will be uploaded to S3 first,
and CreateStack/UpdateStack operations will use the uploaded template.
        If no bucket is set, then the template will be inlined.
"""
if self.bucket_name:
return Template(url=self.s3_stack_push(blueprint))
else:
return Template(body=blueprint.rendered)
def _stack_policy(self, stack):
"""Returns a Template object for the stacks stack policy, or None if
the stack doesn't have a stack policy."""
if stack.stack_policy:
return Template(body=stack.stack_policy)
def _generate_plan(self, tail=False):
return plan(
description="Create/Update stacks",
stack_action=self._launch_stack,
tail=self._tail_stack if tail else None,
context=self.context)
def pre_run(self, outline=False, dump=False, *args, **kwargs):
"""Any steps that need to be taken prior to running the action."""
if should_ensure_cfn_bucket(outline, dump):
self.ensure_cfn_bucket()
hooks = self.context.config.pre_build
handle_hooks(
"pre_build",
hooks,
self.provider,
self.context,
dump,
outline
)
def run(self, concurrency=0, outline=False,
tail=False, dump=False, *args, **kwargs):
"""Kicks off the build/update of the stacks in the stack_definitions.
This is the main entry point for the Builder.
"""
plan = self._generate_plan(tail=tail)
if not plan.keys():
            logger.warning('No stacks detected (error in config?)')
if not outline and not dump:
plan.outline(logging.DEBUG)
logger.debug("Launching stacks: %s", ", ".join(plan.keys()))
walker = build_walker(concurrency)
plan.execute(walker)
else:
if outline:
plan.outline()
if dump:
plan.dump(directory=dump, context=self.context,
provider=self.provider)
def post_run(self, outline=False, dump=False, *args, **kwargs):
"""Any steps that need to be taken after running the action."""
hooks = self.context.config.post_build
handle_hooks(
"post_build",
hooks,
self.provider,
self.context,
dump,
outline
)
|
|
#!/usr/bin/env python
import argparse
import os.path
import re
import subprocess
import sys
import threading
import traceback
import tkinter as tk
import tkinter.filedialog
import tkinter.messagebox
import tkinter.scrolledtext
import tkinter.simpledialog
from truce.catcher import Catcher
VERSION = [0, 2, 1]
ABANDON_MSG = 'Abandon unsaved changes?'
def signature():
return '{} {}.{}.{}'.format(os.path.basename(sys.argv[0]), *VERSION)
ABOUT = """{}
http://github.com/jangler/truce-py""".format(signature())
class App(tk.Frame):
def __init__(self, master=None):
tk.Frame.__init__(self, master)
self.pack(expand=1, fill='both')
self.createWidgets()
self.filename = None
self.regexp = None
self.settitle()
master.protocol("WM_DELETE_WINDOW", self.quit)
def createWidgets(self):
self.menu = tk.Menu(self)
filemenu = tk.Menu(self.menu, tearoff=0)
filemenu.add_command(label='New', underline=0, command=self.newfile,
accelerator='Ctrl+N')
self.bind_all('<Control-n>', self.newfile)
filemenu.add_command(label='Open...', underline=0, command=self.open,
accelerator='Ctrl+O')
self.bind_all('<Control-o>', self.open)
filemenu.add_separator()
filemenu.add_command(label='Save', underline=0, command=self.save,
accelerator='Ctrl+S')
self.bind_all('<Control-s>', self.save)
filemenu.add_command(label='Save As...', underline=5,
command=self.saveas, accelerator='Ctrl+Shift+S')
self.bind_all('<Control-S>', self.saveas)
filemenu.add_separator()
filemenu.add_command(label='Quit', underline=0, accelerator='Ctrl+Q',
command=self.quit)
self.bind_all('<Control-q>', self.quit)
filemenu.add_command(label='Force Quit', underline=0,
accelerator='Ctrl+Shift+Q',
command=self.powerquit)
self.bind_all('<Control-Q>', self.powerquit)
self.menu.add_cascade(label='File', underline=0, menu=filemenu)
editmenu = tk.Menu(self.menu, tearoff=0)
editmenu.add_command(label='Undo', underline=0, command=self.undo,
accelerator='Ctrl+Z')
editmenu.add_command(label='Redo', underline=0, command=self.redo,
accelerator='Ctrl+Y')
editmenu.add_separator()
editmenu.add_command(label='Pipe...', underline=0, command=self.pipe,
accelerator='Ctrl+P')
self.bind_all('<Control-p>', self.pipe)
self.menu.add_cascade(label='Edit', underline=0, menu=editmenu)
selectmenu = tk.Menu(self.menu, tearoff=0)
selectmenu.add_command(label='All', underline=0,
command=self.selectall, accelerator='Ctrl+A')
self.bind_all('<Control-a>', self.selectall)
selectmenu.add_separator()
selectmenu.add_command(label='Find...', underline=0, command=self.find,
accelerator='Ctrl+F')
self.bind_all('<Control-f>', self.find)
self.bind_all('<Control-slash>', self.find)
selectmenu.add_command(label='Next Match', underline=0,
command=self.nextmatch, accelerator='Alt+N')
self.bind_all('<Alt-n>', self.nextmatch)
selectmenu.add_command(label='Previous Match', underline=0,
command=self.prevmatch,
accelerator='Alt+Shift+N')
self.bind_all('<Alt-N>', self.prevmatch)
selectmenu.add_separator()
selectmenu.add_command(label='Go to Line...', underline=0,
command=self.gotoline, accelerator='Ctrl+G')
self.bind_all('<Control-g>', self.gotoline)
self.menu.add_cascade(label='Select', underline=0, menu=selectmenu)
helpmenu = tk.Menu(self.menu, tearoff=0)
helpmenu.add_command(label='About...', underline=0, command=self.about)
self.menu.add_cascade(label='Help', underline=0, menu=helpmenu)
root.config(menu=self.menu)
barframe = tk.Frame(self)
barframe.pack(side='bottom', fill='x')
self.status = tkinter.Label(barframe, text='', relief='sunken',
anchor='w')
self.status.pack(side='left', fill='x', expand=1)
self.rowcol = tkinter.Label(barframe, text='', relief='sunken',
anchor='e')
self.rowcol.pack(side='right')
self.bind_all('<Key>', self.refresh)
self.bind_all('<Button-1>', self.refresh)
self.bind_all('<ButtonRelease-1>', self.refresh)
self.textin = tk.Text(self, height=0, undo=1)
self.textin.bind('<Return>', self.sendtext)
# self.textin.pack(side='bottom', fill='x')
self.textout = tkinter.scrolledtext.ScrolledText(self, undo=1)
self.textout.bind('<Return>', self.autoindent)
self.textout.pack(side='bottom', expand=1, fill='both')
for widget in (self.textin, self.textout):
widget.bind('<Control-z>', self.undo)
widget.bind('<Control-y>', self.redo)
widget.bind('<Control-Z>', self.redo)
widget.bind('<Control-o>', self.open)
widget.bind('<Control-v>', self.deletesel)
widget.bind('<Control-a>', self.selectall)
widget.bind('<Control-f>', self.find)
widget.bind('<Control-slash>', self.find)
widget.bind('<Alt-n>', self.nextmatch)
widget.bind('<Alt-N>', self.prevmatch)
widget.bind('<Control-n>', self.newfile)
widget.bind('<Control-p>', self.pipe)
widget.bind('<Control-w>', self.deleteword)
widget.bind('<Control-u>', self.deleteline)
def refresh(self, event=None):
self.rowcol['text'] = self.textout.index('insert').replace('.', ', ')
def about(self):
tkinter.messagebox.showinfo('About', ABOUT)
def state(self, text=''):
self.status['text'] = text
def settitle(self):
if self.filename:
self.master.title(os.path.basename(self.filename))
else:
self.master.title(signature())
def abandon(self):
if not (self.filename and self.textout.edit_modified()):
return True
elif tkinter.messagebox.askokcancel(ABANDON_MSG, ABANDON_MSG):
return True
return False
def error(self, e):
self.state()
traceback.print_exc()
tkinter.messagebox.showerror(type(e).__name__, str(e))
def newfile(self, event=None):
if not self.abandon():
return 'break'
self.state()
self.textout.delete('1.0', 'end')
self.textout.edit_modified(0)
self.textout.edit_reset()
self.filename = None
self.settitle()
def readin(self, filename, quiet=False):
self.state('Opening...')
try:
with open(filename) as f:
text = f.read()
except Exception as e:
if quiet:
self.state('New file "{}".'.format(os.path.basename(filename)))
else:
self.error(e)
return
self.textout.replace('1.0', 'end', text)
self.textout.delete('end - 1 char', 'end') # delete extra newline
self.state('Opened "{}".'.format(os.path.basename(filename)))
self.textout.edit_modified(0)
self.textout.edit_reset()
self.filename = filename
self.settitle()
self.textout.mark_set('insert', '1.0')
self.textout.see('insert')
def open(self, event=None):
if self.abandon():
filename = tkinter.filedialog.askopenfilename()
if filename:
self.readin(filename)
self.refresh()
return 'break'
def save(self, event=None):
if self.filename:
self.writeout(self.filename)
else:
self.saveas(event)
def saveas(self, event=None):
filename = tkinter.filedialog.asksaveasfilename()
if filename:
self.writeout(filename)
def writeout(self, filename):
self.state('Saving...')
try:
with open(filename, 'w') as f:
f.write(self.textout.get('1.0', 'end'))
self.state('Saved "{}".'.format(os.path.basename(filename)))
self.textout.edit_modified(0)
self.filename = filename
self.settitle()
except Exception as e:
self.error(e)
def search(self, regexp, backwards=False):
widget = self.geteditfocus()
offset = '-1c' if backwards else '+1c'
index = widget.search(regexp, 'insert{}'.format(offset),
backwards=backwards, regexp=True)
if index:
widget.mark_set('insert', index)
widget.see(index)
widget.tag_remove('sel', '1.0', 'end')
text = widget.get('insert', 'end')
match = re.match(regexp, text, flags=re.MULTILINE)
if match:
length = len(match.group(0))
widget.tag_add('sel', 'insert',
'insert+{}c'.format(length))
self.state()
else:
self.state('No matches for "{}".'.format(regexp))
self.regexp = regexp
def find(self, event=None):
widget = self.geteditfocus()
try:
regexp = tkinter.simpledialog.askstring(
'Find', 'Search for regexp:')
if regexp:
self.search(regexp)
except tkinter.TclError:
pass
widget.focus()
self.refresh()
return 'break'
def refind(self, backwards=False):
if self.regexp:
self.search(self.regexp, backwards=backwards)
else:
self.state('Nothing to find.')
def nextmatch(self, event=None):
self.refind()
self.refresh()
return 'break'
def prevmatch(self, event=None):
self.refind(backwards=True)
self.refresh()
return 'break'
def gotoline(self, event=None):
try:
line = tkinter.simpledialog.askinteger(
'Go to Line', 'Go to line number:')
if line or line == 0:
index = '{}.end'.format(line)
self.textout.mark_set('insert', index)
self.textout.see(index)
self.textout.tag_remove('sel', '1.0', 'end')
except tkinter.TclError:
pass
self.textout.focus()
def sendtext(self, event):
self.textout.insert('end', self.textin.get('1.0', 'end'))
self.textin.delete('1.0', 'end')
self.refresh()
return 'break'
def autoindent(self, event):
line = self.textout.get('insert linestart', 'insert lineend')
indent = re.match('^[\t ]*', line).group(0)
if re.match('^( |\t)+$', line):
self.textout.replace('insert linestart', 'insert lineend',
'\n' + indent)
else:
self.textout.insert('insert', '\n' + indent)
self.textout.see('insert')
self.refresh()
return 'break'
def geteditfocus(self):
widget = self.focus_get()
if widget not in (self.textin, self.textout):
widget = self.textout
return widget
def deletesel(self, event):
try:
event.widget.delete('sel.first', 'sel.last')
except tkinter.TclError:
pass
def backup(self, widget, dist, rule):
while True:
c = widget.get('insert-{}c'.format(dist + 1),
'insert-{}c'.format(dist))
if not c or not rule(c):
break
dist += 1
return dist, c
def deleteword(self, event):
dist, char = self.backup(event.widget, 0, lambda c: c.isspace())
wordrule = lambda c: c.isalnum() or c == '_'
nonwordrule = lambda c: not (c.isalnum() or c == '_' or c.isspace())
if char.isalnum() or char == '_':
dist, _ = self.backup(event.widget, dist, wordrule)
dist, _ = self.backup(event.widget, dist, nonwordrule)
else:
dist, _ = self.backup(event.widget, dist, nonwordrule)
dist, _ = self.backup(event.widget, dist, wordrule)
event.widget.delete('insert-{}c'.format(dist), 'insert')
def deleteline(self, event):
event.widget.delete('insert linestart', 'insert')
def selectall(self, event=None):
widget = self.geteditfocus()
widget.tag_add('sel', '1.0', 'end')
self.refresh()
return 'break'
def pipecmd(self, widget, cmd, intext):
self.state('Running `{}`...'.format(cmd))
try:
text = subprocess.check_output(cmd, input=intext, shell=True,
universal_newlines=True, timeout=5)
except subprocess.SubprocessError as e:
self.error(e)
return
if text.endswith('\n'):
text = text[:len(text)-1]
try:
widget.mark_set('insert', 'sel.first')
widget.replace('sel.first', 'sel.last', text)
widget.tag_add('sel', 'insert-{}c'.format(len(text)), 'insert')
except tkinter.TclError:
widget.insert('insert', text)
self.state()
def pipe(self, event=None):
widget = self.geteditfocus()
try:
cmd = tkinter.simpledialog.askstring(
'Pipe', 'Pipe selection through command:')
if cmd:
intext = ''
try:
intext = widget.get('sel.first', 'sel.last')
except tkinter.TclError:
pass
threading.Thread(target=self.pipecmd,
args=[widget, cmd, intext]).start()
except tkinter.TclError:
pass
widget.focus()
self.refresh()
return 'break'
def undo(self, event=None):
widget = self.geteditfocus()
try:
widget.edit_undo()
self.state()
except tkinter.TclError as e:
self.state('{}.'.format(str(e).capitalize()))
self.refresh()
return 'break'
def redo(self, event=None):
widget = self.geteditfocus()
try:
widget.edit_redo()
self.state()
except tkinter.TclError as e:
self.state('{}.'.format(str(e).capitalize()))
self.refresh()
return 'break'
def quit(self, event=None):
if self.abandon():
super().quit()
def powerquit(self, event=None):
super().quit()
def parseargs():
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version=signature())
parser.add_argument('file', type=str, nargs='?', help='file to edit')
return parser.parse_args()
def main():
global root
args = parseargs()
root = tk.Tk()
tk.CallWrapper = Catcher
app = App(master=root)
    def excepthook(exctype, value, tb):
app.error(value)
sys.excepthook = excepthook
if args.file:
app.readin(args.file, quiet=True)
app.filename = args.file
app.settitle()
try:
app.mainloop()
except KeyboardInterrupt:
super(tk.Frame, app).quit()
print()
|
|
# -*- coding: utf-8 -*-
'''
IO data from/to multiple sensors
Written by Laurent Fournier, October 2016
'''
from copy import deepcopy
from multiprocessing import Process
from Queue import Queue
from threading import Timer, Thread
import os, sys
import argparse
import datetime
import json
import subprocess
import signal
signal.signal(signal.SIGINT, signal.default_int_handler)
# Kivy libraries
from kivy.app import App
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.properties import (ListProperty, NumericProperty, ObjectProperty, OptionProperty, StringProperty)
from kivy.storage.jsonstore import JsonStore
from kivy.config import Config
Config.set('graphics', 'width', '800')
Config.set('graphics', 'height', '416')
Config.set('graphics', 'multisamples', 0)
Config.set('graphics', 'fullscreen', 'auto')
from kivy.uix.accordion import Accordion, AccordionItem
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.listview import ListItemButton
from kivy.uix.modalview import ModalView
from kivy.uix.popup import Popup
from kivy.uix.settings import Settings, SettingsWithSidebar
from kivy.uix.switch import Switch
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.widget import Widget
# External libraries
import log_manager as lm
#-------------------------------------------------------------
#--------------------- Configurations ------------------------
#-------------------------------------------------------------
############
# Terminal #
############
parser = argparse.ArgumentParser(description = '')
parser.add_argument('-d', '--debug', type=bool, help='Stderr outputs', default=False, choices=[True])
args = parser.parse_args()
############
# Settings #
############
DEBUG = args.debug
CONFIG = False
BAUD = 9600
TIMEOUT = 5.0
PORT = [ '/dev/ttyUSB0', '/dev/ttyUSB1', '/dev/ttyUSB2', '/dev/ttyUSB3',
'0x64', '0x65' ]
DEVICE = [ '820', '840', '6262', '7000',
'i2c1', 'i2c2' ]
args_device = { 'port':PORT[0], 'baud':BAUD, 'timeout':TIMEOUT,
'config':CONFIG, 'debug':DEBUG, 'device':DEVICE[0],
'state':False}
q_data = Queue()
q_header = Queue()
devices = []
exitFlag = False
li8xFlag = False
li6xFlag = False
i2cFlag = False
probeCnt = -1
#-------------------------------------------------------------
#-------------------- Debugging program ----------------------
#-------------------------------------------------------------
def DebugApp():
global exitFlag, li8xFlag, li6xFlag, i2cFlag, probeCnt
global DEBUG, CONFIG, BAUD, TIMEOUT, PORT, DEVICE
global args_device, q_data, q_header, devices
os.system('clear')
while not exitFlag:
        if li8xFlag: print ("Li820: Active")
        else: print ("Li820: Inactive")
        if li6xFlag: print ("Li6262: Active")
        else: print ("Li6262: Inactive")
        if i2cFlag: print ("I2C: Active")
        else: print ("I2C: Inactive")
print ("____________________________________________________________\n")
user_input = raw_input("\t|-----------------|\n"
"\t| 0. Execute |\n"
"\t| --------------- |\n"
"\t| 1. Licor 820 |\n"
"\t| 2. Licor 6262 |\n"
"\t| 3. I2C |\n"
"\t| --------------- |\n"
"\t| Q. Exit Program |\n"
"\t|-----------------|\n")
os.system('clear')
        if user_input == '0':
logger = lm.logManager((q_data, q_header), devices, DEBUG)
logger.start()
t_logg = Process(target=logger.read)
t_logg.start()
        elif user_input == '1':
probeCnt += 1
args_device['id'] = probeCnt
args_device['name'] = 'Licor820'
args_device['port'] = PORT[0]
args_device['device'] = DEVICE[0]
devices.append(deepcopy(args_device))
if not li8xFlag: li8xFlag = True
else: li8xFlag = False
        elif user_input == '2':
probeCnt += 1
args_device['id'] = probeCnt
args_device['name'] = 'Licor6262'
args_device['port'] = PORT[1]
args_device['device'] = DEVICE[2]
devices.append(deepcopy(args_device))
if not li6xFlag: li6xFlag = True
else: li6xFlag = False
        elif user_input == '3':
probeCnt += 1
args_device['id'] = probeCnt
args_device['name'] = 'I2C'
args_device['port'] = PORT[4]
args_device['device'] = DEVICE[4]
devices.append(deepcopy(args_device))
if not i2cFlag: i2cFlag = True
else: i2cFlag = False
        elif user_input in ('q', 'Q'):
t_logg.terminate()
logger.stop()
exitFlag = True
else: pass
#-------------------------------------------------------------
#----------------------- Main program ------------------------
#-------------------------------------------------------------
class ThreadData(BoxLayout):
    pass
class ThreadPlot(BoxLayout):
    pass
class AgusRoot(TabbedPanel):
    label_wid1 = ObjectProperty()
    switch_wid1 = ObjectProperty()
    spin_wid1 = ObjectProperty()
    label_wid2 = ObjectProperty()
    switch_wid2 = ObjectProperty()
    spin_wid2 = ObjectProperty()
carousel = ObjectProperty()
info1 = StringProperty()
info2 = StringProperty()
no_Device = False
def get_probes(self):
self.set_device(devices[0], 'state', self.switch_wid1.active)
self.set_device(devices[0], 'port', self.spin_wid1.text)
self.label_wid1.text = 'Licor {} - {} - {}'.format(self.get_device(devices[0], 'device'), self.switch_wid1.active, self.get_device(devices[0], 'port'))
self.info1 = str(self.get_device(devices[0], 'state'))
self.set_device(devices[1], 'state', self.switch_wid2.active)
self.set_device(devices[1], 'port', self.spin_wid2.text)
self.label_wid2.text = 'Licor {} - {} - {}'.format(self.get_device(devices[1], 'device'), self.switch_wid2.active, self.get_device(devices[1], 'port'))
self.info2 = str(self.get_device(devices[1], 'state'))
self.set_probes()
def set_probes(self):
try:
self.logger = lm.logManager((q_data, q_header), devices, DEBUG)
self.logger.start()
            self.t_logg = Process(target=self.logger.read)
self.t_logg.start()
        except Exception:
            self.no_Device = True
finally:
pass
def exit_app(self):
self.t_logg.terminate()
self.logger.stop()
sys.exit()
def get_ports(self):
# get active ports as text label:
result1_1 = self.get_device(devices[0], 'port')
result1_2 = self.get_device(devices[1], 'port')
self.spin_wid1.text = str(result1_1)
self.spin_wid2.text = str(result1_2)
# get all other ports as list of values:
result2 = PORT
self.spin_wid1.values = map(str, result2)
self.spin_wid2.values = map(str, result2)
def set_ports(self):
        pass  # TODO: implement
def get_data(self):
        pass  # TODO: implement
def set_data(self):
        pass  # TODO: implement
def get_device(self, device, tag):
return device[tag]
def set_device(self, device, tag, content):
device[tag] = content
def open_fexpl(self):
subprocess.Popen(['xdg-open', "logs"])
class AgusApp(App):
def build_config(self, config):
config.setdefaults('General', {'gps' : 'Enabled', 'fullscreen' : 'True'})
def build_settings(self, settings):
settings.add_json_panel("a.gus", self.config, data = """
[
{"type": "options", "title": "GPS", "section": "General", "key": "gps", "options": ["Enabled", "Disabled"]},
{"type": "options", "title": "Fullscreen", "section": "General", "key": "fullscreen", "options": ["True", "False"]}
]""")
def on_config_change(self, config, section, key, value):
if config is self.config:
if (key == "gps"):
try:
'''self.root.current_weather.update_weather()
self.root.forecast.update_weather()'''
except AttributeError:
pass
if __name__ == '__main__':
if DEBUG:
DebugApp()
else:
args_device['name'] = 'Licor820'
args_device['port'] = PORT[0]
args_device['device'] = DEVICE[0]
devices.append(deepcopy(args_device))
args_device['name'] = 'Licor6262'
args_device['port'] = PORT[1]
args_device['device'] = DEVICE[2]
devices.append(deepcopy(args_device))
AgusApp().run()
|
|
# Copyright 2015-2018 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from fake_switches.juniper.juniper_netconf_datastore import resolve_new_value, NS_JUNOS, resolve_operation, parse_range, \
val, _restore_protocols_specific_data
from fake_switches.juniper_qfx_copper.juniper_qfx_copper_netconf_datastore import JuniperQfxCopperNetconfDatastore
from fake_switches.netconf import NetconfError, XML_ATTRIBUTES, first
from fake_switches.switch_configuration import AggregatedPort, VlanPort
from netaddr import IPNetwork
class JuniperMxNetconfDatastore(JuniperQfxCopperNetconfDatastore):
VLANS_COLLECTION = "bridge-domains"
VLANS_COLLECTION_OBJ = "domain"
ETHERNET_SWITCHING_TAG = "bridge"
MAX_AGGREGATED_ETHERNET_INTERFACES = 4091
ETHER_OPTIONS_TAG = "gigether-options"
MAX_PHYSICAL_PORT_NUMBER = 63
MAX_MTU = 16360
def parse_vlan_members(self, port, port_attributes):
vlan_node = first(port_attributes.xpath("vlan-id"))
if vlan_node is not None:
if resolve_operation(vlan_node) == "delete":
port.access_vlan = None
else:
port.access_vlan = vlan_node.text
for member in port_attributes.xpath("vlan-id-list"):
if resolve_operation(member) == "delete":
if member.text:
port.trunk_vlans.remove(int(member.text))
if len(port.trunk_vlans) == 0:
port.trunk_vlans = None
else:
port.trunk_vlans = None
else:
if port.trunk_vlans is None:
port.trunk_vlans = []
port.trunk_vlans += parse_range(member.text)
def ethernet_switching_to_etree(self, port, interface_data):
ethernet_switching = []
if port.mode is not None:
ethernet_switching.append({self.PORT_MODE_TAG: port.mode})
if port.access_vlan:
ethernet_switching.append({"vlan-id": str(port.access_vlan)})
if port.trunk_vlans:
ethernet_switching += [{"vlan-id-list": str(v)} for v in port.trunk_vlans]
if ethernet_switching:
interface_data.append({"unit": {
"name": "0",
"family": {
self.ETHERNET_SWITCHING_TAG: ethernet_switching
}
}})
def member_list_trunk_vlan_error(self, port):
return FailingCommitResults([TrunkShouldHaveVlanMembers(interface=port.name),
ConfigurationCheckOutFailed()])
def validate_vlan_config(self, port, vlan_list):
pass
def parse_interface(self, conf, interface_node):
port_name = val(interface_node, "name")
if port_name == "irb":
operation = resolve_operation(interface_node)
if operation == "delete":
for port in conf.get_vlan_ports():
conf.remove_port(port)
else:
self.parse_vlan_interfaces(conf, interface_node)
else:
super(JuniperMxNetconfDatastore, self).parse_interface(conf, interface_node)
def parse_vlan_interfaces(self, conf, interface_node):
for unit_node in interface_node.xpath("unit/name/.."):
unit_id = val(unit_node, "name")
port_name = "irb.{}".format(unit_id)
port = conf.get_port(port_name)
if port is None:
linked_vlan = find_vlan_with_routing_interface(conf, port_name)
port = self.original_configuration.new("VlanPort",
vlan_id=linked_vlan.number if linked_vlan else None,
name=port_name)
port.vendor_specific["irb-unit"] = unit_id
conf.add_port(port)
            inet = first(unit_node.xpath("family/inet"))
if inet is not None:
if first(inet.xpath("no-redirects")) is not None:
if resolve_operation(first(inet.xpath("no-redirects"))) == "delete":
port.ip_redirect = True
else:
port.ip_redirect = False
for address in inet.xpath("address/name/.."):
ip = IPNetwork(val(address, "name"))
if resolve_operation(address) == "delete":
port.remove_ip(ip)
else:
port.add_ip(ip)
for vrrp_node in address.xpath("vrrp-group/name/.."):
group_id = val(vrrp_node, "name")
vrrp_group = port.get_vrrp_group(group_id)
if vrrp_group is not None and resolve_operation(vrrp_node) == "delete":
port.vrrps.remove(vrrp_group)
else:
if vrrp_group is None:
vrrp_group = self.original_configuration.new("VRRP", group_id=group_id)
port.vrrps.append(vrrp_group)
vrrp_group.related_ip_network = ip
vrrp_group.ip_addresses = [vip.text for vip in vrrp_node.xpath("virtual-address")
if resolve_operation(vip) != "delete"]
vrrp_group.priority = resolve_new_value(vrrp_node, "priority", vrrp_group.priority)
vrrp_group.preempt_delay_minimum = resolve_new_value(vrrp_node, "preempt/hold-time", vrrp_group.preempt_delay_minimum)
if resolve_operation(first(vrrp_node.xpath("preempt"))) == "delete":
vrrp_group.preempt_delay_minimum = None
if first(vrrp_node.xpath("accept-data")) is not None:
if resolve_operation(first(vrrp_node.xpath("accept-data"))) == "delete":
vrrp_group.vendor_specific.pop("accept-data")
else:
vrrp_group.vendor_specific["accept-data"] = True
elif "accept-data" in vrrp_group.vendor_specific:
vrrp_group.vendor_specific.pop("accept-data")
vrrp_group.vendor_specific["authentication-type"] = resolve_new_value(vrrp_node, "authentication-type", vrrp_group.vendor_specific.get("authentication-type"))
if vrrp_group.vendor_specific["authentication-type"] is None:
vrrp_group.vendor_specific.pop("authentication-type")
vrrp_group.authentication = resolve_new_value(vrrp_node, "authentication-key", vrrp_group.authentication)
track = first(vrrp_node.xpath("track"))
if track is not None:
if resolve_operation(track) == "delete":
vrrp_group.track = {}
else:
vrrp_group.track = {val(track, "route/route_address"): val(track, "route/priority-cost")}
def _validate(self, conf):
ips = []
for p in conf.ports:
if hasattr(p, "ips"):
ips = ips + p.ips
if not len(ips) == len(set(ips)):
raise IpAlreadyInUse("Overlapping subnet is configured")
return super(JuniperMxNetconfDatastore, self)._validate(conf)
def handle_interface_operation(self, conf, operation, port):
if operation == 'delete' and isinstance(port, AggregatedPort):
conf.remove_port(port)
elif operation in ("delete", "replace"):
backup = deepcopy(vars(port))
port.reset()
_restore_protocols_specific_data(backup, port)
def parse_vlan_attributes(self, conf, vlan, vlan_node):
vlan.number = resolve_new_value(vlan_node, "vlan-id", vlan.number, transformer=int)
vlan.description = resolve_new_value(vlan_node, "description", vlan.description)
vlan.vendor_specific["linked-port-vlan"] = resolve_new_value(vlan_node, "routing-interface",
vlan.vendor_specific.get("linked-port-vlan"))
if vlan.vendor_specific["linked-port-vlan"]:
for port in conf.get_vlan_ports():
if port.name == vlan.vendor_specific["linked-port-vlan"]:
port.vlan_id = vlan.number
def _extract_interfaces(self, source):
interfaces = []
vlan_ports = []
for port in source.ports:
if isinstance(port, VlanPort):
vlan_ports.append(port)
else:
interface_node = self.interface_to_etree(port)
if interface_node:
interfaces.append({"interface": interface_node})
interface_node = self.to_irb_interfaces(vlan_ports)
if interface_node:
interfaces.append({"interface": interface_node})
return interfaces
def to_irb_interfaces(self, vlan_ports):
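# Render every VlanPort as a unit under a single "irb" interface; returns None when there is nothing to emit.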
units = []
for vlan_port in vlan_ports:
unit = {
"name": vlan_port.vendor_specific["irb-unit"]
}
inet = []
if vlan_port.ips:
inet.extend([{"address": self._address_etree(ip, vlan_port)} for ip in vlan_port.ips])
if vlan_port.ip_redirect is False:
inet.append({"no-redirects": {}})
if inet:
unit["family"] = {
"inet": inet
}
units.append({"unit": unit})
if units:
units.insert(0, {"name": "irb"})
return units
else:
return None
def _address_etree(self, ip, port):
out = [{"name": str(ip)}]
for vrrp in port.vrrps:
if vrrp.related_ip_network == ip:
vrrp_etree = [{"name": vrrp.group_id}]
for ip_address in vrrp.ip_addresses:
vrrp_etree.append({"virtual-address": ip_address})
if vrrp.priority is not None:
vrrp_etree.append({"priority": vrrp.priority})
if vrrp.preempt_delay_minimum is not None:
vrrp_etree.append({"preempt": {"hold-time": vrrp.preempt_delay_minimum}})
if vrrp.vendor_specific.get("accept-data") is not None:
vrrp_etree.append({"accept-data": ""})
if vrrp.vendor_specific.get("authentication-type") is not None:
vrrp_etree.append({"authentication-type": vrrp.vendor_specific.get("authentication-type")})
if vrrp.authentication is not None:
vrrp_etree.append({"authentication-key": "this is {} but hashed".format(vrrp.authentication)})
for address, decrement in vrrp.track.items():
vrrp_etree.append({
"track": {
"route": {
"route_address": address,
"routing-instance": 'default',
"priority-cost": decrement,
}}})
out.append({"vrrp-group": vrrp_etree})
return out
def vlan_to_etree(self, vlan):
etree = super(JuniperMxNetconfDatastore, self).vlan_to_etree(vlan)
if vlan.vendor_specific.get("linked-port-vlan"):
etree.append({"routing-interface": vlan.vendor_specific.get("linked-port-vlan")})
return etree
def find_vlan_with_routing_interface(conf, interface_name):
for vlan in conf.vlans:
if vlan.vendor_specific.get("linked-port-vlan") == interface_name:
return vlan
return None
class TrunkShouldHaveVlanMembers(NetconfError):
def __init__(self, interface):
super(TrunkShouldHaveVlanMembers, self).__init__(msg="mgd: 'interface-mode trunk' must be defined with either "
"'vlan-id-list','isid-list', 'inner-vlan-id-list' or the "
"interface must be configured for 'protocols mvrp'",
severity='error',
err_type='application',
tag='invalid-value',
info={'bad-element': 'interface-mode trunk'},
path='[edit interfaces {} unit 0 family bridge interface-mode]'.format(
interface))
class ConfigurationCheckOutFailed(NetconfError):
def __init__(self):
super(ConfigurationCheckOutFailed, self).__init__(msg='commit failed: (statements constraint check failed)',
severity='error',
err_type='protocol',
tag='operation-failed',
info=None)
class FailingCommitResults(NetconfError):
def __init__(self, netconf_errors):
self.netconf_errors = netconf_errors
def to_dict(self):
return {
'commit-results': {
'routing-engine': [
{XML_ATTRIBUTES: {"{" + NS_JUNOS + "}style": "show-name"}},
{'name': "re0"},
] + [e.to_dict() for e in self.netconf_errors]
}
}
class IpAlreadyInUse(NetconfError):
def __init__(self, message):
super(IpAlreadyInUse, self).__init__(message,
severity="error",
err_type="protocol",
tag="operation-not-supported")
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import inspect
import logging
import random
log = logging.getLogger()
DEFAULT_SHERIFF_DELAY = 20
DEFAULT_NUM_BULLETS = 5
DEFAULT_HEALTH = 5
MAX_SCENES = 350 # ~150 words per scene
# Initiatives
HIGH_INITIATIVE = 30
MEDIUM_INITIATIVE = 20
DEFAULT_INITIATIVE = 10
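# Outcome table for a shot: each result maps to a health delta and a message template ('{}' is the target's name).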
GUN_DAMAGE = {'miss': {'health': 0,
'message': 'MISSED'},
'nick': {'health': -1,
'message': '{} NICKED'},
'hit': {'health': -2,
'message': '{} HIT'}}
class Stage(object):
"""The world model"""
elapsed_time = 0
current_scene = 0
@property
def actors(self):
"""Returns all the objects in the world that are people"""
return [obj for obj in self.objects if hasattr(obj, 'body')]
def find(self, obj_name):
"""Find an object by name in the world and return the object"""
return next(obj for obj in self.objects + self.places if obj.name == obj_name)
def __init__(self):
self.objects = []
self.places = []
stage = Stage()
def check_initiative(actors):
"""For each actor, find out who gets to move next"""
return max(actors, key=lambda x: x.initiative(), default=actors[0])
def action(actor):
"""At each step, evaluate what happens next"""
# By default, let the current actor do his thing
log.debug("Starting action for actor %s", actor)
actor.set_starting_location(actor.default_location)
actor.act()
stage.elapsed_time += 1
# Determine who acts next
next_actor = check_initiative(stage.actors)
if next_actor.escaped:
return next_actor
# If it's the same actor, just call this again
if next_actor == actor:
return action(actor)
return next_actor
class Thing(object):
"""An object with a name"""
location = None
def move_to(self, place):
"""Move an object from a current container (if it has one) to a new one."""
# Drop it from its current location if it has one
if self.location:
self.location = None
self.location = place
def __init__(self, name, preposition='on'):
stage.objects.append(self)
self.name = name
self.preposition = preposition
def __repr__(self):
return self.name
def __str__(self):
return self.name
def status(self):
if self.location and not isinstance(self.location, Person): # Don't print the status of body parts
if isinstance(self.location, Place):
return "the {} is {} the {}".format(self.name, self.location.preposition, self.location.name).capitalize()
if isinstance(self.location, Thing):
return "the {} is {} the {}".format(self.name, self.location.preposition, self.location.name).capitalize()
class Place(Thing):
"""A Place never has a location, and it doesn't print itself out in the world description."""
is_open = True
is_openable = False
def __init__(self, name=None):
super(Place, self).__init__(name)
stage.places.append(self)
class Door(Place):
"""A door is a place that can be open or closed. If it's open, we'll print a different message when the actor
moves through it than an ordinary place"""
is_openable = True
is_open = False
def close(self):
print("close door")
self.is_open = False
def open(self):
print("open door")
self.is_open = True
class Person(Thing):
"""A person who has hands and a location and will exhibit behavior"""
stage = None # Hook for the world model
enemy = None # Kinda cheating but makes things easy
default_location = None
health = 0 # -1 is dead, but we'll revive them on init
is_dead = False
inebriation = 0
def initiative(self):
"""Return a value representative of how much this actor wants to do something based on their state"""
if self.is_dead: # If they're already dead they're pretty lacking in initiative
return -9999
# If they _just_ died, give them a huge initiative bonus so we "cut" to their scene
if self.health <= 0:
return 9999
actor_initiative = random.randrange(0, DEFAULT_INITIATIVE)
if len(self.path) > 0: # Actor really wants to be somewhere
actor_initiative += HIGH_INITIATIVE
#log.debug("+ %s init change for path movement: %s/%s", self.name, HIGH_INITIATIVE, actor_initiative)
# If they're injured they're pretty mad
injury_bonus = DEFAULT_HEALTH - self.health
actor_initiative += injury_bonus
#log.debug("+ %s init change for injury bonus: %s/%s", self.name, injury_bonus, actor_initiative)
# They're also more excited if they're almost out of bullets
if self.get_if_held(Gun):
bullet_bonus = 10 if self.get_if_held(Gun).num_bullets == 1 else 0
actor_initiative += bullet_bonus
#log.debug("- %s init change for bullet bonus: %s/%s", self.name, bullet_bonus, actor_initiative)
return max(1, actor_initiative)
def act(self):
"""Do whatever is the next queued event"""
# If the actor just died, oops
if self.health <= 0:
print("{} dies.".format(self.name))
self.is_dead = True
return
# If there's a queued event, hit that first
if len(self.queue) > 0:
cmd, *args = self.queue[0]
log.debug("Running queued command: %s %s", cmd, args)
if args:
cmd(*args)
else:
cmd()
self.queue = self.queue[1:]
return
# If there's a target location, try to go there
if len(self.path) > 0:
log.debug("Got a path event, walking it")
next_location = self.path[0]
if self.go(next_location):
# If going there was successful, set their new location and drop it from the path
self.path = self.path[1:]
return
# If the enemy is present, try to kill them!
if self.enemy_is_present():
# If we don't have the gun, go find it!
if isinstance(self, Sheriff): # Lame
gun = stage.find("sheriff's gun")
else:
gun = stage.find("gun")
if self.get_if_held(gun):
self.shoot(self.enemy)
else:
# Immediately go to the location where the gun is (unless the location is a supporter)
target_location = gun.location
self.go(target_location)
# ...then queue taking the gun and shooting it!
self.queue.append((self.shoot, self.enemy))
self.queue.append((self.take, gun))
return
# If the enemy is dead, take the money and run
if self.enemy.is_dead:
# Blow out the gun if we still have it
gun = self.get_if_held(Gun)
holster = self.get_if_held(Holster)
if gun and not gun.location == holster:
print("blow out barrel")
self.queue.append((self.drop, gun, holster))
return True
log.debug("*** Trying to get the money")
money = self.stage.find('money')
if self.location == money.location:
return self.take(money)
# End game! Flee with the money!
if self.get_if_held('money'):
self.path = ['door', None]
self.escaped = True
# Random behaviors
weighted_choice = [('drink', 5), ('wander', 3), ('check', 1), ('lean', 1), ('count', 1), ('drop', 1)]
choice = random.choice([val for val, cnt in weighted_choice for i in range(cnt)])
log.debug("%s chose to %s", self.name, choice)
if choice == 'drink':
# Try to drink from the glass if we're holding it
glass = stage.find('glass')
if self.get_if_held('glass'):
# ...and it's full, just drink from it
if glass.full:
glass.drink(self)
return True
# If not, try to pour a glass from the bottle
else:
bottle = stage.find('bottle')
if self.get_if_held(bottle):
bottle.pour(glass)
# Be sure to add queued events in reverse order of intended execution
self.queue.append((glass.drink, self))
self.queue.append((self.take, glass))
return True
# If we don't have the bottle and can reach it, take it and
# then queue pouring it and drinking from it
else:
if self.can_reach_obj(bottle):
self.take(bottle)
self.queue.append((glass.drink, self))
self.queue.append((self.take, glass))
self.queue.append((bottle.pour, glass))
return True
# If we don't have the glass, try to get it
else:
if self.can_reach_obj(glass):
self.take(glass)
return True
elif choice == 'wander':
return self.go_to_random_location()
elif choice == 'check':
if self.get_if_held(Gun):
print("check gun")
return True
elif choice == 'count':
if self.can_reach_obj(stage.find('money')):
print("count money")
return True
elif choice == 'lean':
if self.location == stage.find('window'):
print('lean on window and look')
return True
elif choice == 'drop': # Drop a random object that isn't the gun
obj = self.get_held_obj(self.right_hand)
if obj and not isinstance(obj, Gun):
self.drop(obj, self.location)
return True
else:
obj = self.get_held_obj(self.left_hand)
if obj and not isinstance(obj, Gun):
self.drop(obj, self.location)
return True
# If we fell through and did nothing, try again
return self.act()
def can_reach_obj(self, obj):
"""True if the Person can reach the object in question. The object must be either directly
in the same location, or on a visible supporter in the location"""
if self.location == obj.location:
return True
if hasattr(obj.location, 'location') and obj.location.location == self.location:
return True
def take(self, obj):
"""Try to take an object. If there's no hand available, drop an object and queue taking
the object. Return True if the object was taken or False if no hands available."""
free_hand = self.free_hand()
if free_hand:
print("pick up the {} with the {}".format(obj, free_hand))
obj.move_to(free_hand)
return True
else:
# Drop the thing in a random hand and queue picking up the thing
self.drop(self.get_held_obj(random.choice((self.right_hand, self.left_hand))), self.location)
self.queue.append((self.take, obj))
def go_to_random_location(self):
"""Randomly go to a location that isn't the current one"""
location = random.choice([place for place in stage.places if place != self.location and not isinstance(place, Door)])
self.go(location)
def enemy_is_present(self):
"""Is the enemy visible and suitably shootable?"""
return self.enemy.location is not None and self.enemy.is_alive
def shoot(self, target, aimed=False):
"""Shoot first, ask questions never"""
gun = self.get_if_held(Gun)
if gun:
# Usually we'll aim and then fire, sometimes we'll just fire
if not aimed:
if random.randint(0, 5) > 1:
print("aim")
self.queue.append((self.shoot, target, True))
return False
print("fire")
log.debug("%s is trying to shoot %s", self.name, target.name)
hit_weight = self.starting_hit_weight()
if gun.num_bullets == 1:
hit_weight += 1
if self.health < DEFAULT_HEALTH:
hit_weight += 1
weighted_hit_or_miss = [('miss', 3), ('nick', 3 * hit_weight), ('hit', 1 * hit_weight)]
hit_or_nick = random.choice([val for val, cnt in weighted_hit_or_miss for i in range(cnt)])
print(GUN_DAMAGE[hit_or_nick]['message'].format(target.name))
target.health += GUN_DAMAGE[hit_or_nick]['health']
gun.num_bullets -= 1
return True
def starting_hit_weight(self):
"""Return a state-dependent starting weight that can increase or decrease the likelihood of
the actor making a successful shot."""
return 1
def go(self, location):
"""Try to move to the next location. If that location can be opened, like a door, open it first.
Otherwise, set the new location. If `location` is a string, find the
name of that location in the world."""
if isinstance(location, str):
location = self.stage.find(location)
log.debug("Trying to go to next location %s", location)
if location.is_openable and not location.is_open:
location.open()
return False
if location.is_openable and location.is_open:
print("go through {}".format(location))
self.queue.append((location.close,))
else:
print("go to {}".format(location))
self.location = location
return True
def get_if_held(self, obj_name):
"""Does the actor have the object name, object, or classname in any of its body parts? If so, return the container where it is"""
# First check if it's a classname (like Gun)
if inspect.isclass(obj_name):
# Check all the world models for objects of this type and try to find a match
for obj in stage.objects:
if isinstance(obj, obj_name) and obj.location in self.parts:
return obj
if isinstance(obj_name, str):
# If not, try to find the named object
obj = self.stage.find(obj_name)
else:
obj = obj_name
if obj.location in self.parts:
return obj
def get_held_obj(self, part):
"""Get the object held by a given body part. Returns None if the body part isn't holding anything"""
for obj in stage.objects:
if obj.location == part:
return obj
def free_hand(self):
"""Return the hand that isn't holding anything"""
right_free = True
left_free = True
for obj in stage.objects:
if obj.location == self.right_hand:
right_free = False
elif obj.location == self.left_hand:
left_free = False
if right_free:
return self.right_hand
if left_free:
return self.left_hand
@property
def is_alive(self):
return self.health > 0
def set_starting_location(self, location):
"""Setting the starting location changes the world model and also prints an explicit
message. It's idempotent and so safe to call in a loop because I'm lazy"""
if location and not self.location:
self.location = location
print("(The {} is at the {}.)".format(self.name, self.location.name))
def drop(self, obj, target):
"""Drop an object in a place or on a supporting object. Is a no-op if the actor doesn't have the object."""
if self.get_if_held(obj.name):
print("put {} {} {}".format(obj.name, target.preposition, target.name))
obj.move_to(target)
def __init__(self, name):
super(Person, self).__init__(name)
self.health = DEFAULT_HEALTH
self.path = [] # A path of Places the person is currently walking
self.queue = [] # A queue of functions to call next
self.right_hand = Thing("{}'s right hand".format(self.name), preposition='in')
self.left_hand = Thing("{}'s left hand".format(self.name), preposition='in')
self.body = Thing("{}".format(self.name))
self.parts = [self.left_hand, self.right_hand, self.body]
self.escaped = False # The final endgame state
class Robber(Person):
"""The Robber wants to deposit the money, drink, kill the sheriff, and escape with the money"""
def initiative(self):
actor_initiative = super(Robber, self).initiative()
# If the Robber has the money and the Sheriff is alive,
# the Robber wants to drop the money in the Corner
if self.get_if_held('money') and self.enemy.is_alive:
actor_initiative += HIGH_INITIATIVE
log.debug("%s is returning initiative %s", self.name, actor_initiative)
return actor_initiative
def act(self):
"""A set of conditions of high priority; these actions will be executed first"""
if self.location.name == 'corner' and self.get_if_held('money') and self.enemy.is_alive:
money = self.get_if_held('money')
self.drop(money, self.location)
return True
return super(Robber, self).act()
def starting_hit_weight(self):
"""The Robber (but _not_ the Sheriff) is a better shot if he's drunk"""
return self.inebriation + 2
class Sheriff(Person):
"""The Sheriff wants to kill the Robber and leave with the money. He does not get a drink bonus and arrives
on a delay."""
def __init__(self, name, delay):
super(Sheriff, self).__init__(name)
self.delay = delay
def initiative(self):
actor_initiative = super(Sheriff, self).initiative()
# The Sheriff is subject to the global timer and will do nothing until it expires
if self.stage.elapsed_time < self.delay:
actor_initiative = 0
elif self.location is None:
# If they haven't moved, tell them they want to move to the table
actor_initiative += HIGH_INITIATIVE
log.debug("%s is returning initiative %s", self.name, actor_initiative)
return actor_initiative
def act(self):
"""The Sheriff wants to get in the house right away"""
if self.location is None:
self.path = ['window', 'door']
return super(Sheriff, self).act()
def starting_hit_weight(self):
"""The Sheriff (but _not_ the Robber) is a better shot if he's injured"""
weight = 1
if self.health < DEFAULT_HEALTH:
weight += 3
return weight
class Gun(Thing):
"""A Gun is an object with a distinct property of being shootable and having a number of bullets"""
num_bullets = 0
def __init__(self, name):
super(Gun, self).__init__(name)
self.num_bullets = DEFAULT_NUM_BULLETS
class Holster(Thing):
def __init__(self, name, preposition='in'):
super(Holster, self).__init__(name, preposition=preposition)
class Container(Thing):
"""A Container is a vessel that can contain a thing (whisky)"""
volume = 0
def __init__(self, name):
super(Container, self).__init__(name)
@property
def full(self):
"""A container is 'full' if it contains any volume"""
return self.volume > 0
def pour(self, new_container):
"""Pouring from a full container into an empty container makes
the other container full. It doesn't make the source container
any less full because magic. If the source container is empty,
this is a no-op. Returns True if the pour succeeded."""
if self.full:
print("pour")
new_container.volume = 3
return True
def drink(self, actor):
"""Drinking from a full container changes the inebriation status
of the actor. Drinking from an empty glass has no effect.
Returns True if the drink succeeded."""
if self.full:
print("take a drink from {}".format(self))
actor.inebriation += 1
self.volume -= 1
return True
def init(delay):
"""Initialize the starting conditions"""
# Humans
robber = Robber('robber')
robber_gun = Gun('gun')
robber_gun.move_to(robber.right_hand)
money = Thing('money')
money.move_to(robber.left_hand)
robber_holster = Holster('holster')
robber_holster.move_to(robber.body)
robber.stage = stage # A mechanism to get ahold of the world state
sheriff = Sheriff('sheriff', delay=delay)
sheriff_gun = Gun("sheriff's gun")
sheriff_gun.move_to(sheriff.right_hand)
holster = Holster("sheriff's holster")
holster.move_to(sheriff.body)
sheriff.stage = stage
robber.enemy = sheriff
sheriff.enemy = robber
# Places
window = Place('window')
table = Place('table')
door = Door('door')
corner = Place('corner')
sheriff.default_location = None # nowhere
robber.default_location = window
robber.path = [door, corner]
# Objects
glass = Container('glass')
bottle = Container('bottle')
bottle.volume = 10
glass.move_to(table)
bottle.move_to(table)
stage.current_scene += 1
loop()
def loop():
"""Main story loop, initialized by the delay before the sheriff arrives"""
# Start with the world status
print ("\nAct 1 Scene {}\n\n".format(stage.current_scene))
for obj in stage.objects:
if not isinstance(obj, Person) and obj.status():
print(obj.status() + '.', end=" ")
print()
next_actor = stage.actors[0]
while True:
print()
print(next_actor.name.upper())
next_actor = action(next_actor)
if next_actor.escaped:
print("CURTAIN")
stage.objects = []
stage.places = []
break
if __name__ == '__main__':
delay = input('Select arrival time for SHERIFF or ENTER for default: ') or DEFAULT_SHERIFF_DELAY
print("""
SAGA III
An Original Play
by
A Computer """)
for i in range(0, MAX_SCENES):
init(delay=int(delay))
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glanceclient.exc
import novaclient.exceptions as nova_exc
from oslo_utils import timeutils
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
from magnum.api.controllers import base
from magnum.api.controllers import link
from magnum.api.controllers.v1 import collection
from magnum.api.controllers.v1 import types
from magnum.api.controllers.v1 import utils as api_utils
from magnum.api import expose
from magnum.api import validation
from magnum.common import clients
from magnum.common import exception
from magnum.common import policy
from magnum import objects
class BayModelPatchType(types.JsonPatchType):
pass
class BayModel(base.APIBase):
"""API representation of a baymodel.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of a baymodel.
"""
_coe = None
def _get_coe(self):
return self._coe
def _set_coe(self, value):
if value and self._coe != value:
self._coe = value
elif value == wtypes.Unset:
self._coe = wtypes.Unset
uuid = types.uuid
"""Unique UUID for this baymodel"""
name = wtypes.StringType(min_length=1, max_length=255)
"""The name of the bay model"""
coe = wsme.wsproperty(wtypes.text, _get_coe, _set_coe, mandatory=True)
"""The Container Orchestration Engine for this bay model"""
image_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255),
mandatory=True)
"""The image name or UUID to use as a base image for this baymodel"""
flavor_id = wtypes.StringType(min_length=1, max_length=255)
"""The flavor of this bay model"""
master_flavor_id = wtypes.StringType(min_length=1, max_length=255)
"""The flavor of the master node for this bay model"""
dns_nameserver = wtypes.IPv4AddressType()
"""The DNS nameserver address"""
keypair_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255),
mandatory=True)
"""The name or id of the nova ssh keypair"""
external_network_id = wtypes.StringType(min_length=1, max_length=255)
"""The external network to attach the Bay"""
fixed_network = wtypes.StringType(min_length=1, max_length=255)
"""The fixed network name to attach the Bay"""
network_driver = wtypes.StringType(min_length=1, max_length=255)
"""The name of the driver used for instantiating container networks"""
apiserver_port = wtypes.IntegerType(minimum=1024, maximum=65535)
"""The API server port for k8s"""
docker_volume_size = wtypes.IntegerType(minimum=1)
"""The size in GB of the docker volume"""
ssh_authorized_key = wtypes.StringType(min_length=1)
"""The SSH Authorized Key"""
cluster_distro = wtypes.StringType(min_length=1, max_length=255)
"""The Cluster distro for the bay, ex - coreos, fedora-atomic."""
links = wsme.wsattr([link.Link], readonly=True)
"""A list containing a self link and associated baymodel links"""
http_proxy = wtypes.StringType(min_length=1, max_length=255)
"""http_proxy for the bay """
https_proxy = wtypes.StringType(min_length=1, max_length=255)
"""https_proxy for the bay """
no_proxy = wtypes.StringType(min_length=1, max_length=255)
"""Its comma separated list of ip for which proxies should not
used in the bay"""
registry_enabled = wsme.wsattr(types.boolean, default=False)
"""Indicates whether the docker registry is enabled"""
labels = wtypes.DictType(str, str)
"""One or more key/value pairs"""
tls_disabled = wsme.wsattr(types.boolean, default=False)
"""Indicates whether the TLS should be disabled"""
public = wsme.wsattr(types.boolean, default=False)
"""Indicates whether the baymodel is public or not."""
server_type = wsme.wsattr(wtypes.StringType(min_length=1,
max_length=255),
default='vm')
"""Server type for this bay model """
def __init__(self, **kwargs):
self.fields = []
for field in objects.BayModel.fields:
# Skip fields we do not expose.
if not hasattr(self, field):
continue
self.fields.append(field)
setattr(self, field, kwargs.get(field, wtypes.Unset))
@staticmethod
def _convert_with_links(baymodel, url, expand=True):
if not expand:
baymodel.unset_fields_except(['uuid', 'name', 'image_id',
'apiserver_port', 'coe'])
baymodel.links = [link.Link.make_link('self', url,
'baymodels', baymodel.uuid),
link.Link.make_link('bookmark', url,
'baymodels', baymodel.uuid,
bookmark=True)]
return baymodel
@classmethod
def convert_with_links(cls, rpc_baymodel, expand=True):
baymodel = BayModel(**rpc_baymodel.as_dict())
return cls._convert_with_links(baymodel, pecan.request.host_url,
expand)
@classmethod
def sample(cls, expand=True):
sample = cls(
uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
name='example',
image_id='Fedora-k8s',
flavor_id='m1.small',
master_flavor_id='m1.small',
dns_nameserver='8.8.1.1',
keypair_id='keypair1',
external_network_id='ffc44e4a-2319-4062-bce0-9ae1c38b05ba',
fixed_network='private',
network_driver='libnetwork',
apiserver_port=8080,
docker_volume_size=25,
cluster_distro='fedora-atomic',
ssh_authorized_key='ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAB',
coe='kubernetes',
http_proxy='http://proxy.com:123',
https_proxy='https://proxy.com:123',
no_proxy='192.168.0.1,192.168.0.2,192.168.0.3',
labels={'key1': 'val1', 'key2': 'val2'},
server_type='vm',
created_at=timeutils.utcnow(),
updated_at=timeutils.utcnow(),
public=False)
return cls._convert_with_links(sample, 'http://localhost:9511', expand)
class BayModelCollection(collection.Collection):
"""API representation of a collection of baymodels."""
baymodels = [BayModel]
"""A list containing baymodels objects"""
def __init__(self, **kwargs):
self._type = 'baymodels'
@staticmethod
def convert_with_links(rpc_baymodels, limit, url=None, expand=False,
**kwargs):
collection = BayModelCollection()
collection.baymodels = [BayModel.convert_with_links(p, expand)
for p in rpc_baymodels]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
@classmethod
def sample(cls):
sample = cls()
sample.baymodels = [BayModel.sample(expand=False)]
return sample
class BayModelsController(rest.RestController):
"""REST controller for BayModels."""
_custom_actions = {
'detail': ['GET'],
}
def _get_baymodels_collection(self, marker, limit,
sort_key, sort_dir, expand=False,
resource_url=None):
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.BayModel.get_by_uuid(pecan.request.context,
marker)
baymodels = objects.BayModel.list(pecan.request.context, limit,
marker_obj, sort_key=sort_key,
sort_dir=sort_dir)
return BayModelCollection.convert_with_links(baymodels, limit,
url=resource_url,
expand=expand,
sort_key=sort_key,
sort_dir=sort_dir)
def _get_image_data(self, context, image_ident):
"""Retrieves os_distro and other metadata from the Glance image.
:param image_ident: image id or name of baymodel.
"""
try:
cli = clients.OpenStackClients(context)
return api_utils.get_openstack_resource(cli.glance().images,
image_ident, 'images')
except glanceclient.exc.NotFound:
raise exception.ImageNotFound(image_id=image_ident)
except glanceclient.exc.HTTPForbidden:
raise exception.ImageNotAuthorized(image_id=image_ident)
@policy.enforce_wsgi("baymodel")
@expose.expose(BayModelCollection, types.uuid, int, wtypes.text,
wtypes.text)
def get_all(self, marker=None, limit=None, sort_key='id',
sort_dir='asc'):
"""Retrieve a list of baymodels.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
return self._get_baymodels_collection(marker, limit, sort_key,
sort_dir)
@policy.enforce_wsgi("baymodel")
@expose.expose(BayModelCollection, types.uuid, int, wtypes.text,
wtypes.text)
def detail(self, marker=None, limit=None, sort_key='id',
sort_dir='asc'):
"""Retrieve a list of baymodels with detail.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
# NOTE(lucasagomes): /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "baymodels":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['baymodels', 'detail'])
return self._get_baymodels_collection(marker, limit,
sort_key, sort_dir, expand,
resource_url)
@policy.enforce_wsgi("baymodel", "get")
@expose.expose(BayModel, types.uuid_or_name)
def get_one(self, baymodel_ident):
"""Retrieve information about the given baymodel.
:param baymodel_ident: UUID or logical name of a baymodel.
"""
rpc_baymodel = api_utils.get_rpc_resource('BayModel', baymodel_ident)
return BayModel.convert_with_links(rpc_baymodel)
def check_keypair_exists(self, context, keypair):
"""Checks the existence of the keypair"""
cli = clients.OpenStackClients(context)
try:
cli.nova().keypairs.get(keypair)
except nova_exc.NotFound:
raise exception.KeyPairNotFound(keypair=keypair)
@policy.enforce_wsgi("baymodel", "create")
@expose.expose(BayModel, body=BayModel, status_code=201)
@validation.enforce_network_driver_types_create()
def post(self, baymodel):
"""Create a new baymodel.
:param baymodel: a baymodel within the request body.
"""
baymodel_dict = baymodel.as_dict()
context = pecan.request.context
self.check_keypair_exists(context, baymodel_dict['keypair_id'])
baymodel_dict['project_id'] = context.project_id
baymodel_dict['user_id'] = context.user_id
image_data = self._get_image_data(context, baymodel_dict['image_id'])
if image_data.get('os_distro'):
baymodel_dict['cluster_distro'] = image_data['os_distro']
else:
raise exception.OSDistroFieldNotFound(
image_id=baymodel_dict['image_id'])
# check permissions for making baymodel public
if baymodel_dict['public']:
if not policy.enforce(context, "baymodel:publish", None,
do_raise=False):
raise exception.BaymodelPublishDenied()
new_baymodel = objects.BayModel(context, **baymodel_dict)
new_baymodel.create()
# Set the HTTP Location Header
pecan.response.location = link.build_url('baymodels',
new_baymodel.uuid)
return BayModel.convert_with_links(new_baymodel)
@policy.enforce_wsgi("baymodel", "update")
@wsme.validate(types.uuid_or_name, [BayModelPatchType])
@expose.expose(BayModel, types.uuid_or_name, body=[BayModelPatchType])
@validation.enforce_network_driver_types_update()
def patch(self, baymodel_ident, patch):
"""Update an existing baymodel.
:param baymodel_ident: UUID or logic name of a baymodel.
:param patch: a json PATCH document to apply to this baymodel.
"""
context = pecan.request.context
rpc_baymodel = api_utils.get_rpc_resource('BayModel', baymodel_ident)
try:
baymodel_dict = rpc_baymodel.as_dict()
baymodel = BayModel(**api_utils.apply_jsonpatch(
baymodel_dict,
patch))
except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
# check permissions when updating baymodel public flag
if rpc_baymodel.public != baymodel.public:
if not policy.enforce(context, "baymodel:publish", None,
do_raise=False):
raise exception.BaymodelPublishDenied()
# Update only the fields that have changed
for field in objects.BayModel.fields:
try:
patch_val = getattr(baymodel, field)
except AttributeError:
# Ignore fields that aren't exposed in the API
continue
if patch_val == wtypes.Unset:
patch_val = None
if rpc_baymodel[field] != patch_val:
rpc_baymodel[field] = patch_val
rpc_baymodel.save()
return BayModel.convert_with_links(rpc_baymodel)
@policy.enforce_wsgi("baymodel")
@expose.expose(None, types.uuid_or_name, status_code=204)
def delete(self, baymodel_ident):
"""Delete a baymodel.
:param baymodel_ident: UUID or logical name of a baymodel.
"""
rpc_baymodel = api_utils.get_rpc_resource('BayModel', baymodel_ident)
rpc_baymodel.destroy()
|
|
# -*- coding: utf-8
from apis.specs import (RestartPolicy, DependencyPolicy)
from apis.specs import (ImSpec, Dependency, LogConfigSpec, ContainerSpec,
PodSpec, PodGroupSpec, AppSpec)
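# Convention exercised by these tests: clone() returns a distinct object (different identity) while equals() compares by value.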
def test_ImSpec_smoke():
s = ImSpec()
assert s.CreateAt is None
assert s.Name == ""
def test_Dependency_smoke():
d = Dependency()
assert d.PodName == ""
assert d.Policy == DependencyPolicy.NamespaceLevel
def test_Dependency_util_smoke():
d1 = Dependency()
d1.PodName = 'hello.foo.bar'
d1.Policy = DependencyPolicy.NodeLevel
d2 = d1.clone()
assert d1 != d2
assert d2.equals(d1)
def test_LogConfigSpec_smoke():
l = LogConfigSpec()
assert l.Type == ''
assert l.Config == {}
def test_LogConfigSpec_verify_params_smoke():
l = LogConfigSpec()
l.Type = None
assert not l.verify_params()
l.Type = 'logd'
l.Config = {}
assert l.verify_params()
def test_ContainerSpec_smoke():
s = ContainerSpec()
assert s.Name == ""
assert s.Namespace == ""
assert s.CreateAt is None
assert s.Version == 0
assert s.Image == ""
assert s.Command == []
assert s.LogConfig is None
def test_ContainerSpec_verify_params_smoke():
s = ContainerSpec()
assert not s.verify_params()
s.Image = 'hello/release-123-456'
assert s.verify_params()
s.LogConfig = LogConfigSpec()
assert s.verify_params()
def test_ContainerSpec_util_smoke():
s1 = ContainerSpec()
s2 = s1.clone()
assert s1 != s2
assert s1.equals(s2)
s1.LogConfig = LogConfigSpec()
s2.LogConfig = LogConfigSpec()
s1.LogConfig.Type = 'syslogd'
assert not s1.equals(s2)
def test_PodSpec_smoke():
s = PodSpec()
s.Containers = [ContainerSpec()]
s.Dependencies = [Dependency()]
assert s.Name == ""
assert s.Annotation == ""
def test_PodSpec_util_smoke():
s1 = PodSpec()
s1.Containers = [ContainerSpec()]
s1.Dependencies = [Dependency()]
s2 = s1.clone()
assert s1 != s2
assert s1.equals(s2)
assert s1.Containers[0] != s2.Containers[0]
assert s1.Containers[0].equals(s2.Containers[0])
assert s1.Dependencies[0] != s2.Dependencies[0]
assert s1.Dependencies[0].equals(s2.Dependencies[0])
def test_PodSpec_verify_params_smoke():
c = ContainerSpec()
d = Dependency()
s = PodSpec()
s.Containers = [c]
s.Dependencies = [d]
assert not s.verify_params()
s.Name = "web"
s.Namespace = "hello.foo.bar"
assert not s.verify_params()
c.Image = "hello/release-123-456"
assert s.verify_params()
def test_PodGroupSpec_smoke():
p = PodSpec()
p.Containers = [ContainerSpec()]
p.Dependencies = [Dependency()]
s = PodGroupSpec()
s.Pod = p
assert s.NumInstances == 0
assert s.RestartPolicy == RestartPolicy.Never
def test_PodGroupSpec_util_smoke():
c = ContainerSpec()
d = Dependency()
p = PodSpec()
p.Containers = [c]
p.Dependencies = [d]
s1 = PodGroupSpec()
s1.Pod = p
s1.NumInstances = 1
s1.RestartPolicy = RestartPolicy.OnFail
s2 = s1.clone()
assert s1 != s2
assert s1.equals(s2)
p1 = s1.Pod
p2 = s2.Pod
assert p1 != p2
assert p1.equals(p2)
assert p1.Containers[0] != p2.Containers[0]
assert p1.Containers[0].equals(p2.Containers[0])
assert p1.Dependencies[0] != p2.Dependencies[0]
assert p1.Dependencies[0].equals(p2.Dependencies[0])
def test_PodGroupSpec_verify_params_smoke():
c = ContainerSpec()
d = Dependency()
p = PodSpec()
p.Containers = [c]
p.Dependencies = [d]
s = PodGroupSpec()
s.Pod = p
s.NumInstances = 1
s.RestartPolicy = RestartPolicy.OnFail
assert not s.verify_params()
s.Name = "web"
s.Namespace = "hello.foo.bar"
assert not s.verify_params()
p.Name = "web"
p.Namespace = "hello.foo.bar"
assert not s.verify_params()
c.Image = "hello/release-123-456"
assert s.verify_params()
def test_AppSpec_smoke():
p = PodSpec()
p.Containers = [ContainerSpec()]
p.Dependencies = [Dependency()]
pg = PodGroupSpec()
pg.Pod = p
a = AppSpec()
a.PodGroups = [pg]
assert a.AppName == ""
def test_AppSpec_util_smoke():
c = ContainerSpec()
d = Dependency()
p = PodSpec()
p.Containers = [c]
p.Dependencies = [d]
pg = PodGroupSpec()
pg.Pod = p
pg.NumInstances = 1
pg.RestartPolicy = RestartPolicy.OnFail
a1 = AppSpec()
a1.AppName = "hello"
a1.PodGroups = [pg]
a2 = a1.clone()
assert a1 != a2
assert a1.equals(a2)
pg1 = a1.PodGroups[0]
pg2 = a2.PodGroups[0]
assert pg1 != pg2
assert pg1.equals(pg2)
def test_AppSpec_verify_params_smoke():
c = ContainerSpec()
d = Dependency()
p = PodSpec()
p.Containers = [c]
p.Dependencies = [d]
pg = PodGroupSpec()
pg.Pod = p
pg.NumInstances = 1
pg.RestartPolicy = RestartPolicy.OnFail
a = AppSpec()
a.PodGroups = [pg]
assert not a.verify_params()
a.AppName = "hello"
assert not a.verify_params()
pg.Name = "web"
pg.Namespace = "hello.foo.bar"
assert not a.verify_params()
p.Name = "web"
p.Namespace = "hello.foo.bar"
assert not a.verify_params()
c.Image = "hello/release-123-456"
assert a.verify_params()
|
|
# for the API Fetch
import urllib.request
import json
import sys
# for the socket check
import socket
import random
import time
#FIXED
#get a new blog from the frontier
def get_blog_from_frontier(host,port):
#connect to the frontier to get a socket to communicate with
connection_success = False
connection_success_fails = 0
while not connection_success:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
connection_success = True
except Exception as e:
print("Could Not Link To Socket " + str(port))
connection_success_fails += 1
if connection_success_fails > 5:
print("Max Link Fails to socket " + str(port))
return False,None
time.sleep(.1*random.randint(1,5))
pass
#build our queue_blogs json
input_data = {"request_type":"new_blog_request",}
#send it our payload
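# shutdown(SHUT_WR) marks end-of-request so the frontier replies; the response is then read until EOF below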
try:
send_data = json.dumps(input_data)
s.send(str.encode(send_data))
s.shutdown(socket.SHUT_WR)
except Exception as e:
print("Could Not Send Payload: " + str(e))
return False,None
#receive the response
data = bytes([])
while True:
new_data = s.recv(1024)
if not new_data: break
data += new_data
s.close()
try:
data = str(data,'UTF-8')
except Exception as e:
print("Bytes Return on Socket Request Malformed")
return False,None
#load the data using json load
try:
json_data = json.loads(data)
except Exception as e:
print("Json Return on Socket Request Malformed" + str(data))
return False,None
#extract the new blog from the json
try:
if not json_data["worked"]:
return False,None
return True,json_data["new_blog"]
except Exception as e:
print("Json Return on New Blog Request Failed: " + str(e))
return False, None
#FIXED
# get the blogs from notes
def get_blogs_from_notes(blog_name,api_key,offset=None,limit=None):
def form_post(post):
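# Flatten a raw Tumblr API post into the dict we store; the content field depends on the post type.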
formed_post = {}
formed_post["blog_name"] = post["blog_name"]
formed_post["post_id"] = post["id"]
formed_post["post_link"] = post["post_url"]
formed_post["timestamp"] = post["timestamp"]
formed_post["note_count"] = post["note_count"]
formed_post["tags"] = post["tags"]
formed_post["type"] = post["type"]
formed_post["title"] = ""
if "title" in post:
formed_post["title"] = post["title"]
try:
if formed_post["type"] == "text":
formed_post["content"] = str(post["body"])
elif formed_post["type"] == "photo":
#consider photoset
formed_post["title"] = post["caption"]
formed_post["content"] = post["photos"][0]["original_size"]["url"]
elif formed_post["type"] == "quote":
formed_post["content"] = str(post["text"]) + str(post["source"])
elif formed_post["type"] == "link":
formed_post["content"] = post["url"]
elif formed_post["type"] == "chat":
formed_post["content"] = str(post["body"])
elif formed_post["type"] == "audio":
formed_post["content"] = post["audio_url"]
elif formed_post["type"] == "video":
formed_post["title"] = post["caption"]
formed_post["content"] = post["permalink_url"]
elif formed_post["type"] == "answer":
formed_post["content"] = "WOW"
else:
raise Exception
except Exception as e:
#answer posts are going to be thrown out
print(formed_post)
print("Invalid post type found, something bad has happened")
return False
return formed_post
def get_notes_from_post(post,postid):
note_list = []
for item in post["notes"]:
note = {}
note["timestamp"] = item["timestamp"]
note["blog_name"] = item["blog_name"]
note["type"] = item["type"]
note["post_id"] = postid
note_list.append(note)
return note_list
#return list
blogs = []
links = []
posts = []
note_list = []
# build url for api
try:
authentication = '?api_key=' + api_key
url = 'http://api.tumblr.com/v2/blog/' + blog_name +".tumblr.com"
parameters = "¬es_info=true&reblog_info=true"
if limit != None:
parameters += '&limit='+str(int(limit))
if offset != None:
parameters += '&offset='+str(int(offset))
url += '/posts'+ authentication + parameters
except Exception as e:
print("Could not build")
return False,[],[],[],[]
# retrieve html
try:
response = urllib.request.urlopen(url)
html = response.read()
except Exception as e:
print("Could not get Html",str(url))
return False,[],[],[],[]
# parse html into json
try:
x = json.loads(html.decode('UTF-8'))
except Exception as e:
print("Could not Parse to Json")
return False,[],[],[],[]
# look for "unique blogs"
try:
if "response" in x:
if "posts" in x["response"]:
for a in x["response"]["posts"]:
post = form_post(a)
if post != False:
posts.append(post)
if "notes" in a:
note_list += get_notes_from_post(a,post["post_id"])
for b in a["notes"]:
if "blog_name" in b:
if b["blog_name"] not in blogs:
blogs.append(b["blog_name"])
links.append(b["blog_url"])
except Exception as e:
print("Could Not Parse Json into Unique Blogs")
return False,[],[],[],[]
# return list of unique blogs in a list
return True,list(blogs),list(links),list(posts),list(note_list)
#FIXED
# sends the discovered blogs and their links to the DB
def send_blogs_to_DB(host,port,blogs,links):
#connect to the frontier to get a socket to communicate with
connection_success = False
connection_success_fails = 0
while not connection_success:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
connection_success = True
except Exception as e:
print("Could Not Link To Socket " + str(port))
connection_success_fails += 1
if connection_success_fails > 5:
print("Max Link Fails to socket " + str(port))
return False
time.sleep(.1*random.randint(1,5))
pass
#build our queue_blogs json
input_data = {
"request_type": "save_blogs",
"blogs": blogs,
"links":links,
}
#send it our payload
try:
send_data = json.dumps(input_data)
s.send(str.encode(send_data))
s.shutdown(socket.SHUT_WR)
s.close()
except Exception as e:
print("Could Not Send Payload: " + str(e))
return False
return True
#FIXED
#send posts to db
def send_posts_to_DB(host,port,posts):
#connect to the frontier to get a socket to communicate with
connection_success = False
connection_success_fails = 0
while not connection_success:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
connection_success = True
except Exception as e:
print("Could Not Link To Socket " + str(port))
connection_success_fails += 1
if connection_success_fails > 5:
print("Max Link Fails to socket " + str(port))
return False
time.sleep(.1*random.randint(1,5))
pass
#build our queue_blogs json
input_data = {
"request_type": "save_posts",
"posts": posts,
}
#send it our payload
try:
send_data = json.dumps(input_data)
s.send(str.encode(send_data))
s.shutdown(socket.SHUT_WR)
s.close()
except Exception as e:
print("Could Not Send Payload: " + str(e))
return False
return True
#FIXED
#send notes to db
def send_notes_to_DB(host,port,notes):
#connect to the frontier to get a socket to communicate with
connection_success = False
connection_success_fails = 0
while not connection_success:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
connection_success = True
except Exception as e:
print("Could Not Link To Socket " + str(port))
connection_success_fails += 1
if connection_success_fails > 5:
print("Max Link Fails to socket " + str(port))
return False
time.sleep(.1*random.randint(1,5))
pass
#build our queue_blogs json
input_data = {
"request_type": "save_notes",
"notes": notes,
}
#send it our payload
try:
send_data = json.dumps(input_data)
s.send(str.encode(send_data))
s.shutdown(socket.SHUT_WR)
s.close()
except Exception as e:
print("Could Not Send Payload: " + str(e))
return False
return True
#FIXED
# sends the blogs to the frontier
def send_blogs_to_frontier(host,port,blogs):
#connect to the frontier to get a socket to communicate with
connection_success = False
connection_success_fails = 0
while not connection_success:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
connection_success = True
except Exception as e:
print("Could Not Link To Socket " + str(port))
connection_success_fails += 1
if connection_success_fails > 5:
print("Max Link Fails to socket " + str(port))
return False
time.sleep(.1*random.randint(1,5))
pass
#build our queue_blogs json
input_data = {
"request_type": "queue_blogs",
"blog_list": blogs,
}
#send it our payload
try:
send_data = json.dumps(input_data)
s.send(str.encode(send_data))
s.shutdown(socket.SHUT_WR)
s.close()
except Exception as e:
print("Could Not Send Payload: " + str(e))
return False
return True
#FIXED
# gets an api key from the frontier
def get_api_key_from_frontier(host,port):
connection_success = False
connection_success_fails = 0
while not connection_success:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
connection_success = True
except Exception as e:
print("Could Not Link To Socket " + str(port))
connection_success_fails += 1
if connection_success_fails > 5:
print("Max Link Fails to socket " + str(port))
return False,None
time.sleep(.1*random.randint(1,5))
pass
#build our queue_blogs json
input_data = {"request_type":"api_key_get",}
#send it our payload
try:
send_data = json.dumps(input_data)
s.send(str.encode(send_data))
s.shutdown(socket.SHUT_WR)
except Exception as e:
print("Could Not Send Payload: " + str(e))
return False,None
#receive the response
data = bytes([])
while True:
new_data = s.recv(1024)
if not new_data: break
data += new_data
s.close()
try:
data = str(data,'UTF-8')
except Exception as e:
print("Bytes Return on Socket Request Malformed")
return False,None
#load the data using json load
try:
json_data = json.loads(data)
except Exception as e:
print("Json Return on Socket Request Malformed" + str(data))
return False,None
#extract the new blog from the json
try:
if not json_data["worked"]:
return False,None
return True,json_data["new_api_key"]
except Exception as e:
print("Json Return on New API Key Failed: " + str(e))
return False, None
if __name__ == "__main__":
host = 'helix.vis.uky.edu'
port = 6666
db_host = '172.31.40.208'
db_port = 6667
# first try to get the api key from frontier
try:
ret = False
while not ret:
ret,api_key = get_api_key_from_frontier(host,port)
except Exception as e:
print ("Could not get an API Key")
try:
#first send a starting blog to the frontier
blogs_visited = 0  # defined up front so the failure messages below can always report progress
fail_count = 0
while True:
seed_blogs = ["just1boi"]
ret = send_blogs_to_frontier(host,port,seed_blogs)
if ret:
break
fail_count += 1
if fail_count > 10:
print("Failed on Send Blogs, Number of Blogs Visited: " + str(blogs_visited))
sys.exit()
time.sleep(.1)
except Exception as e:
print("Could not add to the queue")
sys.exit()
try:
blogs_visited = 0
# main loop
while True:
#get a new blog from the frontier
fail_count = 0
new_blog = ''
print("Get a New Blog From our Frontier")
while True:
ret,new_blog = get_blog_from_frontier(host,port)
if ret:
break
fail_count += 1
if fail_count > 10:
print("Failed on Frontier New Blog Access, Number of Blogs Visited: " + str(blogs_visited))
sys.exit()
time.sleep(.1)
#get the blogs from the notes of the new blog
fail_count = 0
insert_blogs = []
print("Get Notes From Tumblr")
while True:
ret,insert_blogs,insert_links,insert_posts,insert_notes = get_blogs_from_notes(new_blog,api_key)
if ret:
break
fail_count += 1
if fail_count > 10:
print("Failed on tumblr access, Number of Blogs Visited: " + str(blogs_visited))
sys.exit()
time.sleep(1)
#insert blogs into db
fail_count = 0
print("Insert New Blogs to our database")
while True:
ret = send_blogs_to_DB(db_host,db_port,insert_blogs,insert_links)
if ret:
break
fail_count += 1
if fail_count > 10:
print("Failed on Send Blogs, Number of Blogs Visited: " + str(blogs_visited))
sys.exit()
time.sleep(.1)
blogs_visited += 1
if blogs_visited %10 == 0:
print("Visited " + str(blogs_visited) + " blogs successfully")
#insert posts into db
fail_count = 0
print("Insert New Posts to our database")
while True:
ret = send_posts_to_DB(db_host,db_port,insert_posts)
if ret:
break
fail_count += 1
if fail_count > 10:
print("Failed on Send Posts, Number of Blogs Visited: " + str(blogs_visited))
sys.exit()
time.sleep(.1)
blogs_visited += 1
if blogs_visited %10 == 0:
print("Visited " + str(blogs_visited) + " blogs successfully")
#insert notes into DB
fail_count = 0
print("Insert New Notes to our database")
while True:
print (insert_notes)
ret = send_notes_to_DB(db_host,db_port,insert_notes)
if ret:
break
fail_count += 1
if fail_count > 10:
print("Failed on Send Notes, Number of Blogs Visited: " + str(blogs_visited))
sys.exit()
time.sleep(.1)
blogs_visited += 1
if blogs_visited %10 == 0:
print("Visited " + str(blogs_visited) + " blogs successfully")
#insert the blogs into our frontier
fail_count = 0
print("Insert New Blogs to our Frontier")
while True:
ret = send_blogs_to_frontier(host,port,insert_blogs)
if ret:
break
fail_count += 1
if fail_count > 10:
print("Failed on Send Blogs, Number of Blogs Visited: " + str(blogs_visited))
sys.exit()
time.sleep(.1)
blogs_visited += 1
if blogs_visited %10 == 0:
print("Visited " + str(blogs_visited) + " blogs successfully")
finally:
print("Ending: " + str(blogs_visited))
|
|
# -*- coding: utf-8 -*-
import logging
import pandas as pd
import numpy as np
from experiments.ctr_model import CTRModel
from hccf.utils.helpers import Timer
from sklearn.feature_extraction import FeatureHasher
from sklearn.pipeline import Pipeline
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import log_loss
from sklearn.decomposition import PCA
from sklearn.svm import LinearSVC
log = logging.getLogger(__name__)
FEATURES_CONFIG = {
'a': {
'count': 64,
'loc': 0.0,
'scale': 0.5,
'type': 'tree',
},
'b': {
'count': 50,
'loc': 0.0,
'scale': 0.5,
'type': 'tree',
},
'axb': {
'loc': 0.0,
'scale': 0.8,
'parts': ['a', 'b'],
}
}
def clean_data(filename):
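# Hash the raw string features into a fixed 2**13-dimensional space and densify for the estimators used below.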
preprocessor = Pipeline([
('fh', FeatureHasher(n_features=2 ** 13, input_type='string', non_negative=False)),
])
train_data = pd.read_table(filename, sep=',', chunksize=10000)
train_data = train_data.read()
y_train = train_data['click']
train_data.drop(['click'], axis=1, inplace=True)  # remove the click (label) column
x_train = np.asarray(train_data.astype(str))
y_train = np.asarray(y_train).ravel()
x_train = preprocessor.fit_transform(x_train).toarray()
return x_train, y_train
def clean_data_chunked(filename):
preprocessor = Pipeline([
('fh', FeatureHasher(n_features=2 ** 13, input_type='string', non_negative=False)),
])
train_data = pd.read_table(filename, sep=',', chunksize=1000)
for train_data_chunk in train_data:
print 'process chunk'
y_train = train_data_chunk['click']
train_data_chunk.drop(['click'], axis=1, inplace=True)  # remove the click (label) column
x_train = np.asarray(train_data_chunk.astype(str))
y_train = np.asarray(y_train).ravel()
x_train = preprocessor.fit_transform(x_train).toarray()
yield x_train, y_train
def create_dataset(model='sklearn-clicklog', from_cache=False, train_dataset_length=100000, test_dataset_length=100000):
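# Build (or reload from cache) a synthetic click log from a known CTR model, then report its likelihood as a reference point.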
train_filename = model + '.train.csv'
test_filename = model + '.test.csv'
if from_cache:
real_ctr_model = CTRModel.load(model + '.dat')
else:
with Timer('init real model'):
real_ctr_model = CTRModel(FEATURES_CONFIG, free_coef=-1, lam=100)
real_ctr_model.init()
with Timer('generate clicklog'):
real_ctr_model.generate_log(
filename=model,
format='csv',
train_length=train_dataset_length,
test_length=test_dataset_length,
)
real_ctr_model.save(model + '.dat')
with Timer('calculate likelihood'):
ll = real_ctr_model.loglikelihood()
ll0 = real_ctr_model.loglikelihood0()
likelihood_ratio = real_ctr_model.likelihood_ratio()
log.info('loglikelihood = %s', ll)
log.info('loglikelihood0 = %s', ll0)
log.info('likelihood_ratio = %s', likelihood_ratio)
return train_filename, test_filename
def ctr_gbdt(model='sklearn-clicklog', from_cache=False, train_dataset_length=100000, test_dataset_length=100000):
TRAIN_FILE, TEST_FILE = create_dataset(model, from_cache, train_dataset_length, test_dataset_length)
prediction_model = GradientBoostingClassifier(
loss='deviance',
learning_rate=0.1,
n_estimators=30,
subsample=1.0,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_depth=5,
)
x_train, y_train = clean_data(TRAIN_FILE)
x_test, y_test = clean_data(TEST_FILE)
with Timer('fit model'):
prediction_model.fit(x_train, y_train)
with Timer('evaluate model'):
y_prediction_train = prediction_model.predict_proba(x_train)
y_prediction_test = prediction_model.predict_proba(x_test)
loss_train = log_loss(y_train, y_prediction_train)
loss_test = log_loss(y_test, y_prediction_test)
print 'loss_train: %s' % loss_train
print 'loss_test: %s' % loss_test
def ctr_pca_sgd(model='sklearn-clicklog', from_cache=False, train_dataset_length=100000, test_dataset_length=100000):
TRAIN_FILE, TEST_FILE = create_dataset(model, from_cache, train_dataset_length, test_dataset_length)
prediction_model = SGDClassifier(
loss='log',
n_iter=200,
alpha=.0000001,
penalty='l2',
learning_rate='invscaling',
power_t=0.5,
eta0=4.0,
shuffle=True,
n_jobs=-1,
)
x_train, y_train = clean_data(TRAIN_FILE)
x_test, y_test = clean_data(TEST_FILE)
pca = PCA(n_components=100)
pca.fit(x_train)
x_train = pca.transform(x_train)
x_test = pca.transform(x_test)
with Timer('fit model'):
prediction_model.fit(x_train, y_train)
with Timer('evaluate model'):
y_prediction_train = prediction_model.predict_proba(x_train)
y_prediction_test = prediction_model.predict_proba(x_test)
loss_train = log_loss(y_train, y_prediction_train)
loss_test = log_loss(y_test, y_prediction_test)
print 'loss_train: %s' % loss_train
print 'loss_test: %s' % loss_test
def ctr_svm(model='sklearn-clicklog', from_cache=False, train_dataset_length=100000, test_dataset_length=100000):
"""
Doesn't work
"""
TRAIN_FILE, TEST_FILE = create_dataset(model, from_cache, train_dataset_length, test_dataset_length)
prediction_model = LinearSVC(
penalty='l1',
loss='squared_hinge',
dual=False,
tol=0.0001,
C=1.0,
multi_class='ovr',
fit_intercept=True,
intercept_scaling=1,
class_weight=None,
verbose=1,
random_state=None,
max_iter=1000,
)
x_train, y_train = clean_data(TRAIN_FILE)
x_test, y_test = clean_data(TEST_FILE)
with Timer('fit model'):
prediction_model.fit(x_train, y_train)
with Timer('evaluate model'):
y_prediction_train = prediction_model.predict_proba(x_train)
y_prediction_test = prediction_model.predict_proba(x_test)
loss_train = log_loss(y_train, y_prediction_train)
loss_test = log_loss(y_test, y_prediction_test)
print 'loss_train: %s' % loss_train
print 'loss_test: %s' % loss_test
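# A hedged sketch of one possible workaround for the predict_proba limitation
# noted above: wrap the linear SVM in scikit-learn's CalibratedClassifierCV,
# which fits a probability calibrator on top of the decision function. This
# function is illustrative only (parameters are not tuned) and is not part of
# the original pipeline.
def ctr_svm_calibrated(model='sklearn-clicklog', from_cache=False, train_dataset_length=100000, test_dataset_length=100000):
    from sklearn.calibration import CalibratedClassifierCV
    TRAIN_FILE, TEST_FILE = create_dataset(model, from_cache, train_dataset_length, test_dataset_length)
    base_svm = LinearSVC(penalty='l1', loss='squared_hinge', dual=False, C=1.0)
    # Sigmoid (Platt) calibration adds a predict_proba method on top of the SVM.
    prediction_model = CalibratedClassifierCV(base_svm, method='sigmoid', cv=3)
    x_train, y_train = clean_data(TRAIN_FILE)
    x_test, y_test = clean_data(TEST_FILE)
    with Timer('fit model'):
        prediction_model.fit(x_train, y_train)
    with Timer('evaluate model'):
        loss_train = log_loss(y_train, prediction_model.predict_proba(x_train))
        loss_test = log_loss(y_test, prediction_model.predict_proba(x_test))
    print 'loss_train: %s' % loss_train
    print 'loss_test: %s' % loss_test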
if __name__ == '__main__':
# ctr_gbdt(
# from_cache=False,
# train_dataset_length=100000,
# test_dataset_length=100000,
# )
# ctr_pca_sgd(
# from_cache=False,
# train_dataset_length=100000,
# test_dataset_length=100000,
# )
# ctr_svm(
# model='sklearn-clicklog',
# from_cache=False,
# train_dataset_length=100000,
# test_dataset_length=100000,
# )
ctr_ftrl(
model='sklearn-clicklog',
from_cache=False,
train_dataset_length=100000,
test_dataset_length=100000,
)
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUVNFDomainMapping(NURESTObject):
""" Represents a VNFDomainMapping in the VSD
Notes:
This represents the domain segment identifier, which is unique per domain for each NSGateway.
"""
__rest_name__ = "vnfdomainmapping"
__resource_name__ = "vnfdomainmappings"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_SEGMENTATION_TYPE_VLAN = "VLAN"
def __init__(self, **kwargs):
""" Initializes a VNFDomainMapping instance
Notes:
You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> vnfdomainmapping = NUVNFDomainMapping(id=u'xxxx-xxx-xxx-xxx', name=u'VNFDomainMapping')
>>> vnfdomainmapping = NUVNFDomainMapping(data=my_dict)
"""
super(NUVNFDomainMapping, self).__init__()
# Read/Write Attributes
self._last_updated_by = None
self._segmentation_id = None
self._segmentation_type = None
self._entity_scope = None
self._associated_ns_gateway_id = None
self._associated_ns_gateway_name = None
self._external_id = None
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="segmentation_id", remote_name="segmentationID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="segmentation_type", remote_name="segmentationType", attribute_type=str, is_required=False, is_unique=False, choices=[u'VLAN'])
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="associated_ns_gateway_id", remote_name="associatedNSGatewayID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_ns_gateway_name", remote_name="associatedNSGatewayName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def segmentation_id(self):
""" Get segmentation_id value.
Notes:
The segmentation ID (1-4095).
This attribute is named `segmentationID` in VSD API.
"""
return self._segmentation_id
@segmentation_id.setter
def segmentation_id(self, value):
""" Set segmentation_id value.
Notes:
The segmentation ID (1-4095).
This attribute is named `segmentationID` in VSD API.
"""
self._segmentation_id = value
@property
def segmentation_type(self):
""" Get segmentation_type value.
Notes:
The type of segmentation that is used.
This attribute is named `segmentationType` in VSD API.
"""
return self._segmentation_type
@segmentation_type.setter
def segmentation_type(self, value):
""" Set segmentation_type value.
Notes:
The type of segmentation that is used.
This attribute is named `segmentationType` in VSD API.
"""
self._segmentation_type = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def associated_ns_gateway_id(self):
""" Get associated_ns_gateway_id value.
Notes:
Associated NS Gateway
This attribute is named `associatedNSGatewayID` in VSD API.
"""
return self._associated_ns_gateway_id
@associated_ns_gateway_id.setter
def associated_ns_gateway_id(self, value):
""" Set associated_ns_gateway_id value.
Notes:
Associated NS Gateway
This attribute is named `associatedNSGatewayID` in VSD API.
"""
self._associated_ns_gateway_id = value
@property
def associated_ns_gateway_name(self):
""" Get associated_ns_gateway_name value.
Notes:
Name of associated NSGateway
This attribute is named `associatedNSGatewayName` in VSD API.
"""
return self._associated_ns_gateway_name
@associated_ns_gateway_name.setter
def associated_ns_gateway_name(self, value):
""" Set associated_ns_gateway_name value.
Notes:
Name of associated NSGateway
This attribute is named `associatedNSGatewayName` in VSD API.
"""
self._associated_ns_gateway_name = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
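# Hedged usage sketch (not part of the generated module): assumes an already
# authenticated bambou/vspk session and a suitable parent object named
# `parent`; the attribute values below are illustrative only.
#
#   mapping = NUVNFDomainMapping(segmentation_type=u'VLAN', segmentation_id=100)
#   parent.create_child(mapping)   # POST the new mapping to VSD under the parent
#   mapping.metadatas.get()        # retrieve child metadata via the fetcher declared above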
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
def GetShrunkInceptionShapes(shrink=10):
"""Iterator for smaller versions of convolution shapes in 2015 Inception.
Relative to inception, each depth value is `depth // shrink`.
Args:
shrink: Factor to shrink each depth value by relative to Inception.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the convolution
parameters of Inception layers.
"""
input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048],
[4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760],
[4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248],
[4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216],
[4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],
[4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152],
[4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152],
[4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],
[4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],
[4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128],
[4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96],
[4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288],
[4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],
[4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192],
[4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64],
[4, 147, 147, 24]]
filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384],
[1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],
[1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],
[1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],
[3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],
[3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224],
[3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192],
[1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224],
[1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],
[3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],
[1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160],
[3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128],
[1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128],
[1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],
[3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],
[1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48],
[3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],
[1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64],
[1, 1, 24, 64]]
out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320],
[4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],
[4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192],
[4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224],
[4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192],
[4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160],
[4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160],
[4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],
[4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128],
[4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96],
[4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64],
[4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],
[4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],
[4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],
[4, 147, 147, 64]]
strides = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# Shrink sizes to make the test faster
for i in input_sizes:
i[3] //= shrink
for f in filter_sizes:
f[2] //= shrink
f[3] //= shrink
for o in out_sizes:
o[3] //= shrink
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, VALID, SAME, SAME, VALID,
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, VALID, VALID, VALID]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
def NHWCToNCHW(input_tensor):
"""Convert the input from NHWC format to NCHW.
Args:
input_tensor: a 4-D tensor, or a 4-element array representing the same.
Returns:
the converted tensor or a shape array
"""
if isinstance(input_tensor, tf.Tensor):
return tf.transpose(input_tensor, [0, 3, 1, 2])
else:
return [input_tensor[0], input_tensor[3], input_tensor[1], input_tensor[2]]
def NCHWToNHWC(input_tensor):
"""Convert the input from NCHW format to NHWC.
Args:
input_tensor: a 4-D tensor, or a 4-element array representing the same.
Returns:
the converted tensor or a shape array
"""
if isinstance(input_tensor, tf.Tensor):
return tf.transpose(input_tensor, [0, 2, 3, 1])
else:
return [input_tensor[0], input_tensor[2], input_tensor[3], input_tensor[1]]
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NHWC", False), ("NHWC", True)]
if tf.test.is_gpu_available():
# "NCHW" format is not currently supported on CPU.
test_configs += [("NCHW", True)]
return test_configs
class Conv2DTest(tf.test.TestCase):
def _DtypesToTest(self, use_gpu):
if use_gpu and not test_util.CudaSupportsHalfMatMulAndConv():
return [tf.float32]
else:
# It is important that float32 comes before float16 here,
# as we will be using its gradients as reference for fp16 gradients.
return [tf.float32, tf.float16]
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, strides,
padding, data_format, dtype, use_gpu):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
strides: Stride: [col_stride, row_stride]
padding: Padding type.
data_format: Format of the data tensors.
dtype: Data type for inputs and outputs.
use_gpu: True if the operations should be run on GPU
Returns:
Symbolic tensor value that can be used to execute the computation
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
t1 = tf.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = tf.constant(x2, shape=filter_in_sizes, dtype=dtype)
strides = [1] + strides + [1]
if data_format == "NCHW":
t1 = NHWCToNCHW(t1)
strides = NHWCToNCHW(strides)
conv = tf.nn.conv2d(t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = NCHWToNHWC(conv)
return conv
def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes,
conv_strides, padding):
"""Verifies that CPU and GPU produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
conv_strides: [row_stride, col_stride] for the convolution;
padding: Padding type.
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
def _SetupVal(data_format, use_gpu):
with self.test_session(use_gpu=use_gpu):
t1 = tf.constant(x1, shape=tensor_in_sizes)
t2 = tf.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t1 = NHWCToNCHW(t1)
strides = NHWCToNCHW(strides)
conv = tf.nn.conv2d(t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = NCHWToNHWC(conv)
return conv
tensors = []
for (data_format, use_gpu) in GetTestConfigs():
tensors.append(_SetupVal(data_format, use_gpu))
with self.test_session() as sess:
values = sess.run(tensors)
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-5, atol=1e-5)
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, strides,
padding, expected):
tensors = []
for (data_format, use_gpu) in GetTestConfigs():
for dtype in self._DtypesToTest(use_gpu):
result = self._SetupValuesForDevice(tensor_in_sizes,
filter_in_sizes,
strides,
padding,
data_format,
dtype,
use_gpu=use_gpu)
tensors.append(result)
with self.test_session() as sess:
values = sess.run(tensors)
for i in range(len(tensors)):
conv = tensors[i]
value = values[i]
print("expected = ", expected)
print("actual = ", value)
tol = 1e-5
if value.dtype == np.float16:
tol = 1e-3
self.assertAllClose(expected, np.ravel(value), atol=tol, rtol=tol)
self.assertShapeEqual(value, conv)
def testConv2D1x1Filter(self):
expected_output = [30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0,
138.0, 171.0, 204.0, 174.0, 216.0, 258.0, 210.0, 261.0,
312.0]
self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1], padding="VALID",
expected=expected_output)
def testConv2DEmpty(self):
expected_output = []
self._VerifyValues(tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1], padding="VALID",
expected=expected_output)
def testConv2D2x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1], padding="VALID",
expected=expected_output)
def testConv2D1x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0,
765.0, 840.0, 843.0, 936.0, 1029.0]
self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
strides=[1, 1], padding="VALID",
expected=expected_output)
def testConv2D2x2FilterStride2(self):
expected_output = [2271.0, 2367.0, 2463.0]
self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2], padding="VALID",
expected=expected_output)
def testConv2D2x2FilterStride2Same(self):
expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2], padding="SAME",
expected=expected_output)
def testConv2D2x2FilterStride1x2(self):
expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0]
self._VerifyValues(tensor_in_sizes=[1, 3, 6, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[1, 2], padding="VALID",
expected=expected_output)
def testConv2DKernelSmallerThanStrideValid(self):
expected_output = [65, 95, 275, 305]
self._VerifyValues(tensor_in_sizes=[1, 7, 7, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3], padding="VALID",
expected=expected_output)
def testConv2DKernelSmallerThanStrideSame(self):
self._VerifyValues(tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2], padding="SAME",
expected=[1, 3, 7, 9])
self._VerifyValues(tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2], padding="SAME",
expected=[1, 3, 9, 11])
self._VerifyValues(tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3], padding="SAME",
expected=[44, 28, 41, 16])
# TODO this currently fails.
#self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1],
# filter_in_sizes=[2, 2, 1, 1],
# strides=[4, 4], padding="SAME",
# expected=[72, 112, 392, 432])
# Testing for backprops
def _RunAndVerifyBackpropInput(self, input_sizes, filter_sizes, output_sizes,
strides, padding, expected, data_format,
use_gpu, err):
total_output_size = 1
total_filter_size = 1
for s in output_sizes:
total_output_size *= s
for s in filter_sizes:
total_filter_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_filter_size + 1)]
x2 = [f * 1.0 for f in range(1, total_output_size + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
input_sizes = NHWCToNCHW(input_sizes)
t0 = tf.constant(input_sizes, shape=[len(input_sizes)])
t1 = tf.constant(x1, shape=filter_sizes)
t2 = tf.constant(x2, shape=output_sizes)
strides = [1] + strides + [1]
if data_format == "NCHW":
t2 = NHWCToNCHW(t2)
strides = NHWCToNCHW(strides)
conv = tf.nn.conv2d_backprop_input(t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = NCHWToNHWC(conv)
# "values" consists of two tensors for two backprops
value = sess.run(conv)
self.assertShapeEqual(value, conv)
print("expected = ", expected)
print("actual = ", value)
self.assertArrayNear(expected, value.flatten(), err)
def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
conv_strides, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(data_format, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
new_input_sizes = NHWCToNCHW(input_sizes)
else:
new_input_sizes = input_sizes
t0 = tf.constant(new_input_sizes, shape=[len(new_input_sizes)])
t1 = tf.constant(x1, shape=filter_sizes)
t2 = tf.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t2 = NHWCToNCHW(t2)
strides = NHWCToNCHW(strides)
conv = tf.nn.conv2d_backprop_input(t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = NCHWToNHWC(conv)
ret = conv.eval()
self.assertShapeEqual(ret, conv)
return ret
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)
def testConv2D2x2Depth1ValidBackpropInput(self):
expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2D2x2Depth3ValidBackpropInput(self):
expected_output = [14.0, 32.0, 50.0,
100.0, 163.0, 226.0,
167.0, 212.0, 257.0,
122.0, 140.0, 158.0,
478.0, 541.0, 604.0,
437.0, 482.0, 527.0]
for (data_format, use_gpu) in GetTestConfigs():
# The GPU version of this test is not very stable. So adjusting the
# error threshold to 1e-4.
self._RunAndVerifyBackpropInput(input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
def testConv2D2x2Depth3ValidBackpropInputStride1x2(self):
expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0,
7.0, 12.0, 11.0, 18.0, 15.0, 24.0,
12.0, 16.0, 15.0, 20.0, 18.0, 24.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2DStrideTwoFilterOneSameBackpropInput(self):
expected_output = [1.0, 0.0, 2.0, 0.0,
0.0, 0.0, 0.0, 0.0,
3.0, 0.0, 4.0, 0.0,
0.0, 0.0, 0.0, 0.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
# Testing for backprops
def _RunAndVerifyBackpropFilter(self, input_sizes, filter_sizes, output_sizes,
strides, padding, expected, data_format,
use_gpu):
total_input_size = 1
total_output_size = 1
for s in input_sizes:
total_input_size *= s
for s in output_sizes:
total_output_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x0 = [f * 1.0 for f in range(1, total_input_size + 1)]
x2 = [f * 1.0 for f in range(1, total_output_size + 1)]
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
t0 = tf.constant(x0, shape=input_sizes, dtype=dtype)
t1 = tf.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = tf.constant(x2, shape=output_sizes, dtype=dtype)
explicit_strides = [1] + strides + [1]
if data_format == "NCHW":
t0 = NHWCToNCHW(t0)
t2 = NHWCToNCHW(t2)
explicit_strides = NHWCToNCHW(explicit_strides)
conv = tf.nn.conv2d_backprop_filter(t0,
t1,
t2,
strides=explicit_strides,
padding=padding,
data_format=data_format)
value = sess.run(conv)
self.assertShapeEqual(value, conv)
print("expected = ", expected)
print("actual = ", value)
self.assertArrayNear(expected, value.flatten(), 1e-5)
def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes,
conv_strides, padding):
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(data_format, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
t0 = tf.constant(x0, shape=input_sizes)
t1 = tf.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = tf.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t0 = NHWCToNCHW(t0)
t2 = NHWCToNCHW(t2)
strides = NHWCToNCHW(strides)
conv = tf.nn.conv2d_backprop_filter(t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
ret = conv.eval()
self.assertShapeEqual(ret, conv)
return ret
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)
def testConv2D2x2Depth1ValidBackpropFilter(self):
expected = [5.0, 8.0, 14.0, 17.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
def testConv2D2x2Depth3ValidBackpropFilter(self):
expected = [17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0,
32.0, 43.0, 54.0, 37.0, 50.0, 63.0, 42.0, 57.0, 72.0,
62.0, 85.0, 108.0, 67.0, 92.0, 117.0, 72.0, 99.0, 126.0,
77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0, 120.0, 153.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self):
expected = [161.0, 182.0, 287.0, 308.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
def testConv2DStrideTwoFilterOneSameBackpropFilter(self):
expected_output = [78.]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
# Gradient checkers
def ConstructAndTestGradient(self, batch, input_rows, input_cols, filter_rows,
filter_cols, in_depth, out_depth, stride_rows,
stride_cols, padding, test_input, data_format,
use_gpu):
input_shape = [batch, input_rows, input_cols, in_depth]
filter_shape = [filter_rows, filter_cols, in_depth, out_depth]
# TODO(yangke): re-factor the computation of output shape.
if padding == "VALID":
output_rows = (input_rows - filter_rows + stride_rows) // stride_rows
output_cols = (input_cols - filter_cols + stride_cols) // stride_cols
else:
output_rows = (input_rows + stride_rows - 1) // stride_rows
output_cols = (input_cols + stride_cols - 1) // stride_cols
output_shape = [batch, output_rows, output_cols, out_depth]
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
# Conv2DGrad functions are not compiled for double due to
# a problem in the way Eigen's Conv2DGrad works for double.
# So we disable the DOUBLE path. We should re-enable this
# when double support returns for CPU and/or GPU.
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with self.test_session(use_gpu=use_gpu):
input_tensor = tf.constant(input_data, shape=input_shape,
dtype=dtype, name="input")
filter_tensor = tf.constant(filter_data, shape=filter_shape,
dtype=dtype, name="filter")
strides = [1, stride_rows, stride_cols, 1]
if data_format == "NCHW":
new_input_tensor = NHWCToNCHW(input_tensor)
strides = NHWCToNCHW(strides)
else:
new_input_tensor = input_tensor
conv = tf.nn.conv2d(new_input_tensor,
filter_tensor,
strides,
padding,
data_format=data_format,
name="conv")
if data_format == "NCHW":
conv = NCHWToNHWC(conv)
self.assertEqual(output_shape, conv.get_shape())
if test_input:
jacob_t, jacob_n = tf.test.compute_gradient(input_tensor, input_shape,
conv, output_shape)
else:
jacob_t, jacob_n = tf.test.compute_gradient(
filter_tensor, filter_shape, conv, output_shape)
if dtype == tf.float32:
reference_jacob_t = jacob_t
err = np.fabs(jacob_t - jacob_n).max()
else:
# Compare fp16 theoretical gradients to fp32 theoretical gradients,
# since fp16 numerical gradients are too imprecise.
err = np.fabs(jacob_t - reference_jacob_t).max()
print("conv_2d gradient error = ", err)
self.assertLess(err, 0.002)
def testInputGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=3,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
c1 = tf.nn.conv2d(tf.placeholder(tf.float32),
tf.placeholder(tf.float32),
strides=[1, 1, 1, 1], padding="SAME")
self.assertEqual([None, None, None, None], c1.get_shape().as_list())
# Incorrect input shape.
with self.assertRaises(ValueError):
tf.nn.conv2d(tf.placeholder(tf.float32, shape=[1, 3]),
tf.placeholder(tf.float32),
strides=[1, 1, 1, 1], padding="SAME")
# Incorrect filter shape.
with self.assertRaises(ValueError):
tf.nn.conv2d(tf.placeholder(tf.float32),
tf.placeholder(tf.float32, shape=[1, 3]),
strides=[1, 1, 1, 1], padding="SAME")
# Depth mismatch.
with self.assertRaises(ValueError):
tf.nn.conv2d(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
tf.placeholder(tf.float32,
shape=[4, 4, 2, 2]),
strides=[1, 1, 1, 1], padding="SAME")
def testOpEdgeCases(self):
with self.test_session() as sess:
# Illegal strides.
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"strides in the batch and depth"):
sess.run(
tf.nn.conv2d(
tf.placeholder(tf.float32),
tf.placeholder(tf.float32),
strides=[2, 1, 1, 1],
padding="SAME"))
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"strides in the batch and depth"):
sess.run(
tf.nn.conv2d(
tf.placeholder(tf.float32),
tf.placeholder(tf.float32),
strides=[1, 1, 1, 2],
padding="SAME"))
# Filter larger than input.
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
tf.nn.conv2d(
tf.placeholder(
tf.float32, shape=[32, 20, 20, 3]),
tf.placeholder(
tf.float32, shape=[20, 21, 3, 2]),
strides=[1, 1, 1, 1],
padding="VALID"))
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
tf.nn.conv2d(
tf.placeholder(
tf.float32, shape=[32, 20, 20, 3]),
tf.placeholder(
tf.float32, shape=[21, 20, 3, 2]),
strides=[1, 1, 1, 1],
padding="VALID"))
# This is only a very simple test. More comprehensive tests live in
# //learning/dist_belief/experimental/brain_compatibility/conv_nn_test.py
# where we compare the numeric results of the depthwise conv op with the
# depthwise weighted sum transformer in dist_belief.
class DepthwiseConv2DTest(tf.test.TestCase):
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride,
padding, expected):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session() as sess:
t1 = tf.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = tf.constant(x2, shape=filter_in_sizes)
conv = tf.nn.depthwise_conv2d(t1, t2, strides=[1, stride, stride, 1],
padding=padding)
value = sess.run(conv)
print("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyValues(tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1, padding="VALID",
expected=expected_output)
class SeparableConv2DTest(tf.test.TestCase):
def _InitValues(self, sizes):
"""Initializes values for input tensors.
Args:
sizes: Tensor dimensions.
Returns:
Tensor initialized to values.
"""
total_size = 1
for s in sizes:
total_size *= s
x = [f * 0.5 for f in range(1, total_size + 1)]
return tf.constant(x, shape=sizes)
def _VerifyValues(self, tensor_in_sizes, depthwise_filter_in_sizes,
pointwise_filter_in_sizes, stride, padding, expected):
"""Verifies the output values of the separable convolution function.
Args:
tensor_in_sizes: Input tensor dimensions.
depthwise_filter_in_sizes: Depthwise filter tensor dimensions.
pointwise_filter_in_sizes: Pointwise filter tensor dimensions.
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
with self.test_session() as sess:
t1 = self._InitValues(tensor_in_sizes)
f1 = self._InitValues(depthwise_filter_in_sizes)
f1.set_shape(depthwise_filter_in_sizes)
f2 = self._InitValues(pointwise_filter_in_sizes)
conv = tf.nn.separable_conv2d(t1, f1, f2, strides=[1, stride, stride, 1],
padding=padding)
value = sess.run(conv)
print("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
def testSeparableConv2D(self):
# The output is the result of two convolutions:
# First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 2, 3].
# Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 7].
# Complexity is O(2*3*2*2 + 6*7*1*1) as opposed to O(2*7*2*2).
expected_output = [
6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5,
8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5,
11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5,
4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5,
15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5,
18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5,
6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5,
19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5,
22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5,
24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5,
10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75,
7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25,
7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75,
2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75]
self._VerifyValues(tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 7],
stride=1, padding="SAME",
expected=expected_output)
def testSeparableConv2DEqualInputOutputDepth(self):
# The output is the result of two convolutions:
# First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 3, 3].
# Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 6].
# Complexity is O(2*3*2*2 + 6*6*1*1) as opposed to O(2*6*2*2).
expected_output = [
5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0,
7047.0, 7449.0, 7851.0, 8253.0, 8655.0, 9057.0,
8352.0, 8829.0, 9306.0, 9783.0, 10260.0, 10737.0,
3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0,
10962.0, 11589.0, 12216.0, 12843.0, 13470.0, 14097.0,
12267.0, 12969.0, 13671.0, 14373.0, 15075.0, 15777.0,
13572.0, 14349.0, 15126.0, 15903.0, 16680.0, 17457.0,
5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0,
16182.0, 17109.0, 18036.0, 18963.0, 19890.0, 20817.0,
17487.0, 18489.0, 19491.0, 20493.0, 21495.0, 22497.0,
18792.0, 19869.0, 20946.0, 22023.0, 23100.0, 24177.0,
7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0,
4963.5, 5227.5, 5491.5, 5755.5, 6019.5, 6283.5,
5328.0, 5611.5, 5895.0, 6178.5, 6462.0, 6745.5,
5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5,
1757.25, 1840.5, 1923.75, 2007.0, 2090.25, 2173.5]
self._VerifyValues(tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 6],
stride=1, padding="SAME",
expected=expected_output)
def testSeparableConv2DIllegalCases(self):
# Output depth less than input depth.
with self.assertRaisesRegexp(
ValueError,
"Refusing to perform an overparameterized separable convolution"):
self._VerifyValues(tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 5],
stride=1, padding="SAME",
expected=None)
class DeepConv2DTest(tf.test.TestCase):
def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes,
conv_strides, padding):
"""Verifies that DeepConv2D and Conv2D produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
conv_strides: [row_stride, col_stride] for the convolution;
padding: Padding type.
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
with self.test_session(use_gpu=False) as sess:
t1 = tf.constant(x1, shape=tensor_in_sizes)
t2 = tf.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
conv = tf.nn.conv2d(t1, t2, strides=strides, padding=padding)
os.environ["TF_USE_DEEP_CONV2D"] = "0"
values_expect = sess.run([conv])
os.environ["TF_USE_DEEP_CONV2D"] = "1"
values_test = sess.run([conv])
self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5)
def _RunTestCases(self, conv_strides, padding):
input_sizes = [[5, 5, 5, 1248], [3, 17, 17, 192], [2, 35, 35, 288],
[2, 6, 8, 517], [2, 7, 4, 81], [3, 11, 3, 77]]
filter_sizes = [[3, 3, 1248, 128], [3, 3, 192, 192], [3, 3, 288, 384],
[3, 3, 517, 64], [3, 3, 81, 77], [3, 3, 77, 181]]
for input_shape, filter_shape in zip(input_sizes, filter_sizes):
self._CompareFwdConv2D(input_shape, filter_shape, conv_strides, padding)
def testConv2D3x3FilterStride1x1Valid(self):
self._RunTestCases([1, 1], "VALID")
def testConv2D3x3FilterStride1x1Same(self):
self._RunTestCases([1, 1], "SAME")
def GetInceptionFwdTest(input_size, filter_size, stride, padding):
def Test(self):
tf.logging.info("Testing InceptionFwd %s", (input_size, filter_size,
stride, padding))
self._CompareFwdValues(input_size, filter_size, [stride, stride], padding)
return Test
def GetInceptionBackInputTest(input_size, filter_size, output_size,
stride, padding):
def Test(self):
tf.logging.info("Testing InceptionBackInput %s",
(input_size, filter_size, output_size, stride, padding))
self._CompareBackpropInput(input_size, filter_size, output_size,
[stride, stride], padding)
return Test
def GetInceptionBackFilterTest(input_size, filter_size, output_size,
strides, padding):
def Test(self):
tf.logging.info("Testing InceptionBackFilter %s",
(input_size, filter_size, output_size, strides, padding))
self._CompareBackFilter(input_size, filter_size, output_size,
strides, padding)
return Test
if __name__ == "__main__":
for index, (input_size_, filter_size_, output_size_, stride_,
padding_) in enumerate(GetShrunkInceptionShapes()):
setattr(Conv2DTest, "testInceptionFwd_" + str(index),
GetInceptionFwdTest(input_size_, filter_size_, stride_, padding_))
setattr(Conv2DTest, "testInceptionBackInput_" + str(index),
GetInceptionBackInputTest(input_size_, filter_size_, output_size_,
stride_, padding_))
setattr(Conv2DTest, "testInceptionBackFilter_" + str(index),
GetInceptionBackFilterTest(input_size_, filter_size_, output_size_,
[stride_, stride_], padding_))
tf.test.main()
|
|
# Import builtins
import os
# Import GUI functionality
import Tkinter as tk
from tkFileDialog import askopenfilenames, asksaveasfilename
# Import internals
from .buttons import *
from .popups import *
# Import style
from . import theme
style_layerspane_normal = {"bg": theme.color4,
"width": 200}
style_layersheader = {"bg": theme.color2,
"font": theme.titlefont1["type"],
"fg": theme.titlefont1["color"],
"anchor": "w", "padx": 5}
style_layeritem_normal = {"bg": theme.color4,
"width": 200,
"relief": "ridge"}
style_layercheck = {"bg": theme.color4}
style_layername_normal = {"bg": theme.color4,
"fg": theme.font1["color"],
"font": theme.font1["type"],
"relief": "flat",
"anchor": "w"}
# Import GIS functionality
import pythongis as pg
from . import dispatch
# Panes
class LayerItem(tk.Frame):
def __init__(self, master, renderlayer, name=None, **kwargs):
# get theme style
style = style_layeritem_normal.copy()
style.update(kwargs)
# Make this class a subclass of tk.Frame and add to it
tk.Frame.__init__(self, master, **style)
self.layerspane = self.master.master
self.statusbar = self.layerspane.statusbar
# Create a frame to place main row with name etc
self.firstrow = tk.Frame(self, **style)
self.firstrow.pack(side="top", fill="x", expand=True)
# Create the visibility check box
var = tk.BooleanVar(self)
self.checkbutton = tk.Checkbutton(self.firstrow, variable=var, offvalue=False, onvalue=True, command=self.toggle_visibility, **style_layercheck)
self.checkbutton.var = var
self.checkbutton.pack(side="left")
self.checkbutton.select()
# Create Delete button to the right
self.deletebutton = IconButton(self.firstrow, padx=2, relief="flat", command=self.delete)
self.deletebutton.set_icon("delete_layer.png")
self.deletebutton.pack(side="right")
# Create the layername display
self.renderlayer = renderlayer
if name: layername = name
elif self.renderlayer.data.filepath:
layername = os.path.split(self.renderlayer.data.filepath)[-1]
else: layername = "Unnamed layer"
self.namelabel = tk.Label(self.firstrow, text=layername, **style_layername_normal)
self.namelabel.pack(side="left", fill="x", expand=True)
# Bind drag events
def start_drag(event):
self.dragging = event.widget.master.master
self.config(cursor="exchange")
def stop_drag(event):
# find closest layerindex to release event
def getindex(layeritem):
return self.layerspane.layers.get_position(layeritem.renderlayer)
goingdown = event.y_root - (self.dragging.winfo_rooty() + self.dragging.winfo_height() / 2.0) > 0
if goingdown:
i = len(self.layerspane.layersview.winfo_children())
for layeritem in sorted(self.layerspane.layersview.winfo_children(), key=getindex, reverse=True):
if event.y_root < layeritem.winfo_rooty() + layeritem.winfo_height() / 2.0:
break
i -= 1
else:
i = 0
for layeritem in sorted(self.layerspane.layersview.winfo_children(), key=getindex):
if event.y_root > layeritem.winfo_rooty() - layeritem.winfo_height() / 2.0:
break
i += 1
# move layer
frompos = self.layerspane.layers.get_position(self.dragging.renderlayer)
if i != frompos:
self.layerspane.move_layer(frompos, i)
# clean up
self.dragging = None
self.config(cursor="arrow")
self.dragging = None
self.namelabel.bind("<Button-1>", start_drag)
self.namelabel.bind("<ButtonRelease-1>", stop_drag)
def toggle_visibility(self):
self.layerspane.toggle_layer(self)
def delete(self):
self.layerspane.remove_layer(self)
def ask_rename(self):
# place entry widget on top of namelabel
nameentry = tk.Entry(self)
nameentry.place(x=self.namelabel.winfo_x(), y=self.namelabel.winfo_y(), width=self.namelabel.winfo_width(), height=self.namelabel.winfo_height())
# set its text to layername and select all text
nameentry.insert(0, self.namelabel["text"])
nameentry.focus()
nameentry.selection_range(0, tk.END)
# accept or cancel change via keypress events
def finish(event):
newname = nameentry.get()
nameentry.destroy()
self.namelabel["text"] = newname
def cancel(event):
nameentry.destroy()
nameentry.bind("<Return>", finish)
nameentry.bind("<Escape>", cancel)
class LayersPane(tk.Frame):
def __init__(self, master, layer_rightclick=None, **kwargs):
# get theme style
style = style_layerspane_normal.copy()
style.update(kwargs)
# Make this class a subclass of tk.Frame and add to it
tk.Frame.__init__(self, master, **style)
# Make the top header
self.header = tk.Label(self, text="Layers:", **style_layersheader)
self.header.pack(side="top", fill="x")
# Then, the layer list view
self.layersview = tk.Frame(self, **style)
self.layersview.pack(side="top", fill="x")
self.pack_propagate(False) # important, this prevents layeritem names from deciding the size of layerspane
def __iter__(self):
for layeritem in self.layersview.winfo_children():
yield layeritem
def assign_layergroup(self, layergroup):
layergroup.layerspane = self
self.layers = layergroup
def add_layer(self, filepath_or_loaded, name=None, **kwargs):
def from_filepath(filepath):
if filepath.lower().endswith((".shp",".geojson",".json")):
func = pg.vector.data.VectorData
args = (filepath,)
elif filepath.lower().endswith((".asc",".ascii",
".tif",".tiff",".geotiff",
".jpg",".jpeg",
".png",".bmp",".gif")):
func = pg.raster.data.RasterData
args = (filepath,)
else:
popup_message(self, "Fileformat not supported\n\n" + filepath )
return
self.statusbar.task.start("Loading layer from file...")
pending = dispatch.request_results(func, args, kwargs)
def finish(loaded):
if isinstance(loaded, Exception):
popup_message(self, str(loaded) + "\n\n" + filepath )
else:
from_loaded(loaded)
self.statusbar.task.stop()
dispatch.after_completion(self, pending, finish)
def from_loaded(loaded):
# add the data as a rendering layer
if isinstance(loaded, pg.vector.data.VectorData):
renderlayer = pg.renderer.VectorLayer(loaded)
elif isinstance(loaded, pg.raster.data.RasterData):
renderlayer = pg.renderer.RasterLayer(loaded)
self.layers.add_layer(renderlayer)
# list a visual representation in the layerspane list
listlayer = LayerItem(self.layersview, renderlayer=renderlayer, name=name)
listlayer.namelabel.bind("<Button-3>", self.layer_rightclick)
listlayer.pack(fill="x", side="bottom")
# render to and update all mapcanvases connected to the layergroup
for mapcanvas in self.layers.connected_maps:
if len(mapcanvas.layers.layers) == 1:
# auto zoom to layer if it is the only layer
mapcanvas.zoom_bbox(*loaded.bbox)
func = mapcanvas.render_one
args = [renderlayer]
self.statusbar.task.start("Rendering layer...")
pending = dispatch.request_results(func, args)
def finish(loaded):
if isinstance(loaded, Exception):
popup_message(self, "Rendering error: " + str(loaded) )
else:
mapcanvas.mapview.update_image()
self.statusbar.task.stop()
dispatch.after_completion(self, pending, finish)
# load from file or go straight to listing/rendering
if isinstance(filepath_or_loaded, (str,unicode)):
from_filepath(filepath_or_loaded)
else:
from_loaded(filepath_or_loaded)
def toggle_layer(self, layeritem):
# toggle visibility
if layeritem.renderlayer.visible == True:
layeritem.renderlayer.visible = False
elif layeritem.renderlayer.visible == False:
layeritem.renderlayer.visible = True
# update all mapcanvas
for mapcanvas in self.layers.connected_maps:
mapcanvas.update_draworder()
mapcanvas.mapview.update_image()
def remove_layer(self, layeritem):
# remove from rendering
layerpos = self.layers.get_position(layeritem.renderlayer)
self.layers.remove_layer(layerpos)
for mapcanvas in self.layers.connected_maps:
mapcanvas.update_draworder()
mapcanvas.mapview.update_image()
# remove from layers list
layeritem.destroy()
def move_layer(self, fromindex, toindex):
self.layers.move_layer(fromindex, toindex)
for mapcanvas in self.layers.connected_maps:
mapcanvas.update_draworder()
mapcanvas.mapview.update_image()
self.update_layerlist()
def update_layerlist(self):
def getindex(layeritem):
return self.layers.get_position(layeritem.renderlayer)
for layeritem in sorted(self.layersview.winfo_children(), key=getindex, reverse=True):
layeritem.pack_forget()
layeritem.pack(fill="x")
def bind_layer_rightclick(self, func):
self.layer_rightclick = func
def assign_statusbar(self, statusbar):
self.statusbar = statusbar
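# Hedged usage sketch (not part of this module): wiring a LayersPane into a
# parent Tk window. The `window`, `statusbar`, `layergroup`, and
# `on_layer_rightclick` names are placeholders for objects supplied by the
# wider application, and "countries.shp" is an illustrative file path.
#
#   layerspane = LayersPane(window)
#   layerspane.assign_statusbar(statusbar)        # required before add_layer()
#   layerspane.assign_layergroup(layergroup)      # links the pane to the render layers
#   layerspane.bind_layer_rightclick(on_layer_rightclick)
#   layerspane.add_layer("countries.shp")         # loads and renders asynchronously
#   layerspane.pack(side="left", fill="y")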
|
|
"""A simple declarative layer for SQLAlchemy ORM.
SQLAlchemy object-relational configuration involves the usage of Table,
mapper(), and class objects to define the three areas of configuration.
declarative moves these three types of configuration underneath the
individual mapped class. Regular SQLAlchemy schema and ORM constructs are
used in most cases::
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine('sqlite://')
Base = declarative_base(engine)
class SomeClass(Base):
__tablename__ = 'some_table'
id = Column('id', Integer, primary_key=True)
name = Column('name', String(50))
Above, the ``declarative_base`` callable produces a new base class from
which all mapped classes inherit. When the class definition is
completed, a new ``Table`` and ``mapper()`` have been generated, accessible
via the ``__table__`` and ``__mapper__`` attributes on the ``SomeClass``
class.
You may omit the names from the Column definitions. Declarative will fill
them in for you::
class SomeClass(Base):
__tablename__ = 'some_table'
id = Column(Integer, primary_key=True)
name = Column(String(50))
Attributes may be added to the class after its construction, and they will
be added to the underlying ``Table`` and ``mapper()`` definitions as
appropriate::
SomeClass.data = Column('data', Unicode)
SomeClass.related = relation(RelatedInfo)
Classes which are mapped explicitly using ``mapper()`` can interact freely
with declarative classes. The ``declarative_base`` base class contains a
``MetaData`` object as well as a dictionary of all classes created against
the base. So to access the above metadata and create tables we can say::
Base.metadata.create_all()
The ``declarative_base`` can also receive a pre-created ``MetaData``
object::
mymetadata = MetaData()
Base = declarative_base(metadata=mymetadata)
Relations to other classes are done in the usual way, with the added feature
that the class specified to ``relation()`` may be a string name. The "class
registry" associated with ``Base`` is used at mapper compilation time to
resolve the name into the actual class object, which is expected to have
been defined once the mapper configuration is used::
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(50))
addresses = relation("Address", backref="user")
class Address(Base):
__tablename__ = 'addresses'
id = Column(Integer, primary_key=True)
email = Column(String(50))
user_id = Column(Integer, ForeignKey('users.id'))
Column constructs, since they are just that, are immediately usable, as
below where we define a primary join condition on the ``Address`` class
using them::
class Address(Base):
__tablename__ = 'addresses'
id = Column(Integer, primary_key=True)
email = Column(String(50))
user_id = Column(Integer, ForeignKey('users.id'))
user = relation(User, primaryjoin=user_id==User.id)
Synonyms are one area where ``declarative`` needs to slightly change the
usual SQLAlchemy configurational syntax. To define a getter/setter which
proxies to an underlying attribute, use ``synonym`` with the ``descriptor``
argument::
class MyClass(Base):
__tablename__ = 'sometable'
_attr = Column('attr', String)
def _get_attr(self):
return self._attr
def _set_attr(self, attr):
self._attr = attr
attr = synonym('_attr', descriptor=property(_get_attr, _set_attr))
The above synonym is then usable as an instance attribute as well as a
class-level expression construct::
x = MyClass()
x.attr = "some value"
session.query(MyClass).filter(MyClass.attr == 'some other value').all()
As an alternative to ``__tablename__``, a direct ``Table`` construct may be
used::
class MyClass(Base):
__table__ = Table('my_table', Base.metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50))
)
This is the preferred approach when using reflected tables, as below::
class MyClass(Base):
__table__ = Table('my_table', Base.metadata, autoload=True)
Mapper arguments are specified using the ``__mapper_args__`` class variable.
Note that the column objects declared on the class are immediately usable,
as in this joined-table inheritance example::
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {'polymorphic_on':discriminator}
class Engineer(Person):
__tablename__ = 'engineers'
__mapper_args__ = {'polymorphic_identity':'engineer'}
id = Column(Integer, ForeignKey('people.id'), primary_key=True)
primary_language = Column(String(50))
For single-table inheritance, the ``__tablename__`` and ``__table__`` class
variables are optional on a class when the class inherits from another
mapped class.
As a convenience feature, the ``declarative_base()`` sets a default
constructor on classes which takes keyword arguments, and assigns them to
the named attributes::
e = Engineer(primary_language='python')
Note that ``declarative`` has no built-in integration with sessions, and is
only intended as an optional syntax for the regular usage of mappers and
Table objects. A typical application setup using ``scoped_session`` might
look like::
engine = create_engine('postgres://scott:tiger@localhost/test')
Session = scoped_session(sessionmaker(transactional=True, autoflush=False, bind=engine))
Base = declarative_base()
Mapped instances then make usage of ``Session`` in the usual way.
"""
from sqlalchemy.schema import Table, Column, MetaData
from sqlalchemy.orm import synonym as _orm_synonym, mapper, comparable_property
from sqlalchemy.orm.interfaces import MapperProperty
from sqlalchemy.orm.properties import PropertyLoader, ColumnProperty
from sqlalchemy import util, exceptions
__all__ = ['declarative_base', 'synonym_for', 'comparable_using',
'declared_synonym']
class DeclarativeMeta(type):
def __init__(cls, classname, bases, dict_):
if '_decl_class_registry' in cls.__dict__:
return type.__init__(cls, classname, bases, dict_)
cls._decl_class_registry[classname] = cls
our_stuff = util.OrderedDict()
for k in dict_:
value = dict_[k]
if (isinstance(value, tuple) and len(value) == 1 and
isinstance(value[0], (Column, MapperProperty))):
util.warn("Ignoring declarative-like tuple value of attribute "
"%s: possibly a copy-and-paste error with a comma "
"left at the end of the line?" % k)
continue
if not isinstance(value, (Column, MapperProperty)):
continue
prop = _deferred_relation(cls, value)
our_stuff[k] = prop
table = None
if '__table__' not in cls.__dict__:
if '__tablename__' in cls.__dict__:
tablename = cls.__tablename__
autoload = cls.__dict__.get('__autoload__')
if autoload:
table_kw = {'autoload': True}
else:
table_kw = {}
cols = []
for key, c in our_stuff.iteritems():
if isinstance(c, ColumnProperty):
for col in c.columns:
if isinstance(col, Column) and col.table is None:
_undefer_column_name(key, col)
cols.append(col)
elif isinstance(c, Column):
_undefer_column_name(key, c)
cols.append(c)
cls.__table__ = table = Table(tablename, cls.metadata,
*cols, **table_kw)
else:
table = cls.__table__
mapper_args = getattr(cls, '__mapper_args__', {})
if 'inherits' not in mapper_args:
inherits = cls.__mro__[1]
inherits = cls._decl_class_registry.get(inherits.__name__, None)
mapper_args['inherits'] = inherits
if hasattr(cls, '__mapper_cls__'):
mapper_cls = util.unbound_method_to_callable(cls.__mapper_cls__)
else:
mapper_cls = mapper
cls.__mapper__ = mapper_cls(cls, table, properties=our_stuff, **mapper_args)
return type.__init__(cls, classname, bases, dict_)
def __setattr__(cls, key, value):
if '__mapper__' in cls.__dict__:
if isinstance(value, Column):
_undefer_column_name(key, value)
cls.__table__.append_column(value)
cls.__mapper__.add_property(key, value)
elif isinstance(value, MapperProperty):
cls.__mapper__.add_property(key, _deferred_relation(cls, value))
else:
type.__setattr__(cls, key, value)
else:
type.__setattr__(cls, key, value)
def _deferred_relation(cls, prop):
if isinstance(prop, PropertyLoader) and isinstance(prop.argument, basestring):
arg = prop.argument
def return_cls():
try:
return cls._decl_class_registry[arg]
except KeyError:
raise exceptions.InvalidRequestError("When compiling mapper %s, could not locate a declarative class named %r. Consider adding this property to the %r class after both dependent classes have been defined." % (prop.parent, arg, prop.parent.class_))
prop.argument = return_cls
return prop
def declared_synonym(prop, name):
"""Deprecated. Use synonym(name, descriptor=prop)."""
return _orm_synonym(name, descriptor=prop)
declared_synonym = util.deprecated(None, False)(declared_synonym)
def synonym_for(name, map_column=False):
"""Decorator, make a Python @property a query synonym for a column.
A decorator version of [sqlalchemy.orm#synonym()]. The function being
decorated is used as the 'descriptor'; the remaining arguments are passed
through to synonym()::
@synonym_for('col')
@property
def prop(self):
return 'special sauce'
The regular ``synonym()`` is also usable directly in a declarative
setting and may be convenient for read/write properties::
prop = synonym('col', descriptor=property(_read_prop, _write_prop))
"""
def decorate(fn):
return _orm_synonym(name, map_column=map_column, descriptor=fn)
return decorate
def comparable_using(comparator_factory):
"""Decorator, allow a Python @property to be used in query criteria.
A decorator front end to [sqlalchemy.orm#comparable_property()], passes
through the comparator_factory and the function being decorated::
@comparable_using(MyComparatorType)
@property
def prop(self):
return 'special sauce'
The regular ``comparable_property()`` is also usable directly in a
declarative setting and may be convenient for read/write properties::
prop = comparable_property(MyComparatorType)
"""
def decorate(fn):
return comparable_property(comparator_factory, fn)
return decorate
def declarative_base(engine=None, metadata=None, mapper=None):
lcl_metadata = metadata or MetaData()
if engine:
lcl_metadata.bind = engine
class Base(object):
__metaclass__ = DeclarativeMeta
metadata = lcl_metadata
if mapper:
__mapper_cls__ = mapper
_decl_class_registry = {}
def __init__(self, **kwargs):
for k in kwargs:
if not hasattr(type(self), k):
raise TypeError('%r is an invalid keyword argument for %s' %
(k, type(self).__name__))
setattr(self, k, kwargs[k])
return Base
def _undefer_column_name(key, column):
if column.key is None:
column.key = key
if column.name is None:
column.name = key
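# Usage sketch: a minimal end-to-end example of the declarative layer defined
# above, assuming a SQLAlchemy version contemporary with this module (0.4-era
# session APIs such as ``Session.save()``); illustrative only, run directly
# to try it out.
if __name__ == '__main__':
    from sqlalchemy import create_engine, Integer, String
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('sqlite://')
    Base = declarative_base(engine)

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    # create the table and insert a row via the default keyword constructor
    Base.metadata.create_all()
    Session = sessionmaker(bind=engine)
    session = Session()
    session.save(User(name='ed'))
    session.flush()
    print session.query(User).filter(User.name == 'ed').all()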
|
|
import os
import sys
import shutil
import tempfile
from cStringIO import StringIO
from nose.tools import ok_, eq_
import mock
from django.conf import settings
from django.core.cache import cache
from airmozilla.main.models import Event, Template, VidlySubmission, Picture
from airmozilla.manage import videoinfo
from airmozilla.base.tests.testbase import DjangoTestCase
class _Response(object):
def __init__(self, content, status_code=200, headers=None):
self.content = self.text = content
self.status_code = status_code
self.headers = headers or {}
def iter_content(self, chunk_size=1024):
increment = 0
while True:
chunk = self.content[increment: increment + chunk_size]
increment += chunk_size
if not chunk:
break
yield chunk
class TestVideoinfo(DjangoTestCase):
fixtures = ['airmozilla/manage/tests/main_testdata.json']
sample_jpg = 'airmozilla/manage/tests/presenting.jpg'
sample_jpg2 = 'airmozilla/manage/tests/tucker.jpg'
_original_temp_directory_name = settings.SCREENCAPTURES_TEMP_DIRECTORY_NAME
def setUp(self):
super(TestVideoinfo, self).setUp()
settings.SCREENCAPTURES_TEMP_DIRECTORY_NAME = (
'test_' + self._original_temp_directory_name
)
def tearDown(self):
cache.clear()
assert settings.SCREENCAPTURES_TEMP_DIRECTORY_NAME.startswith('test_')
temp_dir = os.path.join(
tempfile.gettempdir(),
settings.SCREENCAPTURES_TEMP_DIRECTORY_NAME
)
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
super(TestVideoinfo, self).tearDown()
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
@mock.patch('requests.head')
@mock.patch('subprocess.Popen')
def test_fetch_duration(self, mock_popen, rhead, p_urllib2, p_logging):
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>OK</Message>
<MessageCode>7.4</MessageCode>
<Success>
<MediaShortLink>xxx999</MediaShortLink>
<Token>MXCsxINnVtycv6j02ZVIlS4FcWP</Token>
</Success>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
def mocked_head(url, **options):
return _Response(
'',
200
)
rhead.side_effect = mocked_head
ffmpeged_urls = []
def mocked_popen(command, **kwargs):
# print (args, kwargs)
url = command[2]
ffmpeged_urls.append(url)
class Inner:
def communicate(self):
out = ''
if 'abc123' in url:
err = "bla bla"
elif 'xyz123' in url:
err = """
Duration: 00:19:17.47, start: 0.000000, bitrate: 1076 kb/s
"""
else:
raise NotImplementedError(url)
return out, err
return Inner()
mock_popen.side_effect = mocked_popen
event = Event.objects.get(title='Test event')
template = Template.objects.create(
name='Vid.ly Something',
content="{{ tag }}"
)
event.template = template
event.template_environment = {'tag': 'abc123'}
event.save()
assert event.duration is None
videoinfo.fetch_durations()
event = Event.objects.get(id=event.id)
assert event.duration is None
# need to change to a different tag
# and make sure it has a VidlySubmission
VidlySubmission.objects.create(
event=event,
url='https://s3.com/asomething.mov',
tag='xyz123',
hd=True,
)
event.template_environment = {'tag': 'xyz123'}
event.save()
videoinfo.fetch_durations()
event = Event.objects.get(id=event.id)
eq_(event.duration, 1157)
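# 00:19:17.47 from the mocked ffmpeg output is 19 * 60 + 17 = 1157 seconds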
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
@mock.patch('requests.head')
@mock.patch('subprocess.Popen')
def test_fetch_duration_token_protected_public_event(
self, mock_popen, rhead, p_urllib2, p_logging
):
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>OK</Message>
<MessageCode>7.4</MessageCode>
<Success>
<MediaShortLink>xxx999</MediaShortLink>
<Token>MXCsxINnVtycv6j02ZVIlS4FcWP</Token>
</Success>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
def mocked_head(url, **options):
return _Response(
'',
200
)
rhead.side_effect = mocked_head
ffmpeged_urls = []
def mocked_popen(command, **kwargs):
# print (args, kwargs)
url = command[2]
ffmpeged_urls.append(url)
class Inner:
def communicate(self):
assert 'xyz123' in url
out = ''
err = """
Duration: 00:19:17.47, start: 0.000000, bitrate: 1076 kb/s
"""
return out, err
return Inner()
mock_popen.side_effect = mocked_popen
event = Event.objects.get(title='Test event')
template = Template.objects.create(
name='Vid.ly Something',
content="{{ tag }}"
)
event.template = template
event.template_environment = {'tag': 'abc123'}
event.save()
assert event.privacy == Event.PRIVACY_PUBLIC
assert event.duration is None
# need to change to a different tag
# and make sure it has a VidlySubmission
VidlySubmission.objects.create(
event=event,
url='https://s3.com/asomething.mov',
tag='xyz123',
token_protection=True, # Note!~
hd=True,
)
event.template_environment = {'tag': 'xyz123'}
event.save()
videoinfo.fetch_durations()
event = Event.objects.get(id=event.id)
eq_(event.duration, 1157)
url, = ffmpeged_urls
ok_('&token=' in url)
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
@mock.patch('requests.head')
def test_fetch_duration_fail_to_fetch(
self, rhead, p_urllib2, p_logging
):
def mocked_head(url, **options):
return _Response(
'Not Found',
404
)
rhead.side_effect = mocked_head
event = Event.objects.get(title='Test event')
template = Template.objects.create(
name='Vid.ly Something',
content="{{ tag }}"
)
event.template = template
event.template_environment = {'tag': 'abc123'}
event.save()
assert event.duration is None
buffer = StringIO()
sys.stdout = buffer
try:
videoinfo.fetch_durations()
finally:
sys.stdout = sys.__stdout__
event = Event.objects.get(id=event.id)
eq_(event.duration, None) # because it failed
output = buffer.getvalue()
ok_('404' in output)
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
@mock.patch('requests.head')
def test_fetch_duration_fail_to_fetch_not_video(
self, rhead, p_urllib2, p_logging
):
def mocked_head(url, **options):
return _Response(
'<html>',
200,
headers={
'Content-Type': 'text/html; charset=utf-8'
}
)
rhead.side_effect = mocked_head
event = Event.objects.get(title='Test event')
template = Template.objects.create(
name='Vid.ly Something',
content="{{ tag }}"
)
event.template = template
event.template_environment = {'tag': 'abc123'}
event.save()
assert event.duration is None
buffer = StringIO()
sys.stdout = buffer
try:
videoinfo.fetch_durations()
finally:
sys.stdout = sys.__stdout__
event = Event.objects.get(id=event.id)
eq_(event.duration, None) # because it failed
output = buffer.getvalue()
ok_(
'https://vid.ly/abc123?content=video&format=mp4 is a '
'text/html document' in output
)
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
@mock.patch('requests.head')
def test_fetch_duration_fail_to_fetch_0_content_length(
self, rhead, p_urllib2, p_logging
):
def mocked_head(url, **options):
return _Response(
'<html>',
200,
headers={
'Content-Length': '0'
}
)
rhead.side_effect = mocked_head
event = Event.objects.get(title='Test event')
template = Template.objects.create(
name='Vid.ly Something',
content="{{ tag }}"
)
event.template = template
event.template_environment = {'tag': 'abc123'}
event.save()
assert event.duration is None
buffer = StringIO()
sys.stdout = buffer
try:
videoinfo.fetch_durations()
finally:
sys.stdout = sys.__stdout__
event = Event.objects.get(id=event.id)
eq_(event.duration, None) # because it failed
output = buffer.getvalue()
ok_(
'https://vid.ly/abc123?content=video&format=mp4 has a 0 byte '
'Content-Length' in output
)
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
@mock.patch('requests.head')
@mock.patch('requests.get')
@mock.patch('subprocess.Popen')
def test_fetch_duration_save_locally(
self, mock_popen, rget, rhead, p_urllib2, p_logging
):
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>OK</Message>
<MessageCode>7.4</MessageCode>
<Success>
<MediaShortLink>xxx999</MediaShortLink>
<Token>MXCsxINnVtycv6j02ZVIlS4FcWP</Token>
</Success>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
def mocked_head(url, **options):
if 'file.mpg' in url:
return _Response(
'',
200
)
return _Response(
'',
302,
headers={
'Location': 'https://otherplace.com/file.mpg'
}
)
rhead.side_effect = mocked_head
def mocked_get(url, **options):
return _Response(
'0' * 100000,
200,
headers={
'Content-Length': 100000
}
)
rget.side_effect = mocked_get
ffmpeged_urls = []
def mocked_popen(command, **kwargs):
url = command[2]
ffmpeged_urls.append(url)
class Inner:
def communicate(self):
out = ''
if 'abc123' in url:
err = "bla bla"
elif 'xyz123' in url:
err = """
Duration: 00:19:17.47, start: 0.000000, bitrate: 1076 kb/s
"""
else:
raise NotImplementedError(url)
return out, err
return Inner()
mock_popen.side_effect = mocked_popen
event = Event.objects.get(title='Test event')
template = Template.objects.create(
name='Vid.ly Something',
content="{{ tag }}"
)
event.template = template
event.template_environment = {'tag': 'abc123'}
event.save()
assert event.duration is None
videoinfo.fetch_durations(save_locally=True)
event = Event.objects.get(id=event.id)
assert event.duration is None
ffmpeged_url, = ffmpeged_urls
ok_(ffmpeged_url.endswith('abc123.mp4'))
# need to change to a different tag
# and make sure it has a VidlySubmission
VidlySubmission.objects.create(
event=event,
url='https://s3.com/asomething.mov',
tag='xyz123',
hd=True,
)
event.template_environment = {'tag': 'xyz123'}
event.save()
videoinfo.fetch_durations(save_locally=True)
event = Event.objects.get(id=event.id)
eq_(event.duration, 1157)
ffmpeged_url, ffmpeged_url2 = ffmpeged_urls
ok_(ffmpeged_url.endswith('abc123.mp4'))
ok_(ffmpeged_url2.endswith('xyz123.mp4'))
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
@mock.patch('requests.head')
@mock.patch('requests.get')
@mock.patch('subprocess.Popen')
def test_fetch_duration_save_locally_some(
self, mock_popen, rget, rhead, p_urllib2, p_logging
):
"""This time we're going to have two events to ponder.
One is public and one is staff only.
When passing `save_locally_some=True` it should do
`ffmpeg -i http://url...` on the public one and
`wget https://...; ffmpeg -i /local/file.mpg` on the private one.
"""
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>OK</Message>
<MessageCode>7.4</MessageCode>
<Success>
<MediaShortLink>xxx999</MediaShortLink>
<Token>MXCsxINnVtycv6j02ZVIlS4FcWP</Token>
</Success>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
def mocked_head(url, **options):
# print "HEAD URL", url
if 'file.mp4' in url:
return _Response(
'',
200
)
return _Response(
'',
302,
headers={
'Location': 'https://otherplace.com/file.mp4'
}
)
rhead.side_effect = mocked_head
def mocked_get(url, **options):
# print "GET URL", url
return _Response(
'0' * 100000,
200,
headers={
'Content-Length': 100000
}
)
rget.side_effect = mocked_get
ffmpeged_urls = []
def mocked_popen(command, **kwargs):
url = command[2]
ffmpeged_urls.append(url)
class Inner:
def communicate(self):
out = ''
if 'otherplace.com/file.mp4' in url:
err = """
Duration: 01:05:00.47, start: 0.000000, bitrate: 1076 kb/s
"""
elif 'xyz123' in url:
err = """
Duration: 00:19:17.47, start: 0.000000, bitrate: 1076 kb/s
"""
else:
raise NotImplementedError(url)
return out, err
return Inner()
mock_popen.side_effect = mocked_popen
event = Event.objects.get(title='Test event')
template = Template.objects.create(
name='Vid.ly Something',
content="{{ tag }}"
)
event.template = template
event.template_environment = {'tag': 'abc123'}
assert event.privacy == Event.PRIVACY_PUBLIC
event.save()
event2 = Event.objects.create(
slug='slug2',
title=event.title,
start_time=event.start_time,
placeholder_img=event.placeholder_img,
privacy=Event.PRIVACY_COMPANY,
template=template,
template_environment={'tag': 'xyz123'},
)
videoinfo.fetch_durations(save_locally_some=True)
event = Event.objects.get(id=event.id)
eq_(event.duration, 3900)
event2 = Event.objects.get(id=event2.id)
eq_(event2.duration, 1157)
ffmpeged_urls.sort()
ffmpeged_url1, ffmpeged_url2 = ffmpeged_urls
ok_(ffmpeged_url1.endswith('xyz123.mp4'))
ok_(ffmpeged_url1.startswith('/'))
ok_(ffmpeged_url2.endswith('file.mp4'))
ok_(ffmpeged_url2.startswith('http://'))
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
@mock.patch('requests.head')
@mock.patch('requests.get')
@mock.patch('subprocess.Popen')
def test_fetch_duration_save_locally_some_by_vidly_submission(
self, mock_popen, rget, rhead, p_urllib2, p_logging
):
"""This time we're going to have two events to ponder.
One is public and one is staff only.
With passing `save_locally_some` it should do
`ffmpeg -i http://url...` on the public one and
`wget https://...; ffmpeg -i /local/file.mpg` on the private one.
"""
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>OK</Message>
<MessageCode>7.4</MessageCode>
<Success>
<MediaShortLink>xxx999</MediaShortLink>
<Token>MXCsxINnVtycv6j02ZVIlS4FcWP</Token>
</Success>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
def mocked_head(url, **options):
# print "HEAD URL", url
if 'file.mp4' in url:
return _Response(
'',
200
)
return _Response(
'',
302,
headers={
'Location': 'https://otherplace.com/file.mp4'
}
)
rhead.side_effect = mocked_head
def mocked_get(url, **options):
# print "GET URL", url
return _Response(
'0' * 100000,
200,
headers={
'Content-Length': 100000
}
)
rget.side_effect = mocked_get
ffmpeged_urls = []
def mocked_popen(command, **kwargs):
url = command[2]
ffmpeged_urls.append(url)
class Inner:
def communicate(self):
out = ''
if 'abc123.mp4' in url and url.startswith('/'):
err = """
Duration: 01:05:00.47, start: 0.000000, bitrate: 1076 kb/s
"""
else:
raise NotImplementedError(url)
return out, err
return Inner()
mock_popen.side_effect = mocked_popen
event = Event.objects.get(title='Test event')
template = Template.objects.create(
name='Vid.ly Something',
content="{{ tag }}"
)
event.template = template
event.template_environment = {'tag': 'abc123'}
assert event.privacy == Event.PRIVACY_PUBLIC
event.save()
# The event is public but the relevant vidly submission
# for it says it requires a token.
VidlySubmission.objects.create(
event=event,
tag='somethingelse',
)
VidlySubmission.objects.create(
event=event,
tag='abc123',
token_protection=True,
)
videoinfo.fetch_durations(save_locally_some=True)
event = Event.objects.get(id=event.id)
eq_(event.duration, 3900)
ok_('http://otherplace.com/file.mp4' not in ffmpeged_urls)
filename, = ffmpeged_urls
ok_(filename.endswith('abc123.mp4'))
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
@mock.patch('requests.head')
@mock.patch('subprocess.Popen')
def test_fetch_duration_ogg_videos(
self, mock_popen, rhead, p_urllib2, p_logging
):
def mocked_head(url, **options):
return _Response(
'',
200
)
rhead.side_effect = mocked_head
ffmpeged_urls = []
def mocked_popen(command, **kwargs):
url = command[2]
assert url.endswith('foo.ogg')
ffmpeged_urls.append(url)
class Inner:
def communicate(self):
err = """
Duration: 00:10:31.52, start: 0.000000, bitrate: 77 kb/s
"""
out = ''
return out, err
return Inner()
mock_popen.side_effect = mocked_popen
event = Event.objects.get(title='Test event')
template = Template.objects.create(
name='Ogg Video',
content='<source src="{{ url }}" type="video/ogg" />'
)
event.template = template
event.template_environment = {'url': 'http://videos.m.org/foo.ogg'}
event.save()
assert event.duration is None
videoinfo.fetch_durations()
event = Event.objects.get(id=event.id)
eq_(event.duration, 631)
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
@mock.patch('requests.head')
@mock.patch('subprocess.Popen')
def test_fetch_screencapture(self, mock_popen, rhead, p_urllib2, p_log):
assert Picture.objects.all().count() == 0, Picture.objects.all()
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>OK</Message>
<MessageCode>7.4</MessageCode>
<Success>
<MediaShortLink>xxx999</MediaShortLink>
<Token>MXCsxINnVtycv6j02ZVIlS4FcWP</Token>
</Success>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
def mocked_head(url, **options):
return _Response(
'',
200
)
rhead.side_effect = mocked_head
ffmpeged_urls = []
sample_jpg = self.sample_jpg
sample_jpg2 = self.sample_jpg2
def mocked_popen(command, **kwargs):
url = command[4]
ffmpeged_urls.append(url)
destination = command[-1]
assert os.path.isdir(os.path.dirname(destination))
class Inner:
def communicate(self):
out = err = ''
if 'xyz123' in url:
if '01.jpg' in destination:
shutil.copyfile(sample_jpg, destination)
else:
shutil.copyfile(sample_jpg2, destination)
else:
raise NotImplementedError(url)
return out, err
return Inner()
mock_popen.side_effect = mocked_popen
event = Event.objects.get(title='Test event')
template = Template.objects.create(
name='Vid.ly Something',
content="{{ tag }}"
)
event.template = template
event.save()
assert event.duration is None
videoinfo.fetch_screencaptures()
assert not ffmpeged_urls # because no event has a duration yet
event.duration = 1157
event.save()
# Make sure it has a HD VidlySubmission
VidlySubmission.objects.create(
event=event,
url='https://s3.com/asomething.mov',
tag='xyz123',
hd=True,
)
event.template_environment = {'tag': 'xyz123'}
event.save()
videoinfo.fetch_screencaptures()
assert ffmpeged_urls
eq_(Picture.objects.filter(event=event).count(), 15)
# When viewed, like it's viewed in the picture gallery and gallery
# select widget, we want the one called "Screencap 1" to appear
# before the one called "Screencap 2"
pictures = Picture.objects.all().order_by('event', '-created')
notes = [x.notes for x in pictures]
eq_(
notes,
["Screencap %d" % x for x in range(1, 16)]
)
# Try to do it again and it shouldn't run it again
# because there are pictures in the gallery already.
assert len(ffmpeged_urls) == 15, len(ffmpeged_urls)
videoinfo.fetch_screencaptures()
eq_(len(ffmpeged_urls), 15)
# and still
eq_(Picture.objects.filter(event=event).count(), 15)
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
@mock.patch('requests.head')
@mock.patch('subprocess.Popen')
def test_fetch_screencapture_without_import(
self, mock_popen, rhead, p_urllib2, p_log
):
"""This test is effectively the same as test_fetch_screencapture()
but with `import_=False` set.
"""
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>OK</Message>
<MessageCode>7.4</MessageCode>
<Success>
<MediaShortLink>xxx999</MediaShortLink>
<Token>MXCsxINnVtycv6j02ZVIlS4FcWP</Token>
</Success>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
def mocked_head(url, **options):
return _Response(
'',
200
)
rhead.side_effect = mocked_head
ffmpeged_urls = []
sample_jpg = self.sample_jpg
sample_jpg2 = self.sample_jpg2
def mocked_popen(command, **kwargs):
url = command[4]
ffmpeged_urls.append(url)
destination = command[-1]
assert os.path.isdir(os.path.dirname(destination))
class Inner:
def communicate(self):
out = err = ''
if 'xyz123' in url:
# Let's create two jpeg's in that directory
if '01.jpg' in destination:
shutil.copyfile(sample_jpg, destination)
else:
shutil.copyfile(sample_jpg2, destination)
else:
raise NotImplementedError(url)
return out, err
return Inner()
mock_popen.side_effect = mocked_popen
event = Event.objects.get(title='Test event')
template = Template.objects.create(
name='Vid.ly Something',
content="{{ tag }}"
)
event.duration = 1157
event.template = template
event.save()
# Make sure it has a HD VidlySubmission
VidlySubmission.objects.create(
event=event,
url='https://s3.com/asomething.mov',
tag='xyz123',
hd=True,
)
event.template_environment = {'tag': 'xyz123'}
event.save()
videoinfo.fetch_screencaptures(import_=False)
assert ffmpeged_urls
eq_(Picture.objects.filter(event=event).count(), 0)
# there should now be some JPEGs in the dedicated temp directory
temp_dir = os.path.join(
tempfile.gettempdir(),
settings.SCREENCAPTURES_TEMP_DIRECTORY_NAME
)
# expect there to be a directory with the event's ID
directory_name = '%s_%s' % (event.id, event.slug)
event_temp_dir = os.path.join(temp_dir, directory_name)
ok_(os.path.isdir(event_temp_dir))
# there should be 2 JPEGs in there
eq_(
sorted(os.listdir(event_temp_dir)),
["screencap-%02d.jpg" % x for x in range(1, 16)]
)
def test_import_screencaptures_empty(self):
"""it should be possible to run this at any time, even if
the dedicated temp directory does not exist yet. """
assert not Picture.objects.all().count()
videoinfo.import_screencaptures()
ok_(not Picture.objects.all().count())
def test_import_screencaptures(self):
"""it should be possible to run this at any time, even if
the dedicated temp directory does not exist yet. """
event = Event.objects.get(title='Test event')
# First, put some pictures in the temp directory for this event.
temp_dir = os.path.join(
tempfile.gettempdir(),
settings.SCREENCAPTURES_TEMP_DIRECTORY_NAME
)
if not os.path.isdir(temp_dir):
os.mkdir(temp_dir)
# expect there to be a directory with the event's ID
directory_name = '%s_%s' % (event.id, event.slug)
event_temp_dir = os.path.join(temp_dir, directory_name)
if not os.path.isdir(event_temp_dir):
os.mkdir(event_temp_dir)
# sample_jpg = self.sample_jpg
# sample_jpg2 = self.sample_jpg2
shutil.copyfile(
self.sample_jpg,
os.path.join(event_temp_dir, 'screencap-01.jpg')
)
shutil.copyfile(
self.sample_jpg2,
os.path.join(event_temp_dir, 'screencap-02.jpg')
)
# Also create an empty broken file
dest = os.path.join(event_temp_dir, 'screencap-03.jpg')
with open(dest, 'wb') as f:
f.write('')
# An extra one that won't get imported because the name isn't
# matching.
shutil.copyfile(
self.sample_jpg2,
os.path.join(event_temp_dir, 'otherfile.jpg')
)
videoinfo.import_screencaptures()
ok_(not os.path.isdir(event_temp_dir))
ok_(os.path.isdir(temp_dir))
eq_(Picture.objects.filter(event=event).count(), 2)
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import with_statement
from twisted.python import log
from twisted.internet import defer
from twisted.internet.interfaces import IProtocolFactory
from twisted.internet.error import ConnectionDone
from twisted.protocols.basic import LineOnlyReceiver
from zope.interface import implementer
from txtorcon.util import hmac_sha256, compare_via_hash
from txtorcon.log import txtorlog
from txtorcon.interface import ITorControlProtocol
from .spaghetti import FSM, State, Transition
import os
import re
import base64
DEFAULT_VALUE = 'DEFAULT'
class TorProtocolError(RuntimeError):
"""
Happens on 500-level responses in the protocol, almost certainly
in an errback chain.
:ivar code: the actual error code
:ivar text: other text from the protocol
"""
def __init__(self, code, text):
self.code = code
self.text = text
super(TorProtocolError, self).__init__(text)
def __str__(self):
return str(self.code) + ' ' + self.text
@implementer(IProtocolFactory)
class TorProtocolFactory(object):
"""
Builds TorControlProtocol objects. Implements IProtocolFactory for
Twisted interaction.
If your running Tor doesn't support COOKIE authentication, then
you should supply a password callback.
"""
def __init__(self, password_function=lambda: None):
"""
Builds protocols to talk to a Tor client on the specified
address. For example::
TCP4ClientEndpoint(reactor, "localhost", 9051).connect(TorProtocolFactory())
reactor.run()
By default, COOKIE authentication is used if
available.
:param password_function:
If supplied, this is a zero-argument method that returns a
password (or a Deferred). By default, it returns None. This
is only queried if the Tor we connect to doesn't support
(or hasn't enabled) COOKIE authentication.
"""
self.password_function = password_function
def doStart(self):
":api:`twisted.internet.interfaces.IProtocolFactory` API"
def doStop(self):
":api:`twisted.internet.interfaces.IProtocolFactory` API"
def buildProtocol(self, addr):
":api:`twisted.internet.interfaces.IProtocolFactory` API"
proto = TorControlProtocol(self.password_function)
proto.factory = self
return proto
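# Usage sketch (illustrative only): connecting to a local Tor control port,
# assuming Tor listens on localhost:9051 and a Twisted reactor is available::
#
#   from twisted.internet import reactor
#   from twisted.internet.endpoints import TCP4ClientEndpoint
#
#   d = TCP4ClientEndpoint(reactor, "localhost", 9051).connect(TorProtocolFactory())
#   d.addCallback(lambda proto: proto.post_bootstrap)
#   d.addCallback(lambda proto: proto.get_info('version'))
#   reactor.run()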
class Event(object):
"""
A class representing one of the valid EVENTs that Tor
supports.
This allows you to listen for such an event; see
:meth:`TorControlProtocol.add_event_listener`. The callbacks will be
called every time the event in question is received.
"""
def __init__(self, name):
self.name = name
self.callbacks = []
def listen(self, cb):
self.callbacks.append(cb)
def unlisten(self, cb):
self.callbacks.remove(cb)
def got_update(self, data):
# print self.name,"got_update:",data
for cb in self.callbacks:
cb(data)
def unquote(word):
if len(word) == 0:
return word
if word[0] == '"' and word[-1] == '"':
return word[1:-1]
elif word[0] == "'" and word[-1] == "'":
return word[1:-1]
return word
def parse_keywords(lines, multiline_values=True):
"""
Utility method to parse name=value pairs (GETINFO etc). Takes a
string with newline-separated lines and expects at most one = sign
per line. Accumulates multi-line values.
:param multiline_values:
The default is True which allows for multi-line values until a
line with the next = sign on it. So: '''Foo=bar\nBar'''
produces one key, 'Foo', with value 'bar\nBar' -- set to
False, there would be two keys: 'Foo' with value 'bar' and
'Bar' with value DEFAULT_VALUE.
"""
rtn = {}
key = None
value = ''
# FIXME could use some refactoring to reduce code duplication!
for line in lines.split('\n'):
if line.strip() == 'OK':
continue
if '=' in line and ' ' not in line.split('=', 1)[0]:
if key:
if key in rtn:
if isinstance(rtn[key], list):
rtn[key].append(unquote(value))
else:
rtn[key] = [rtn[key], unquote(value)]
else:
rtn[key] = unquote(value)
(key, value) = line.split('=', 1)
else:
if key is None:
rtn[line.strip()] = DEFAULT_VALUE
elif multiline_values is False:
rtn[key] = value
rtn[line.strip()] = DEFAULT_VALUE
key = None
value = ''
else:
value = value + '\n' + line
if key:
if key in rtn:
if isinstance(rtn[key], list):
rtn[key].append(unquote(value))
else:
rtn[key] = [rtn[key], unquote(value)]
else:
rtn[key] = unquote(value)
return rtn
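# Illustrative behaviour (values for demonstration only):
#
#   parse_keywords('Foo=bar\nBar')
#   -> {'Foo': 'bar\nBar'}
#   parse_keywords('Foo=bar\nBar', multiline_values=False)
#   -> {'Foo': 'bar', 'Bar': 'DEFAULT'}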
@implementer(ITorControlProtocol)
class TorControlProtocol(LineOnlyReceiver):
"""
This is the main class that talks to a Tor and implements the "raw"
protocol.
This instance does not track state; see :class:`txtorcon.TorState`
for the current state of all Circuits, Streams and Routers.
:meth:`txtorcon.TorState.build_circuit` allows you to build custom
circuits.
:meth:`txtorcon.TorControlProtocol.add_event_listener` can be used
to listen for specific events.
To see how circuit and stream listeners are used, see
:class:`txtorcon.TorState`, which is also the place to go if you
wish to add your own stream or circuit listeners.
"""
def __init__(self, password_function=None):
"""
:param password_function:
A zero-argument callable which returns a password (or
Deferred). It is only called if the Tor doesn't have
COOKIE authentication turned on. Tor's default is COOKIE.
"""
self.password_function = password_function
"""If set, a callable to query for a password to use for
authentication to Tor (default is to use COOKIE, however). May
return Deferred."""
self.version = None
"""Version of Tor we've connected to."""
self.is_owned = None
"""If not None, this is the PID of the Tor process we own
(TAKEOWNERSHIP, etc)."""
self.events = {}
"""events we've subscribed to (keyed by name like "GUARD", "STREAM")"""
self.valid_events = {}
"""all valid events (name -> Event instance)"""
self.valid_signals = []
"""A list of all valid signals we accept from Tor"""
self.on_disconnect = defer.Deferred()
"""
This Deferred is triggered when the connection is closed. If
there was an error, the errback is called instead.
"""
self.post_bootstrap = defer.Deferred()
"""
This Deferred is triggered when we're done setting up
(authentication, getting information from Tor). You will want
to use this to do things with the :class:`TorControlProtocol`
class when it's set up, like::
def setup_complete(proto):
print "Setup complete, attached to Tor version",proto.version
def setup(proto):
proto.post_bootstrap.addCallback(setup_complete)
TCP4ClientEndpoint(reactor, "localhost", 9051).connect(TorProtocolFactory())
d.addCallback(setup)
See the helper method :func:`txtorcon.build_tor_connection`.
"""
# variables related to the state machine
self.defer = None # Deferred we returned for the current command
self.response = ''
self.code = None
self.command = None # currently processing this command
self.commands = [] # queued commands
# Here we build up the state machine. Mostly it's pretty
# simple, confounded by the fact that 600's (notify) can come
# at any time AND can be multi-line themselves. Luckily, these
# can't be nested, nor can the responses be interleaved.
idle = State("IDLE")
recv = State("RECV")
recvmulti = State("RECV_PLUS")
recvnotify = State("NOTIFY_MULTILINE")
idle.add_transition(Transition(idle,
self._is_single_line_response,
self._broadcast_response))
idle.add_transition(Transition(recvmulti,
self._is_multi_line,
self._start_command))
idle.add_transition(Transition(recv,
self._is_continuation_line,
self._start_command))
recv.add_transition(Transition(recvmulti,
self._is_multi_line,
self._accumulate_response))
recv.add_transition(Transition(recv,
self._is_continuation_line,
self._accumulate_response))
recv.add_transition(Transition(idle,
self._is_finish_line,
self._broadcast_response))
recvmulti.add_transition(Transition(recv,
self._is_end_line,
lambda x: None))
recvmulti.add_transition(Transition(recvmulti,
self._is_not_end_line,
self._accumulate_multi_response))
self.fsm = FSM([recvnotify, idle, recvmulti, recv])
self.state_idle = idle
# hand-set the initial state; the default start state is the first in
# the list, but the ordering above looks nicer in dotty
self.fsm.state = idle
self.stop_debug()
def start_debug(self):
self.debuglog = open('txtorcon-debug.log', 'w')
def stop_debug(self):
def noop(*args, **kw):
pass
class NullLog(object):
write = noop
flush = noop
self.debuglog = NullLog()
def graphviz_data(self):
return self.fsm.dotty()
# see end of file for all the state machine matcher and
# transition methods.
def get_info_raw(self, *args):
"""
Mostly for internal use; gives you the raw string back from the
GETINFO command. See :meth:`getinfo
<txtorcon.TorControlProtocol.get_info>`
"""
info = ' '.join([str(x) for x in list(args)])
return self.queue_command('GETINFO %s' % info)
def get_info_incremental(self, key, line_cb):
"""
Mostly for internal use; calls GETINFO for a single key and
calls line_cb with each line received, as it is received.
See :meth:`getinfo <txtorcon.TorControlProtocol.get_info>`
"""
def strip_ok_and_call(line):
if line.strip() != 'OK':
line_cb(line)
return self.queue_command('GETINFO %s' % key, strip_ok_and_call)
# The following methods are the main TorController API and
# probably the most interesting for users.
@defer.inlineCallbacks
def get_info(self, *args):
"""
Uses GETINFO to obtain information from Tor.
:param args:
should be a list or tuple of strings which are valid
information keys. For valid keys, see control-spec.txt
from torspec.
.. todo:: make some way to automagically obtain valid
keys, either from running Tor or parsing control-spec
:return:
a ``Deferred`` which will callback with a dict containing
the keys you asked for. If you want to avoid the parsing
into a dict, you can use get_info_raw instead.
"""
lines = yield self.get_info_raw(*args)
rtn = {}
key = None
for line in lines.split('\n'):
if line.split('=', 1)[0] in args:
key = line.split('=', 1)[0]
rtn[key] = line.split('=', 1)[1]
else:
rtn[key] = rtn[key] + '\n' + line
defer.returnValue(rtn)
def get_conf(self, *args):
"""
Uses GETCONF to obtain configuration values from Tor.
:param args: any number of strings which are keys to get. To
get all valid configuration names, you can call:
``get_info('config/names')``
:return: a Deferred which callbacks with one or many
configuration values (depends on what you asked for). See
control-spec for valid keys (you can also use TorConfig which
will come set up with all the keys that are valid). The value
will be a dict.
Note that Tor differentiates between an empty value and a
default value; in the raw protocol one looks like '250
MyFamily' versus '250 MyFamily=' where the latter is set to
the empty string and the former is a default value. We
differentiate these by setting the value in the dict to
DEFAULT_VALUE for the default value case, or an empty string
otherwise.
"""
d = self.queue_command('GETCONF %s' % ' '.join(args))
d.addCallback(parse_keywords).addErrback(log.err)
return d
def get_conf_raw(self, *args):
"""
Same as get_conf, except that the results are not parsed into a dict
"""
return self.queue_command('GETCONF %s' % ' '.join(args))
def set_conf(self, *args):
"""
set configuration values. see control-spec for valid
keys. args is treated as a list containing name then value
pairs. For example, ``set_conf('foo', 'bar')`` will (attempt
to) set the key 'foo' to value 'bar'.
:return: a ``Deferred`` that will callback with the response
('OK') or errback with the error code and message (e.g.
``"552 Unrecognized option: Unknown option 'foo'. Failing."``)
"""
if len(args) % 2:
d = defer.Deferred()
d.errback(RuntimeError("Expected an even number of arguments."))
return d
strargs = [str(x) for x in args]
keys = [strargs[i] for i in range(0, len(strargs), 2)]
values = [strargs[i] for i in range(1, len(strargs), 2)]
def maybe_quote(s):
if ' ' in s:
return '"%s"' % s
return s
values = [maybe_quote(v) for v in values]
args = ' '.join(map(lambda x, y: '%s=%s' % (x, y), keys, values))
return self.queue_command('SETCONF ' + args)
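# Illustrative use from a bootstrapped protocol instance (``proto`` is a
# hypothetical, already-authenticated TorControlProtocol; values are for
# demonstration only):
#
#   d = proto.set_conf('SOCKSPort', '9050')
#   d.addCallback(lambda _: proto.get_conf('SOCKSPort'))
#   d.addCallback(lambda kw: kw['SOCKSPort'])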
def signal(self, nm):
"""
Issues a signal to Tor. See control-spec or
:attr:`txtorcon.TorControlProtocol.valid_signals` for which ones
are available and their return values.
:return: a ``Deferred`` which callbacks with Tor's response
(``OK`` or something like ``552 Unrecognized signal code "foo"``).
"""
if nm not in self.valid_signals:
raise RuntimeError("Invalid signal " + nm)
return self.queue_command('SIGNAL %s' % nm)
def add_event_listener(self, evt, callback):
""":param evt: event name, see also
:var:`txtorcon.TorControlProtocol.events` .keys()
Add a listener to an Event object. This may be called multiple
times for the same event. If it's the first listener, a new
SETEVENTS call will be initiated to Tor.
Currently the callback is any callable that takes a single
argument, that is the text collected for the event from the
tor control protocol.
For more information on the events supported, see
`control-spec section 4.1
<https://gitweb.torproject.org/torspec.git/tree/control-spec.txt#n1260>`_
.. note::
this is a low-level interface; if you want to follow
circuit or stream creation etc. see TorState and methods
like add_circuit_listener
:Return: ``None``
.. todo::
need an interface for the callback
show how to tie in Stem parsing if you want
"""
if evt not in self.valid_events.values():
try:
evt = self.valid_events[evt]
except KeyError:
raise RuntimeError("Unknown event type: " + evt)
if evt.name not in self.events:
self.events[evt.name] = evt
self.queue_command('SETEVENTS %s' % ' '.join(self.events.keys()))
evt.listen(callback)
return None
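# Illustrative use (``proto`` is a hypothetical, bootstrapped instance;
# 'CIRC' is one of the event names Tor reports in events/names):
#
#   def on_circuit(text):
#       print "CIRC:", text
#   proto.add_event_listener('CIRC', on_circuit)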
def remove_event_listener(self, evt, cb):
if evt not in self.valid_events.values():
# this lets us pass a string or a real event-object
try:
evt = self.valid_events[evt]
except KeyError:
raise RuntimeError("Unknown event type: " + evt)
evt.unlisten(cb)
if len(evt.callbacks) == 0:
# note there's a slight window here for an event of this
# type to come in before the SETEVENTS succeeds; see
# _handle_notify which explicitly ignore this case.
del self.events[evt.name]
self.queue_command('SETEVENTS %s' % ' '.join(self.events.keys()))
def protocolinfo(self):
"""
:return: a Deferred which will give you PROTOCOLINFO; see control-spec
"""
return self.queue_command("PROTOCOLINFO 1")
def authenticate(self, passphrase):
"""Call the AUTHENTICATE command."""
return self.queue_command('AUTHENTICATE ' + passphrase.encode("hex"))
def quit(self):
"""
Sends the QUIT command, which asks Tor to hang up on this
controller connection.
If you've taken ownership of the Tor to which you're
connected, this should also cause it to exit. Otherwise, it
won't.
"""
return self.queue_command('QUIT')
def queue_command(self, cmd, arg=None):
"""
returns a Deferred which will fire with the response data when
we get it
Note that basically every request is ultimately funnelled
through this command.
"""
d = defer.Deferred()
self.commands.append((d, cmd, arg))
self._maybe_issue_command()
return d
# the remaining methods are internal API implementations,
# callbacks and state-tracking methods -- you shouldn't have any
# need to call them.
def lineReceived(self, line):
"""
:api:`twisted.protocols.basic.LineOnlyReceiver` API
"""
self.debuglog.write(line + '\n')
self.debuglog.flush()
self.fsm.process(line)
def connectionMade(self):
"Protocol API"
txtorlog.msg('got connection, authenticating')
d = self.protocolinfo()
d.addCallback(self._do_authenticate)
d.addErrback(self._auth_failed)
def connectionLost(self, reason):
"Protocol API"
txtorlog.msg('connection terminated: ' + str(reason))
if self.on_disconnect.callbacks:
if reason.check(ConnectionDone):
self.on_disconnect.callback(self)
else:
self.on_disconnect.errback(reason)
self.on_disconnect = None
return None
def _handle_notify(self, code, rest):
"""
Internal method to deal with 600-level responses.
"""
firstline = rest[:rest.find('\n')]
args = firstline.split()
if args[0] in self.events:
self.events[args[0]].got_update(rest[len(args[0]) + 1:])
return
# not considering this an error, as there's a slight window
# after remove_event_listener is called (so the handler is
# deleted) but the SETEVENTS command has not yet succeeded
def _maybe_issue_command(self):
"""
If there's at least one command queued and we're not currently
processing a command, this will issue the next one on the
wire.
"""
if self.command:
return
if len(self.commands):
self.command = self.commands.pop(0)
(d, cmd, cmd_arg) = self.command
self.defer = d
self.debuglog.write(cmd + '\n')
self.debuglog.flush()
self.transport.write(cmd + '\r\n')
def _auth_failed(self, fail):
"""
Errback if authentication fails.
"""
self.post_bootstrap.errback(fail)
return None
def _safecookie_authchallenge(self, reply):
"""
Callback on AUTHCHALLENGE SAFECOOKIE
"""
kw = parse_keywords(reply.replace(' ', '\n'))
server_hash = base64.b16decode(kw['SERVERHASH'])
server_nonce = base64.b16decode(kw['SERVERNONCE'])
# FIXME put string in global. or something.
expected_server_hash = hmac_sha256(
"Tor safe cookie authentication server-to-controller hash",
self.cookie_data + self.client_nonce + server_nonce
)
if not compare_via_hash(expected_server_hash, server_hash):
raise RuntimeError(
'Server hash not expected; wanted "%s" and got "%s".' %
(base64.b16encode(expected_server_hash),
base64.b16encode(server_hash))
)
client_hash = hmac_sha256(
"Tor safe cookie authentication controller-to-server hash",
self.cookie_data + self.client_nonce + server_nonce
)
client_hash_hex = base64.b16encode(client_hash)
return self.queue_command('AUTHENTICATE %s' % client_hash_hex)
def _do_authenticate(self, protoinfo):
"""
Callback on PROTOCOLINFO to actually authenticate once we know
what's supported.
"""
methods = None
for line in protoinfo.split('\n'):
if line[:5] == 'AUTH ':
kw = parse_keywords(line[5:].replace(' ', '\n'))
methods = kw['METHODS'].split(',')
if not methods:
raise RuntimeError(
"Didn't find AUTH line in PROTOCOLINFO response."
)
if 'SAFECOOKIE' in methods:
cookie = re.search('COOKIEFILE="(.*)"', protoinfo).group(1)
self.cookie_data = open(cookie, 'r').read()
if len(self.cookie_data) != 32:
raise RuntimeError(
"Expected authentication cookie to be 32 bytes, got %d" %
len(self.cookie_data)
)
txtorlog.msg("Using SAFECOOKIE authentication", cookie,
len(self.cookie_data), "bytes")
self.client_nonce = os.urandom(32)
cmd = 'AUTHCHALLENGE SAFECOOKIE ' + \
base64.b16encode(self.client_nonce)
d = self.queue_command(cmd)
d.addCallback(self._safecookie_authchallenge)
d.addCallback(self._bootstrap)
d.addErrback(self._auth_failed)
return
elif 'COOKIE' in methods:
cookie = re.search('COOKIEFILE="(.*)"', protoinfo).group(1)
with open(cookie, 'r') as cookiefile:
data = cookiefile.read()
if len(data) != 32:
raise RuntimeError(
"Expected authentication cookie to be 32 "
"bytes, got %d instead." % len(data)
)
txtorlog.msg("Using COOKIE authentication",
cookie, len(data), "bytes")
d = self.authenticate(data)
d.addCallback(self._bootstrap)
d.addErrback(self._auth_failed)
return
if self.password_function:
d = defer.maybeDeferred(self.password_function)
d.addCallback(self._do_password_authentication)
d.addErrback(self._auth_failed)
return
raise RuntimeError(
"The Tor I connected to doesn't support SAFECOOKIE nor COOKIE"
" authentication and I have no password_function specified."
)
def _do_password_authentication(self, passwd):
if not passwd:
raise RuntimeError("No password available.")
d = self.authenticate(passwd)
d.addCallback(self._bootstrap)
d.addErrback(self._auth_failed)
def _set_valid_events(self, events):
"used as a callback; see _bootstrap"
self.valid_events = {}
for x in events.split():
self.valid_events[x] = Event(x)
@defer.inlineCallbacks
def _bootstrap(self, *args):
"""
The inlineCallbacks decorator allows us to make this method
look synchronous; see the Twisted docs. Each yield is for a
Deferred after which the method continues. When this method
finally exits, we're set up and do the post_bootstrap
callback.
"""
try:
self.valid_signals = yield self.get_info('signal/names')
self.valid_signals = self.valid_signals['signal/names']
except TorProtocolError:
self.valid_signals = ["RELOAD", "DUMP", "DEBUG", "NEWNYM",
"CLEARDNSCACHE"]
self.version = yield self.get_info('version')
self.version = self.version['version']
txtorlog.msg("Connected to a Tor with VERSION", self.version)
eventnames = yield self.get_info('events/names')
eventnames = eventnames['events/names']
self._set_valid_events(eventnames)
yield self.queue_command('USEFEATURE EXTENDED_EVENTS')
self.post_bootstrap.callback(self)
defer.returnValue(self)
# State Machine transitions and matchers. See the __init__ method
# for a way to output a GraphViz dot diagram of the machine.
def _is_end_line(self, line):
"for FSM"
return line.strip() == '.'
def _is_not_end_line(self, line):
"for FSM"
return not self._is_end_line(line)
def _is_single_line_response(self, line):
"for FSM"
try:
code = int(line[:3])
except:
return False
sl = len(line) > 3 and line[3] == ' '
# print "single line?",line,sl
if sl:
self.code = code
return True
return False
def _start_command(self, line):
"for FSM"
# print "startCommand",self.code,line
self.code = int(line[:3])
# print "startCommand:",self.code
if self.command and self.command[2] is not None:
self.command[2](line[4:])
else:
self.response = line[4:] + '\n'
return None
def _is_continuation_line(self, line):
"for FSM"
# print "isContinuationLine",self.code,line
code = int(line[:3])
if self.code and self.code != code:
raise RuntimeError("Unexpected code %d, wanted %d" % (code,
self.code))
return line[3] == '-'
def _is_multi_line(self, line):
"for FSM"
# print "isMultiLine",self.code,line,line[3] == '+'
code = int(line[:3])
if self.code and self.code != code:
raise RuntimeError("Unexpected code %d, wanted %d" % (code,
self.code))
return line[3] == '+'
def _accumulate_multi_response(self, line):
"for FSM"
if self.command and self.command[2] is not None:
self.command[2](line)
else:
self.response += (line + '\n')
return None
def _accumulate_response(self, line):
"for FSM"
if self.command and self.command[2] is not None:
self.command[2](line[4:])
else:
self.response += (line[4:] + '\n')
return None
def _is_finish_line(self, line):
"for FSM"
# print "isFinish",line
if len(line) < 1:
return False
if line[0] == '.':
return True
if len(line) > 3 and line[3] == ' ':
return True
return False
def _broadcast_response(self, line):
"for FSM"
# print "BCAST",line
if len(line) > 3:
if self.code >= 200 and self.code < 300 and \
self.command and self.command[2] is not None:
self.command[2](line[4:])
resp = ''
else:
resp = self.response + line[4:]
else:
resp = self.response
self.response = ''
if self.code >= 200 and self.code < 300:
if self.defer is None:
raise RuntimeError(
'Got a response, but didn\'t issue a command: "%s"' % resp
)
if resp.endswith('\nOK'):
resp = resp[:-3]
self.defer.callback(resp)
elif self.code >= 500 and self.code < 600:
err = TorProtocolError(self.code, resp)
self.defer.errback(err)
elif self.code >= 600 and self.code < 700:
self._handle_notify(self.code, resp)
self.code = None
return
elif self.code is None:
raise RuntimeError("No code set yet in broadcast response.")
else:
raise RuntimeError(
"Unknown code in broadcast response %d." % self.code
)
# note: we don't do this for 600-level responses
self.command = None
self.code = None
self.defer = None
self._maybe_issue_command()
return None
|
|
import json
import pandas as pd
import requests
from py2cytoscape.data.network_view import CyNetworkView
from ..util import util_networkx as nx_util
from ..util import util_dataframe as df_util
from .util_http import check_response
from . import BASE_URL, HEADERS
import warnings
warnings.warn('\n\n\n**** data.cynetwork will be deprecated in the next py2cytoscape release. ****\n\n\n')
BASE_URL_NETWORK = BASE_URL + 'networks'
class CyNetwork(object):
def __init__(self, suid=None, session=None, url=None):
if pd.isnull(url):
raise ValueError("URL is missing.")
# Validate required argument
if pd.isnull(suid):
raise ValueError("SUID is missing.")
else:
self.__id = suid
self.__url = url + '/' + str(self.__id) + '/'
self.session = session if session is not None else requests.Session()
def get_id(self):
"""
Get session-unique ID of this network
:return: SUID as integer
"""
return self.__id
def to_json(self):
"""
Return this network in Cytoscape.js format.
:return: Cytoscape.js Style JSON as dictionary.
"""
return self.session.get(self.__url).json()
def to_networkx(self):
"""
Return this network in NetworkX graph object.
:return: Network as NetworkX graph object
"""
return nx_util.to_networkx(self.session.get(self.__url).json())
def to_dataframe(self, extra_edges_columns=[]):
"""
Return this network in pandas DataFrame.
:return: Network as DataFrame. This is equivalent to SIF.
"""
return df_util.to_dataframe(
self.session.get(self.__url).json(),
edges_attr_cols=extra_edges_columns
)
def get_nodes(self):
"""
Get all nodes as a list of SUIDs
:return:
"""
return self.session.get(self.__url + 'nodes').json()
def get_edges(self, fmt='suid'):
if fmt == 'suid':
return self.session.get(self.__url + 'edges').json()
elif fmt == 'edgelist':
# TODO: implement this
pass
else:
raise ValueError(fmt + ' is not supported for edge format.')
def add_node(self, node_name, dataframe=False):
""" Add a single node to the network. """
if node_name is None:
return None
return self.add_nodes([node_name], dataframe=dataframe)
def add_nodes(self, node_name_list, dataframe=False):
"""
Add new nodes to the network
:param node_name_list: list of node names, e.g. ['a', 'b', 'c']
:param dataframe: If True, return a pandas dataframe instead of a dict.
:return: A dict mapping names to SUIDs for the newly-created nodes.
"""
res = self.session.post(self.__url + 'nodes', data=json.dumps(node_name_list), headers=HEADERS)
check_response(res)
nodes = res.json()
if dataframe:
return pd.DataFrame(nodes).set_index(['SUID'])
else:
return {node['name']: node['SUID'] for node in nodes}
def add_edge(self, source, target, interaction='-', directed=True, dataframe=True):
""" Add a single edge from source to target. """
new_edge = {
'source': source,
'target': target,
'interaction': interaction,
'directed': directed
}
return self.add_edges([new_edge], dataframe=dataframe)
def add_edges(self, edge_list, dataframe=True):
"""
Add all edges in edge_list.
:param edge_list: List of (source, target, interaction) tuples *or*
list of dicts with 'source', 'target', 'interaction', 'directed' keys.
:param dataframe: If dataframe is True (default), return a Pandas DataFrame.
If dataframe is False, return a list of dicts with keys 'SUID', 'source' and 'target'.
:return: A data structure with Cytoscape SUIDs for the newly-created edges.
"""
        # Accept either a list of (source, target, interaction) tuples or a list of dicts.
if not isinstance(edge_list[0], dict):
edge_list = [{'source': edge_tuple[0],
'target': edge_tuple[1],
'interaction': edge_tuple[2]}
for edge_tuple in edge_list]
res = self.session.post(self.__url + 'edges', data=json.dumps(edge_list), headers=HEADERS)
check_response(res)
edges = res.json()
if dataframe:
return pd.DataFrame(edges).set_index(['SUID'])
else:
return edges
def delete_node(self, id):
url = self.__url + 'nodes/' + str(id)
self.session.delete(url)
def delete_edge(self, id):
url = self.__url + 'edges/' + str(id)
self.session.delete(url)
def __get_table(self, type, fmt=None):
url = self.__url + 'tables/default' + type
if fmt is None or fmt == 'dataframe':
return pd.DataFrame(self.session.get(url).json()['rows'])
elif fmt == 'csv' or fmt == 'tsv':
return self.session.get(url + '.' + fmt).content
elif fmt == 'cytoscapejs':
return self.session.get(url).json()['rows']
else:
raise ValueError('Unsupported format: ' + fmt)
def get_node_table(self, fmt=None):
return self.__get_table('node', fmt)
def get_edge_table(self, fmt=None):
return self.__get_table('edge', fmt)
def get_network_table(self, fmt=None):
return self.__get_table('network', fmt)
def __get_columns(self, type=None):
url = self.__url + 'tables/default' + type + '/columns'
df = pd.DataFrame(self.session.get(url).json())
return df.set_index(['name'])
def get_node_columns(self):
"""
Get node table column information as DataFrame
        :return: Node column information as a DataFrame
"""
return self.__get_columns('node')
def get_edge_columns(self):
"""
Get edge table column information as DataFrame
        :return: Edge column information as a DataFrame
"""
return self.__get_columns('edge')
def get_network_columns(self):
"""
Get network table column information as DataFrame
        :return: Network column information as a DataFrame
"""
        return self.__get_columns('network')
def __get_column(self, type=None, column=None):
url = self.__url + 'tables/default' + type + '/columns/' + column
result = self.session.get(url).json()
return pd.Series(result['values'])
def get_node_column(self, column):
return self.__get_column('node', column=column)
def get_edge_column(self, column):
return self.__get_column('edge', column=column)
def __get_value(self, type=None, id=None, column=None):
if column is None and id is not None:
# Extract a row in table
url = self.__url + 'tables/default' + type + '/rows/' + str(id)
return pd.Series(self.session.get(url).json())
elif column is not None and id is not None:
url = self.__url + 'tables/default' + type + '/rows/' + str(id) + '/' + column
return self.session.get(url).content
else:
raise ValueError('ID is required.')
def get_node_value(self, id, column=None):
return self.__get_value(type='node', id=id, column=column)
def get_edge_value(self, id, column=None):
return self.__get_value(type='edge', id=id, column=column)
def get_network_value(self, column):
return self.__get_value(type='network', id=self.__id, column=column)
def update_node_table(self, df=None, network_key_col='name',
data_key_col=None):
return self.__update_table('node', df=df, network_key_col=network_key_col, data_key_col=data_key_col)
def __update_table(self, type, df, network_key_col='name',
data_key_col=None):
is_index_col = False
if data_key_col is None:
# Use index
data_key = network_key_col
is_index_col = True
else:
data_key = data_key_col
table = {
'key': network_key_col,
'dataKey': data_key
}
if is_index_col:
# Use DataFrame's index as the mapping key
df2 = pd.DataFrame(df)
df2[network_key_col] = df.index
data = df2.to_json(orient='records')
del df2
else:
data = df.to_json(orient='records')
table['data'] = json.loads(data)
url = self.__url + 'tables/default' + type
self.session.put(url, json=table, headers=HEADERS)
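    # Illustrative payload (hypothetical values): for a DataFrame indexed by node
    # name with a single column 'degree' and the defaults network_key_col='name',
    # data_key_col=None, __update_table() above PUTs a body of the form
    #     {'key': 'name', 'dataKey': 'name',
    #      'data': [{'degree': 3, 'name': 'a'}, {'degree': 1, 'name': 'b'}]}
    # so rows are matched to existing table entries on the shared 'name' values.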
def __delete_column(self, type, column):
url = self.__url + 'tables/default' + type + '/columns/' + column
self.session.delete(url)
def delete_node_table_column(self, column):
self.__delete_column('node', column=column)
def delete_edge_table_column(self, column):
self.__delete_column('edge', column=column)
def delete_network_table_column(self, column):
self.__delete_column('network', column=column)
def __create_column(self, type, name, data_type, immutable, list):
url = self.__url + 'tables/default' + type + '/columns'
new_column = {
'name': name,
'type': data_type,
'immutable': immutable,
'list': list
}
self.session.post(url, data=json.dumps(new_column), headers=HEADERS)
def create_node_column(self, name, data_type='String', is_immutable=False, is_list=False):
self.__create_column('node', name=name, data_type=data_type, immutable=is_immutable, list=is_list)
def create_edge_column(self, name, data_type='String', is_immutable=False, is_list=False):
self.__create_column('edge', name=name, data_type=data_type, immutable=is_immutable, list=is_list)
def create_network_column(self, name, data_type='String', is_immutable=False, is_list=False):
self.__create_column('network', name=name, data_type=data_type, immutable=is_immutable, list=is_list)
# Utility functions
def get_neighbours(self, node_id):
url = self.__url + 'nodes/' + str(node_id) + '/neighbors'
return self.session.get(url).json()
def get_adjacent_edges(self, node_id):
url = self.__url + 'nodes/' + str(node_id) + '/adjEdges'
return self.session.get(url).json()
# Views
def get_views(self):
"""
Get views as a list of SUIDs
:return:
"""
url = self.__url + 'views'
return self.session.get(url).json()
def get_png(self, height=1200):
url = self.__url + 'views/first.png?h=' + str(height)
return self.session.get(url).content
def get_svg(self, height=1200):
url = self.__url + 'views/first.svg?h=' + str(height)
return self.session.get(url).content
def get_pdf(self):
url = self.__url + 'views/first.pdf'
return self.session.get(url).content
def get_first_view(self, fmt='json'):
"""
        Get the first view model as a dict
        :return: The first view model as a dictionary
"""
url = self.__url + 'views/first'
return self.session.get(url).json()
def get_view(self, view_id, fmt='json'):
if fmt == 'json':
url = self.__url + 'views/' + str(view_id)
return self.session.get(url).json()
elif fmt == 'view':
return self.__get_view_object(view_id)
else:
return None
def __get_view_object(self, view_id):
"""
Create a new CyNetworkView object for the given ID.
:param view_id:
:return:
"""
view = CyNetworkView(self, view_id)
return view
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__id == other.__id
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
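# Usage sketch (illustrative only, not part of the library): driving the
# CyNetwork wrapper above against a running Cytoscape instance with the cyREST
# API enabled. The SUID below is a placeholder for a network created elsewhere.
if __name__ == '__main__':
    demo_suid = 52  # hypothetical SUID of an existing network
    net = CyNetwork(suid=demo_suid, session=requests.Session(),
                    url=BASE_URL_NETWORK)
    node_ids = net.add_nodes(['a', 'b', 'c'])  # {'a': <SUID>, 'b': <SUID>, ...}
    net.add_edges([(node_ids['a'], node_ids['b'], 'interacts with'),
                   (node_ids['b'], node_ids['c'], 'interacts with')])
    print(net.get_node_table().head())  # default node table as a DataFrame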
|
|
# -*- coding: utf-8 -*-
import datetime
import os
import re
import time
import pytz
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
from django.utils import simplejson as json
from twactor import cache, connection, json, log
class User(cache.CachedObject):
"""Get info on a twitter user."""
STATUS_UPDATE_INTERVAL = 3 * 60 # 3 minutes between each status update.
def __init__(self, username_or_id, *args, **kwargs):
if isinstance(username_or_id, basestring):
self._cache['screen_name'] = username_or_id.decode('utf-8')
elif isinstance(username_or_id, (int, long)):
self._cache['id'] = username_or_id
self.profile = UserProfile(self)
def __eq__(self, user):
if not isinstance(user, (User, UserProfile)):
return False
elif isinstance(user, User):
return self.username == user.username
elif isinstance(user, UserProfile):
return self == user.user
def __repr__(self):
return 'User(%r)' % (self._identifier,)
@classmethod
def me(cls):
return cls(cls._connection_broker.username)
def _update_cache(self):
logger = log.getLogger('twactor.User.update')
logger.debug('Updating cache for user %s' % (self._identifier,))
try:
data = self._connection_broker.get('/users/show/%s.json' % (
self._identifier,))
except Exception, exc:
# TODO: implement better error handling.
logger.error('Error fetching user info for %s' % (
self._identifier,))
else:
self._cache = data
@property
def _identifier(self):
return self._cache.get('screen_name',
self._cache.get('id', None) or '')
@property
@cache.update_on_time(STATUS_UPDATE_INTERVAL)
def status(self):
status_data = self._cache['status'].copy()
status_data['user'] = self._cache.copy()
status_data['user'].pop('status')
return Tweet(status_data['id'], cache=status_data
)._with_connection_broker(self._connection_broker)
@property
@cache.update_on_key('created_at')
def joined(self):
dtime = datetime.datetime.strptime(self._cache['created_at'],
'%a %b %d %H:%M:%S +0000 %Y')
return dtime.replace(tzinfo=pytz.utc)
@property
@cache.update_on_key('utc_offset')
def timezone(self):
tzinfo = pytz.FixedOffset(self._cache['utc_offset'] / 60.0)
tzinfo.dst = lambda *args: datetime.timedelta()
return (self._cache['time_zone'], tzinfo)
id = cache.simple_map('id')
username = cache.simple_map('screen_name')
description = cache.simple_map('description')
location = cache.simple_map('location')
name = cache.simple_map('name')
protected = cache.simple_map('protected')
url = cache.simple_map('url')
_favourite_count = cache.simple_map('favourites_count')
_follower_count = cache.simple_map('followers_count')
_friend_count = cache.simple_map('friends_count')
_status_count = cache.simple_map('statuses_count')
_time_zone_name = cache.simple_map('time_zone')
_time_zone_utc_offset = cache.simple_map('utc_offset')
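    # Note (inferred from usage here, not from the cache module itself): each
    # cache.simple_map(key) above exposes the named key of the cached user JSON
    # as an attribute, e.g. ``username`` maps to the 'screen_name' field.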
class UserProfile(cache.CachedMirror):
_mirrored_attribute = 'user'
def __eq__(self, profile):
if not isinstance(profile, (User, UserProfile)):
return False
elif isinstance(profile, UserProfile):
return profile.user == self.user
elif isinstance(profile, User):
return profile == self.user
def __repr__(self):
return 'UserProfile(%r)' % (self.user,)
bg_color = cache.simple_map('profile_background_color')
bg_image_url = cache.simple_map('profile_background_image_url')
bg_title = cache.simple_map('profile_background_title')
avatar_url = cache.simple_map('profile_image_url')
link_color = cache.simple_map('profile_link_color')
sidebar_border_color = cache.simple_map('profile_sidebar_border_color')
sidebar_fill_color = cache.simple_map('profile_sidebar_fill_color')
text_color = cache.simple_map('profile_text_color')
class Tweet(cache.CachedObject):
def __init__(self, id, *args, **kwargs):
try:
id = int(id)
except TypeError:
pass
else:
self._cache['id'] = id
def __eq__(self, tweet):
if not isinstance(tweet, Tweet):
return False
return tweet.id == self.id
def __repr__(self):
return 'Tweet(%r)' % (self.id,)
def _update_cache(self):
logger = log.getLogger('twactor.Tweet.update')
logger.debug('Updating cache for tweet %d' % (self.id,))
try:
data = self._connection_broker.get(
'/statuses/show/%d.json' % (self.id,))
except Exception, exc:
# TODO: implement better error handling.
logger.error('Error fetching info for tweet ID %d' % (self.id,))
else:
self._cache = data
@property
@cache.update_on_key('user')
def user(self):
return User(self._cache['user']['screen_name'],
cache=self._cache['user'])._with_connection_broker(
self._connection_broker)
@property
@cache.update_on_key('source')
def source_name(self):
return re.search(r'>(.*)<', self._cache['source']).groups()[0]
@property
@cache.update_on_key('source')
def source_url(self):
return re.search(r'<a href="(.*)">', self._cache['source']).groups()[0]
@property
@cache.update_on_key('created_at')
def created(self):
return datetime.datetime.strptime(self._cache['created_at'],
'%a %b %d %H:%M:%S +0000 %Y').replace(tzinfo=pytz.utc)
@property
@cache.update_on_key('in_reply_to_status_id')
def in_reply_to(self):
if not self._cache['in_reply_to_status_id']:
return
        reply_cache = {'id': self._cache['in_reply_to_status_id']}
        reply_cache['user'] = {'id': self._cache['in_reply_to_user_id']}
        return Tweet(self._cache['in_reply_to_status_id'], cache=reply_cache
                     )._with_connection_broker(self._connection_broker)
id = cache.simple_map('id')
text = cache.simple_map('text')
truncated = cache.simple_map('truncated')
class PublicTimeline(cache.ForwardCachedList):
OBJ_CLASS = Tweet
UPDATE_INTERVAL = 60
_sort_attrs = ('id',)
def __len__(self):
return len(self._cache)
def __repr__(self):
return 'PublicTimeline()'
def _update_cache(self):
logger = log.getLogger('twactor.PublicTimeline.update')
logger.debug('Updating public timeline')
try:
return self._connection_broker.get('/statuses/public_timeline.json')
except Exception, exc:
# TODO: implement better error handling.
logger.error('Error fetching public timeline update')
class UserTimeline(cache.ForwardCachedList):
OBJ_CLASS = Tweet
# Too low and we make too many API calls. Too high and it takes too long to
# fetch the data. 100 is a reasonable amount, which can be changed at any
# time by just setting the attribute.
_count = 100
def __init__(self, *args, **kwargs):
user = None
if args:
user = args[0]
if not user:
user = User.me()
self.user = user
def __getitem__(self, pos_or_slice):
new_timeline = super(UserTimeline, self).__getitem__(pos_or_slice)
if isinstance(new_timeline, UserTimeline):
new_timeline.user = self.user
return new_timeline
def __len__(self):
return self.user._status_count
def __repr__(self):
return 'UserTimeline(%r)' % (self.user,)
def __str__(self):
return self.user.username.encode('utf-8')
def __unicode__(self):
return self.user.username
def _copy(self):
copy = type(self)(self.user, cache=self._cache[:],
updated=self._updated.copy())
copy._connection_broker = self._connection_broker
return copy
def _update_cache(self):
logger = log.getLogger('twactor.UserTimeline.update')
if ((time.time() - self._updated.get('__time', 0)) <
self.UPDATE_INTERVAL):
return []
logger.debug('Updating data for user %s' % (self.user.username,))
params = {'count': self._count}
if self._cache:
params['since_id'] = self._cache[-1]['id']
path = '/statuses/user_timeline/%s.json' % (self.user.username,)
try:
data = self._connection_broker.get(path, params=params)
except Exception, exc:
# TODO: implement better error handling.
logger.error('Error fetching user timeline update')
else:
logger.debug('Data for %s fetched' % (self.user.username,))
return data
class UserHistory(cache.ReverseCachedList):
OBJ_CLASS = Tweet
# Too low and we make too many API calls. Too high and it takes too long to
# fetch the data. 100 is a reasonable amount, which can be changed at any
# time by just setting the attribute.
_count = 100
def __init__(self, *args, **kwargs):
user = None
if args:
user = args[0]
if not user:
user = User.me()
elif isinstance(user, (basestring, int, long)):
user = User(user)
self.user = user
self._cache_page = kwargs.get('cache_page', 1)
def __getitem__(self, pos_or_slice):
new_history = super(UserHistory, self).__getitem__(pos_or_slice)
if isinstance(new_history, UserHistory):
new_history.user = self.user
return new_history
def __len__(self):
return self.user._status_count
def __repr__(self):
return 'UserHistory(%r)' % (self.user,)
def __str__(self):
return self.user.username.encode('utf-8')
def __unicode__(self):
return self.user.username
def _copy(self):
copy = type(self)(self.user, cache=self._cache[:],
updated=self._updated.copy())
copy._connection_broker = self._connection_broker
return copy
def _update_cache(self):
logger = log.getLogger('twactor.UserHistory.update')
logger.debug('Updating data for user %s' % (self.user.username,))
path = '/statuses/user_timeline/%s.json' % (self.user.username,)
params = {'page': self._cache_page, 'count': self._count}
try:
data = self._connection_broker.get(path, params=params)
except Exception, exc:
# TODO: implement better error handling.
logger.error('Error fetching data')
else:
logger.debug('Data for %s fetched' % (self.user.username,))
self._cache_page += 1
return data
class UserFollowers(cache.CachedObject):
pass # TODO: implement.
class UserFollowing(cache.CachedObject):
pass # TODO: implement
class UserDirectMessages(object):
pass # TODO: implement
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, David Hamp-Gonsalves
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Send the results of a query to the configured music player as a playlist.
"""
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets.ui.commands import PromptChoice
from beets import config
from beets import ui
from beets import util
from os.path import relpath
from tempfile import NamedTemporaryFile
import subprocess
# Indicate where arguments should be inserted into the command string.
# If this is missing, they're placed at the end.
ARGS_MARKER = '$args'
def play(command_str, selection, paths, open_args, log, item_type='track',
keep_open=False):
"""Play items in paths with command_str and optional arguments. If
keep_open, return to beets, otherwise exit once command runs.
"""
# Print number of tracks or albums to be played, log command to be run.
item_type += 's' if len(selection) > 1 else ''
ui.print_(u'Playing {0} {1}.'.format(len(selection), item_type))
log.debug(u'executing command: {} {!r}', command_str, open_args)
try:
if keep_open:
command = util.shlex_split(command_str)
command = command + open_args
subprocess.call(command)
else:
util.interactive_open(open_args, command_str)
except OSError as exc:
raise ui.UserError(
"Could not play the query: {0}".format(exc))
class PlayPlugin(BeetsPlugin):
def __init__(self):
super(PlayPlugin, self).__init__()
config['play'].add({
'command': None,
'use_folders': False,
'relative_to': None,
'raw': False,
'warning_threshold': 100,
})
self.register_listener('before_choose_candidate',
self.before_choose_candidate_listener)
def commands(self):
play_command = Subcommand(
'play',
help=u'send music to a player as a playlist'
)
play_command.parser.add_album_option()
play_command.parser.add_option(
u'-A', u'--args',
action='store',
help=u'add additional arguments to the command',
)
play_command.parser.add_option(
u'-y', u'--yes',
action="store_true",
help=u'skip the warning threshold',
)
play_command.func = self._play_command
return [play_command]
def _play_command(self, lib, opts, args):
"""The CLI command function for `beet play`. Create a list of paths
from query, determine if tracks or albums are to be played.
"""
use_folders = config['play']['use_folders'].get(bool)
relative_to = config['play']['relative_to'].get()
if relative_to:
relative_to = util.normpath(relative_to)
# Perform search by album and add folders rather than tracks to
# playlist.
if opts.album:
selection = lib.albums(ui.decargs(args))
paths = []
sort = lib.get_default_album_sort()
for album in selection:
if use_folders:
paths.append(album.item_dir())
else:
paths.extend(item.path
for item in sort.sort(album.items()))
item_type = 'album'
# Perform item query and add tracks to playlist.
else:
selection = lib.items(ui.decargs(args))
paths = [item.path for item in selection]
if relative_to:
paths = [relpath(path, relative_to) for path in paths]
item_type = 'track'
if not selection:
ui.print_(ui.colorize('text_warning',
u'No {0} to play.'.format(item_type)))
return
open_args = self._playlist_or_paths(paths)
command_str = self._command_str(opts.args)
        # If the selection exceeds the configured warning threshold, prompt the
        # user; proceed with the play command unless they abort (or --yes was given).
if opts.yes or not self._exceeds_threshold(
selection, command_str, open_args, item_type):
play(command_str, selection, paths, open_args, self._log,
item_type)
def _command_str(self, args=None):
"""Create a command string from the config command and optional args.
"""
command_str = config['play']['command'].get()
if not command_str:
return util.open_anything()
# Add optional arguments to the player command.
if args:
if ARGS_MARKER in command_str:
return command_str.replace(ARGS_MARKER, args)
else:
return u"{} {}".format(command_str, args)
else:
# Don't include the marker in the command.
return command_str.replace(" " + ARGS_MARKER, "")
def _playlist_or_paths(self, paths):
"""Return either the raw paths of items or a playlist of the items.
"""
if config['play']['raw']:
return paths
else:
return [self._create_tmp_playlist(paths)]
def _exceeds_threshold(self, selection, command_str, open_args,
item_type='track'):
"""Prompt user whether to abort if playlist exceeds threshold. If
True, cancel playback. If False, execute play command.
"""
warning_threshold = config['play']['warning_threshold'].get(int)
# Warn user before playing any huge playlists.
if warning_threshold and len(selection) > warning_threshold:
if len(selection) > 1:
item_type += 's'
ui.print_(ui.colorize(
'text_warning',
u'You are about to queue {0} {1}.'.format(
len(selection), item_type)))
if ui.input_options((u'Continue', u'Abort')) == 'a':
return True
return False
def _create_tmp_playlist(self, paths_list):
"""Create a temporary .m3u file. Return the filename.
"""
m3u = NamedTemporaryFile('wb', suffix='.m3u', delete=False)
for item in paths_list:
m3u.write(item + b'\n')
m3u.close()
return m3u.name
def before_choose_candidate_listener(self, session, task):
"""Append a "Play" choice to the interactive importer prompt.
"""
return [PromptChoice('y', 'plaY', self.importer_play)]
def importer_play(self, session, task):
"""Get items from current import task and send to play function.
"""
selection = task.items
paths = [item.path for item in selection]
open_args = self._playlist_or_paths(paths)
command_str = self._command_str()
if not self._exceeds_threshold(selection, command_str, open_args):
play(command_str, selection, paths, open_args, self._log,
keep_open=True)
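# Illustrative sketch (not part of the plugin): the $args substitution rule that
# _command_str() implements, reproduced as a standalone helper so the behaviour
# is visible without a beets configuration. The command strings are made up.
def _substitute_args_example(command_str, args=None, marker=ARGS_MARKER):
    """Insert args at the marker, append them if no marker, else strip the marker."""
    if args:
        if marker in command_str:
            return command_str.replace(marker, args)
        return u"{} {}".format(command_str, args)
    return command_str.replace(" " + marker, "")
# _substitute_args_example(u"mpv $args --no-video", u"--shuffle") -> u"mpv --shuffle --no-video"
# _substitute_args_example(u"vlc", u"--fullscreen") -> u"vlc --fullscreen"
# _substitute_args_example(u"mpv $args") -> u"mpv"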
|
|
import logging
import os
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import matplotlib.colors as colors
from funfolding import binning, model, solution
from funfolding.visualization import visualize_classic_binning
from funfolding.visualization import visualize_tree_binning
from funfolding.visualization import visualize_model
from funfolding.visualization.visualize_llh import plot_llh_slice
import corner
from scipy import linalg
def generate_acceptance_correction(vec_f_truth,
binning,
logged_truth):
e_min = 200
e_max = 50000
gamma = -2.7
n_showers = 12000000
if logged_truth:
binning = np.power(10., binning)
normalization = (gamma + 1) / (e_max ** (gamma + 1) - e_min ** (gamma + 1))
corsika_cdf = lambda E: normalization * E ** (gamma + 1) / (gamma + 1)
vec_acceptance = np.zeros_like(vec_f_truth, dtype=float)
for i, vec_i_detected in enumerate(vec_f_truth):
p_bin_i = corsika_cdf(binning[i + 1]) - corsika_cdf(binning[i])
vec_acceptance[i] = p_bin_i * n_showers / vec_i_detected
return vec_acceptance
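# Minimal sketch (synthetic numbers, not the FACT sample): the factors returned
# above are (expected thrown events per truth bin under the E^gamma spectrum)
# divided by (detected events per bin). Wrapped in a function so it does not
# run as part of the script below; the bin edges and counts are hypothetical.
def _demo_acceptance_correction():
    demo_binning = np.array([2.4, 3.0, 3.6, 4.2])   # log10(E/GeV) bin edges
    demo_detected = np.array([4000., 800., 150.])   # hypothetical detected counts
    return generate_acceptance_correction(demo_detected, demo_binning, True)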
if __name__ == '__main__':
logging.captureWarnings(True)
logging.basicConfig(
format='%(processName)-10s %(name)s %(levelname)-8s %(message)s',
level=logging.INFO)
random_seed = 1340
n_walkers = 100
n_steps_used = 2000
n_samples_test = 5000
min_samples_leaf = 20
tau = 1.
binning_E = np.linspace(2.4, 4.2, 10)
random_state = np.random.RandomState(random_seed)
if not os.path.isfile('fact_simulations.hdf'):
from get_fact_simulations import download
logging.info('Downloading FACT simulations!')
download()
df = pd.read_hdf('fact_simulations.hdf', 'gamma_simulation')
binned_E = np.digitize(df.loc[:, 'log10(energy)'],
binning_E)
idx = np.arange(len(df))
random_state.shuffle(idx)
test_slice = slice(0, n_samples_test)
binning_slice = slice(n_samples_test, n_samples_test + 10 * n_samples_test)
A_slice = slice(n_samples_test + 10 * n_samples_test, None)
idx_test = np.sort(idx[test_slice])
idx_binning = np.sort(idx[binning_slice])
idx_A = np.sort(idx[A_slice])
binning_E = np.linspace(2.4, 4.2, 10)
binned_E = np.digitize(df.loc[:, 'log10(energy)'], binning_E)
binned_E_test = binned_E[idx_test]
binned_E_binning = binned_E[idx_binning]
binned_E_A = binned_E[idx_A]
obs_array = df.get(['log10(ConcCore)', 'log10(E_RF)']).values
X_A = obs_array[idx_A]
X_binning = obs_array[idx_binning]
X_test = obs_array[idx_test]
classic_binning = binning.ClassicBinning(
bins=[15, 25],
random_state=random_state)
classic_binning.fit(X_A)
fig, ax = plt.subplots()
visualize_classic_binning.plot_binning(ax,
classic_binning,
X_A,
log_c=False,
cmap='viridis')
fig.savefig('05_fact_example_original_binning.png')
tree_binning_2d = binning.TreeBinningSklearn(
regression=False,
min_samples_leaf=int(min_samples_leaf * 10.),
random_state=random_state)
tree_binning_2d.fit(X_binning,
binned_E_binning,
uniform=False)
fig, ax = plt.subplots(figsize=(6, 6))
visualize_tree_binning.plot_binning(ax,
tree_binning_2d,
limits=[-0.7,
-0.2,
2.7,
4.2],
X=X_A,
linecolor='k',
linewidth='0.3',
log_c=False,
as_hexbins=True,
hex_kwargs={'rasterized': True,
'gridsize': 50},
cmap='viridis')
ax.set_ylabel('log10(Energy Estimator [GeV])')
ax.set_xlabel('log10(Concentration [a.u.])')
fig.savefig('05_fact_example_original_tree_binning.png', dpi=300)
closest = classic_binning.merge(X_binning,
min_samples=int(min_samples_leaf * 10.),
mode='closest')
fig, ax = plt.subplots()
visualize_classic_binning.plot_binning(ax,
closest,
X_A,
log_c=False,
cmap='viridis')
fig.savefig('05_fact_example_original_binning_closest.png')
unmerged_model = model.LinearModel()
binned_g_A = classic_binning.digitize(X_A)
unmerged_model.initialize(digitized_obs=binned_g_A,
digitized_truth=binned_E_A)
binned_g_A = closest.digitize(X_A)
merged_model = model.LinearModel()
merged_model.initialize(digitized_obs=binned_g_A,
digitized_truth=binned_E_A)
single_obs_model = model.LinearModel()
max_e = np.max(X_A[:, 1]) + 1e-3
min_e = np.min(X_A[:, 1]) - 1e-3
binning_E_obs = np.linspace(min_e, max_e, 11)
binned_g_A = np.digitize(X_A[:, 1], binning_E_obs)
single_obs_model.initialize(digitized_obs=binned_g_A,
digitized_truth=binned_E_A)
single_obs_model_more_bins = model.LinearModel()
max_e = np.max(X_A[:, 1]) + 1e-3
min_e = np.min(X_A[:, 1]) - 1e-3
binning_E_obs = np.linspace(min_e, max_e, closest.n_bins + 1)
binned_g_A = np.digitize(X_A[:, 1], binning_E_obs)
single_obs_model_more_bins.initialize(digitized_obs=binned_g_A,
digitized_truth=binned_E_A)
fig, ax = plt.subplots(figsize=(2, 6))
visualize_model.plot_A(ax, merged_model)
fig.savefig('05_A_single_obs_model.png', dpi=300)
binned_g_A = tree_binning_2d.digitize(X_A)
tree_2d_model = model.LinearModel()
tree_2d_model.initialize(digitized_obs=binned_g_A,
digitized_truth=binned_E_A)
fig, ax = plt.subplots(figsize=(2, 6))
visualize_model.plot_A(ax, tree_2d_model)
fig.savefig('05_A_tree_model.png', dpi=300)
tree_obs = ["log10(E_RF)",
"log10(Size)",
"log10(ConcCore)",
"log10(numPixelInShower)",
"log10(Length)",
"Width",
"M3Trans",
"M3Long",
"m3l",
"m3t",
"Concentration_onePixel",
"Concentration_twoPixel",
"Leakage",
"Leakage2",
"concCOG",
"numIslands",
"phChargeShower_mean",
"phChargeShower_variance",
"phChargeShower_max"]
obs_array = df.get(tree_obs).values
X_tree_test = obs_array[idx_test]
X_tree_binning = obs_array[idx_binning]
X_tree_A = obs_array[idx_A]
tree_binning_uniform = binning.TreeBinningSklearn(
regression=False,
min_samples_leaf=int(min_samples_leaf * 10.),
random_state=random_state)
tree_binning_uniform.fit(X_tree_binning,
binned_E_binning,
uniform=True)
binned_g_A = tree_binning_uniform.digitize(X_tree_A)
tree_model_uniform = model.LinearModel()
tree_model_uniform.initialize(digitized_obs=binned_g_A,
digitized_truth=binned_E_A)
fig, ax = plt.subplots(figsize=(2, 6))
visualize_model.plot_A(ax, tree_model_uniform)
fig.savefig('05_A_tree_model_full_uniform.png', dpi=300)
tree_binning = binning.TreeBinningSklearn(
regression=False,
min_samples_leaf=int(min_samples_leaf * 10.),
random_state=random_state)
tree_binning.fit(X_tree_binning,
binned_E_binning,
uniform=False)
binned_g_A = tree_binning.digitize(X_tree_A)
tree_model = model.LinearModel()
tree_model.initialize(digitized_obs=binned_g_A,
digitized_truth=binned_E_A)
    fig, ax = plt.subplots(figsize=(2, 6))
    visualize_model.plot_A(ax, tree_model)
fig.savefig('05_A_tree_model_full.png', dpi=300)
fig, ax = plt.subplots()
svd_values = unmerged_model.evaluate_condition()
bin_edges = np.linspace(0, len(svd_values), len(svd_values) + 1)
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2.
ax.hist(bin_centers,
bins=bin_edges,
weights=svd_values,
histtype='step',
label='2 Observables (Unmerged; {} Bins)'.format(
classic_binning.n_bins))
svd_values = merged_model.evaluate_condition()
ax.hist(bin_centers,
bins=bin_edges,
weights=svd_values,
histtype='step',
label='2 Observables (Merged; {} Bins)'.format(closest.n_bins))
#
# svd_values = single_obs_model.evaluate_condition()
# ax.hist(bin_centers,
# bins=bin_edges,
# weights=svd_values,
# histtype='step',
# label='Single Observable ({} Bins)'.format(closest.n_bins))
#
# svd_values = single_obs_model_more_bins.evaluate_condition()
# ax.hist(bin_centers,
# bins=bin_edges,
# weights=svd_values,
# histtype='step',
# label='Single Observable ({} Bins)'.format(closest.n_bins))
svd_values = tree_2d_model.evaluate_condition()
ax.hist(bin_centers,
bins=bin_edges,
weights=svd_values,
histtype='step',
label='Tree Based 2D ({} Bins)'.format(tree_binning.n_bins))
svd_values = tree_model.evaluate_condition()
ax.hist(bin_centers,
bins=bin_edges,
weights=svd_values,
histtype='step',
label='Tree Based ({} Bins)'.format(tree_binning.n_bins))
#
svd_values = tree_model_uniform.evaluate_condition()
ax.hist(bin_centers,
bins=bin_edges,
            weights=svd_values,
histtype='step',
label='Tree Based ({} Bins; Uniform)'.format(tree_binning.n_bins))
plt.legend(loc='lower left')
ax.set_yscale("log", nonposy='clip')
plt.savefig('05_condition.png')
exit()
binned_g_test = tree_binning.digitize(X_tree_test)
vec_g, vec_f = tree_model.generate_vectors(binned_g_test,
binned_E_test)
vec_f_truth = np.array(np.bincount(binned_E), dtype=float)[1:]
vec_acceptance = generate_acceptance_correction(vec_f_truth,
binning_E,
True)
vec_f_truth /= np.sum(vec_f_truth)
vec_f_truth *= n_samples_test
llh = solution.StandardLLH(tau=tau,
log_f=True,
vec_acceptance=vec_acceptance,
C='thikonov')
llh.initialize(vec_g=vec_g,
model=tree_model)
sol_gd = solution.LLHSolutionGradientDescent(n_steps=500,
gamma=0.01)
sol_gd.initialize(llh=llh, model=tree_model)
sol_gd.set_x0_and_bounds()
x, llh_values, gradient, hessian = sol_gd.fit()
idx_best = np.argmax(llh_values)
vec_f_str = ', '.join('{0:.2f}'.format(a)
for a in x[idx_best])
logging.info('Best Fit (Gradient):\t{}\t(LLH: {})'.format(
vec_f_str,
llh_values[idx_best]))
sol_mini = solution.LLHSolutionMinimizer()
sol_mini.initialize(llh=llh, model=tree_model)
sol_mini.set_x0_and_bounds(x0=x[idx_best])
best_fit, mini_cov = sol_mini.fit(constrain_N=False)
vec_f_str = ', '.join('{0:.2f}'.format(a)
for a in best_fit.x)
logging.info('Best Fit (Minimizer):\t{}\t(LLH: {})'.format(
vec_f_str,
best_fit.fun))
sol_mcmc = solution.LLHSolutionMCMC(n_burn_steps=100,
n_used_steps=n_steps_used,
n_walkers=n_walkers,
n_threads=1,
random_state=random_state)
sol_mcmc.initialize(llh=llh, model=tree_model)
sol_mcmc.set_x0_and_bounds(x0=best_fit.x)
vec_f_est_mcmc, sigma_vec_f, sample, probs = sol_mcmc.fit()
vec_f_str = ', '.join('{0:.2f}'.format(a)
for a in vec_f_est_mcmc)
logging.info('Best Fit (MCMC):\t{}\t(LLH: {})'.format(
vec_f_str,
max(probs)))
# sol_mcmc.n_threads = 9
# logging.info('Calculating Eff sample size:')
# n_eff = sol_mcmc.calc_effective_sample_size(sample, n_threads=9)
# n_eff_str = ', '.join(str(n) for n in n_eff)
# logging.info('per Walker:\t{} ({} Walker with {} steps)'.format(
# n_eff_str,
# n_walkers,
# n_steps_used))
def create_llh_slice(llh, best_fit, selected_bin=None):
if selected_bin is None:
selected_bin = np.argmax(best_fit)
points = np.linspace(0.9 * best_fit[selected_bin],
1.1 * best_fit[selected_bin],
31)
llh_values = np.zeros_like(points)
gradient_values = np.zeros_like(points)
hessian_values = np.zeros_like(points)
fig, [ax_grad, ax_hess] = plt.subplots(2, 1, figsize=(24, 18))
diff = np.diff(points)[0] / 1.5
for i, p_i in enumerate(points):
best_fit[selected_bin] = p_i
llh_values[i] = llh.evaluate_llh(best_fit)
gradient_values[i] = llh.evaluate_gradient(best_fit)[selected_bin]
hessian_values[i] = llh.evaluate_hessian(best_fit)[selected_bin,
selected_bin]
lower_x = p_i - diff
upper_x = p_i + diff
grad_lower_y = llh_values[i] - (diff * gradient_values[i])
grad_upper_y = llh_values[i] + (diff * gradient_values[i])
hess_lower_y = gradient_values[i] - (diff * hessian_values[i])
hess_upper_y = gradient_values[i] + (diff * hessian_values[i])
if gradient_values[i] < 0:
direction = -1.
else:
direction = 1.
ax_hess.plot([lower_x, upper_x],
[hess_lower_y, hess_upper_y],
'k-')
dy = gradient_values * diff
dx = np.ones_like(points) * diff
dx[gradient_values < 0] *= -1.
dy[gradient_values < 0] *= -1.
ax_grad.quiver(points, llh_values, dx, dy, angles='xy', scale_units='xy', scale=1.)
ax_grad.plot(best_fit[selected_bin], llh_values[selected_bin], 'ro')
ax_hess.plot(points, gradient_values, 'o')
fig.savefig('05_llh_scan.png')
plt.close(fig)
return selected_bin
logging.info('Creating plot of a LLH slice')
fig = plot_llh_slice(llh, best_fit.x)
fig.savefig('05_llh_slice.png')
logging.info('Creating corner plot')
corner_fig = corner.corner(sample,
truths=vec_f_est_mcmc,
truth_color='r',
rasterized=True)
corner_fig.savefig('05_corner_fact.png')
logging.info('Creating best fit plots')
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
bin_mids = (binning_E[1:] + binning_E[:-1]) / 2.
bin_width = (binning_E[1:] - binning_E[:-1]) / 2.
plt.hist(bin_mids, bins=binning_E, weights=vec_f_truth, histtype='step')
ax.errorbar(bin_mids,
best_fit.x,
yerr=np.sqrt(np.diag(np.absolute(mini_cov))),
xerr=bin_width,
ls="",
color="k",
label="Unfolding (Minimizer)")
ax.set_yscale("log", nonposy='clip')
ax.set_ylim([2e1, 2e3])
fig.savefig('05_unfolding_minimizer.png')
plt.close(fig)
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
plt.hist(bin_mids, bins=binning_E, weights=vec_f_truth, histtype='step')
ax.errorbar(bin_mids,
vec_f_est_mcmc,
yerr=[vec_f_est_mcmc - sigma_vec_f[0, :],
sigma_vec_f[1, :] - vec_f_est_mcmc],
xerr=bin_width,
ls="",
color="r",
label="Unfolding (MCMC)")
ax.set_yscale("log", nonposy='clip')
ax.set_ylim([2e1, 2e3])
fig.savefig('05_unfolding_mcmc.png')
plt.close(fig)
logging.info('Creating LLH histogram')
fig, ax = plt.subplots(1, 1, figsize=(6, 4.5))
ax.hist(2*(np.max(probs) - probs),
bins=50,
weights=np.ones_like(probs) * 1./len(probs),
histtype='step', lw=2)
ax.set_yscale("log", nonposy='clip')
ax.set_xlabel(r'$-2\cdot\ln\left(\frac{\mathdefault{LLH}}{\mathdefault{LLH}_{\mathdefault{Best Fit}}}\right)$')
ax.set_ylabel(r'$\frac{\mathdefault{Bin}_i}{\sum_i \mathdefault{Bin}_i}$')
plt.tight_layout()
plt.savefig('05_hist_probs.png')
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import json
import logging
import os
import re
from lib.ordered_dict import OrderedDict
LOGGER = logging.getLogger('dmprof')
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEFAULT_SORTERS = [
os.path.join(BASE_PATH, 'sorters', 'malloc.browser-module.json'),
os.path.join(BASE_PATH, 'sorters', 'malloc.renderer-module.json'),
os.path.join(BASE_PATH, 'sorters', 'malloc.type.json'),
os.path.join(BASE_PATH, 'sorters', 'malloc.WebCore.json'),
os.path.join(BASE_PATH, 'sorters', 'vm.Android-specific.json'),
os.path.join(BASE_PATH, 'sorters', 'vm.base.json'),
os.path.join(BASE_PATH, 'sorters', 'vm.GPU.json'),
os.path.join(BASE_PATH, 'sorters', 'vm.sharing.json'),
os.path.join(BASE_PATH, 'sorters', 'vm.Skia.json'),
os.path.join(BASE_PATH, 'sorters', 'vm.V8.json'),
]
DEFAULT_TEMPLATES = os.path.join(BASE_PATH, 'templates.json')
class Unit(object):
"""Represents a minimum unit of memory usage categorization.
It is supposed to be inherited for some different spaces like the entire
virtual memory and malloc arena. Such different spaces are called "worlds"
in dmprof. (For example, the "vm" world and the "malloc" world.)
"""
def __init__(self, unit_id, size):
self._unit_id = unit_id
self._size = size
@property
def unit_id(self):
return self._unit_id
@property
def size(self):
return self._size
class VMUnit(Unit):
"""Represents a Unit for a memory region on virtual memory."""
def __init__(self, unit_id, committed, reserved, mmap, region,
pageframe=None, group_pfn_counts=None):
super(VMUnit, self).__init__(unit_id, committed)
self._reserved = reserved
self._mmap = mmap
self._region = region
self._pageframe = pageframe
self._group_pfn_counts = group_pfn_counts
@property
def committed(self):
return self._size
@property
def reserved(self):
return self._reserved
@property
def mmap(self):
return self._mmap
@property
def region(self):
return self._region
@property
def pageframe(self):
return self._pageframe
@property
def group_pfn_counts(self):
return self._group_pfn_counts
class MMapUnit(VMUnit):
"""Represents a Unit for a mmap'ed region."""
def __init__(self, unit_id, committed, reserved, region, bucket_set,
pageframe=None, group_pfn_counts=None):
super(MMapUnit, self).__init__(unit_id, committed, reserved, True,
region, pageframe, group_pfn_counts)
self._bucket_set = bucket_set
def __repr__(self):
return str(self.region)
@property
def bucket_set(self):
return self._bucket_set
class UnhookedUnit(VMUnit):
"""Represents a Unit for a non-mmap'ed memory region on virtual memory."""
def __init__(self, unit_id, committed, reserved, region,
pageframe=None, group_pfn_counts=None):
super(UnhookedUnit, self).__init__(unit_id, committed, reserved, False,
region, pageframe, group_pfn_counts)
def __repr__(self):
return str(self.region)
class MallocUnit(Unit):
"""Represents a Unit for a malloc'ed memory block."""
def __init__(self, unit_id, size, alloc_count, free_count, bucket):
super(MallocUnit, self).__init__(unit_id, size)
self._bucket = bucket
self._alloc_count = alloc_count
self._free_count = free_count
def __repr__(self):
return str(self.bucket)
@property
def bucket(self):
return self._bucket
@property
def alloc_count(self):
return self._alloc_count
@property
def free_count(self):
return self._free_count
class UnitSet(object):
"""Represents an iterable set of Units."""
def __init__(self, world):
self._units = {}
self._world = world
def __repr__(self):
return str(self._units)
def __iter__(self):
for unit_id in sorted(self._units):
yield self._units[unit_id]
def append(self, unit, overwrite=False):
if not overwrite and unit.unit_id in self._units:
LOGGER.error('The unit id=%s already exists.' % str(unit.unit_id))
self._units[unit.unit_id] = unit
class AbstractRule(object):
"""An abstract class for rules to be matched with units."""
def __init__(self, dct):
self._name = dct['name']
self._hidden = dct.get('hidden', False)
self._subs = dct.get('subs', [])
def match(self, unit):
raise NotImplementedError()
@property
def name(self):
return self._name
@property
def hidden(self):
return self._hidden
def iter_subs(self):
for sub in self._subs:
yield sub
class VMRule(AbstractRule):
"""Represents a Rule to match with virtual memory regions."""
def __init__(self, dct):
super(VMRule, self).__init__(dct)
self._backtrace_function = dct.get('backtrace_function', None)
if self._backtrace_function:
self._backtrace_function = re.compile(self._backtrace_function)
self._backtrace_sourcefile = dct.get('backtrace_sourcefile', None)
if self._backtrace_sourcefile:
self._backtrace_sourcefile = re.compile(self._backtrace_sourcefile)
self._mmap = dct.get('mmap', None)
self._sharedwith = dct.get('sharedwith', [])
self._mapped_pathname = dct.get('mapped_pathname', None)
if self._mapped_pathname:
self._mapped_pathname = re.compile(self._mapped_pathname)
self._mapped_permission = dct.get('mapped_permission', None)
if self._mapped_permission:
self._mapped_permission = re.compile(self._mapped_permission)
def __repr__(self):
result = cStringIO.StringIO()
result.write('%s: ' % self._name)
attributes = []
attributes.append('mmap: %s' % self._mmap)
if self._backtrace_function:
attributes.append('backtrace_function: "%s"' %
self._backtrace_function.pattern)
if self._sharedwith:
attributes.append('sharedwith: "%s"' % self._sharedwith)
if self._mapped_pathname:
attributes.append('mapped_pathname: "%s"' % self._mapped_pathname.pattern)
if self._mapped_permission:
attributes.append('mapped_permission: "%s"' %
self._mapped_permission.pattern)
result.write('{ %s }' % ', '.join(attributes))
return result.getvalue()
def match(self, unit):
if unit.mmap:
assert unit.region[0] == 'hooked'
bucket = unit.bucket_set.get(unit.region[1]['bucket_id'])
assert bucket
assert bucket.allocator_type == 'mmap'
stackfunction = bucket.symbolized_joined_stackfunction
stacksourcefile = bucket.symbolized_joined_stacksourcefile
# TODO(dmikurube): Support shared memory.
sharedwith = None
if self._mmap == False: # (self._mmap == None) should go through.
return False
if (self._backtrace_function and
not self._backtrace_function.match(stackfunction)):
return False
if (self._backtrace_sourcefile and
not self._backtrace_sourcefile.match(stacksourcefile)):
return False
if (self._mapped_pathname and
not self._mapped_pathname.match(unit.region[1]['vma']['name'])):
return False
if (self._mapped_permission and
not self._mapped_permission.match(
unit.region[1]['vma']['readable'] +
unit.region[1]['vma']['writable'] +
unit.region[1]['vma']['executable'] +
unit.region[1]['vma']['private'])):
return False
if (self._sharedwith and
unit.pageframe and sharedwith not in self._sharedwith):
return False
return True
else:
assert unit.region[0] == 'unhooked'
# TODO(dmikurube): Support shared memory.
sharedwith = None
if self._mmap == True: # (self._mmap == None) should go through.
return False
if (self._mapped_pathname and
not self._mapped_pathname.match(unit.region[1]['vma']['name'])):
return False
if (self._mapped_permission and
not self._mapped_permission.match(
unit.region[1]['vma']['readable'] +
unit.region[1]['vma']['writable'] +
unit.region[1]['vma']['executable'] +
unit.region[1]['vma']['private'])):
return False
if (self._sharedwith and
unit.pageframe and sharedwith not in self._sharedwith):
return False
return True
class MallocRule(AbstractRule):
"""Represents a Rule to match with malloc'ed blocks."""
def __init__(self, dct):
super(MallocRule, self).__init__(dct)
self._backtrace_function = dct.get('backtrace_function', None)
if self._backtrace_function:
self._backtrace_function = re.compile(self._backtrace_function)
self._backtrace_sourcefile = dct.get('backtrace_sourcefile', None)
if self._backtrace_sourcefile:
self._backtrace_sourcefile = re.compile(self._backtrace_sourcefile)
self._typeinfo = dct.get('typeinfo', None)
if self._typeinfo:
self._typeinfo = re.compile(self._typeinfo)
def __repr__(self):
result = cStringIO.StringIO()
result.write('%s: ' % self._name)
attributes = []
if self._backtrace_function:
attributes.append('backtrace_function: "%s"' %
self._backtrace_function.pattern)
if self._typeinfo:
attributes.append('typeinfo: "%s"' % self._typeinfo.pattern)
result.write('{ %s }' % ', '.join(attributes))
return result.getvalue()
def match(self, unit):
assert unit.bucket.allocator_type == 'malloc'
stackfunction = unit.bucket.symbolized_joined_stackfunction
stacksourcefile = unit.bucket.symbolized_joined_stacksourcefile
typeinfo = unit.bucket.symbolized_typeinfo
if typeinfo.startswith('0x'):
typeinfo = unit.bucket.typeinfo_name
return ((not self._backtrace_function or
self._backtrace_function.match(stackfunction)) and
(not self._backtrace_sourcefile or
self._backtrace_sourcefile.match(stacksourcefile)) and
(not self._typeinfo or self._typeinfo.match(typeinfo)))
class AbstractSorter(object):
"""An abstract class for classifying Units with a set of Rules."""
def __init__(self, dct):
self._type = 'sorter'
self._version = dct['version']
self._world = dct['world']
self._name = dct['name']
self._root = dct.get('root', False)
self._order = dct['order']
self._rules = []
for rule in dct['rules']:
if dct['world'] == 'vm':
self._rules.append(VMRule(rule))
elif dct['world'] == 'malloc':
self._rules.append(MallocRule(rule))
else:
LOGGER.error('Unknown sorter world type')
def __repr__(self):
result = cStringIO.StringIO()
print >> result, '%s' % self._name
print >> result, 'world=%s' % self._world
print >> result, 'name=%s' % self._name
print >> result, 'order=%s' % self._order
print >> result, 'rules:'
for rule in self._rules:
print >> result, ' %s' % rule
return result.getvalue()
@staticmethod
def load(filename):
with open(filename) as sorter_f:
sorter_dict = json.load(sorter_f, object_pairs_hook=OrderedDict)
if sorter_dict['world'] == 'vm':
return VMSorter(sorter_dict)
elif sorter_dict['world'] == 'malloc':
return MallocSorter(sorter_dict)
else:
LOGGER.error('Unknown sorter world type')
return None
@property
def world(self):
return self._world
@property
def name(self):
return self._name
@property
def root(self):
return self._root
def iter_rule(self):
for rule in self._rules:
yield rule
def find(self, unit):
raise NotImplementedError()
def find_rule(self, name):
"""Finds a rule whose name is |name|. """
for rule in self._rules:
if rule.name == name:
return rule
return None
class VMSorter(AbstractSorter):
"""Represents a Sorter for memory regions on virtual memory."""
def __init__(self, dct):
assert dct['world'] == 'vm'
super(VMSorter, self).__init__(dct)
def find(self, unit):
for rule in self._rules:
if rule.match(unit):
return rule
return None
class MallocSorter(AbstractSorter):
"""Represents a Sorter for malloc'ed blocks."""
def __init__(self, dct):
assert dct['world'] == 'malloc'
super(MallocSorter, self).__init__(dct)
def find(self, unit):
if not unit.bucket:
return None
assert unit.bucket.allocator_type == 'malloc'
# TODO(dmikurube): Utilize component_cache again, or remove it.
for rule in self._rules:
if rule.match(unit):
return rule
return None
class SorterTemplates(object):
"""Represents a template for sorters."""
def __init__(self, dct):
self._dict = dct
def as_dict(self):
return self._dict
@staticmethod
def load(filename):
with open(filename) as templates_f:
templates_dict = json.load(templates_f, object_pairs_hook=OrderedDict)
return SorterTemplates(templates_dict)
class SorterSet(object):
"""Represents an iterable set of Sorters."""
def __init__(self, additional=None, default=None):
if not additional:
additional = []
if not default:
default = DEFAULT_SORTERS
self._sorters = {}
LOGGER.info('Loading sorters.')
for filename in default + additional:
LOGGER.info(' Loading a sorter "%s".' % filename)
sorter = AbstractSorter.load(filename)
if sorter.world not in self._sorters:
self._sorters[sorter.world] = []
self._sorters[sorter.world].append(sorter)
self._templates = SorterTemplates.load(DEFAULT_TEMPLATES)
def __repr__(self):
result = cStringIO.StringIO()
for world, sorters in self._sorters.iteritems():
for sorter in sorters:
print >> result, '%s: %s' % (world, sorter)
return result.getvalue()
def __iter__(self):
for sorters in self._sorters.itervalues():
for sorter in sorters:
yield sorter
def iter_world(self, world):
for sorter in self._sorters.get(world, []):
yield sorter
@property
def templates(self):
return self._templates
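# Illustrative sketch (not part of dmprof): the dict shape that
# AbstractSorter.load() reads from a sorter JSON file, fed directly to
# MallocSorter here. The rule names and patterns below are made up.
EXAMPLE_SORTER_DICT = {
    'version': 1,
    'world': 'malloc',
    'name': 'malloc.example',
    'order': [],
    'rules': [
        {'name': 'WebCore', 'backtrace_function': r'.*WebCore::.*'},
        {'name': 'other'},
    ],
}
def _build_example_sorter():
    sorter = MallocSorter(EXAMPLE_SORTER_DICT)
    return sorter.find_rule('WebCore')  # the MallocRule named 'WebCore'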
|
|
from __future__ import print_function, division
import math
import numpy as np
from bhc import bhc
class rbhc(object):
"""
An instance of Randomized Bayesian hierarchical clustering CRP
mixture model.
Attributes
----------
Notes
-----
    The cost of rBHC scales as O(n log n) and so should be preferred
for large data sets.
"""
def __init__(self, data, data_model, crp_alpha=1.0, sub_size=50,
verbose=False):
"""
Init a rbhc instance and perform the clustering.
Parameters
----------
data : numpy.ndarray (n, d)
Array of data where each row is a data point and each
column is a dimension.
data_model : CollapsibleDistribution
            Provides the appropriate ``log_marginal_likelihood``
function for the data.
crp_alpha : float (0, Inf)
CRP concentration parameter.
sub_size : int
            The size of the random subset of points used to form the
            tree whose top split is employed to filter the data.
            Denoted m in Heller & Ghahramani (2005b).
verbose : bool
If true various bits of information, possibly with
diagnostic uses, will be printed.
"""
self.data = data
self.data_model = data_model
self.crp_alpha = crp_alpha
self.sub_size = sub_size
self.verbose = verbose
self.nodes = {}
# initialize the tree
self.assignments = []
root_node = rbhc_Node(data, data_model, crp_alpha)
self.nodes[0] = {0: root_node}
# self.tree = rbhc_Node.recursive_split(root_node, 50)
self.recursive_split(root_node)
self.find_assignments()
self.refine_probs()
def recursive_split(self, parent_node):
rBHC_split, children = rbhc_Node.as_split(parent_node,
self.sub_size)
if self.verbose:
print("Parent node [{0}][{1}] ".format(
parent_node.node_level,
parent_node.level_index), end="")
        if rBHC_split:  # continue recursing down
if children[0].node_level not in self.nodes:
self.nodes[children[0].node_level] = {}
self.nodes[children[0].node_level][children[0].level_index] = (
children[0])
self.nodes[children[1].node_level][children[1].level_index] = (
children[1])
if self.verbose:
print("split to children:\n"
"\tnode [{0}][{1}], size : {2}\n"
"\tnode [{3}][{4}], size : {5}\n".format(
children[0].node_level,
children[0].level_index, children[0].nk,
children[1].node_level,
children[1].level_index, children[1].nk))
self.recursive_split(children[0])
self.recursive_split(children[1])
else: # terminate
if parent_node.tree_terminated and self.verbose:
print("terminated with bhc tree")
elif parent_node.truncation_terminated and self.verbose:
print("truncated")
def find_assignments(self):
""" find_assignements()
Find which Node each data point is assigned to on each
level.
This fills self.assignemnts - which is a list, with an
ndarray for each level. The array for each level gives
the level index of the nde it is associated with.
If a data point is not assigned to a node on a given
level it is given the value -1.
"""
self.assignments.append(np.zeros(self.data.shape[0]))
for level_key in self.nodes:
if level_key != 0:
self.assignments.append(
np.zeros(self.data.shape[0])-1)
for index_key in self.nodes[level_key]:
if index_key % 2 == 0:
parent_index = int(index_key/2)
write_indexes = (self.assignments[level_key-1]
== parent_index)
self.assignments[level_key][write_indexes] = (
parent_index*2+1
- self.nodes[level_key-1][parent_index].
left_allocate.astype(int))
def refine_probs(self):
""" refine_probs()
            Improve the estimated probabilities by working with
the full set of data allocated to each node, rather than
just the initial sub-set used to create/split nodes.
"""
# travel up from leaves improving log_rk etc.
for level_it in range(len(self.assignments)-1, -1, -1):
# print(level_it, self.nodes[level_it].keys())
for node_it in self.nodes[level_it]:
node = self.nodes[level_it][node_it]
if node.tree_terminated:
if node.nk > 1:
# log_rk, etc are accurate
node.log_dk = node.true_bhc.root_node.log_dk
node.log_pi = node.true_bhc.root_node.log_pi
node.logp = node.true_bhc.root_node.logp
node.log_ml = node.true_bhc.root_node.log_ml
node.log_rk = node.true_bhc.root_node.log_rk
else:
                        node.log_dk = math.log(self.crp_alpha)
node.log_pi = 0.
node.logp = self.data_model.log_marginal_likelihood(
node.data)
node.log_ml = node.logp
node.log_rk = 0.
elif node.truncation_terminated:
node.log_dk = (math.log(self.crp_alpha)
+ math.lgamma(node.nk))
node.log_pi = 0.
node.logp = self.data_model.log_marginal_likelihood(
node.data)
node.log_ml = node.logp
node.log_rk = 0.
else:
left_child = self.nodes[level_it+1][node_it*2]
right_child = self.nodes[level_it+1][node_it*2+1]
node.log_dk = np.logaddexp(
math.log(self.crp_alpha)
+ math.lgamma(node.nk),
left_child.log_dk + right_child.log_dk)
node.log_pi = -math.log1p(math.exp(
left_child.log_dk
+ right_child.log_dk
- math.log(self.crp_alpha)
- math.lgamma(node.nk)))
neg_pi = math.log(-math.expm1(node.log_pi))
node.logp = self.data_model.log_marginal_likelihood(
node.data)
node.log_ml = np.logaddexp(node.log_pi+node.logp,
neg_pi + left_child.log_ml
+ right_child.log_ml)
node.log_rk = node.log_pi + node.logp - node.log_ml
# travel down from top improving
for level_it in range(1, len(self.assignments)):
for node_it in self.nodes[level_it]:
node = self.nodes[level_it][node_it]
parent_node = self.nodes[level_it-1][int(node_it/2)]
node.prev_wk = (parent_node.prev_wk
* (1-math.exp(parent_node.log_rk)))
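    # The downward pass above fills prev_wk as used in eqn 9 of Heller &
    # Ghahramani (2005a): with r_k = exp(log_rk), the weight of node k is
    #     w_k = r_k * prod over ancestors a of k of (1 - r_a),
    # and prev_wk caches that product, so the node_prob reported by __str__
    # below is prev_wk * exp(log_rk).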
def __str__(self):
bhc_str = ("==================================\n"
"rBHC fit to {0} data points, with "
"alpha={1} and sub_size={2} .\n".format(
self.data.shape[0], self.crp_alpha, self.sub_size))
for l_it in range(len(self.nodes)):
bhc_str += "===== LEVEL {0} =====\n".format(l_it)
for n_it in self.nodes[l_it]:
node = self.nodes[l_it][n_it]
bhc_str += ("node : {0} size : {1} "
"node_prob : {2:.5f} \n".format(
n_it, node.nk,
node.prev_wk*np.exp(node.log_rk)))
return bhc_str
def sample(self, size=1):
""" sample(size)
Sample from a fitted rBHC tree.
Parameters
----------
size : int
The number of samples to draw
"""
output = np.zeros((size, self.data.shape[1]))
for it in range(size):
sampled = False
node = self.nodes[0][0]
l_it = 0
n_it = 0
while not sampled:
if node.tree_terminated: # tree has BHC child at this node
if node.nk > 1:
output[it, :] = node.true_bhc.sample()
else:
output[it, :] = self.data_model.conditional_sample(
node.data)
sampled = True
elif node.truncation_terminated:
output[it, :] = self.data_model.conditional_sample(
node.data)
sampled = True
elif np.random.rand() < math.exp(node.log_rk):
# sample from node
output[it, :] = self.data_model.conditional_sample(
node.data)
sampled = True
else: # drop to next level
child_ratio = (self.nodes[l_it+1][n_it*2].nk
/ (self.nodes[l_it+1][n_it*2].nk
+ self.nodes[l_it+1][n_it*2+1].nk))
if np.random.rand() < child_ratio:
l_it += 1
n_it = n_it*2
else:
l_it += 1
n_it = n_it*2+1
node = self.nodes[l_it][n_it]
return output
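# Note on the data_model argument (inferred from its use above, not a formal
# spec): rbhc itself only calls data_model.log_marginal_likelihood(data) and
# data_model.conditional_sample(data); any CollapsibleDistribution exposing
# those methods (plus whatever the wrapped bhc class requires) can be supplied.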
class rbhc_Node(object):
""" A node in the randomised Bayesian hierarchical clustering.
Attributes
----------
nk : int
Number of data points assigned to the node
D : int
The dimension of the data points
    data : numpy.ndarray (n, d)
The data assigned to the Node. Each row is a datum.
data_model : idsteach.CollapsibleDistribution
        The data model used to calculate marginal likelihoods
crp_alpha : float
Chinese restaurant process concentration parameter
log_rk : float
The probability of the merged hypothesis for the node.
        Given by eqn 3 of Heller & Ghahramani (2005).
prev_wk : float
The product of the (1-r_k) factors for the nodes leading
to this node from (and including) the root node. Used in
        eqn 9 of Heller & Ghahramani (2005a).
node_level : int, optional
The level in the hierarchy at which the node is found.
The root node lives in level 0 and the level number
increases down the tree.
level_index : int, optional
An index that identifies each node within a level.
left_allocate : ndarray(bool)
An array that records if a datum has been allocated
to the left child (True) or the right(False).
log_dk : float
Cached probability variable. Do not define if the node is
a leaf.
log_pi : float
Cached probability variable. Do not define if the node is
a leaf.
log_ml : float
The log marginal likelihood for the tree of the node and
its children. This is given by eqn 2 of Heller &
        Ghahramani (2005). Note that this definition is
recursive. Do not define if the node is
a leaf.
logp : float
The log marginal likelihood for the particular cluster
represented by the node. Given by eqn 1 of Heller &
Ghahramani (2005).
"""
def __init__(self, data, data_model, crp_alpha=1.0, prev_wk=1.,
node_level=0, level_index=0):
""" __init__(data, data_model, crp_alpha=1.0)
Initialise a rBHC node.
Parameters
----------
        data : numpy.ndarray (n, d)
The data assigned to the Node. Each row is a datum.
data_model : idsteach.CollapsibleDistribution
            The data model used to calculate marginal likelihoods
crp_alpha : float, optional
Chinese restaurant process concentration parameter
prev_wk : float
The product of the (1-r_k) factors for the nodes
leading to this node from (and including) the root
            node. Used in eqn 9 of Heller & Ghahramani (2005a).
node_level : int, optional
The level in the hierarchy at which the node is found.
The root node lives in level 0 and the level number
increases down the tree.
level_index : int, optional
An index that identifies each node within a level.
"""
self.data = data
self.data_model = data_model
self.crp_alpha = crp_alpha
self.prev_wk = prev_wk
self.node_level = node_level
self.level_index = level_index
self.nk = data.shape[0]
self.D = data.shape[1]
self.log_rk = 0
self.tree_terminated = False
self.truncation_terminated = False
def set_rk(self, log_rk):
""" set_rk(log_rk)
Set the value of the ln(r_k) The probability of the
merged hypothesis as given in Eqn 3 of Heller & Ghahramani
(2005a)
Parameters
----------
log_rk : float
The value of log_rk for the node
"""
self.log_rk = log_rk
@classmethod
def as_split(cls, parent_node, sub_size):
""" as_split(parent_node, subsize)
Perform a splitting of a rBHC node into two children.
If the number of data points is large a randomized
filtered split, as in Fig 4 of Heller & Ghahramani (2005b)
is performed.
Otherwise, if the number of points is less than or equal
        to sub_size then these are simply subject to a bhc
clustering.
Parameters
----------
parent_node : rbhc_Node
The parent node that is going to be split
sub_size : int
            The size of the random subset of points used to form
the tree whose top split is employed to filter the
data.
Denoted m in Heller & Ghahramani (2005b).
Returns
-------
rBHC_split : bool
True if the size of data is greater than sub_size and
            so an rBHC split/filtering has occurred.
            False if the size of data is less than/equal to
            sub_size and so a bhc clustering that includes all
the data has been found.
children : list(rbhc_Node) , bhc
A clustering of the data, either onto two child
rbhc_Nodes or as a full bhc tree of all the data
within parent_node.
left_allocate : ndarray(bool)
An array that records if a datum has been allocated
to the left child (True) or the right(False).
"""
if (parent_node.prev_wk*parent_node.nk) < 1E-3:
print("Truncating", parent_node.prev_wk, parent_node.nk,
parent_node.prev_wk*parent_node.nk)
rBHC_split = False
parent_node.truncation_terminated = True
children = []
# make subsample tree
if parent_node.nk > sub_size:
parent_node.subsample_bhc(sub_size)
# set log_rk from the estimate given by self.sub_bhc
parent_node.set_rk(parent_node.sub_bhc.root_node.log_rk)
elif parent_node.nk > 1:
parent_node.true_bhc = bhc(parent_node.data,
parent_node.data_model,
parent_node.crp_alpha)
parent_node.set_rk(parent_node.true_bhc.root_node.log_rk)
parent_node.tree_terminated = True
else:
parent_node.set_rk(0.)
parent_node.tree_terminated = True
else:
if parent_node.nk > sub_size: # do rBHC filter
# make subsample tree
parent_node.subsample_bhc(sub_size)
# set log_rk from the estimate given by self.sub_bhc
parent_node.set_rk(parent_node.sub_bhc.root_node.log_rk)
# filter data through top level of subsample_bhc
parent_node.filter_data()
# create new nodes
child_prev_wk = (parent_node.prev_wk
* (1-math.exp(parent_node.log_rk)))
child_level = parent_node.node_level+1
left_child = cls(parent_node.left_data,
parent_node.data_model,
parent_node.crp_alpha, child_prev_wk,
child_level, parent_node.level_index*2)
right_child = cls(parent_node.right_data,
parent_node.data_model,
parent_node.crp_alpha, child_prev_wk,
child_level, parent_node.level_index*2+1)
rBHC_split = True
children = [left_child, right_child]
elif parent_node.nk > 1: # just use the bhc tree
parent_node.true_bhc = bhc(parent_node.data,
parent_node.data_model,
parent_node.crp_alpha)
children = parent_node.true_bhc
rBHC_split = False
parent_node.tree_terminated = True
parent_node.set_rk(children.root_node.log_rk)
else: # only 1 datum
children = []
rBHC_split = False
parent_node.tree_terminated = True
parent_node.set_rk(0.)
print("\n", parent_node.node_level, parent_node.level_index,
parent_node.nk, parent_node.prev_wk,
math.exp(parent_node.log_rk), (1-math.exp(parent_node.log_rk)))
return (rBHC_split, children)
def subsample_bhc(self, sub_size):
""" subsample_bhc(sub_size)
Produce a subsample of sub_size data points and then
        perform a bhc clustering on it.
Parameters
----------
sub_size : int
            The size of the random subset of points used to form
the tree whose top split is employed to filter the
data.
Denoted m in Heller & Ghahramani (2005b).
"""
self.sub_indexes = np.random.choice(np.arange(self.nk),
sub_size, replace=False)
sub_data = self.data[self.sub_indexes]
self.sub_bhc = bhc(sub_data, self.data_model, self.crp_alpha)
def filter_data(self):
""" filter_data()
Filter the data in a rbhc_node onto the two Nodes at the
second from top layer of a bhc tree.
"""
# set up data arrays
self.left_data = np.empty(shape=(0, self.D))
self.right_data = np.empty(shape=(0, self.D))
        # create assignment array
self.left_allocate = np.zeros(self.nk, dtype=bool)
# Run through data
for ind in np.arange(self.nk):
# check if in subset
if ind in self.sub_indexes:
sub_ind = np.argwhere(self.sub_indexes == ind)[0][0]
if self.sub_bhc.assignments[-2][sub_ind] == 0:
self.left_allocate[ind] = True
self.left_data = np.vstack((self.left_data,
self.data[ind]))
else:
self.right_data = np.vstack((self.right_data,
self.data[ind]))
# non subset data
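            # A datum outside the subsample is routed to whichever top-level
            # child of the subsample tree scores higher under that child's
            # mixing weight (log_pi) plus its posterior predictive density.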
else:
left_prob = (self.sub_bhc.root_node.left_child.log_pi
+ self.data_model.log_posterior_predictive(
self.data[ind],
self.sub_bhc.root_node.left_child.data))
right_prob = (self.sub_bhc.root_node.right_child.log_pi
+ self.data_model.log_posterior_predictive(
self.data[ind],
self.sub_bhc.root_node.right_child.data))
if left_prob >= right_prob:
                    # possibly change this to make tuple and vstack at
# end if cost is high
self.left_allocate[ind] = True
self.left_data = np.vstack((self.left_data,
self.data[ind]))
else:
self.right_data = np.vstack((self.right_data,
self.data[ind]))
print("split", np.sum(self.left_allocate), self.left_allocate.size)
|
|
#!/usr/bin/env python
#
# Copyright 2015 by Justin MacCallum, Alberto Perez, Ken Dill
# All rights reserved
#
import numpy as np
import unittest
import os
from meld import vault, comm
from meld.remd import master_runner, ladder, adaptor
from meld import system
from meld.test.helper import TempDirHelper
from meld.util import in_temp_dir
from meld.pdb_writer import PDBWriter
class DataStorePickleTestCase(unittest.TestCase):
"""
Test that we can read and write the items that are pickled into the Data directory.
"""
def setUp(self):
self.N_ATOMS = 500
self.N_REPLICAS = 4
def test_init_mode_w_creates_directories(self):
"calling initialize should create the Data and Data/Backup directories"
with in_temp_dir():
pdb_writer = object() # dummy pdb writer; can't use a mock because they can't be pickled
store = vault.DataStore(self.N_ATOMS, self.N_REPLICAS, pdb_writer)
store.initialize(mode='w')
            self.assertTrue(os.path.exists('Data'), 'Data directory not created')
self.assertTrue(os.path.exists('Data/Backup'), 'Backup directory not created')
def test_init_mode_w_creates_results(self):
"calling initialize should create the results.h5 file"
with in_temp_dir():
pdb_writer = object() # dummy pdb writer; can't use a mock because they can't be pickled
store = vault.DataStore(self.N_ATOMS, self.N_REPLICAS, pdb_writer)
store.initialize(mode='w')
            self.assertTrue(os.path.exists('Data/Blocks/block_000000.nc'), 'block_000000.nc not created')
def test_init_mode_w_raises_when_dirs_exist(self):
"calling initialize should raise RuntimeError when Data and Data/Backup directories exist"
with in_temp_dir():
os.mkdir('Data')
os.mkdir('Data/Backup')
pdb_writer = object() # dummy pdb writer; can't use a mock because they can't be pickled
store = vault.DataStore(self.N_ATOMS, self.N_REPLICAS, pdb_writer)
with self.assertRaises(RuntimeError):
store.initialize(mode='w')
def test_save_and_load_data_store(self):
"should be able to save and then reload the DataStore"
with in_temp_dir():
pdb_writer = object() # dummy pdb writer; can't use a mock because they can't be pickled
store = vault.DataStore(self.N_ATOMS, self.N_REPLICAS, pdb_writer)
store.initialize(mode='w')
store.save_data_store()
store2 = vault.DataStore.load_data_store()
self.assertEqual(store.n_atoms, store2.n_atoms)
self.assertEqual(store.n_replicas, store2.n_replicas)
self.assertIsNone(store2._cdf_data_set)
self.assertTrue(os.path.exists('Data/data_store.dat'))
def test_save_and_load_communicator(self):
"should be able to save and reload the communicator"
with in_temp_dir():
pdb_writer = object() # dummy pdb writer; can't use a mock because they can't be pickled
store = vault.DataStore(self.N_ATOMS, self.N_REPLICAS, pdb_writer)
store.initialize(mode='w')
c = comm.MPICommunicator(self.N_ATOMS, self.N_REPLICAS)
# set _mpi_comm to something
# this should not be saved
c._mpi_comm = 'foo'
store.save_communicator(c)
c2 = store.load_communicator()
self.assertEqual(c.n_atoms, c2.n_atoms)
self.assertEqual(c.n_replicas, c2.n_replicas)
self.assertIsNone(c2._mpi_comm, '_mpi_comm should not be saved')
self.assertTrue(os.path.exists('Data/communicator.dat'))
def test_save_and_load_remd_runner(self):
"should be able to save and reload an remd runner"
with in_temp_dir():
pdb_writer = object() # dummy pdb writer; can't use a mock because they can't be pickled
store = vault.DataStore(self.N_ATOMS, self.N_REPLICAS, pdb_writer)
store.initialize(mode='w')
l = ladder.NearestNeighborLadder(n_trials=100)
policy = adaptor.AdaptationPolicy(1.0, 50, 100)
a = adaptor.EqualAcceptanceAdaptor(n_replicas=self.N_REPLICAS, adaptation_policy=policy)
runner = master_runner.MasterReplicaExchangeRunner(self.N_REPLICAS, max_steps=100, ladder=l, adaptor=a)
store.save_remd_runner(runner)
runner2 = store.load_remd_runner()
self.assertEqual(runner.n_replicas, runner2.n_replicas)
self.assertTrue(os.path.exists('Data/remd_runner.dat'))
def test_save_and_load_system(self):
"should be able to save and load a System"
with in_temp_dir():
pdb_writer = object() # dummy pdb writer; can't use a mock because they can't be pickled
store = vault.DataStore(self.N_ATOMS, self.N_REPLICAS, pdb_writer)
store.initialize(mode='w')
fake_system = object()
store.save_system(fake_system)
store.load_system()
self.assertTrue(os.path.exists('Data/system.dat'))
def test_save_and_load_run_options(self):
"should be able to save and load run options"
with in_temp_dir():
pdb_writer = object() # dummy pdb writer; can't use a mock because they can't be pickled
store = vault.DataStore(self.N_ATOMS, self.N_REPLICAS, pdb_writer)
store.initialize(mode='w')
fake_run_options = object()
store.save_run_options(fake_run_options)
store.load_run_options()
self.assertTrue(os.path.exists('Data/run_options.dat'))
class DataStoreHD5TestCase(unittest.TestCase, TempDirHelper):
"""
    Test that we can read and write the data that goes in the HDF5 file.
"""
def setUp(self):
self.setUpTempDir()
# setup data store
self.N_ATOMS = 500
self.N_REPLICAS = 16
pdb_writer = object() # dummy pdb writer; can't use a mock because they can't be pickled
self.store = vault.DataStore(self.N_ATOMS, self.N_REPLICAS, pdb_writer, block_size=10)
self.store.initialize(mode='w')
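        # block_size=10 means each Data/Blocks/block_NNNNNN.nc file holds ten
        # stages, so writing an eleventh stage should roll over to a new block
        # (exercised by test_should_create_second_block below).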
def tearDown(self):
self.tearDownTempDir()
    def test_should_raise_if_stage_is_reduced(self):
        "should raise if we try to write to a previous stage"
test_pos = np.zeros((self.N_REPLICAS, self.N_ATOMS, 3))
self.store.save_positions(test_pos, 0)
self.store.save_positions(test_pos, 1)
with self.assertRaises(RuntimeError):
self.store.save_positions(test_pos, 0)
def test_should_create_second_block(self):
"should create a second block once the first one fills up"
test_pos = np.zeros((self.N_REPLICAS, self.N_ATOMS, 3))
for i in range(11):
self.store.save_positions(test_pos, i)
self.assertTrue(os.path.exists('Data/Blocks/block_000000.nc'))
self.assertTrue(os.path.exists('Data/Blocks/block_000001.nc'))
def test_can_save_and_load_positions(self):
"should be able to save and load positions"
test_pos = np.zeros((self.N_REPLICAS, self.N_ATOMS, 3))
for i in range(self.N_REPLICAS):
test_pos[i, :, :] = i
STAGE = 0
self.store.save_positions(test_pos, STAGE)
self.store.save_data_store()
self.store.close()
store2 = vault.DataStore.load_data_store()
store2.initialize(mode='a')
test_pos2 = store2.load_positions(STAGE)
np.testing.assert_equal(test_pos, test_pos2)
def test_can_save_and_load_velocities(self):
"should be able to save and load velocities"
test_vel = np.zeros((self.N_REPLICAS, self.N_ATOMS, 3))
for i in range(self.N_REPLICAS):
test_vel[i, :, :] = i
STAGE = 0
self.store.save_velocities(test_vel, STAGE)
self.store.save_data_store()
self.store.close()
store2 = vault.DataStore.load_data_store()
store2.initialize(mode='a')
test_vel2 = store2.load_velocities(STAGE)
np.testing.assert_equal(test_vel, test_vel2)
def test_can_save_and_load_alphas(self):
"should be able to save and load lambdas"
test_lambdas = np.zeros(self.N_REPLICAS)
for i in range(self.N_REPLICAS):
test_lambdas[i] = i / (self.N_REPLICAS - 1)
STAGE = 0
self.store.save_alphas(test_lambdas, STAGE)
self.store.save_data_store()
self.store.close()
store2 = vault.DataStore.load_data_store()
store2.initialize(mode='a')
test_lambdas2 = store2.load_alphas(STAGE)
np.testing.assert_equal(test_lambdas, test_lambdas2)
def test_can_save_and_load_energies(self):
"should be able to save and load energies"
test_energies = np.zeros(self.N_REPLICAS)
for i in range(self.N_REPLICAS):
test_energies[i] = i
STAGE = 0
self.store.save_energies(test_energies, STAGE)
self.store.save_data_store()
self.store.close()
store2 = vault.DataStore.load_data_store()
store2.initialize(mode='a')
test_energies2 = store2.load_energies(STAGE)
np.testing.assert_equal(test_energies, test_energies2)
def test_can_save_and_load_states(self):
"should be able to save and load states"
def gen_state(index, n_atoms):
pos = index * np.ones((n_atoms, 3))
vel = index * np.ones((n_atoms, 3))
energy = index
lam = index / 100.
return system.SystemState(pos, vel, lam, energy)
states = [gen_state(i, self.N_ATOMS) for i in range(self.N_REPLICAS)]
STAGE = 0
self.store.save_states(states, STAGE)
self.store.save_data_store()
self.store.close()
store2 = vault.DataStore.load_data_store()
store2.initialize(mode='a')
states2 = store2.load_states(STAGE)
np.testing.assert_equal(states[-1].positions, states2[-1].positions)
def test_can_save_and_load_two_states(self):
"should be able to save and load states"
def gen_state(index, n_atoms):
pos = index * np.ones((n_atoms, 3))
vel = index * np.ones((n_atoms, 3))
energy = index
lam = index / 100.
return system.SystemState(pos, vel, lam, energy)
states = [gen_state(i, self.N_ATOMS) for i in range(self.N_REPLICAS)]
STAGE = 0
self.store.save_states(states, STAGE)
self.store.save_states(states, STAGE + 1)
self.store.save_data_store()
self.store.close()
store2 = vault.DataStore.load_data_store()
store2.initialize(mode='a')
states2 = store2.load_states(STAGE)
np.testing.assert_equal(states[-1].positions, states2[-1].positions)
def test_can_save_and_load_permutation_vector(self):
"should be able to save and load permutation vector"
test_vec = np.array(range(self.N_REPLICAS))
STAGE = 0
self.store.save_permutation_vector(test_vec, STAGE)
self.store.save_data_store()
self.store.close()
store2 = vault.DataStore.load_data_store()
store2.initialize(mode='a')
test_vec2 = store2.load_permutation_vector(STAGE)
np.testing.assert_equal(test_vec, test_vec2)
class DataStoreBackupTestCase(unittest.TestCase, TempDirHelper):
"""
Test that backup files are created/copied correctly.
"""
def setUp(self):
self.setUpTempDir()
self.N_ATOMS = 500
self.N_REPLICAS = 16
# setup objects to save to disk
c = comm.MPICommunicator(self.N_ATOMS, self.N_REPLICAS)
l = ladder.NearestNeighborLadder(n_trials=100)
policy = adaptor.AdaptationPolicy(1.0, 50, 100)
a = adaptor.EqualAcceptanceAdaptor(n_replicas=self.N_REPLICAS, adaptation_policy=policy)
# make some states
def gen_state(index, n_atoms):
pos = index * np.ones((n_atoms, 3))
vel = index * np.ones((n_atoms, 3))
energy = index
lam = index / 100.
return system.SystemState(pos, vel, lam, energy)
states = [gen_state(i, self.N_ATOMS) for i in range(self.N_REPLICAS)]
runner = master_runner.MasterReplicaExchangeRunner(self.N_REPLICAS, max_steps=100, ladder=l, adaptor=a)
pdb_writer = object() # dummy pdb writer; can't use a mock because they can't be pickled
self.store = vault.DataStore(self.N_ATOMS, self.N_REPLICAS, pdb_writer)
self.store.initialize(mode='w')
# save some stuff
self.store.save_data_store()
self.store.save_communicator(c)
self.store.save_remd_runner(runner)
self.store.save_states(states, stage=0)
def tearDown(self):
self.tearDownTempDir()
def test_backup_copies_comm(self):
"communicator.dat should be backed up"
self.store.backup(stage=0)
self.assertTrue(os.path.exists('Data/Backup/communicator.dat'))
def test_backup_copies_store(self):
"data_store.dat should be backed up"
self.store.backup(stage=0)
self.assertTrue(os.path.exists('Data/Backup/data_store.dat'))
def test_backup_copies_remd_runner(self):
"remd_runner.dat should be backed up"
self.store.backup(stage=0)
self.assertTrue(os.path.exists('Data/Backup/remd_runner.dat'))
class TestReadOnlyMode(unittest.TestCase, TempDirHelper):
def setUp(self):
self.setUpTempDir()
self.N_ATOMS = 500
self.N_REPLICAS = 16
# setup objects to save to disk
c = comm.MPICommunicator(self.N_ATOMS, self.N_REPLICAS)
l = ladder.NearestNeighborLadder(n_trials=100)
policy = adaptor.AdaptationPolicy(1.0, 50, 100)
a = adaptor.EqualAcceptanceAdaptor(n_replicas=self.N_REPLICAS, adaptation_policy=policy)
# make some states
def gen_state(index, n_atoms):
pos = index * np.ones((n_atoms, 3))
vel = index * np.ones((n_atoms, 3))
energy = index
lam = index / 100.
return system.SystemState(pos, vel, lam, energy)
runner = master_runner.MasterReplicaExchangeRunner(self.N_REPLICAS, max_steps=100, ladder=l, adaptor=a)
self.pdb_writer = object()
store = vault.DataStore(self.N_ATOMS, self.N_REPLICAS, self.pdb_writer, block_size=10)
store.initialize(mode='w')
# save some stuff
store.save_communicator(c)
store.save_remd_runner(runner)
store.save_system(object())
for index in range(100):
states = [gen_state(index, self.N_ATOMS) for i in range(self.N_REPLICAS)]
store.save_states(states, stage=index)
store.close()
store.save_data_store()
self.store = vault.DataStore.load_data_store()
self.store.initialize(mode='r')
def tearDown(self):
self.tearDownTempDir()
def test_saving_comm_should_raise(self):
with self.assertRaises(RuntimeError):
self.store.save_communicator(object())
def test_saving_remd_runner_should_raise(self):
with self.assertRaises(RuntimeError):
self.store.save_remd_runner(object())
def test_saving_system_should_raise(self):
with self.assertRaises(RuntimeError):
self.store.save_system(object())
def test_should_load_correct_states(self):
for i in range(90):
print i
states = self.store.load_states(stage=i)
self.assertAlmostEqual(states[0].positions[0, 0], i)
def test_load_all_positions_should_give_the_correct_positions(self):
positions = self.store.load_all_positions()
print positions.shape
self.assertEqual(positions.shape[0], self.N_REPLICAS)
self.assertEqual(positions.shape[1], self.N_ATOMS)
self.assertEqual(positions.shape[2], 3)
self.assertEqual(positions.shape[3], 90)
for i in range(90):
self.assertAlmostEqual(positions[0, 0, 0, i], i)
class TestPDBWriter(unittest.TestCase):
def setUp(self):
self.atom_numbers = [1000, 1001]
self.atom_names = ['ABCD', 'A2']
self.residue_numbers = [999, 1000]
self.residue_names = ['XYZ', 'R2']
self.coords = np.zeros((2, 3))
self.coords[0, :] = 1.0
self.coords[1, :] = 2.0
self.writer = PDBWriter(self.atom_numbers, self.atom_names, self.residue_numbers, self.residue_names)
def test_should_raise_with_wrong_number_of_atom_names(self):
with self.assertRaises(AssertionError):
PDBWriter(self.atom_numbers, ['CA'], self.residue_numbers, self.residue_names)
def test_should_raise_with_wrong_number_of_residue_numbers(self):
with self.assertRaises(AssertionError):
PDBWriter(self.atom_numbers, self.atom_names, [1], self.residue_names)
def test_should_raise_with_wrong_number_of_residue_names(self):
with self.assertRaises(AssertionError):
PDBWriter(self.atom_numbers, self.atom_names, self.residue_numbers, ['R1'])
def test_should_raise_with_bad_coordinate_size(self):
with self.assertRaises(AssertionError):
self.writer.get_pdb_string(np.zeros((3, 3)), 1)
def test_output_should_have_six_lines(self):
result = self.writer.get_pdb_string(self.coords, 1)
lines = result.splitlines()
self.assertEqual(len(lines), 6)
def test_header_should_have_correct_stage(self):
result = self.writer.get_pdb_string(self.coords, 1)
lines = result.splitlines()
self.assertIn('REMARK', lines[0])
self.assertIn('stage 1', lines[0])
def test_atom_line_should_have_correct_format(self):
result = self.writer.get_pdb_string(self.coords, 1)
lines = result.splitlines()
result = lines[1]
expected_result = 'ATOM 1000 ABCD XYZ 999 1.000 1.000 1.000'
print expected_result
print lines[1]
self.assertEqual(result, expected_result)
def test_other_atom_line_should_have_correct_format(self):
result = self.writer.get_pdb_string(self.coords, 1)
lines = result.splitlines()
result = lines[2]
expected_result = 'ATOM 1001 A2 R2 1000 2.000 2.000 2.000'
print expected_result
print lines[2]
self.assertEqual(result, expected_result)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
|
# coding: utf-8
"""
Cloudbreak API
    Cloudbreak is a powerful left surf that breaks over a coral reef, a mile southwest of the island of Tavarua, Fiji. Cloudbreak is also a cloud-agnostic Hadoop as a Service API. It abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API that allows you to spin up Hadoop clusters of arbitrary size on a range of cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud provider APIs (Amazon AWS, Microsoft Azure, Google Cloud Platform, OpenStack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class StructuredParametersQueryRequest(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'blueprint_name': 'str',
'cluster_name': 'str',
'account_name': 'str',
'storage_name': 'str',
'file_system_type': 'str',
'attached_cluster': 'bool'
}
attribute_map = {
'blueprint_name': 'blueprintName',
'cluster_name': 'clusterName',
'account_name': 'accountName',
'storage_name': 'storageName',
'file_system_type': 'fileSystemType',
'attached_cluster': 'attachedCluster'
}
def __init__(self, blueprint_name=None, cluster_name=None, account_name=None, storage_name=None, file_system_type=None, attached_cluster=False):
"""
StructuredParametersQueryRequest - a model defined in Swagger
"""
self._blueprint_name = None
self._cluster_name = None
self._account_name = None
self._storage_name = None
self._file_system_type = None
self._attached_cluster = None
self.blueprint_name = blueprint_name
self.cluster_name = cluster_name
if account_name is not None:
self.account_name = account_name
self.storage_name = storage_name
self.file_system_type = file_system_type
self.attached_cluster = attached_cluster
@property
def blueprint_name(self):
"""
Gets the blueprint_name of this StructuredParametersQueryRequest.
gathered from blueprintName field from the blueprint JSON
:return: The blueprint_name of this StructuredParametersQueryRequest.
:rtype: str
"""
return self._blueprint_name
@blueprint_name.setter
def blueprint_name(self, blueprint_name):
"""
Sets the blueprint_name of this StructuredParametersQueryRequest.
gathered from blueprintName field from the blueprint JSON
:param blueprint_name: The blueprint_name of this StructuredParametersQueryRequest.
:type: str
"""
if blueprint_name is None:
raise ValueError("Invalid value for `blueprint_name`, must not be `None`")
self._blueprint_name = blueprint_name
@property
def cluster_name(self):
"""
Gets the cluster_name of this StructuredParametersQueryRequest.
name of the stack
:return: The cluster_name of this StructuredParametersQueryRequest.
:rtype: str
"""
return self._cluster_name
@cluster_name.setter
def cluster_name(self, cluster_name):
"""
Sets the cluster_name of this StructuredParametersQueryRequest.
name of the stack
:param cluster_name: The cluster_name of this StructuredParametersQueryRequest.
:type: str
"""
if cluster_name is None:
raise ValueError("Invalid value for `cluster_name`, must not be `None`")
self._cluster_name = cluster_name
@property
def account_name(self):
"""
Gets the account_name of this StructuredParametersQueryRequest.
Account name of the path
:return: The account_name of this StructuredParametersQueryRequest.
:rtype: str
"""
return self._account_name
@account_name.setter
def account_name(self, account_name):
"""
Sets the account_name of this StructuredParametersQueryRequest.
Account name of the path
:param account_name: The account_name of this StructuredParametersQueryRequest.
:type: str
"""
self._account_name = account_name
@property
def storage_name(self):
"""
Gets the storage_name of this StructuredParametersQueryRequest.
Storage name of the path
:return: The storage_name of this StructuredParametersQueryRequest.
:rtype: str
"""
return self._storage_name
@storage_name.setter
def storage_name(self, storage_name):
"""
Sets the storage_name of this StructuredParametersQueryRequest.
Storage name of the path
:param storage_name: The storage_name of this StructuredParametersQueryRequest.
:type: str
"""
if storage_name is None:
raise ValueError("Invalid value for `storage_name`, must not be `None`")
self._storage_name = storage_name
@property
def file_system_type(self):
"""
Gets the file_system_type of this StructuredParametersQueryRequest.
Type of filesystem
:return: The file_system_type of this StructuredParametersQueryRequest.
:rtype: str
"""
return self._file_system_type
@file_system_type.setter
def file_system_type(self, file_system_type):
"""
Sets the file_system_type of this StructuredParametersQueryRequest.
Type of filesystem
:param file_system_type: The file_system_type of this StructuredParametersQueryRequest.
:type: str
"""
if file_system_type is None:
raise ValueError("Invalid value for `file_system_type`, must not be `None`")
self._file_system_type = file_system_type
@property
def attached_cluster(self):
"""
Gets the attached_cluster of this StructuredParametersQueryRequest.
Attached cluster
:return: The attached_cluster of this StructuredParametersQueryRequest.
:rtype: bool
"""
return self._attached_cluster
@attached_cluster.setter
def attached_cluster(self, attached_cluster):
"""
Sets the attached_cluster of this StructuredParametersQueryRequest.
Attached cluster
:param attached_cluster: The attached_cluster of this StructuredParametersQueryRequest.
:type: bool
"""
if attached_cluster is None:
raise ValueError("Invalid value for `attached_cluster`, must not be `None`")
self._attached_cluster = attached_cluster
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, StructuredParametersQueryRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
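# A minimal usage sketch (field values are illustrative only, not taken from
# the Cloudbreak API definition):
#
#   request = StructuredParametersQueryRequest(
#       blueprint_name='my-blueprint',
#       cluster_name='my-cluster',
#       storage_name='mystorage',
#       file_system_type='ADLS',
#       attached_cluster=False)
#   print(request.to_dict())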
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020-2021 The SymbiFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
"""
sphinx_hdl_diagrams
~~~~~~~~~~~~~~~~~~~~~~~
"""
import os
import re
import codecs
import posixpath
import subprocess
import sys
from os import path
from docutils import statemachine, nodes, io, utils
from docutils.parsers.rst import Directive, directives
from docutils.statemachine import ViewList
import sphinx
from sphinx.directives.code import LiteralInclude
from sphinx.errors import SphinxError
from sphinx.locale import _, __
from sphinx.util import logging
from sphinx.util.i18n import search_image_for_language
from sphinx.util.osutil import ensuredir, ENOENT, EPIPE, EINVAL
if False:
# For type annotation
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
try:
from .version import __version__
except ImportError:
__version__ = "0.0.dev0"
logger = logging.getLogger(__name__)
class HDLDiagramError(SphinxError):
category = 'HDLDiagram error'
class hdl_diagram(nodes.General, nodes.Inline, nodes.Element):
'''Base class for hdl_diagram node'''
pass
def figure_wrapper(directive, node, caption):
# type: (Directive, nodes.Node, unicode) -> nodes.figure
figure_node = nodes.figure('', node)
if 'align' in node:
figure_node['align'] = node.attributes.pop('align')
parsed = nodes.Element()
directive.state.nested_parse(
ViewList([caption], source=''), directive.content_offset, parsed)
caption_node = nodes.caption(
parsed[0].rawsource, '', *parsed[0].children)
caption_node.source = parsed[0].source
caption_node.line = parsed[0].line
figure_node += caption_node
return figure_node
def align_spec(argument):
# type: (Any) -> bool
return directives.choice(argument, ('left', 'center', 'right'))
def hdl_diagram_name(srcpath, srclineno, hdl_path):
srcdir, srcfile = path.split(srcpath)
srcbase, srcext = path.splitext(srcfile)
hdl_path = path.normpath(path.join(srcdir, hdl_path))
hdl_path = utils.relative_path(None, hdl_path)
hdl_path = nodes.reprunicode(hdl_path)
hdl_path_segments = [hdl_path]
while hdl_path_segments[0]:
a, b = path.split(hdl_path_segments[0])
hdl_path_segments[0:1] = [a, b]
hdl_file, hdl_ext = path.splitext(hdl_path_segments[-1])
return '-'.join(
[srcbase, str(srclineno)] +
hdl_path_segments[1:-1] +
[hdl_file],
)
class NoLicenseInclude(LiteralInclude):
def run(self):
# type: () -> List[nodes.Node]
rel_filename, filename = self.env.relfn2path(self.arguments[0])
code = open(filename, 'r').read().strip().split('\n')
first_line = next(
(idx for idx, line in enumerate(code) if 'SPDX' in line), 1)
if first_line > 1:
first_line += 3 if code[first_line][1] == '*' else 2
last_line = len(code)
while len(code[first_line - 1]) == 0:
first_line += 1
self.options['lines'] = '{}-{}'.format(first_line, last_line)
self.options['lineno-start'] = first_line
try:
return LiteralInclude.run(self)
except Exception as exc:
            return [self.state.document.reporter.warning(exc, line=self.lineno)]
class HDLDiagram(Directive):
"""
Directive to insert diagram generated from HDL code.
"""
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = False
option_spec = {
'type': str,
'module': str,
'flatten': bool,
'skin': str,
'yosys_script': str,
'alt': directives.unchanged,
'align': align_spec,
'caption': directives.unchanged,
}
global_variable_options = {
"hdl_diagram_output_format": ["svg", "png"],
"hdl_diagram_skin": ["default"], # or path
"hdl_diagram_yosys_script": ["default"], # or path
"hdl_diagram_yosys": ["yowasp", "system"] # or path
}
def run(self):
# type: () -> List[nodes.Node]
if not self.state.document.settings.file_insertion_enabled:
raise self.warning('"%s" directive disabled.' % self.name)
print("hdl-diagram", self)
source = self.state_machine.input_lines.source(
self.lineno - self.state_machine.input_offset - 1)
if self.arguments:
hdl_file = self.arguments[0]
outname = hdl_diagram_name(
*self.state_machine.get_source_and_line(), hdl_file)
# self.state.document.settings.record_dependencies.add(hdl_path)
env = self.state.document.settings.env
argument = search_image_for_language(hdl_file, env)
rel_filename, filename = env.relfn2path(hdl_file)
env.note_dependency(rel_filename)
else:
assert False, "TODO!"
# TODO: ????
hdl_diagram_code = '\n'.join(self.content)
node = hdl_diagram()
node['code'] = filename
node['options'] = {}
node['options']['outname'] = outname
node['options']['flatten'] = 'flatten' in self.options
node['options']['module'] = self.options.get('module', 'top')
node['options']['type'] = self.options.get('type', 'netlistsvg')
if 'alt' in self.options:
node['alt'] = self.options['alt']
if 'align' in self.options:
node['align'] = self.options['align']
yosys_script = self.options.get('yosys_script', None)
if yosys_script not in [None, 'default']:
_, yosys_script_filename = env.relfn2path(yosys_script)
if not path.exists(yosys_script_filename):
raise HDLDiagramError("Yosys script {} does not exist!".format(yosys_script_filename))
else:
node['options']['yosys_script'] = yosys_script_filename
else:
node['options']['yosys_script'] = yosys_script
skin = self.options.get('skin', None)
if skin not in [None, 'default']:
_, skin_filename = env.relfn2path(skin)
if not os.path.exists(skin_filename):
raise HDLDiagramError("Skin file {} does not exist!".format(skin_filename))
else:
node['options']['skin'] = skin_filename
else:
node['options']['skin'] = skin
caption = self.options.get('caption')
if caption:
node = figure_wrapper(self, node, caption)
self.add_name(node)
return [node]
def run_yosys(src, cmd, yosys='yowasp'):
if yosys == 'yowasp':
import yowasp_yosys
ycmd = ["-q", "-p", "{}".format(cmd), src]
print("Running YoWASP yosys: {}".format(ycmd))
yowasp_yosys.run_yosys(ycmd)
elif yosys == 'system':
ycmd = "yosys -p '{cmd}' {src}".format(src=src, cmd=cmd)
print("Running yosys: {}".format(ycmd))
subprocess.check_output(ycmd, shell=True)
else:
ycmd = "{yosys} -p '{cmd}' {src}".format(yosys=yosys, src=src, cmd=cmd)
print("Running yosys: {}".format(ycmd))
subprocess.check_output(ycmd, shell=True)
def diagram_yosys(ipath, opath, module='top', flatten=False,
yosys_script='default', yosys='yowasp'):
# Assertions
assert path.exists(ipath), 'Input file missing: {}'.format(ipath)
assert not path.exists(opath), 'Output file exists: {}'.format(opath)
yosys_options = HDLDiagram.global_variable_options["hdl_diagram_yosys"]
assert yosys in yosys_options or os.path.exists(yosys), "Invalid hdl_diagram_yosys value!"
if yosys_script != 'default':
assert path.exists(yosys_script), 'Yosys script file missing: {}'.format(yosys_script)
oprefix, oext = path.splitext(opath)
assert oext.startswith('.'), oext
# Diagram generation
oext = oext[1:]
if flatten:
flatten = '-flatten'
else:
flatten = ''
if yosys_script == 'default':
yosys_script_cmd = ""
else:
yosys_script_cmd = "script {}".format(yosys_script)
yosys_cmd = "prep -top {top} {flatten}; cd {top}; {script}; show -format {fmt} -prefix {oprefix}".format(
top=module,
flatten=flatten,
fmt=oext,
oprefix=oprefix,
script=yosys_script_cmd
).strip()
run_yosys(ipath, yosys_cmd, yosys)
if yosys == 'yowasp':
# somehow yowasp_yosys fails to execute `dot` to convert the dot file to svg,
# which works on native yosys, perhaps a limitation with wasm
svgdata = subprocess.check_output(["dot", "-Tsvg", "{}.dot".format(oprefix)])
with open("{}.svg".format(oprefix), "wb") as img:
img.write(svgdata)
assert path.exists(opath), 'Output file {} was not created!'.format(opath)
print('Output file created: {}'.format(opath))
def run_netlistsvg(ipath, opath, skin='default'):
assert path.exists(ipath), 'Input file missing: {}'.format(ipath)
assert not path.exists(opath), 'Output file exists: {}'.format(opath)
if skin != 'default':
assert path.exists(skin), 'Skin file missing: {}'.format(skin)
netlistsvg_cmd = "netlistsvg {ipath} -o {opath}".format(ipath=ipath, opath=opath)
if skin != 'default':
netlistsvg_cmd += " --skin {skin}".format(skin=skin)
print("Running netlistsvg:", netlistsvg_cmd)
subprocess.check_output(netlistsvg_cmd, shell=True)
assert path.exists(opath), 'Output file {} was not created!'.format(opath)
print('netlistsvg - Output file created: {}'.format(opath))
def diagram_netlistsvg(ipath, opath, module='top', flatten=False,
yosys_script='default', skin='default', yosys='yowasp'):
# Assertions
assert path.exists(ipath), 'Input file missing: {}'.format(ipath)
assert not path.exists(opath), 'Output file exists: {}'.format(opath)
yosys_options = HDLDiagram.global_variable_options["hdl_diagram_yosys"]
assert yosys in yosys_options or os.path.exists(yosys), "Invalid hdl_diagram_yosys value!"
if yosys_script != 'default':
assert path.exists(yosys_script), 'Yosys script file missing: {}'.format(yosys_script)
if skin != 'default':
assert path.exists(skin), 'Skin file missing: {}'.format(skin)
oprefix, oext = path.splitext(opath)
assert oext.startswith('.'), oext
# Diagram generation
oext = oext[1:]
if flatten:
flatten = '-flatten'
else:
flatten = ''
if yosys_script == 'default':
yosys_script_cmd = ""
else:
yosys_script_cmd = "script {}".format(yosys_script)
ojson = oprefix + '.json'
if path.exists(ojson):
os.remove(ojson)
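    # Here yosys only produces a JSON netlist via `write_json`, which
    # netlistsvg then renders; the `-compat-int` flag is only added for the
    # YoWASP build (see the `compat` format argument below).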
yosys_cmd = """prep -top {top} {flatten}; cd {top}; {script}; write_json {compat} {ojson}""".format(
top=module,
flatten=flatten,
ojson=ojson,
script=yosys_script_cmd,
compat="-compat-int" if yosys == 'yowasp' else ""
).strip()
run_yosys(ipath, yosys_cmd, yosys)
assert path.exists(ojson), 'Output file {} was not created!'.format(ojson)
run_netlistsvg(ojson, opath, skin)
print('netlistsvg - Output file created: {}'.format(ojson))
def nmigen_to_rtlil(fname, oname):
assert os.path.exists(fname)
output_dir = os.path.dirname(oname)
os.makedirs(output_dir, exist_ok=True)
cmd = "{python} {script} > {output}".format(python=sys.executable, script=fname, output=oname)
subprocess.run(cmd, shell=True, check=True)
def render_diagram(self, code, options, format, skin, yosys_script):
# type: (nodes.NodeVisitor, unicode, Dict, unicode, unicode) -> Tuple[unicode, unicode]
"""Render hdl code into a PNG or SVG output file."""
source_path = code
source_fn, source_ext = os.path.splitext(source_path)
fname = '%s.%s' % (options['outname'], format)
relfn = posixpath.join(self.builder.imgpath, fname)
outfn = path.join(self.builder.outdir, self.builder.imagedir, fname)
if source_ext == '.py':
module = 'top'
ilfn = path.join(self.builder.outdir, self.builder.imagedir, options['outname'] + '.il')
nmigen_to_rtlil(source_path, ilfn)
source_path = ilfn
elif source_ext == '.il' or source_ext == '.v':
module = options['module']
else:
raise HDLDiagramError("hdl_diagram_code file extension must be one of '.v', "
"'.il', or '.py', but is %r" % source_ext)
if path.isfile(outfn):
        print('Existing file:', outfn)
return relfn, outfn
ensuredir(path.dirname(outfn))
yosys_script = options['yosys_script'] if options['yosys_script'] is not None else yosys_script
skin = options['skin'] if options['skin'] is not None else skin
yosys = self.builder.config.hdl_diagram_yosys
yosys_options = HDLDiagram.global_variable_options["hdl_diagram_yosys"]
if yosys not in yosys_options and not os.path.exists(yosys):
raise HDLDiagramError("Yosys not found!")
else:
yosys = yosys if yosys in yosys_options else os.path.realpath(yosys)
diagram_type = options['type']
if diagram_type.startswith('yosys'):
assert diagram_type.startswith('yosys-'), diagram_type
diagram_yosys(
source_path,
outfn,
module=options['module'],
flatten=options['flatten'],
yosys_script=yosys_script,
yosys=yosys)
elif diagram_type == 'netlistsvg':
diagram_netlistsvg(
source_path,
outfn,
module=options['module'],
flatten=options['flatten'],
skin=skin,
yosys=yosys)
else:
raise Exception('Invalid diagram type "%s"' % diagram_type)
return relfn, outfn
def render_diagram_html(
self, node, code, options, imgcls=None, alt=None):
# type: (nodes.NodeVisitor, hdl_diagram, unicode, Dict, unicode, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
yosys_script = self.builder.config.hdl_diagram_yosys_script
if yosys_script != 'default' and not path.exists(yosys_script):
raise HDLDiagramError("Yosys script file {} does not exist! Change hdl_diagram_yosys_script variable".format(yosys_script))
skin = self.builder.config.hdl_diagram_skin
if skin != 'default' and not path.exists(skin):
raise HDLDiagramError("Skin file {} does not exist! Change hdl_diagram_skin variable".format(skin))
format = self.builder.config.hdl_diagram_output_format
try:
if format not in ('png', 'svg'):
raise HDLDiagramError("hdl_diagram_output_format must be one of 'png', "
"'svg', but is %r" % format)
fname, outfn = render_diagram(self, code, options, format, skin, yosys_script)
except HDLDiagramError as exc:
logger.warning('hdl_diagram code %r: ' % code + str(exc))
raise nodes.SkipNode
if fname is None:
self.body.append(self.encode(code))
else:
if alt is None:
alt = node.get('alt', self.encode(code).strip())
imgcss = imgcls and 'class="%s"' % imgcls or ''
if 'align' in node:
self.body.append('<div align="%s" class="align-%s">' %
(node['align'], node['align']))
self.body.append('<img src="%s" alt="%s" %s/>\n' %
(fname, alt, imgcss))
if 'align' in node:
self.body.append('</div>\n')
raise nodes.SkipNode
def html_visit_hdl_diagram(self, node):
# type: (nodes.NodeVisitor, hdl_diagram) -> None
render_diagram_html(self, node, node['code'], node['options'])
def render_diagram_latex(self, node, code, options):
# type: (nodes.NodeVisitor, hdl_diagram, unicode, Dict, unicode) -> None
try:
        fname, outfn = render_diagram(self, code, options, 'pdf',
                                      self.builder.config.hdl_diagram_skin,
                                      self.builder.config.hdl_diagram_yosys_script)
except HDLDiagramError as exc:
logger.warning('hdl_diagram code %r: ' % code + str(exc))
raise nodes.SkipNode
is_inline = self.is_inline(node)
if is_inline:
para_separator = ''
else:
para_separator = '\n'
if fname is not None:
post = None # type: unicode
if not is_inline and 'align' in node:
if node['align'] == 'left':
self.body.append('{')
post = '\\hspace*{\\fill}}'
elif node['align'] == 'right':
self.body.append('{\\hspace*{\\fill}')
post = '}'
self.body.append('%s\\includegraphics{%s}%s' %
(para_separator, fname, para_separator))
if post:
self.body.append(post)
raise nodes.SkipNode
def latex_visit_hdl_diagram(self, node):
# type: (nodes.NodeVisitor, hdl_diagram) -> None
render_diagram_latex(self, node, node['code'], node['options'])
def render_diagram_texinfo(self, node, code, options):
# type: (nodes.NodeVisitor, hdl_diagram, unicode, Dict, unicode) -> None
try:
        fname, outfn = render_diagram(self, code, options, 'png',
                                      self.builder.config.hdl_diagram_skin,
                                      self.builder.config.hdl_diagram_yosys_script)
except HDLDiagramError as exc:
logger.warning('hdl_diagram code %r: ' % code + str(exc))
raise nodes.SkipNode
if fname is not None:
self.body.append('@image{%s,,,[hdl_diagram],png}\n' % fname[:-4])
raise nodes.SkipNode
def texinfo_visit_hdl_diagram(self, node):
# type: (nodes.NodeVisitor, hdl_diagram) -> None
render_diagram_texinfo(self, node, node['code'], node['options'])
def text_visit_hdl_diagram(self, node):
# type: (nodes.NodeVisitor, hdl_diagram) -> None
if 'alt' in node.attributes:
self.add_text(_('[diagram: %s]') % node['alt'])
else:
self.add_text(_('[diagram]'))
raise nodes.SkipNode
def man_visit_hdl_diagram(self, node):
# type: (nodes.NodeVisitor, hdl_diagram) -> None
if 'alt' in node.attributes:
self.body.append(_('[diagram: %s]') % node['alt'])
else:
self.body.append(_('[diagram]'))
raise nodes.SkipNode
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
app.add_node(hdl_diagram,
html=(html_visit_hdl_diagram, None),
latex=(latex_visit_hdl_diagram, None),
texinfo=(texinfo_visit_hdl_diagram, None),
text=(text_visit_hdl_diagram, None),
man=(man_visit_hdl_diagram, None))
app.add_directive('hdl-diagram', HDLDiagram)
app.add_directive('verilog-diagram', HDLDiagram)
app.add_directive('no-license', NoLicenseInclude)
app.add_config_value('hdl_diagram_output_format', 'svg', 'html')
app.add_config_value('hdl_diagram_skin', 'default', 'html')
app.add_config_value('hdl_diagram_yosys_script', 'default', 'html')
app.add_config_value('hdl_diagram_yosys', 'yowasp', 'html')
return {'version': '1.0', 'parallel_read_safe': True}
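# A minimal conf.py sketch for enabling this extension (values illustrative):
#
#   extensions = ['sphinx_hdl_diagrams']
#   hdl_diagram_output_format = 'svg'
#   hdl_diagram_yosys = 'yowasp'
#
# and a corresponding directive in an .rst file:
#
#   .. hdl-diagram:: ../src/counter.v
#      :type: netlistsvg
#      :module: counter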
|
|
import logging
import os
import pysam
import sys
from svviz import commandline
from svviz import disambiguate
from svviz import debug
from svviz import datahub
from svviz import dotplots
from svviz import export
from svviz import flanking
from svviz import insertsizes
from svviz import remap
from svviz import summarystats
from svviz import track
from svviz import utilities
from svviz import variants
from svviz import vcf
from svviz import web
def checkRequirements(args):
if not remap.check_swalign():
print "ERROR: check that svviz is correctly installed -- the 'ssw' Smith-Waterman alignment module does not appear to be functional"
sys.exit(1)
if args.export and (args.export.lower().endswith("pdf") or args.export.lower().endswith("png")):
if not export.canConvertSVGToPDF():
print "ERROR: librsvg needs to be installed in order to export to pdf and png format."
sys.exit(1)
def loadISDs(dataHub):
""" Load the Insert Size Distributions """
for sample in dataHub:
logging.info(" > {} <".format(sample.name))
sample.readStatistics = insertsizes.ReadStatistics(sample.bam, keepReads=dataHub.args.save_reads)
if sample.readStatistics.orientations != "any":
if len(sample.readStatistics.orientations) > 1:
logging.warn(" ! multiple read pair orientations found within factor !\n"
" ! of 2x of one another; if you aren't expecting your !\n"
" ! input data to contain multiple orientations, this !\n"
" ! could be a bug in the mapping software or svviz !")
if len(sample.readStatistics.orientations) < 1:
logging.error(" No valid read orientations found for dataset:{}".format(sample.name))
sample.orientations = sample.readStatistics.orientations
if sample.orientations == "any":
sample.singleEnded = True
logging.info(" valid orientations: {}".format(",".join(sample.orientations) if sample.orientations!="any" else "any"))
if sample.orientations == "any":
searchDist = sample.readStatistics.readLengthUpperQuantile()
alignDist = sample.readStatistics.readLengthUpperQuantile()*1.25 + dataHub.args.context
else:
searchDist = sample.readStatistics.meanInsertSize()+sample.readStatistics.stddevInsertSize()*2
alignDist = sample.readStatistics.meanInsertSize()+sample.readStatistics.stddevInsertSize()*4 + dataHub.args.context
if dataHub.args.flanks:
searchDist += dataHub.args.context
sample.searchDistance = int(searchDist)
dataHub.alignDistance = max(dataHub.alignDistance, int(alignDist))
logging.info(" Using search distance: {}".format(sample.searchDistance))
logging.info(" Using align distance: {}".format(dataHub.alignDistance))
def loadReads(dataHub):
readCount = 0
for sample in dataHub:
logging.info(" - {}".format(sample.name))
sample.reads = remap.getReads(dataHub.variant, sample.bam, dataHub.args.min_mapq, dataHub.args.pair_min_mapq,
sample.searchDistance, sample.singleEnded, dataHub.args.include_supplementary)
readCount += len(sample.reads)
return readCount
def setSampleParams(dataHub):
for sample in dataHub:
sample.minMapq = dataHub.args.min_mapq
if sample.singleEnded:
sample.orientations = "any"
def runRemap(dataHub):
for sample in dataHub:
sample.alnCollections = remap.do_realign(dataHub.variant, sample.reads, dataHub.args.processes)
def runDisambiguation(dataHub):
flankingRegionCollection = flanking.FlankingRegionCollection(dataHub.variant)
for sample in dataHub:
disambiguate.batchDisambiguate(sample.alnCollections, sample.readStatistics, sample.orientations,
singleEnded=sample.singleEnded, flankingRegionCollection=flankingRegionCollection,
maxMultimappingSimilarity=dataHub.args.max_multimapping_similarity)
return disambiguate.checkMultimapping(dataHub)
def renderSamples(dataHub):
for sample in dataHub:
flankingReads = {"ref":[], "alt":[]}
if dataHub.args.flanks:
flankingReads["ref"] = [alnCollection.sets["ref"] for alnCollection in sample.alnCollections if alnCollection.why=="flanking"]
flankingReads["alt"] = [alnCollection.sets["alt"] for alnCollection in sample.alnCollections if alnCollection.why=="flanking"]
ref_track = track.Track(dataHub.variant.chromParts("ref"), sample.chosenSets("ref")+flankingReads["ref"], 3000, 4000,
variant=dataHub.variant, allele="ref", thickerLines=dataHub.args.thicker_lines, colorCigar=(not dataHub.args.skip_cigar))
sample.tracks["ref"] = ref_track
alt_track = track.Track(dataHub.variant.chromParts("alt"), sample.chosenSets("alt")+flankingReads["alt"], 5000, 15000,
variant=dataHub.variant, allele="alt", thickerLines=dataHub.args.thicker_lines, colorCigar=(not dataHub.args.skip_cigar))
sample.tracks["alt"] = alt_track
amb_track = track.Track(dataHub.variant.chromParts("ref"), sample.chosenSets("amb"), 4000, 10000,
variant=dataHub.variant, allele="amb", thickerLines=dataHub.args.thicker_lines, colorCigar=(not dataHub.args.skip_cigar))
sample.tracks["amb"] = amb_track
def renderAxesAndAnnotations(dataHub):
for allele in ["alt", "ref", "amb"]:
# TODO: store width somewhere better
t = dataHub.samples.values()[0].tracks[allele]
for name, annotationSet in dataHub.annotationSets.iteritems():
dataHub.alleleTracks[allele][name] = track.AnnotationTrack(annotationSet, t.scale, dataHub.variant, allele)
axis = track.Axis(t.scale, dataHub.variant, allele)
dataHub.alleleTracks[allele]["axis"] = axis
def ensureExportData(dataHub):
if dataHub.trackCompositor is None:
dataHub.trackCompositor = export.TrackCompositor(dataHub)
def runDirectExport(dataHub):
if dataHub.args.export:
logging.info("* Exporting views *")
ensureExportData(dataHub)
if dataHub.args.type == "batch" or dataHub.args.format is not None:
exportFormat = dataHub.args.format
if exportFormat is None:
exportFormat = "pdf"
if not os.path.exists(dataHub.args.export):
os.makedirs(dataHub.args.export)
path = os.path.join(dataHub.args.export, "{}.{}".format(dataHub.variant.shortName(), exportFormat))
else:
exportFormat = dataHub.args.export.split(".")[-1]
path = dataHub.args.export
exportData = dataHub.trackCompositor.render()
if exportFormat.lower() != "svg":
exportData = export.convertSVG(exportData, exportFormat)
outf = open(path, "w")
outf.write(exportData)
outf.close()
if dataHub.args.open_exported:
utilities.launchFile(dataHub.args.export)
def runWebView(dataHub):
if not dataHub.args.no_web:
## TODO: only prepare export SVG when needed
ensureExportData(dataHub)
plotInsertSizeDistributions(dataHub)
web.dataHub = dataHub
web.run(dataHub.args.port)
def plotInsertSizeDistributions(dataHub):
# TODO: show only for samples with insert size distributions (ie paired end)
if all(sample.readStatistics.hasInsertSizeDistribution() for sample in dataHub):
plotISDs = True
for name, sample in dataHub.samples.iteritems():
isd = sample.readStatistics
sample.insertSizePlot = insertsizes.plotInsertSizeDistribution(isd, name, dataHub)
plotISDs = plotISDs and sample.insertSizePlot
if not plotISDs:
for sample in dataHub:
sample.insertSizePlot = None
def generateDotplots(dataHub):
if dataHub.args.dotplots:
logging.info(" * Generating dotplots *")
if len(dataHub.variant.chromParts("ref")) > 1:
logging.warning(" --> currently don't support producing dotplots with multi-part variants")
return
dotplotPngData = dotplots.dotplot(dataHub)
if dotplotPngData is not None:
dataHub.dotplots["ref vs ref"] = dotplotPngData
def saveReads(dataHub):
if dataHub.args.save_reads:
logging.info("* Saving relevant reads *")
for i, sample in enumerate(dataHub):
outbam_path = dataHub.args.save_reads
if not outbam_path.endswith(".bam"):
outbam_path += ".bam"
if len(dataHub.samples) > 1:
logging.debug("Using i = {}".format(i))
outbam_path = outbam_path.replace(".bam", ".{}.bam".format(i))
            # write out just the reads we're interested in, for use later
bam_small = pysam.Samfile(outbam_path, "wb", template=sample.bam)
for read in sample.reads:
bam_small.write(read)
for read in sample.readStatistics.reads:
bam_small.write(read)
bam_small.close()
sorted_path = outbam_path.replace(".bam", ".sorted")
pysam.sort(outbam_path, sorted_path)
pysam.index(sorted_path+".bam")
def saveState(dataHub):
import cPickle as pickle
pickle.dump(dataHub, open(dataHub.args.save_state, "w"))
logging.warn("^"*20 + " saving state to pickle and exiting " + "^"*20)
def run(args):
# entry point from python
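    # Overall flow: estimate insert-size distributions, then for each variant
    # collect nearby reads, realign them against the ref and alt alleles,
    # assign reads to their best-supported allele, and render/export the
    # resulting views (or launch the interactive web view).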
args = commandline.parseArgs(args)
checkRequirements(args)
dataHub = datahub.DataHub()
dataHub.setArgs(args)
logging.info("* Sampling reads to calculate Insert Size Distributions *")
loadISDs(dataHub)
if args.type == "batch":
logging.info("* Loading variants from input VCF file *")
dataHub.args.no_web = True
svs = vcf.getVariants(dataHub)
logging.info(" Loaded {} variants".format(len(svs)))
else:
logging.info("* Loading variant *")
svs = [variants.getVariant(dataHub)]
summaryStats = summarystats.Summary()
skipped = 0
for i, variant in enumerate(svs):
logging.info("* Running for variant {}/{} {} *".format(i+1, len(svs), variant))
dataHub.reset()
dataHub.variant = variant
setSampleParams(dataHub)
debug.printDebugInfo(dataHub)
logging.info("* Loading reads and finding mates *")
readCount = loadReads(dataHub)
saveReads(dataHub)
if dataHub.args.max_reads and readCount > dataHub.args.max_reads:
logging.info("+++ Skipping variant -- number of reads ({}) exceeds threshold set by user ({})".format(
readCount, dataHub.args.max_reads))
skipped += 1
continue
logging.info("* Realigning reads *")
runRemap(dataHub)
logging.info("* Assigning reads to most probable alleles *")
runDisambiguation(dataHub)
if not dataHub.args.no_web or dataHub.args.export:
logging.info("* Rendering tracks *")
renderSamples(dataHub)
renderAxesAndAnnotations(dataHub)
generateDotplots(dataHub)
runDirectExport(dataHub)
summaryStats.addVariantResults(dataHub)
summaryStats.display()
if dataHub.args.summary is not None:
summaryStats.saveToPath(dataHub.args.summary)
if skipped > 0:
logging.info("\n\nSkipped {} variants because they exceeded the --max-reads threshold\n\n".format(skipped))
if dataHub.args.save_state is not None:
saveState(dataHub)
return
runWebView(dataHub)
def main():
# entry point for shell script
run(sys.argv)
if __name__ == '__main__':
main()
|
|
"""
Database API
(part of web.py)
"""
__all__ = [
"UnknownParamstyle", "UnknownDB", "TransactionError",
"sqllist", "sqlors", "reparam", "sqlquote",
"SQLQuery", "SQLParam", "sqlparam",
"SQLLiteral", "sqlliteral",
"database", 'DB',
]
import time, os, urllib, urlparse
try:
import datetime
except ImportError:
datetime = None
try: set
except NameError:
from sets import Set as set
from utils import threadeddict, storage, iters, iterbetter, safestr, safeunicode
try:
# db module can work independent of web.py
from webapi import debug, config
except:
import sys
debug = sys.stderr
config = storage()
class UnknownDB(Exception):
"""raised for unsupported dbms"""
pass
class _ItplError(ValueError):
def __init__(self, text, pos):
ValueError.__init__(self)
self.text = text
self.pos = pos
def __str__(self):
return "unfinished expression in %s at char %d" % (
repr(self.text), self.pos)
class TransactionError(Exception): pass
class UnknownParamstyle(Exception):
"""
raised for unsupported db paramstyles
(currently supported: qmark, numeric, format, pyformat)
"""
pass
class SQLParam(object):
"""
Parameter in SQLQuery.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
>>> q
<sql: "SELECT * FROM test WHERE name='joe'">
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.values()
['joe']
"""
__slots__ = ["value"]
def __init__(self, value):
self.value = value
def get_marker(self, paramstyle='pyformat'):
if paramstyle == 'qmark':
return '?'
elif paramstyle == 'numeric':
return ':1'
elif paramstyle is None or paramstyle in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle, paramstyle
def sqlquery(self):
return SQLQuery([self])
def __add__(self, other):
return self.sqlquery() + other
def __radd__(self, other):
return other + self.sqlquery()
def __str__(self):
return str(self.value)
def __repr__(self):
return '<param: %s>' % repr(self.value)
sqlparam = SQLParam
class SQLQuery(object):
"""
You can pass this sort of thing as a clause in any db function.
Otherwise, you can pass a dictionary to the keyword argument `vars`
and the function will call reparam for you.
Internally, consists of `items`, which is a list of strings and
SQLParams, which get concatenated to produce the actual query.
"""
__slots__ = ["items"]
# tested in sqlquote's docstring
def __init__(self, items=None):
r"""Creates a new SQLQuery.
>>> SQLQuery("x")
<sql: 'x'>
>>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
>>> q
<sql: 'SELECT * FROM test WHERE x=1'>
>>> q.query(), q.values()
('SELECT * FROM test WHERE x=%s', [1])
>>> SQLQuery(SQLParam(1))
<sql: '1'>
"""
if items is None:
self.items = []
elif isinstance(items, list):
self.items = items
elif isinstance(items, SQLParam):
self.items = [items]
elif isinstance(items, SQLQuery):
self.items = list(items.items)
else:
self.items = [items]
# Take care of SQLLiterals
for i, item in enumerate(self.items):
if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
self.items[i] = item.value.v
def append(self, value):
self.items.append(value)
def __add__(self, other):
if isinstance(other, basestring):
items = [other]
elif isinstance(other, SQLQuery):
items = other.items
else:
return NotImplemented
return SQLQuery(self.items + items)
def __radd__(self, other):
if isinstance(other, basestring):
items = [other]
else:
return NotImplemented
return SQLQuery(items + self.items)
def __iadd__(self, other):
if isinstance(other, (basestring, SQLParam)):
self.items.append(other)
elif isinstance(other, SQLQuery):
self.items.extend(other.items)
else:
return NotImplemented
return self
def __len__(self):
return len(self.query())
def query(self, paramstyle=None):
"""
Returns the query part of the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.query(paramstyle='qmark')
'SELECT * FROM test WHERE name=?'
"""
s = []
for x in self.items:
if isinstance(x, SQLParam):
x = x.get_marker(paramstyle)
s.append(safestr(x))
else:
x = safestr(x)
# automatically escape % characters in the query
# For backward compatibility, skip escaping when the query already looks escaped
if paramstyle in ['format', 'pyformat']:
if '%' in x and '%%' not in x:
x = x.replace('%', '%%')
s.append(x)
return "".join(s)
def values(self):
"""
Returns the values of the parameters used in the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.values()
['joe']
"""
return [i.value for i in self.items if isinstance(i, SQLParam)]
def join(items, sep=' ', prefix=None, suffix=None, target=None):
"""
Joins multiple queries.
>>> SQLQuery.join(['a', 'b'], ', ')
<sql: 'a, b'>
Optionally, prefix and suffix arguments can be provided.
>>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
<sql: '(a, b)'>
If target argument is provided, the items are appended to target instead of creating a new SQLQuery.
"""
if target is None:
target = SQLQuery()
target_items = target.items
if prefix:
target_items.append(prefix)
for i, item in enumerate(items):
if i != 0:
target_items.append(sep)
if isinstance(item, SQLQuery):
target_items.extend(item.items)
else:
target_items.append(item)
if suffix:
target_items.append(suffix)
return target
join = staticmethod(join)
def _str(self):
try:
return self.query() % tuple([sqlify(x) for x in self.values()])
except (ValueError, TypeError):
return self.query()
def __str__(self):
return safestr(self._str())
def __unicode__(self):
return safeunicode(self._str())
def __repr__(self):
return '<sql: %s>' % repr(str(self))
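# A minimal sketch of composing queries: strings, SQLParams and SQLQuerys
# concatenate with `+`, and the parameter values stay separate from the SQL text.
#
#     >>> q = "SELECT * FROM test WHERE name=" + sqlparam('joe') + " AND age=" + sqlparam(2)
#     >>> q
#     <sql: "SELECT * FROM test WHERE name='joe' AND age=2">
#     >>> q.query(), q.values()
#     ('SELECT * FROM test WHERE name=%s AND age=%s', ['joe', 2])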
class SQLLiteral:
"""
Protects a string from `sqlquote`.
>>> sqlquote('NOW()')
<sql: "'NOW()'">
>>> sqlquote(SQLLiteral('NOW()'))
<sql: 'NOW()'>
"""
def __init__(self, v):
self.v = v
def __repr__(self):
return self.v
sqlliteral = SQLLiteral
def _sqllist(values):
"""
>>> _sqllist([1, 2, 3])
<sql: '(1, 2, 3)'>
"""
items = []
items.append('(')
for i, v in enumerate(values):
if i != 0:
items.append(', ')
items.append(sqlparam(v))
items.append(')')
return SQLQuery(items)
def reparam(string_, dictionary):
"""
Takes a string and a dictionary and interpolates the string
using values from the dictionary. Returns an `SQLQuery` for the result.
>>> reparam("s = $s", dict(s=True))
<sql: "s = 't'">
>>> reparam("s IN $s", dict(s=[1, 2]))
<sql: 's IN (1, 2)'>
"""
dictionary = dictionary.copy() # eval mucks with it
# disable builtins to avoid the risk of remote code execution.
dictionary['__builtins__'] = object()
vals = []
result = []
for live, chunk in _interpolate(string_):
if live:
v = eval(chunk, dictionary)
result.append(sqlquote(v))
else:
result.append(chunk)
return SQLQuery.join(result, '')
def sqlify(obj):
"""
converts `obj` to its proper SQL version
>>> sqlify(None)
'NULL'
>>> sqlify(True)
"'t'"
>>> sqlify(3)
'3'
"""
# because `1 == True and hash(1) == hash(True)`
# we have to do this the hard way...
if obj is None:
return 'NULL'
elif obj is True:
return "'t'"
elif obj is False:
return "'f'"
elif isinstance(obj, long):
return str(obj)
elif datetime and isinstance(obj, datetime.datetime):
return repr(obj.isoformat())
else:
if isinstance(obj, unicode): obj = obj.encode('utf8')
return repr(obj)
def sqllist(lst):
"""
Converts the arguments for use in something like a WHERE clause.
>>> sqllist(['a', 'b'])
'a, b'
>>> sqllist('a')
'a'
>>> sqllist(u'abc')
u'abc'
"""
if isinstance(lst, basestring):
return lst
else:
return ', '.join(lst)
def sqlors(left, lst):
"""
`left` is a SQL clause like `tablename.arg = `
and `lst` is a list of values. Returns an `SQLQuery`
that ORs together the clause for each item in `lst`.
>>> sqlors('foo = ', [])
<sql: '1=2'>
>>> sqlors('foo = ', [1])
<sql: 'foo = 1'>
>>> sqlors('foo = ', 1)
<sql: 'foo = 1'>
>>> sqlors('foo = ', [1,2,3])
<sql: '(foo = 1 OR foo = 2 OR foo = 3 OR 1=2)'>
"""
if isinstance(lst, iters):
lst = list(lst)
ln = len(lst)
if ln == 0:
return SQLQuery("1=2")
if ln == 1:
lst = lst[0]
if isinstance(lst, iters):
return SQLQuery(['('] +
sum([[left, sqlparam(x), ' OR '] for x in lst], []) +
['1=2)']
)
else:
return left + sqlparam(lst)
def sqlwhere(dictionary, grouping=' AND '):
"""
Converts a `dictionary` to an SQL WHERE clause `SQLQuery`.
>>> sqlwhere({'cust_id': 2, 'order_id':3})
<sql: 'order_id = 3 AND cust_id = 2'>
>>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ')
<sql: 'order_id = 3, cust_id = 2'>
>>> sqlwhere({'a': 'a', 'b': 'b'}).query()
'a = %s AND b = %s'
"""
return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping)
def sqlquote(a):
"""
Ensures `a` is quoted properly for use in a SQL query.
>>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
<sql: "WHERE x = 't' AND y = 3">
>>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
<sql: "WHERE x = 't' AND y IN (2, 3)">
"""
if isinstance(a, list):
return _sqllist(a)
else:
return sqlparam(a).sqlquery()
class Transaction:
"""Database transaction."""
def __init__(self, ctx):
self.ctx = ctx
self.transaction_count = transaction_count = len(ctx.transactions)
class transaction_engine:
"""Transaction Engine used in top level transactions."""
def do_transact(self):
ctx.commit(unload=False)
def do_commit(self):
ctx.commit()
def do_rollback(self):
ctx.rollback()
class subtransaction_engine:
"""Transaction Engine used in sub transactions."""
def query(self, q):
db_cursor = ctx.db.cursor()
ctx.db_execute(db_cursor, SQLQuery(q % transaction_count))
def do_transact(self):
self.query('SAVEPOINT webpy_sp_%s')
def do_commit(self):
self.query('RELEASE SAVEPOINT webpy_sp_%s')
def do_rollback(self):
self.query('ROLLBACK TO SAVEPOINT webpy_sp_%s')
class dummy_engine:
"""Transaction Engine used instead of subtransaction_engine
when sub transactions are not supported."""
do_transact = do_commit = do_rollback = lambda self: None
if self.transaction_count:
# nested transactions are not supported in some databases
if self.ctx.get('ignore_nested_transactions'):
self.engine = dummy_engine()
else:
self.engine = subtransaction_engine()
else:
self.engine = transaction_engine()
self.engine.do_transact()
self.ctx.transactions.append(self)
def __enter__(self):
return self
def __exit__(self, exctype, excvalue, traceback):
if exctype is not None:
self.rollback()
else:
self.commit()
def commit(self):
if len(self.ctx.transactions) > self.transaction_count:
self.engine.do_commit()
self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
def rollback(self):
if len(self.ctx.transactions) > self.transaction_count:
self.engine.do_rollback()
self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
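# A minimal usage sketch, assuming `db` is a connected DB instance (see
# DB.transaction below): the context manager commits on normal exit and rolls
# back on an exception, and a nested transaction becomes a SAVEPOINT.
#
#     with db.transaction():
#         db.insert('person', name='foo')
#         with db.transaction():  # subtransaction -> SAVEPOINT webpy_sp_1
#             db.insert('person', name='bar')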
class DB:
"""Database"""
def __init__(self, db_module, keywords):
"""Creates a database.
"""
# some DB implementations take an optional parameter `driver` to use a specific driver module,
# but it should not be passed to connect
keywords.pop('driver', None)
self.db_module = db_module
self.keywords = keywords
self._ctx = threadeddict()
# flag to enable/disable printing queries
self.printing = config.get('debug_sql', config.get('debug', False))
self.supports_multiple_insert = False
try:
import DBUtils
# enable pooling if DBUtils module is available.
self.has_pooling = True
except ImportError:
self.has_pooling = False
# Pooling can be disabled by passing pooling=False in the keywords.
self.has_pooling = self.keywords.pop('pooling', True) and self.has_pooling
def _getctx(self):
if not self._ctx.get('db'):
self._load_context(self._ctx)
return self._ctx
ctx = property(_getctx)
def _load_context(self, ctx):
ctx.dbq_count = 0
ctx.transactions = [] # stack of transactions
if self.has_pooling:
ctx.db = self._connect_with_pooling(self.keywords)
else:
ctx.db = self._connect(self.keywords)
ctx.db_execute = self._db_execute
if not hasattr(ctx.db, 'commit'):
ctx.db.commit = lambda: None
if not hasattr(ctx.db, 'rollback'):
ctx.db.rollback = lambda: None
def commit(unload=True):
# do db commit and release the connection if pooling is enabled.
ctx.db.commit()
if unload and self.has_pooling:
self._unload_context(self._ctx)
def rollback():
# do db rollback and release the connection if pooling is enabled.
ctx.db.rollback()
if self.has_pooling:
self._unload_context(self._ctx)
ctx.commit = commit
ctx.rollback = rollback
def _unload_context(self, ctx):
del ctx.db
def _connect(self, keywords):
return self.db_module.connect(**keywords)
def _connect_with_pooling(self, keywords):
def get_pooled_db():
from DBUtils import PooledDB
# In DBUtils 0.9.3, `dbapi` argument is renamed as `creator`
# see Bug#122112
if PooledDB.__version__.split('.') < '0.9.3'.split('.'):
return PooledDB.PooledDB(dbapi=self.db_module, **keywords)
else:
return PooledDB.PooledDB(creator=self.db_module, **keywords)
if getattr(self, '_pooleddb', None) is None:
self._pooleddb = get_pooled_db()
return self._pooleddb.connection()
def _db_cursor(self):
return self.ctx.db.cursor()
def _param_marker(self):
"""Returns parameter marker based on paramstyle attribute if this database."""
style = getattr(self, 'paramstyle', 'pyformat')
if style == 'qmark':
return '?'
elif style == 'numeric':
return ':1'
elif style in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle, style
def _db_execute(self, cur, sql_query):
"""executes an sql query"""
self.ctx.dbq_count += 1
try:
a = time.time()
query, params = self._process_query(sql_query)
out = cur.execute(query, params)
b = time.time()
except:
if self.printing:
print >> debug, 'ERR:', str(sql_query)
if self.ctx.transactions:
self.ctx.transactions[-1].rollback()
else:
self.ctx.rollback()
raise
if self.printing:
print >> debug, '%s (%s): %s' % (round(b-a, 2), self.ctx.dbq_count, str(sql_query))
return out
def _process_query(self, sql_query):
"""Takes the SQLQuery object and returns query string and parameters.
"""
paramstyle = getattr(self, 'paramstyle', 'pyformat')
query = sql_query.query(paramstyle)
params = sql_query.values()
return query, params
def _where(self, where, vars):
if isinstance(where, (int, long)):
where = "id = " + sqlparam(where)
#@@@ for backward-compatibility
elif isinstance(where, (list, tuple)) and len(where) == 2:
where = SQLQuery(where[0], where[1])
elif isinstance(where, dict):
where = self._where_dict(where)
elif isinstance(where, SQLQuery):
pass
else:
where = reparam(where, vars)
return where
def _where_dict(self, where):
where_clauses = []
for k, v in where.iteritems():
where_clauses.append(k + ' = ' + sqlquote(v))
if where_clauses:
return SQLQuery.join(where_clauses, " AND ")
else:
return None
def query(self, sql_query, vars=None, processed=False, _test=False):
"""
Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
If `processed=True`, `vars` is a `reparam`-style list to use
instead of interpolating.
>>> db = DB(None, {})
>>> db.query("SELECT * FROM foo", _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
>>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
"""
if vars is None: vars = {}
if not processed and not isinstance(sql_query, SQLQuery):
sql_query = reparam(sql_query, vars)
if _test: return sql_query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, sql_query)
if db_cursor.description:
names = [x[0] for x in db_cursor.description]
def iterwrapper():
row = db_cursor.fetchone()
while row:
yield storage(dict(zip(names, row)))
row = db_cursor.fetchone()
out = iterbetter(iterwrapper())
out.__len__ = lambda: int(db_cursor.rowcount)
out.list = lambda: [storage(dict(zip(names, x))) \
for x in db_cursor.fetchall()]
else:
out = db_cursor.rowcount
if not self.ctx.transactions:
self.ctx.commit()
return out
def select(self, tables, vars=None, what='*', where=None, order=None, group=None,
limit=None, offset=None, _test=False):
"""
Selects `what` from `tables` with clauses `where`, `order`,
`group`, `limit`, and `offset`. Uses vars to interpolate.
Otherwise, each clause can be a SQLQuery.
>>> db = DB(None, {})
>>> db.select('foo', _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
<sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
>>> db.select('foo', where={'id': 5}, _test=True)
<sql: 'SELECT * FROM foo WHERE id = 5'>
"""
if vars is None: vars = {}
sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset)
clauses = [self.gen_clause(sql, val, vars) for sql, val in sql_clauses if val is not None]
qout = SQLQuery.join(clauses)
if _test: return qout
return self.query(qout, processed=True)
def where(self, table, what='*', order=None, group=None, limit=None,
offset=None, _test=False, **kwargs):
"""
Selects from `table` where keys are equal to values in `kwargs`.
>>> db = DB(None, {})
>>> db.where('foo', bar_id=3, _test=True)
<sql: 'SELECT * FROM foo WHERE bar_id = 3'>
>>> db.where('foo', source=2, crust='dewey', _test=True)
<sql: "SELECT * FROM foo WHERE source = 2 AND crust = 'dewey'">
>>> db.where('foo', _test=True)
<sql: 'SELECT * FROM foo'>
"""
where = self._where_dict(kwargs)
return self.select(table, what=what, order=order,
group=group, limit=limit, offset=offset, _test=_test,
where=where)
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('LIMIT', limit),
('OFFSET', offset))
def gen_clause(self, sql, val, vars):
if isinstance(val, (int, long)):
if sql == 'WHERE':
nout = 'id = ' + sqlquote(val)
else:
nout = SQLQuery(val)
#@@@
elif isinstance(val, (list, tuple)) and len(val) == 2:
nout = SQLQuery(val[0], val[1]) # backwards-compatibility
elif sql == 'WHERE' and isinstance(val, dict):
nout = self._where_dict(val)
elif isinstance(val, SQLQuery):
nout = val
else:
nout = reparam(val, vars)
def xjoin(a, b):
if a and b: return a + ' ' + b
else: return a or b
return xjoin(sql, nout)
def insert(self, tablename, seqname=None, _test=False, **values):
"""
Inserts `values` into `tablename`. Returns current sequence ID.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
>>> q
<sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())">
>>> q.query()
'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())'
>>> q.values()
[2, 'bob']
"""
def q(x): return "(" + x + ")"
if values:
_keys = SQLQuery.join(values.keys(), ', ')
_values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ')
sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values)
else:
sql_query = SQLQuery(self._get_insert_default_values_query(tablename))
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s DEFAULT VALUES" % table
def multiple_insert(self, tablename, values, seqname=None, _test=False):
"""
Inserts multiple rows into `tablename`. The `values` must be a list of dictionaries,
one for each row to be inserted, each with the same set of keys.
Returns the list of ids of the inserted rows.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> db.supports_multiple_insert = True
>>> values = [{"name": "foo", "email": "[email protected]"}, {"name": "bar", "email": "[email protected]"}]
>>> db.multiple_insert('person', values=values, _test=True)
<sql: "INSERT INTO person (name, email) VALUES ('foo', '[email protected]'), ('bar', '[email protected]')">
"""
if not values:
return []
if not self.supports_multiple_insert:
out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]
if seqname is False:
return None
else:
return out
keys = values[0].keys()
#@@ make sure all keys are valid
for v in values:
if v.keys() != keys:
raise ValueError, 'Not all rows have the same keys'
sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))
for i, row in enumerate(values):
if i != 0:
sql_query.append(", ")
SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")")
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
out = range(out-len(values)+1, out+1)
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def update(self, tables, where, vars=None, _test=False, **values):
"""
Update `tables` with clause `where` (interpolated using `vars`)
and setting `values`.
>>> db = DB(None, {})
>>> name = 'Joseph'
>>> q = db.update('foo', where='name = $name', name='bob', age=2,
... created=SQLLiteral('NOW()'), vars=locals(), _test=True)
>>> q
<sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'">
>>> q.query()
'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s'
>>> q.values()
[2, 'bob', 'Joseph']
"""
if vars is None: vars = {}
where = self._where(where, vars)
query = (
"UPDATE " + sqllist(tables) +
" SET " + sqlwhere(values, ', ') +
" WHERE " + where)
if _test: return query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, query)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def delete(self, table, where, using=None, vars=None, _test=False):
"""
Deletes from `table` with clauses `where` and `using`.
>>> db = DB(None, {})
>>> name = 'Joe'
>>> db.delete('foo', where='name = $name', vars=locals(), _test=True)
<sql: "DELETE FROM foo WHERE name = 'Joe'">
"""
if vars is None: vars = {}
where = self._where(where, vars)
q = 'DELETE FROM ' + table
if using: q += ' USING ' + sqllist(using)
if where: q += ' WHERE ' + where
if _test: return q
db_cursor = self._db_cursor()
self._db_execute(db_cursor, q)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def _process_insert_query(self, query, tablename, seqname):
return query
def transaction(self):
"""Start a transaction."""
return Transaction(self.ctx)
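# A minimal end-to-end sketch, assuming a reachable database and the keyword
# form accepted by `database()` further below; each call mirrors the doctests
# above (`_test=True` only renders the SQL instead of executing it).
#
#     db = database(dbn='postgres', db='mygreatdb', user='james', pw='day')
#     db.insert('foo', name='bob', age=2)
#     rows = db.select('foo', where='age = $age', vars={'age': 2})
#     db.update('foo', where='name = $name', vars={'name': 'bob'}, age=3)
#     db.delete('foo', where='age = $age', vars={'age': 3})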
class PostgresDB(DB):
"""Postgres driver."""
def __init__(self, **keywords):
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
db_module = import_driver(["psycopg2", "psycopg", "pgdb"], preferred=keywords.pop('driver', None))
if db_module.__name__ == "psycopg2":
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
if db_module.__name__ == "pgdb" and 'port' in keywords:
keywords["host"] += ":" + str(keywords.pop('port'))
# if db is not provided, the postgres driver will take it from the PGDATABASE environment variable
if 'db' in keywords:
keywords['database'] = keywords.pop('db')
self.dbname = "postgres"
self.paramstyle = db_module.paramstyle
DB.__init__(self, db_module, keywords)
self.supports_multiple_insert = True
self._sequences = None
def _process_insert_query(self, query, tablename, seqname):
if seqname is None:
# when seqname is not provided, guess the seqname and make sure it exists
seqname = tablename + "_id_seq"
if seqname not in self._get_all_sequences():
seqname = None
if seqname:
query += "; SELECT currval('%s')" % seqname
return query
def _get_all_sequences(self):
"""Query postgres to find names of all sequences used in this database."""
if self._sequences is None:
q = "SELECT c.relname FROM pg_class c WHERE c.relkind = 'S'"
self._sequences = set([c.relname for c in self.query(q)])
return self._sequences
def _connect(self, keywords):
conn = DB._connect(self, keywords)
try:
conn.set_client_encoding('UTF8')
except AttributeError:
# fallback for pgdb driver
conn.cursor().execute("set client_encoding to 'UTF-8'")
return conn
def _connect_with_pooling(self, keywords):
conn = DB._connect_with_pooling(self, keywords)
conn._con._con.set_client_encoding('UTF8')
return conn
class MySQLDB(DB):
def __init__(self, **keywords):
import MySQLdb as db
if 'pw' in keywords:
keywords['passwd'] = keywords['pw']
del keywords['pw']
if 'charset' not in keywords:
keywords['charset'] = 'utf8'
elif keywords['charset'] is None:
del keywords['charset']
self.paramstyle = db.paramstyle = 'pyformat' # it's both, like psycopg
self.dbname = "mysql"
DB.__init__(self, db, keywords)
self.supports_multiple_insert = True
def _process_insert_query(self, query, tablename, seqname):
return query, SQLQuery('SELECT last_insert_id();')
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s () VALUES()" % table
def import_driver(drivers, preferred=None):
"""Import the first available driver or preferred driver.
"""
if preferred:
drivers = [preferred]
for d in drivers:
try:
return __import__(d, None, None, ['x'])
except ImportError:
pass
raise ImportError("Unable to import " + " or ".join(drivers))
class SqliteDB(DB):
def __init__(self, **keywords):
db = import_driver(["sqlite3", "pysqlite2.dbapi2", "sqlite"], preferred=keywords.pop('driver', None))
if db.__name__ in ["sqlite3", "pysqlite2.dbapi2"]:
db.paramstyle = 'qmark'
# sqlite driver doesn't create datetime objects for timestamp columns unless the `detect_types` option is passed.
# It seems to be supported by the sqlite3 and pysqlite2 drivers, not sure about sqlite.
keywords.setdefault('detect_types', db.PARSE_DECLTYPES)
self.paramstyle = db.paramstyle
keywords['database'] = keywords.pop('db')
keywords['pooling'] = False # sqlite doesn't allow connections to be shared between threads
self.dbname = "sqlite"
DB.__init__(self, db, keywords)
def _process_insert_query(self, query, tablename, seqname):
return query, SQLQuery('SELECT last_insert_rowid();')
def query(self, *a, **kw):
out = DB.query(self, *a, **kw)
if isinstance(out, iterbetter):
del out.__len__
return out
class FirebirdDB(DB):
"""Firebird Database.
"""
def __init__(self, **keywords):
try:
import kinterbasdb as db
except Exception:
db = None
pass
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
keywords['database'] = keywords.pop('db')
self.paramstyle = db.paramstyle
DB.__init__(self, db, keywords)
def delete(self, table, where=None, using=None, vars=None, _test=False):
# firebird doesn't support the USING clause
using=None
return DB.delete(self, table, where, using, vars, _test)
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', ''),
('FIRST', limit),
('SKIP', offset),
('', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order)
)
class MSSQLDB(DB):
def __init__(self, **keywords):
import pymssql as db
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
keywords['database'] = keywords.pop('db')
self.dbname = "mssql"
DB.__init__(self, db, keywords)
def _process_query(self, sql_query):
"""Takes the SQLQuery object and returns query string and parameters.
"""
# MSSQLDB expects params to be a tuple.
# Overriding the default implementation to convert params to a tuple.
paramstyle = getattr(self, 'paramstyle', 'pyformat')
query = sql_query.query(paramstyle)
params = sql_query.values()
return query, tuple(params)
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', what),
('TOP', limit),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('OFFSET', offset))
def _test(self):
"""Test LIMIT.
Fake presence of pymssql module for running tests.
>>> import sys
>>> sys.modules['pymssql'] = sys.modules['sys']
MSSQL has TOP clause instead of LIMIT clause.
>>> db = MSSQLDB(db='test', user='joe', pw='secret')
>>> db.select('foo', limit=4, _test=True)
<sql: 'SELECT * TOP 4 FROM foo'>
"""
pass
class OracleDB(DB):
def __init__(self, **keywords):
import cx_Oracle as db
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
#@@ TODO: use db.makedsn if host, port is specified
keywords['dsn'] = keywords.pop('db')
self.dbname = 'oracle'
db.paramstyle = 'numeric'
self.paramstyle = db.paramstyle
# oracle doesn't support pooling
keywords.pop('pooling', None)
DB.__init__(self, db, keywords)
def _process_insert_query(self, query, tablename, seqname):
if seqname is None:
# It is not possible to get seq name from table name in Oracle
return query
else:
return query + "; SELECT %s.currval FROM dual" % seqname
def dburl2dict(url):
"""
Takes a URL to a database and parses it into an equivalent dictionary.
>>> dburl2dict('postgres:///mygreatdb')
{'pw': None, 'dbn': 'postgres', 'db': 'mygreatdb', 'host': None, 'user': None, 'port': None}
>>> dburl2dict('postgres://james:[email protected]:5432/mygreatdb')
{'pw': 'day', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': 5432}
>>> dburl2dict('postgres://james:[email protected]/mygreatdb')
{'pw': 'day', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
>>> dburl2dict('postgres://james:d%[email protected]/mygreatdb')
{'pw': 'd@y', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
>>> dburl2dict('mysql://james:d%[email protected]/mygreatdb')
{'pw': 'd@y', 'dbn': 'mysql', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
"""
parts = urlparse.urlparse(urllib.unquote(url))
return {'dbn': parts.scheme,
'user': parts.username,
'pw': parts.password,
'db': parts.path[1:],
'host': parts.hostname,
'port': parts.port}
_databases = {}
def database(dburl=None, **params):
"""Creates appropriate database using params.
Pooling will be enabled if DBUtils module is available.
Pooling can be disabled by passing pooling=False in params.
"""
if not dburl and not params:
dburl = os.environ['DATABASE_URL']
if dburl:
params = dburl2dict(dburl)
dbn = params.pop('dbn')
if dbn in _databases:
return _databases[dbn](**params)
else:
raise UnknownDB, dbn
def register_database(name, clazz):
"""
Register a database.
>>> class LegacyDB(DB):
... def __init__(self, **params):
... pass
...
>>> register_database('legacy', LegacyDB)
>>> db = database(dbn='legacy', db='test', user='joe', passwd='secret')
"""
_databases[name] = clazz
register_database('mysql', MySQLDB)
register_database('postgres', PostgresDB)
register_database('sqlite', SqliteDB)
register_database('firebird', FirebirdDB)
register_database('mssql', MSSQLDB)
register_database('oracle', OracleDB)
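# A minimal sketch of the factory entry point, assuming the relevant driver
# module is importable: `database()` accepts either keyword parameters or a
# DATABASE_URL-style string parsed by `dburl2dict` above.
#
#     db = database(dbn='mysql', db='test', user='joe', pw='secret')
#     db = database('postgres://james:[email protected]:5432/mygreatdb')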
def _interpolate(format):
"""
Takes a format string and returns a list of 2-tuples of the form
(boolean, string) where boolean says whether string should be evaled
or not.
from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
"""
from tokenize import tokenprog
def matchorfail(text, pos):
match = tokenprog.match(text, pos)
if match is None:
raise _ItplError(text, pos)
return match, match.end()
namechars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
chunks = []
pos = 0
while 1:
dollar = format.find("$", pos)
if dollar < 0:
break
nextchar = format[dollar + 1]
if nextchar == "{":
chunks.append((0, format[pos:dollar]))
pos, level = dollar + 2, 1
while level:
match, pos = matchorfail(format, pos)
tstart, tend = match.regs[3]
token = format[tstart:tend]
if token == "{":
level = level + 1
elif token == "}":
level = level - 1
chunks.append((1, format[dollar + 2:pos - 1]))
elif nextchar in namechars:
chunks.append((0, format[pos:dollar]))
match, pos = matchorfail(format, dollar + 1)
while pos < len(format):
if format[pos] == "." and \
pos + 1 < len(format) and format[pos + 1] in namechars:
match, pos = matchorfail(format, pos + 1)
elif format[pos] in "([":
pos, level = pos + 1, 1
while level:
match, pos = matchorfail(format, pos)
tstart, tend = match.regs[3]
token = format[tstart:tend]
if token[0] in "([":
level = level + 1
elif token[0] in ")]":
level = level - 1
else:
break
chunks.append((1, format[dollar + 1:pos]))
else:
chunks.append((0, format[pos:dollar + 1]))
pos = dollar + 1 + (nextchar == "$")
if pos < len(format):
chunks.append((0, format[pos:]))
return chunks
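# A minimal sketch of what _interpolate produces: `reparam` above evaluates the
# "live" (1, ...) chunks and passes the (0, ...) chunks through verbatim.
#
#     >>> _interpolate("name = $name")
#     [(0, 'name = '), (1, 'name')]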
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
"""
Author: Dr. Mohamed Amine Bouhlel <[email protected]>
Some functions are copied from gaussian_process submodule (Scikit-learn 0.14)
This package is distributed under New BSD license.
"""
import numpy as np
from scipy import linalg, optimize
from copy import deepcopy
from smt.surrogate_models.surrogate_model import SurrogateModel
from smt.utils.kriging_utils import differences
from smt.utils.kriging_utils import constant, linear, quadratic
from smt.utils.kriging_utils import (
squar_exp,
abs_exp,
act_exp,
standardization,
cross_distances,
matern52,
matern32,
)
from scipy.stats import multivariate_normal as m_norm
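# A minimal usage sketch, assuming the concrete KRG subclass shipped with SMT
# (smt.surrogate_models.KRG); KrgBased below only provides the shared options,
# likelihood and prediction machinery for the Kriging family of models.
#
#     import numpy as np
#     from smt.surrogate_models import KRG
#
#     xt = np.array([[0.0], [1.0], [2.0], [3.0], [4.0]])
#     yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
#     sm = KRG(theta0=[1e-2], corr="squar_exp", poly="constant")
#     sm.set_training_values(xt, yt)
#     sm.train()
#     y = sm.predict_values(np.array([[2.5]]))
#     s2 = sm.predict_variances(np.array([[2.5]]))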
class KrgBased(SurrogateModel):
_regression_types = {"constant": constant, "linear": linear, "quadratic": quadratic}
_correlation_types = {
"abs_exp": abs_exp,
"squar_exp": squar_exp,
"act_exp": act_exp,
"matern52": matern52,
"matern32": matern32,
}
def _initialize(self):
super(KrgBased, self)._initialize()
declare = self.options.declare
supports = self.supports
declare(
"poly",
"constant",
values=("constant", "linear", "quadratic"),
desc="Regression function type",
types=(str),
)
declare(
"corr",
"squar_exp",
values=("abs_exp", "squar_exp", "act_exp", "matern52", "matern32"),
desc="Correlation function type",
types=(str),
)
declare(
"nugget",
100.0 * np.finfo(np.double).eps,
types=(float),
desc="a jitter for numerical stability",
)
declare(
"theta0", [1e-2], types=(list, np.ndarray), desc="Initial hyperparameters"
)
# In practice, in 1D and for X in [0,1], theta^{-2} in [1e-2,infty), i.e.
# theta in (0,1e1], is a good choice to avoid overfitting. By standardising
# X in R, X_norm = (X-X_mean)/X_std, then X_norm in [-1,1] when considering
# one-std intervals. This leads to theta in (0,2e1]
declare(
"theta_bounds",
[1e-6, 2e1],
types=(list, np.ndarray),
desc="bounds for hyperparameters",
)
declare(
"hyper_opt",
"Cobyla",
values=("Cobyla", "TNC"),
desc="Optimiser for hyperparameters optimisation",
types=(str),
)
declare(
"eval_noise",
False,
types=bool,
values=(True, False),
desc="noise evaluation flag",
)
declare(
"noise0",
[0.0],
types=(list, np.ndarray),
desc="Initial noise hyperparameters",
)
declare(
"noise_bounds",
[100.0 * np.finfo(np.double).eps, 1e10],
types=(list, np.ndarray),
desc="bounds for noise hyperparameters",
)
declare(
"use_het_noise",
False,
types=bool,
values=(True, False),
desc="heteroscedastic noise evaluation flag",
)
self.name = "KrigingBased"
self.best_iteration_fail = None
self.nb_ill_matrix = 5
supports["derivatives"] = True
supports["variances"] = True
def _new_train(self):
# Sampling points X and y
X = self.training_points[None][0][0]
y = self.training_points[None][0][1]
# Compute PLS-coefficients (attr of self) and modified X and y (if GEKPLS is used)
if self.name not in ["Kriging", "MGP"]:
X, y = self._compute_pls(X.copy(), y.copy())
self._check_param()
# Center and scale X and y
(
self.X_norma,
self.y_norma,
self.X_offset,
self.y_mean,
self.X_scale,
self.y_std,
) = standardization(X, y)
if not self.options["eval_noise"]:
self.optimal_noise = np.array(self.options["noise0"])
else:
if self.options["use_het_noise"]:
# hetGP works with unique design variables when noise variances are not given
(self.X_norma, index_unique, nt_reps,) = np.unique(
self.X_norma, return_inverse=True, return_counts=True, axis=0
)
self.nt = self.X_norma.shape[0]
# computing the mean of the output per unique design variable (see Binois et al., 2018)
y_norma_unique = []
for i in range(self.nt):
y_norma_unique.append(np.mean(self.y_norma[index_unique == i]))
# pointwise sensible estimates of the noise variances (see Ankenman et al., 2010)
self.optimal_noise = self.options["noise0"] * np.ones(self.nt)
for i in range(self.nt):
diff = self.y_norma[index_unique == i] - y_norma_unique[i]
if np.sum(diff ** 2) != 0.0:
self.optimal_noise[i] = np.std(diff, ddof=1) ** 2
self.optimal_noise = self.optimal_noise / nt_reps
self.y_norma = y_norma_unique
# Calculate matrix of distances D between samples
D, self.ij = cross_distances(self.X_norma)
if np.min(np.sum(np.abs(D), axis=1)) == 0.0:
print(
"Warning: multiple x input features have the same value (at least same row twice)."
)
####
# Regression matrix and parameters
self.F = self._regression_types[self.options["poly"]](self.X_norma)
n_samples_F = self.F.shape[0]
if self.F.ndim > 1:
p = self.F.shape[1]
else:
p = 1
self._check_F(n_samples_F, p)
# Optimization
(
self.optimal_rlf_value,
self.optimal_par,
self.optimal_theta,
) = self._optimize_hyperparam(D)
if self.name in ["MGP"]:
self._specific_train()
else:
if self.options["eval_noise"] and not self.options["use_het_noise"]:
self.optimal_noise = self.optimal_theta[-1]
self.optimal_theta = self.optimal_theta[:-1]
# if self.name != "MGP":
# del self.y_norma, self.D
def _train(self):
"""
Train the model
"""
# outputs['sol'] = self.sol
self._new_train()
def _reduced_likelihood_function(self, theta):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta: list(n_comp), optional
- An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Returns
-------
reduced_likelihood_function_value: real
- The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par: dict()
- A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or for Ordinary Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
Q, G
QR decomposition of the matrix Ft.
"""
# Initialize output
reduced_likelihood_function_value = -np.inf
par = {}
# Set up R
nugget = self.options["nugget"]
if self.options["eval_noise"]:
nugget = 0
noise = self.noise0
tmp_var = theta
if self.options["use_het_noise"]:
noise = self.optimal_noise
if self.options["eval_noise"] and not self.options["use_het_noise"]:
theta = tmp_var[0 : self.D.shape[1]]
noise = tmp_var[self.D.shape[1] :]
r = self._correlation_types[self.options["corr"]](theta, self.D).reshape(-1, 1)
R = np.eye(self.nt) * (1.0 + nugget + noise)
R[self.ij[:, 0], self.ij[:, 1]] = r[:, 0]
R[self.ij[:, 1], self.ij[:, 0]] = r[:, 0]
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except (linalg.LinAlgError, ValueError) as e:
print("exception : ", e)
# raise e
return reduced_likelihood_function_value, par
# Get the generalized least-squares solution
Ft = linalg.solve_triangular(C, self.F, lower=True)
Q, G = linalg.qr(Ft, mode="economic")
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(self.F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception(
"F is too ill conditioned. Poor combination "
"of regression model and observations."
)
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y_norma, lower=True)
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
rho = Yt - np.dot(Ft, beta)
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2.0 / self.nt)).prod()
# Compute/Organize output
p = 0
q = 0
if self.name in ["MFK", "MFKPLS", "MFKPLSK"]:
p = self.p
q = self.q
sigma2 = (rho ** 2.0).sum(axis=0) / (self.nt - p - q)
reduced_likelihood_function_value = -(self.nt - p - q) * np.log10(
sigma2.sum()
) - self.nt * np.log10(detR)
par["sigma2"] = sigma2 * self.y_std ** 2.0
par["beta"] = beta
par["gamma"] = linalg.solve_triangular(C.T, rho)
par["C"] = C
par["Ft"] = Ft
par["G"] = G
par["Q"] = Q
if self.name in ["MGP"]:
reduced_likelihood_function_value += self._reduced_log_prior(theta)
# A particular case when fmin_cobyla fails
if (self.best_iteration_fail is not None) and (
not np.isinf(reduced_likelihood_function_value)
):
if reduced_likelihood_function_value > self.best_iteration_fail:
self.best_iteration_fail = reduced_likelihood_function_value
self._thetaMemory = np.array(tmp_var)
elif (self.best_iteration_fail is None) and (
not np.isinf(reduced_likelihood_function_value)
):
self.best_iteration_fail = reduced_likelihood_function_value
self._thetaMemory = np.array(tmp_var)
return reduced_likelihood_function_value, par
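# Up to additive constants, the value returned above is the concentrated
# (profile) log10-likelihood with beta and sigma2 eliminated:
#     rho    = C^{-1} (y - F beta),  with R = C C^T (Cholesky)
#     sigma2 = rho^T rho / (nt - p - q)
#     value  = -(nt - p - q) * log10(sum(sigma2)) - log10(det R)
# where det R enters through the diagonal of C (the detR variable holds
# det(R)^(1/nt), so nt * log10(detR) equals log10(det R)).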
def _reduced_likelihood_gradient(self, theta):
"""
Evaluates the reduced_likelihood_gradient at a set of hyperparameters.
Parameters
----------
theta : list(n_comp), optional
- An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Returns
-------
grad_red : np.ndarray (dim,1)
Derivative of the reduced_likelihood
par: dict()
- A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or for Ordinary Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
Q, G
QR decomposition of the matrix Ft.
dr
List of all the correlation matrix derivative
tr
List of all the trace parts in the reduced likelihood derivatives
dmu
List of all the mean derivatives
arg
List of all minus_Cinv_dRdomega_gamma
dsigma
List of all sigma derivatives
"""
red, par = self._reduced_likelihood_function(theta)
C = par["C"]
gamma = par["gamma"]
Q = par["Q"]
G = par["G"]
sigma_2 = par["sigma2"]
nb_theta = len(theta)
grad_red = np.zeros(nb_theta)
dr_all = []
tr_all = []
dmu_all = []
arg_all = []
dsigma_all = []
dbeta_all = []
for i_der in range(nb_theta):
# Compute R derivatives
dr = self._correlation_types[self.options["corr"]](
theta, self.D, grad_ind=i_der
)
dr_all.append(dr)
dR = np.zeros((self.nt, self.nt))
dR[self.ij[:, 0], self.ij[:, 1]] = dr[:, 0]
dR[self.ij[:, 1], self.ij[:, 0]] = dr[:, 0]
# Compute beta derivatives
Cinv_dR_gamma = linalg.solve_triangular(C, np.dot(dR, gamma), lower=True)
dbeta = -linalg.solve_triangular(G, np.dot(Q.T, Cinv_dR_gamma))
arg_all.append(Cinv_dR_gamma)
dbeta_all.append(dbeta)
# Compute mu derivatives
dmu = np.dot(self.F, dbeta)
dmu_all.append(dmu)
# Compute log(detR) derivatives
tr_1 = linalg.solve_triangular(C, dR, lower=True)
tr = linalg.solve_triangular(C.T, tr_1)
tr_all.append(tr)
# Compute Sigma2 Derivatives
dsigma_2 = (
(1 / self.nt)
* (
-dmu.T.dot(gamma)
- gamma.T.dot(dmu)
- np.dot(gamma.T, dR.dot(gamma))
)
* self.y_std ** 2.0
)
dsigma_all.append(dsigma_2)
# Compute reduced log likelihood derivatives
grad_red[i_der] = (
-self.nt / np.log(10) * (dsigma_2 / sigma_2 + np.trace(tr) / self.nt)
)
par["dr"] = dr_all
par["tr"] = tr_all
par["dmu"] = dmu_all
par["arg"] = arg_all
par["dsigma"] = dsigma_all
par["dbeta_all"] = dbeta_all
grad_red = np.atleast_2d(grad_red).T
if self.name in ["MGP"]:
grad_red += self._reduced_log_prior(theta, grad=True)
return grad_red, par
def _reduced_likelihood_hessian(self, theta):
"""
Evaluates the reduced likelihood Hessian at a set of hyperparameters.
Parameters
----------
theta : list(n_comp), optional
- An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Returns
-------
hess : np.ndarray
Hessian values.
hess_ij: np.ndarray [nb_theta * (nb_theta + 1) / 2, 2]
- The indices i and j of the vectors in theta associated to the hessian in hess.
par: dict()
- A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or for Ordinary Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
Q, G
QR decomposition of the matrix Ft.
dr
List of all the correlation matrix derivative
tr
List of all the trace parts in the reduced likelihood derivatives
dmu
List of all the mean derivatives
arg
List of all minus_Cinv_dRdomega_gamma
dsigma
List of all sigma derivatives
"""
dred, par = self._reduced_likelihood_gradient(theta)
C = par["C"]
gamma = par["gamma"]
Q = par["Q"]
G = par["G"]
sigma_2 = par["sigma2"]
nb_theta = len(theta)
dr_all = par["dr"]
tr_all = par["tr"]
dmu_all = par["dmu"]
arg_all = par["arg"]
dsigma = par["dsigma"]
Rinv_dRdomega_gamma_all = []
Rinv_dmudomega_all = []
n_val_hess = nb_theta * (nb_theta + 1) // 2
hess_ij = np.zeros((n_val_hess, 2), dtype=np.int)
hess = np.zeros((n_val_hess, 1))
ind_1 = 0
if self.name in ["MGP"]:
log_prior = self._reduced_log_prior(theta, hessian=True)
for omega in range(nb_theta):
ind_0 = ind_1
ind_1 = ind_0 + nb_theta - omega
hess_ij[ind_0:ind_1, 0] = omega
hess_ij[ind_0:ind_1, 1] = np.arange(omega, nb_theta)
dRdomega = np.zeros((self.nt, self.nt))
dRdomega[self.ij[:, 0], self.ij[:, 1]] = dr_all[omega][:, 0]
dRdomega[self.ij[:, 1], self.ij[:, 0]] = dr_all[omega][:, 0]
dmudomega = dmu_all[omega]
Cinv_dmudomega = linalg.solve_triangular(C, dmudomega, lower=True)
Rinv_dmudomega = linalg.solve_triangular(C.T, Cinv_dmudomega)
Rinv_dmudomega_all.append(Rinv_dmudomega)
Rinv_dRdomega_gamma = linalg.solve_triangular(C.T, arg_all[omega])
Rinv_dRdomega_gamma_all.append(Rinv_dRdomega_gamma)
for i, eta in enumerate(hess_ij[ind_0:ind_1, 1]):
dRdeta = np.zeros((self.nt, self.nt))
dRdeta[self.ij[:, 0], self.ij[:, 1]] = dr_all[eta][:, 0]
dRdeta[self.ij[:, 1], self.ij[:, 0]] = dr_all[eta][:, 0]
dr_eta_omega = self._correlation_types[self.options["corr"]](
theta, self.D, grad_ind=omega, hess_ind=eta
)
dRdetadomega = np.zeros((self.nt, self.nt))
dRdetadomega[self.ij[:, 0], self.ij[:, 1]] = dr_eta_omega[:, 0]
dRdetadomega[self.ij[:, 1], self.ij[:, 0]] = dr_eta_omega[:, 0]
# Compute beta second derivatives
dRdeta_Rinv_dmudomega = np.dot(dRdeta, Rinv_dmudomega)
dmudeta = dmu_all[eta]
Cinv_dmudeta = linalg.solve_triangular(C, dmudeta, lower=True)
Rinv_dmudeta = linalg.solve_triangular(C.T, Cinv_dmudeta)
dRdomega_Rinv_dmudeta = np.dot(dRdomega, Rinv_dmudeta)
dRdeta_Rinv_dRdomega_gamma = np.dot(dRdeta, Rinv_dRdomega_gamma)
Rinv_dRdeta_gamma = linalg.solve_triangular(C.T, arg_all[eta])
dRdomega_Rinv_dRdeta_gamma = np.dot(dRdomega, Rinv_dRdeta_gamma)
dRdetadomega_gamma = np.dot(dRdetadomega, gamma)
beta_sum = (
dRdeta_Rinv_dmudomega
+ dRdomega_Rinv_dmudeta
+ dRdeta_Rinv_dRdomega_gamma
+ dRdomega_Rinv_dRdeta_gamma
- dRdetadomega_gamma
)
Qt_Cinv_beta_sum = np.dot(
Q.T, linalg.solve_triangular(C, beta_sum, lower=True)
)
dbetadetadomega = linalg.solve_triangular(G, Qt_Cinv_beta_sum)
# Compute mu second derivatives
dmudetadomega = np.dot(self.F, dbetadetadomega)
# Compute sigma2 second derivatives
sigma_arg_1 = (
-np.dot(dmudetadomega.T, gamma)
+ np.dot(dmudomega.T, Rinv_dRdeta_gamma)
+ np.dot(dmudeta.T, Rinv_dRdomega_gamma)
)
sigma_arg_2 = (
-np.dot(gamma.T, dmudetadomega)
+ np.dot(gamma.T, dRdeta_Rinv_dmudomega)
+ np.dot(gamma.T, dRdomega_Rinv_dmudeta)
)
sigma_arg_3 = np.dot(dmudeta.T, Rinv_dmudomega) + np.dot(
dmudomega.T, Rinv_dmudeta
)
sigma_arg_4_in = (
-dRdetadomega_gamma
+ dRdeta_Rinv_dRdomega_gamma
+ dRdomega_Rinv_dRdeta_gamma
)
sigma_arg_4 = np.dot(gamma.T, sigma_arg_4_in)
dsigma2detadomega = (
(1 / self.nt)
* (sigma_arg_1 + sigma_arg_2 + sigma_arg_3 + sigma_arg_4)
* self.y_std ** 2.0
)
# Compute Hessian
dreddetadomega_tr_1 = np.trace(np.dot(tr_all[eta], tr_all[omega]))
dreddetadomega_tr_2 = np.trace(
linalg.solve_triangular(
C.T, linalg.solve_triangular(C, dRdetadomega, lower=True)
)
)
dreddetadomega_arg1 = (self.nt / sigma_2) * (
dsigma2detadomega - (1 / sigma_2) * dsigma[omega] * dsigma[eta]
)
dreddetadomega = (
-(dreddetadomega_arg1 - dreddetadomega_tr_1 + dreddetadomega_tr_2)
/ self.nt
)
hess[ind_0 + i, 0] = self.nt / np.log(10) * dreddetadomega
if self.name in ["MGP"] and eta == omega:
hess[ind_0 + i, 0] += log_prior[eta]
par["Rinv_dR_gamma"] = Rinv_dRdomega_gamma_all
par["Rinv_dmu"] = Rinv_dmudomega_all
return hess, hess_ij, par
def _predict_values(self, x):
"""
Evaluates the model at a set of points.
Parameters
----------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
Returns
-------
y : np.ndarray
Evaluation point output variable values
"""
# Initialization
n_eval, n_features_x = x.shape
x = (x - self.X_offset) / self.X_scale
# Get pairwise componentwise L1-distances to the input training set
dx = differences(x, Y=self.X_norma.copy())
d = self._componentwise_distance(dx)
# Compute the correlation function
r = self._correlation_types[self.options["corr"]](
self.optimal_theta, d
).reshape(n_eval, self.nt)
y = np.zeros(n_eval)
# Compute the regression function
f = self._regression_types[self.options["poly"]](x)
# Scaled predictor
y_ = np.dot(f, self.optimal_par["beta"]) + np.dot(r, self.optimal_par["gamma"])
# Predictor
y = (self.y_mean + self.y_std * y_).ravel()
return y
def _predict_derivatives(self, x, kx):
"""
Evaluates the derivatives at a set of points.
Parameters
----------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
kx : int
The 0-based index of the input variable with respect to which derivatives are desired.
Returns
-------
y : np.ndarray
Derivative values.
"""
# Initialization
n_eval, n_features_x = x.shape
x = (x - self.X_offset) / self.X_scale
# Get pairwise componentwise L1-distances to the input training set
dx = differences(x, Y=self.X_norma.copy())
d = self._componentwise_distance(dx)
# Compute the correlation function
r = self._correlation_types[self.options["corr"]](
self.optimal_theta, d
).reshape(n_eval, self.nt)
if self.options["corr"] != "squar_exp":
raise ValueError(
"The derivative is only available for squared exponential kernel"
)
if self.options["poly"] == "constant":
df = np.zeros((1, self.nx))
elif self.options["poly"] == "linear":
df = np.zeros((self.nx + 1, self.nx))
df[1:, :] = np.eye(self.nx)
else:
raise ValueError(
"The derivative is only available for ordinary kriging or "
+ "universal kriging using a linear trend"
)
# Beta and gamma = R^-1(y-FBeta)
beta = self.optimal_par["beta"]
gamma = self.optimal_par["gamma"]
df_dx = np.dot(df.T, beta)
d_dx = x[:, kx].reshape((n_eval, 1)) - self.X_norma[:, kx].reshape((1, self.nt))
if self.name != "Kriging" and "KPLSK" not in self.name:
theta = np.sum(self.optimal_theta * self.coeff_pls ** 2, axis=1)
else:
theta = self.optimal_theta
y = (
(df_dx[kx] - 2 * theta[kx] * np.dot(d_dx * r, gamma))
* self.y_std
/ self.X_scale[kx]
)
return y
def _predict_variances(self, x):
"""
Provide uncertainty of the model at a set of points
Parameters
----------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
Returns
-------
MSE : np.ndarray
Evaluation point output variable MSE
"""
# Initialization
n_eval, n_features_x = x.shape
x = (x - self.X_offset) / self.X_scale
# Get pairwise componentwise L1-distances to the input training set
dx = differences(x, Y=self.X_norma.copy())
d = self._componentwise_distance(dx)
# Compute the correlation function
r = self._correlation_types[self.options["corr"]](
self.optimal_theta, d
).reshape(n_eval, self.nt)
C = self.optimal_par["C"]
rt = linalg.solve_triangular(C, r.T, lower=True)
u = linalg.solve_triangular(
self.optimal_par["G"].T,
np.dot(self.optimal_par["Ft"].T, rt)
- self._regression_types[self.options["poly"]](x).T,
)
A = self.optimal_par["sigma2"]
B = 1.0 - (rt ** 2.0).sum(axis=0) + (u ** 2.0).sum(axis=0)
MSE = np.einsum("i,j -> ji", A, B)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.0] = 0.0
return MSE
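# The quantity above is the classical universal-kriging prediction variance
#     MSE(x) = sigma2 * (1 - r^T R^{-1} r + u^T u),
#     u      = G^{-T} (F^T R^{-1} r - f(x))
# evaluated per output dimension by the einsum and clipped at zero.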
def _predict_variance_derivatives(self, x):
"""
Provide the derivative of the variance of the model at a set of points
Parameters
----------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
Returns
-------
derived_variance: np.ndarray
The jacobian of the variance of the kriging model
"""
# Initialization
n_eval, n_features_x = x.shape
x = (x - self.X_offset) / self.X_scale
theta = self.optimal_theta
# Get pairwise componentwise L1-distances to the input training set
dx = differences(x, Y=self.X_norma.copy())
d = self._componentwise_distance(dx)
dd = self._componentwise_distance(
dx, theta=self.optimal_theta, return_derivative=True
)
sigma2 = self.optimal_par["sigma2"]
cholesky_k = self.optimal_par["C"]
derivative_dic = {"dx": dx, "dd": dd}
r, dr = self._correlation_types[self.options["corr"]](
theta, d, derivative_params=derivative_dic
)
rho1 = linalg.solve_triangular(cholesky_k, r, lower=True)
invKr = linalg.solve_triangular(cholesky_k.T, rho1)
p1 = np.dot(dr.T, invKr).T
p2 = np.dot(invKr.T, dr)
f_x = self._regression_types[self.options["poly"]](x).T
F = self.F
rho2 = linalg.solve_triangular(cholesky_k, F, lower=True)
invKF = linalg.solve_triangular(cholesky_k.T, rho2)
A = f_x.T - np.dot(r.T, invKF)
B = np.dot(F.T, invKF)
rho3 = linalg.cholesky(B, lower=True)
invBAt = linalg.solve_triangular(rho3, A.T, lower=True)
D = linalg.solve_triangular(rho3.T, invBAt)
if self.options["poly"] == "constant":
df = np.zeros((1, self.nx))
elif self.options["poly"] == "linear":
df = np.zeros((self.nx + 1, self.nx))
df[1:, :] = np.eye(self.nx)
else:
raise ValueError(
"The derivative is only available for ordinary kriging or "
+ "universal kriging using a linear trend"
)
dA = df.T - np.dot(dr.T, invKF)
p3 = np.dot(dA, D).T
p4 = np.dot(D.T, dA.T)
prime = -p1 - p2 + p3 + p4
derived_variance = []
x_std = np.resize(self.X_scale, self.nx)
for i in range(len(x_std)):
derived_variance.append(sigma2 * prime.T[i] / x_std[i])
return np.array(derived_variance).T
def _optimize_hyperparam(self, D):
"""
This function optimizes the hyperparameters of the Gaussian Process model by maximizing its reduced likelihood.
Parameters
----------
D: np.ndarray [n_obs * (n_obs - 1) / 2, dim]
- The componentwise cross-spatial-correlation-distance between the
vectors in X.
Returns
-------
best_optimal_rlf_value: real
- The value of the reduced likelihood function associated to the
best autocorrelation parameters theta.
best_optimal_par: dict()
- A dictionary containing the requested Gaussian Process model
parameters.
best_optimal_theta: list(n_comp) or list(dim)
- The best hyperparameters found by the optimization.
"""
# reinitialize optimization best values
self.best_iteration_fail = None
self._thetaMemory = None
# Initialize the hyperparameter-optimization
if self.name in ["MGP"]:
def minus_reduced_likelihood_function(theta):
res = -self._reduced_likelihood_function(theta)[0]
return res
def grad_minus_reduced_likelihood_function(theta):
grad = -self._reduced_likelihood_gradient(theta)[0]
return grad
else:
def minus_reduced_likelihood_function(log10t):
return -self._reduced_likelihood_function(theta=10.0 ** log10t)[0]
def grad_minus_reduced_likelihood_function(log10t):
log10t_2d = np.atleast_2d(log10t).T
res = (
-np.log(10.0)
* (10.0 ** log10t_2d)
* (self._reduced_likelihood_gradient(10.0 ** log10t_2d)[0])
)
return res
limit, _rhobeg = 10 * len(self.options["theta0"]), 0.5
exit_function = False
if "KPLSK" in self.name:
n_iter = 1
else:
n_iter = 0
for ii in range(n_iter, -1, -1):
(
best_optimal_theta,
best_optimal_rlf_value,
best_optimal_par,
constraints,
) = (
[],
[],
[],
[],
)
bounds_hyp = []
self.theta0 = deepcopy(self.options["theta0"])
for i in range(len(self.theta0)):
# In practice, in 1D and for X in [0,1], theta^{-2} in [1e-2,infty),
# i.e. theta in (0,1e1], is a good choice to avoid overfitting.
# By standardising X in R, X_norm = (X-X_mean)/X_std, then
# X_norm in [-1,1] when considering one-std intervals. This leads
# to theta in (0,2e1]
theta_bounds = self.options["theta_bounds"]
if self.theta0[i] < theta_bounds[0] or self.theta0[i] > theta_bounds[1]:
self.theta0[i] = np.random.rand()
self.theta0[i] = (
self.theta0[i] * (theta_bounds[1] - theta_bounds[0])
+ theta_bounds[0]
)
print(
"Warning: theta0 is out the feasible bounds. A random initialisation is used instead."
)
if self.name in ["MGP"]: # to be discussed with R. Priem
constraints.append(lambda theta, i=i: theta[i] + theta_bounds[1])
constraints.append(lambda theta, i=i: theta_bounds[1] - theta[i])
bounds_hyp.append((-theta_bounds[1], theta_bounds[1]))
else:
log10t_bounds = np.log10(theta_bounds)
constraints.append(lambda log10t, i=i: log10t[i] - log10t_bounds[0])
constraints.append(lambda log10t, i=i: log10t_bounds[1] - log10t[i])
bounds_hyp.append(log10t_bounds)
if self.name in ["MGP"]:
theta0_rand = m_norm.rvs(
self.options["prior"]["mean"] * len(self.theta0),
self.options["prior"]["var"],
1,
)
theta0 = self.theta0
else:
theta0_rand = np.random.rand(len(self.theta0))
theta0_rand = (
theta0_rand * (log10t_bounds[1] - log10t_bounds[0])
+ log10t_bounds[0]
)
theta0 = np.log10(self.theta0)
self.D = self._componentwise_distance(D, opt=ii)
# Initialization
k, incr, stop, best_optimal_rlf_value, max_retry = 0, 0, 1, -1e20, 10
while k < stop:
# Use specified starting point as first guess
self.noise0 = np.array(self.options["noise0"])
noise_bounds = self.options["noise_bounds"]
if self.options["eval_noise"] and not self.options["use_het_noise"]:
self.noise0[self.noise0 == 0.0] = noise_bounds[0]
for i in range(len(self.noise0)):
if (
self.noise0[i] < noise_bounds[0]
or self.noise0[i] > noise_bounds[1]
):
self.noise0[i] = noise_bounds[0]
print(
"Warning: noise0 is out the feasible bounds. The lowest possible value is used instead."
)
theta0 = np.concatenate(
[theta0, np.log10(np.array([self.noise0]).flatten())]
)
theta0_rand = np.concatenate(
[
theta0_rand,
np.log10(np.array([self.noise0]).flatten()),
]
)
for i in range(len(self.noise0)):
noise_bounds = np.log10(noise_bounds)
constraints.append(
lambda log10t: log10t[i + len(self.theta0)]
- noise_bounds[0]
)
constraints.append(
lambda log10t: noise_bounds[1]
- log10t[i + len(self.theta0)]
)
bounds_hyp.append(noise_bounds)
try:
if self.options["hyper_opt"] == "Cobyla":
optimal_theta_res = optimize.minimize(
minus_reduced_likelihood_function,
theta0,
constraints=[
{"fun": con, "type": "ineq"} for con in constraints
],
method="COBYLA",
options={"rhobeg": _rhobeg, "tol": 1e-4, "maxiter": limit},
)
optimal_theta_res_2 = optimal_theta_res
elif self.options["hyper_opt"] == "TNC":
optimal_theta_res = optimize.minimize(
minus_reduced_likelihood_function,
theta0,
method="TNC",
jac=grad_minus_reduced_likelihood_function,
bounds=bounds_hyp,
options={"maxiter": 100},
)
optimal_theta_res_2 = optimize.minimize(
minus_reduced_likelihood_function,
theta0_rand,
method="TNC",
jac=grad_minus_reduced_likelihood_function,
bounds=bounds_hyp,
options={"maxiter": 100},
)
if optimal_theta_res["fun"] > optimal_theta_res_2["fun"]:
optimal_theta_res = optimal_theta_res_2
optimal_theta = optimal_theta_res["x"]
if self.name not in ["MGP"]:
optimal_theta = 10 ** optimal_theta
optimal_rlf_value, optimal_par = self._reduced_likelihood_function(
theta=optimal_theta
)
# Compare the new optimizer to the best previous one
if k > 0:
if np.isinf(optimal_rlf_value):
stop += 1
if incr != 0:
return
if stop > max_retry:
raise ValueError(
"%d attempts to train the model failed" % max_retry
)
else:
if optimal_rlf_value >= self.best_iteration_fail:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
if (
self.best_iteration_fail
> best_optimal_rlf_value
):
best_optimal_theta = self._thetaMemory
(
best_optimal_rlf_value,
best_optimal_par,
) = self._reduced_likelihood_function(
theta=best_optimal_theta
)
else:
if np.isinf(optimal_rlf_value):
stop += 1
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
k += 1
except ValueError as ve:
# raise ve
# Retry while the budget for ill-conditioned matrices (nb_ill_matrix) is not exhausted
if self.nb_ill_matrix > 0:
self.nb_ill_matrix -= 1
k += 1
stop += 1
# At least one objective function evaluation has been performed
if self.best_iteration_fail is not None:
if self.best_iteration_fail > best_optimal_rlf_value:
best_optimal_theta = self._thetaMemory
(
best_optimal_rlf_value,
best_optimal_par,
) = self._reduced_likelihood_function(
theta=best_optimal_theta
)
# Optimization failed
elif best_optimal_par == []:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
# Break the while loop
else:
k = stop + 1
print("fmin_cobyla failed but the best value is retained")
if "KPLSK" in self.name:
if self.options["eval_noise"]:
# best_optimal_theta contains [theta, noise] if eval_noise = True
theta = best_optimal_theta[:-1]
else:
# best_optimal_theta contains [theta] if eval_noise = False
theta = best_optimal_theta
if exit_function:
return best_optimal_rlf_value, best_optimal_par, best_optimal_theta
if self.options["corr"] == "squar_exp":
self.options["theta0"] = (theta * self.coeff_pls ** 2).sum(1)
else:
self.options["theta0"] = (theta * np.abs(self.coeff_pls)).sum(1)
self.options["n_comp"] = int(self.nx)
limit = 10 * self.options["n_comp"]
self.best_iteration_fail = None
exit_function = True
return best_optimal_rlf_value, best_optimal_par, best_optimal_theta
def _check_param(self):
"""
This function checks some parameters of the model.
"""
# FIXME: _check_param should be overridden in the corresponding subclasses
if self.name in ["KPLS", "KPLSK", "GEKPLS"]:
d = self.options["n_comp"]
else:
d = self.nx
if self.options["corr"] == "act_exp":
raise ValueError("act_exp correlation function must be used with MGP")
if self.name in ["KPLS", "GEKPLS"]:
if self.options["corr"] not in ["squar_exp", "abs_exp"]:
raise ValueError(
"KPLS only works with a squared exponential or an absolute exponential kernel"
)
elif self.name in ["KPLSK"]:
if self.options["corr"] not in ["squar_exp"]:
raise ValueError(
"KPLSK only works with a squared exponential kernel (until we prove the contrary)"
)
if len(self.options["theta0"]) != d:
if len(self.options["theta0"]) == 1:
self.options["theta0"] *= np.ones(d)
else:
raise ValueError(
"the length of theta0 (%s) should be equal to the number of dim (%s)."
% (len(self.options["theta0"]), d)
)
if self.options["use_het_noise"] and not self.options["eval_noise"]:
if len(self.options["noise0"]) != self.nt:
if len(self.options["noise0"]) == 1:
self.options["noise0"] *= np.ones(self.nt)
else:
raise ValueError(
"for the heteroscedastic case, the length of noise0 (%s) should be equal to the number of observations (%s)."
% (len(self.options["noise0"]), self.nt)
)
if not self.options["use_het_noise"]:
if len(self.options["noise0"]) != 1:
raise ValueError(
"for the homoscedastic case, the length of noise0 (%s) should be equal to one."
% (len(self.options["noise0"]))
)
if self.supports["training_derivatives"]:
if not (1 in self.training_points[None]):
raise Exception(
"Derivative values are needed for using the GEKPLS model."
)
def _check_F(self, n_samples_F, p):
"""
This function checks the F-parameters of the model.
"""
if n_samples_F != self.nt:
raise Exception(
"Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model."
)
if p > n_samples_F:
raise Exception(
(
"Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d."
)
% (self.nt, p)
)
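# Editor's note: the hyper-parameter search above optimises over log10(theta)
# so that positivity is automatic and the bounds become simple inequality
# constraints. The standalone sketch below is not part of the original module;
# the toy objective and all names in it are hypothetical. It only illustrates
# the same reparameterisation trick with scipy's COBYLA, mirroring the
# constraint and option setup used in the optimiser above.
def _log10_reparam_demo():
    import numpy as np
    from scipy import optimize

    def toy_neg_likelihood(theta):
        # Stand-in for -_reduced_likelihood_function(theta); minimum at theta = 1.
        return float(np.sum(np.log10(theta) ** 2) + 1.0)

    theta_bounds = (1e-6, 2e1)
    log10t_bounds = np.log10(theta_bounds)

    def objective(log10t):
        # Optimise in log10 space, evaluate the objective in theta space.
        return toy_neg_likelihood(10.0 ** np.asarray(log10t))

    constraints = [
        {"type": "ineq", "fun": lambda t, lo=log10t_bounds[0]: t[0] - lo},
        {"type": "ineq", "fun": lambda t, hi=log10t_bounds[1]: hi - t[0]},
    ]
    res = optimize.minimize(
        objective,
        x0=np.array([np.log10(0.5)]),
        method="COBYLA",
        constraints=constraints,
        options={"rhobeg": 0.5, "tol": 1e-4, "maxiter": 100},
    )
    return 10.0 ** res.x  # back-transform to theta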
|
|
import datetime
import mock
import pytest
import pandas as pd
import synapseclient
from genie.clinical import clinical
def createMockTable(dataframe):
table = mock.create_autospec(synapseclient.table.CsvFileTable)
table.asDataFrame.return_value = dataframe
return(table)
def table_query_results(*args):
return(table_query_results_map[args])
no_nan = pd.DataFrame(dict(
CODE=[1, 2, 3, 4, 99],
CBIO_LABEL=['Test', 'Why', 'foo', 'Me', 'Unknown'],
DESCRIPTION=['non', 'asdf', 'asdf', 'asdff', 'asdfasdf']))
sexdf = pd.DataFrame(dict(
CODE=[1, 2, 99],
CBIO_LABEL=['Male', 'Female', 'Unknown'],
DESCRIPTION=['Male', 'Female', 'Not coded']))
table_query_results_map = {
("SELECT * FROM syn7434222",): createMockTable(sexdf),
("SELECT * FROM syn7434236",): createMockTable(no_nan),
("SELECT * FROM syn7434242",): createMockTable(no_nan),
("SELECT * FROM syn7434273",): createMockTable(no_nan)}
syn = mock.create_autospec(synapseclient.Synapse)
syn.tableQuery.side_effect = table_query_results
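# Editor's note: illustrative, non-collected helper (not part of the original
# tests) making explicit how the mocked tableQuery resolves: side_effect
# forwards the positional arguments to table_query_results, which looks the
# query string up in table_query_results_map and returns a mocked
# CsvFileTable whose asDataFrame() yields the registered DataFrame.
def _demo_table_query_mock():
    sex_table = syn.tableQuery("SELECT * FROM syn7434222")
    assert sex_table.asDataFrame().equals(sexdf)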
clin_class = clinical(syn, "SAGE")
# oncotree_url = \
# 'http://oncotree.mskcc.org/api/tumor_types.txt?version=oncotree_latest_stable'
json_oncotreeurl = \
"http://oncotree.mskcc.org/api/tumorTypes/tree?version=oncotree_2017_06_21"
onco_map_dict = {
"AMPCA": {
'CANCER_TYPE': "Ampullary Cancer",
'CANCER_TYPE_DETAILED': "Ampullary Carcinoma",
'ONCOTREE_PRIMARY_NODE': "AMPULLA_OF_VATER",
'ONCOTREE_SECONDARY_NODE': "AMPCA"},
"TESTIS": {
'CANCER_TYPE': "Testicular Cancer, NOS",
'CANCER_TYPE_DETAILED': "Testis",
'ONCOTREE_PRIMARY_NODE': "TESTIS",
'ONCOTREE_SECONDARY_NODE': ''},
"UCEC": {
'CANCER_TYPE': "Endometrial Cancer",
'CANCER_TYPE_DETAILED': "Endometrial Carcinoma",
'ONCOTREE_PRIMARY_NODE': "UTERUS",
'ONCOTREE_SECONDARY_NODE': "UCEC"}}
def test_filetype():
assert clin_class._fileType == "clinical"
@pytest.fixture(params=[
(["foo"]),
(["foo", "data_clinical_supp_sample_SAGE.txt"])
])
def filename_fileformat_map(request):
return request.param
def test_incorrect_validatefilename(filename_fileformat_map):
filepath_list = filename_fileformat_map
with pytest.raises(AssertionError):
clin_class.validateFilename(filepath_list)
def test_correct_validatefilename():
assert clin_class.validateFilename(
["data_clinical_supp_SAGE.txt"]) == "clinical"
assert clin_class.validateFilename(
["data_clinical_supp_sample_SAGE.txt",
"data_clinical_supp_patient_SAGE.txt"]) == "clinical"
def test_patient_fillvs__process():
'''
Test filling out of vital status values
This will be removed once vital status values are required
- capitalized column headers
- remapping of values
- Fill out CENTER column
- Append GENIE-CENTER-..
'''
expected_patientdf = pd.DataFrame(dict(
PATIENT_ID=["GENIE-SAGE-ID1", "GENIE-SAGE-ID2", "GENIE-SAGE-ID3",
"GENIE-SAGE-ID4", "GENIE-SAGE-ID5"],
SEX=['Male', 'Female', 'Male', 'Female', 'Unknown'],
PRIMARY_RACE=['Test', 'Why', 'foo', 'Me', 'Unknown'],
SECONDARY_RACE=['Test', 'Why', 'foo', 'Me', 'Unknown'],
TERTIARY_RACE=['Test', 'Why', 'foo', 'Me', 'Unknown'],
ETHNICITY=['Test', 'Why', 'foo', 'Me', 'Unknown'],
BIRTH_YEAR=[1990, 1990, 1990, 1990, 1990],
CENTER=["SAGE", "SAGE", "SAGE", "SAGE", "SAGE"],
INT_DOD=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
INT_CONTACT=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
DEAD=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
YEAR_DEATH=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
YEAR_CONTACT=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected']))
patientdf = pd.DataFrame(dict(
PATIENT_Id=["ID1", "ID2", "ID3", "ID4", "ID5"],
sex=[1, 2, 1, 2, 99],
PRIMARY_RACE=[1, 2, 3, 4, 99],
Secondary_RACE=[1, 2, 3, 4, 99],
TERTIARY_RACE=[1, 2, 3, 4, 99],
ETHNICITY=[1, 2, 3, 4, 99],
BIRTH_YEAR=[1990, 1990, 1990, 1990, 1990],
CENTER=["FOO", "FOO", "FOO", "FOO", "FOO"]))
patient_cols = [
"PATIENT_ID", "SEX", "PRIMARY_RACE", "SECONDARY_RACE",
"TERTIARY_RACE", "ETHNICITY", "BIRTH_YEAR", "CENTER",
'YEAR_CONTACT', 'YEAR_DEATH', 'INT_CONTACT', 'INT_DOD', 'DEAD']
clinical_template = pd.DataFrame(columns=patient_cols)
new_patientdf = clin_class._process(patientdf, clinical_template)
assert new_patientdf.columns.isin(expected_patientdf.columns).all()
assert expected_patientdf.equals(new_patientdf[expected_patientdf.columns])
def test_patient_lesscoltemplate__process():
'''
Test that the scope excludes values.
Only those values defined by the scope will be written out
'''
expected_patientdf = pd.DataFrame(dict(
PATIENT_ID=["GENIE-SAGE-ID1", "GENIE-SAGE-ID2", "GENIE-SAGE-ID3",
"GENIE-SAGE-ID4", "GENIE-SAGE-ID5"],
SEX=['Male', 'Female', 'Male', 'Female', 'Unknown'],
PRIMARY_RACE=['Test', 'Why', 'foo', 'Me', 'Unknown'],
SECONDARY_RACE=['Test', 'Why', 'foo', 'Me', 'Unknown'],
TERTIARY_RACE=['Test', 'Why', 'foo', 'Me', 'Unknown'],
ETHNICITY=['Test', 'Why', 'foo', 'Me', 'Unknown'],
BIRTH_YEAR=[1990, 1990, 1990, 1990, 1990],
CENTER=["SAGE", "SAGE", "SAGE", "SAGE", "SAGE"],
INT_DOD=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
INT_CONTACT=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
DEAD=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
YEAR_DEATH=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
YEAR_CONTACT=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected']))
# TEST patient processing
patientdf = pd.DataFrame(dict(
PATIENT_Id=["ID1", "ID2", "ID3", "ID4", "ID5"],
sex=[1, 2, 1, 2, 99],
PRIMARY_RACE=[1, 2, 3, 4, 99],
Secondary_RACE=[1, 2, 3, 4, 99],
TERTIARY_RACE=[1, 2, 3, 4, 99],
ETHNICITY=[1, 2, 3, 4, 99],
BIRTH_YEAR=[1990, 1990, 1990, 1990, 1990],
CENTER=["FOO", "FOO", "FOO", "FOO", "FOO"]))
patient_cols = [
"PATIENT_ID", "SEX", "PRIMARY_RACE", "SECONDARY_RACE",
"TERTIARY_RACE", "ETHNICITY", "BIRTH_YEAR", "CENTER",
'YEAR_CONTACT', 'YEAR_DEATH']
clinical_template = pd.DataFrame(columns=patient_cols)
new_patientdf = clin_class._process(patientdf, clinical_template)
assert new_patientdf.columns.isin(patient_cols).all()
assert expected_patientdf[patient_cols].equals(new_patientdf[patient_cols])
def test_patient_fillcols__process():
'''
Filling in of RACE/ETHNICITY columns as some centers don't require them
'''
expected_patientdf = pd.DataFrame(dict(
PATIENT_ID=["GENIE-SAGE-ID1", "GENIE-SAGE-ID2", "GENIE-SAGE-ID3",
"GENIE-SAGE-ID4", "GENIE-SAGE-ID5"],
SEX=['Male', 'Female', 'Male', 'Female', 'Unknown'],
PRIMARY_RACE=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
SECONDARY_RACE=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
TERTIARY_RACE=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
ETHNICITY=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
BIRTH_YEAR=[1990, 1990, 1990, 1990, 1990],
CENTER=["SAGE", "SAGE", "SAGE", "SAGE", "SAGE"],
INT_DOD=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
INT_CONTACT=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
DEAD=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
YEAR_DEATH=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected'],
YEAR_CONTACT=['Not Collected', 'Not Collected', 'Not Collected',
'Not Collected', 'Not Collected']))
# TEST patient processing
# Clinical file headers are capitalized prior to processing
patientdf = pd.DataFrame(dict(
PATIENT_Id=["ID1", "ID2", "ID3", "ID4", "ID5"],
sex=[1, 2, 1, 2, 99],
BIRTH_YEAR=[1990, 1990, 1990, 1990, 1990],
CENTER=["FOO", "FOO", "FOO", "FOO", "FOO"]))
patient_cols = [
"PATIENT_ID", "SEX", "PRIMARY_RACE", "SECONDARY_RACE",
"TERTIARY_RACE", "ETHNICITY", "BIRTH_YEAR", "CENTER",
'YEAR_CONTACT', 'YEAR_DEATH', 'INT_CONTACT', 'INT_DOD', 'DEAD']
clinical_template = pd.DataFrame(columns=patient_cols)
new_patientdf = clin_class._process(patientdf, clinical_template)
assert new_patientdf.columns.isin(expected_patientdf.columns).all()
assert expected_patientdf.equals(new_patientdf[expected_patientdf.columns])
def test_patient_vs__process():
'''
Test vital status columns being propagated with the same data
'''
expected_patientdf = pd.DataFrame(dict(
PATIENT_ID=["GENIE-SAGE-ID1", "GENIE-SAGE-ID2", "GENIE-SAGE-ID3",
"GENIE-SAGE-ID4", "GENIE-SAGE-ID5"],
SEX=['Male', 'Female', 'Male', 'Female', 'Unknown'],
PRIMARY_RACE=['Test', 'Why', 'foo', 'Me', 'Unknown'],
SECONDARY_RACE=['Test', 'Why', 'foo', 'Me', 'Unknown'],
TERTIARY_RACE=['Test', 'Why', 'foo', 'Me', 'Unknown'],
ETHNICITY=['Test', 'Why', 'foo', 'Me', 'Unknown'],
BIRTH_YEAR=[1990, 1990, 1990, 1990, 1990],
CENTER=["SAGE", "SAGE", "SAGE", "SAGE", "SAGE"],
YEAR_DEATH=["Unknown", "Not Collected", "Not Applicable", 1990, 1990],
YEAR_CONTACT=["Unknown", "Not Collected", 1990, 1990, 1990],
INT_CONTACT=['>32485', '<6570', 'Unknown', 'Not Collected', 2000],
INT_DOD=['>32485', '<6570', 'Unknown', 'Not Collected',
'Not Applicable'],
DEAD=[True, False, 'Unknown', 'Not Collected', True]))
# TEST patient processing
# Clinical file headers are capitalized prior to processing
patientdf = pd.DataFrame(dict(
PATIENT_Id=["ID1", "ID2", "ID3", "ID4", "ID5"],
sex=[1, 2, 1, 2, 99],
PRIMARY_RACE=[1, 2, 3, 4, 99],
Secondary_RACE=[1, 2, 3, 4, 99],
TERTIARY_RACE=[1, 2, 3, 4, 99],
ETHNICITY=[1, 2, 3, 4, 99],
BIRTH_YEAR=[1990, 1990, 1990, 1990, 1990],
CENTER=["FOO", "FOO", "FOO", "FOO", "FOO"],
YEAR_DEATH=["Unknown", "Not Collected", "Not Applicable",
1990, 1990],
YEAR_CONTACT=["Unknown", "Not Collected", 1990, 1990, 1990],
INT_CONTACT=['>32485', '<6570', 'Unknown', 'Not Collected', 2000],
INT_DOD=['>32485', '<6570', 'Unknown', 'Not Collected',
'Not Applicable'],
DEAD=[True, False, 'Unknown', 'Not Collected', True]))
patient_cols = [
"PATIENT_ID", "SEX", "PRIMARY_RACE", "SECONDARY_RACE",
"TERTIARY_RACE", "ETHNICITY", "BIRTH_YEAR", "CENTER",
'YEAR_CONTACT', 'YEAR_DEATH', 'INT_CONTACT', 'INT_DOD', 'DEAD']
clinical_template = pd.DataFrame(columns=patient_cols)
new_patientdf = clin_class._process(patientdf, clinical_template)
assert expected_patientdf.equals(new_patientdf[expected_patientdf.columns])
def test_sample__process():
'''
Test sample processing
- column headers are capitalized
- SEQ_DATE is normalized (Mon-YYYY, Release)
- Allow UNKNOWN oncotree value
- Add on GENIE-CENTER-...
- Remapping of SAMPLE_TYPE/SAMPLE_TYPE_DETAILED value
- SEQ_YEAR from SEQ_DATE, nan if SEQ_DATE is Release
'''
expected_sampledf = pd.DataFrame(dict(
SAMPLE_ID=["GENIE-SAGE-ID1-1", "GENIE-SAGE-ID2-1", "GENIE-SAGE-ID3-1",
"GENIE-SAGE-ID4-1", "GENIE-SAGE-ID5-1"],
PATIENT_ID=["GENIE-SAGE-ID1", "GENIE-SAGE-ID2", "GENIE-SAGE-ID3",
"GENIE-SAGE-ID4", "GENIE-SAGE-ID5"],
AGE_AT_SEQ_REPORT=[100000, 100000, 100000, 100000, 100000],
ONCOTREE_CODE=['AMPCA', 'UNKNOWN', 'AMPCA', 'AMPCA', 'AMPCA'],
SAMPLE_TYPE=['Test', 'Why', 'foo', 'Me', 'Me'],
CENTER=["SAGE", "SAGE", "SAGE", "SAGE", "SAGE"],
SAMPLE_TYPE_DETAILED=['non', 'asdf', 'asdf', 'asdff', 'asdff'],
SEQ_ASSAY_ID=['SAGE-1', 'SAGE-1', 'SAGE-1', 'SAGE-1', 'SAGE-1'],
SEQ_DATE=['Jan-2012', 'Apr-2013', 'Jul-2014', 'Oct-2015', 'Release'],
SEQ_YEAR=[2012, 2013, 2014, 2015, float('nan')]))
sample_cols = [
"SAMPLE_ID", "PATIENT_ID", "AGE_AT_SEQ_REPORT", "ONCOTREE_CODE",
"SAMPLE_TYPE", "SEQ_ASSAY_ID", 'SEQ_DATE', 'SAMPLE_TYPE_DETAILED',
'SEQ_YEAR']
clinical_template = pd.DataFrame(columns=sample_cols)
# patient = False
sampledf = pd.DataFrame(dict(
SAMPLE_ID=["ID1-1", "ID2-1", "ID3-1", "ID4-1", "ID5-1"],
PATIENT_ID=["ID1", "ID2", "ID3", "ID4", "ID5"],
Age_AT_SEQ_REPORT=[100000, 100000, 100000, 100000, 100000],
ONCOTree_CODE=['AMPCA', ' UNKNOWN', 'AMPCA', 'AMPCA', 'AMPCA'],
SAMPLE_TYPE=[1, 2, 3, 4, 4],
SEQ_ASSAY_ID=['SAGE-1', 'SAGE-1', 'SAGE-1', 'SAGE-1', 'SAGE-1'],
SEQ_DATE=['Jan-2012', 'Apr-2013', 'JUL-2014', 'Oct-2015', 'release']))
new_sampledf = clin_class._process(sampledf, clinical_template)
assert new_sampledf.columns.isin(expected_sampledf.columns).all()
assert expected_sampledf.equals(new_sampledf[expected_sampledf.columns])
def test_perfect__validate():
'''
Test perfect validation
'''
patientdf = pd.DataFrame(dict(
PATIENT_ID=["ID1", "ID2", "ID3", "ID4", "ID5"],
SEX=[1, 2, 1, 2, 99],
PRIMARY_RACE=[1, 2, 3, 4, 99],
SECONDARY_RACE=[1, 2, 3, 4, 99],
TERTIARY_RACE=[1, 2, 3, 4, 99],
ETHNICITY=[1, 2, 3, 4, 99],
BIRTH_YEAR=[1222, "Unknown", 1920, 1990, 1990],
CENTER=["FOO", "FOO", "FOO", "FOO", "FOO"],
YEAR_DEATH=["Unknown", "Not Collected", "Not Applicable", 1990, 1990],
YEAR_CONTACT=["Unknown", "Not Collected", 1990, 1990, 1990],
INT_CONTACT=['>32485', '<6570', 'Unknown', 'Not Collected', 2000],
INT_DOD=['>32485', '<6570', 'Unknown',
'Not Collected', 'Not Applicable'],
DEAD=[True, False, 'Unknown', 'Not Collected', True]))
sampledf = pd.DataFrame(dict(
SAMPLE_ID=["ID1-1", "ID2-1", "ID3-1", "ID4-1", "ID5-1"],
PATIENT_ID=["ID1", "ID2", "ID3", "ID4", "ID5"],
AGE_AT_SEQ_REPORT=[100000, "Unknown", 20000, 20000, 100000],
ONCOTREE_CODE=['AMPCA', 'AMPCA', 'Unknown', 'AMPCA', 'AMPCA'],
SAMPLE_TYPE=[1, 2, 3, 4, 4],
SEQ_ASSAY_ID=['SAGE-1-1', 'SAGE-SAGE-1', 'SAGE-1', 'SAGE-1', 'SAGE-1'],
SEQ_DATE=['Jan-2013', 'ApR-2013', 'Jul-2013', 'Oct-2013', 'release']))
clinicaldf = patientdf.merge(sampledf, on="PATIENT_ID")
with mock.patch(
"genie.process_functions.get_oncotree_code_mappings",
return_value=onco_map_dict) as mock_get_onco_map:
error, warning = clin_class._validate(clinicaldf, json_oncotreeurl)
mock_get_onco_map.called_once_with(json_oncotreeurl)
assert error == ""
assert warning == ""
def test_nonull__validate():
'''
Test that no null values are allowed in the clinical dataframe
'''
patientdf = pd.DataFrame(dict(
PATIENT_ID=["ID1", "ID2", "ID3", "ID4", "ID5"],
SEX=[1, 2, 1, 2, float('nan')],
PRIMARY_RACE=[1, 2, 3, 4, float('nan')],
SECONDARY_RACE=[1, 2, 3, 4, float('nan')],
TERTIARY_RACE=[1, 2, 3, 4, float('nan')],
ETHNICITY=[1, 2, 3, 4, float('nan')],
BIRTH_YEAR=[float('nan'), "Unknown", 1920, 1990, 1990],
CENTER=["FOO", "FOO", "FOO", "FOO", "FOO"],
YEAR_DEATH=["Unknown", "Not Collected", float('nan'), 1990, 1990],
YEAR_CONTACT=["Unknown", "Not Collected", float('nan'), 1990, 1990],
INT_CONTACT=['>32485', '<6570', 'Unknown', float('nan'), 2000],
INT_DOD=['>32485', '<6570', 'Unknown',
'Not Collected', float('nan')],
DEAD=[True, False, float('nan'), 'Not Collected', True]))
sampledf = pd.DataFrame(dict(
SAMPLE_ID=["ID1-1", "ID2-1", "ID3-1", "ID4-1", "ID5-1"],
PATIENT_ID=["ID1", "ID2", "ID3", "ID4", "ID5"],
AGE_AT_SEQ_REPORT=[100000, "Unknown", 20000, float('nan'), 100000],
ONCOTREE_CODE=['AMPCA', 'AMPCA', 'Unknown', 'AMPCA', 'AMPCA'],
SAMPLE_TYPE=[1, 2, 3, 4, float('nan')],
SEQ_ASSAY_ID=['SAGE-1-1', 'SAGE-SAGE-1', 'SAGE-1', 'SAGE-1', 'SAGE-1'],
SEQ_DATE=['Jan-2013', 'ApR-2013', 'Jul-2013', 'Oct-2013', 'release']))
clinicaldf = patientdf.merge(sampledf, on="PATIENT_ID")
with mock.patch(
"genie.process_functions.get_oncotree_code_mappings",
return_value=onco_map_dict) as mock_get_onco_map:
error, warning = clin_class._validate(clinicaldf, json_oncotreeurl)
mock_get_onco_map.called_once_with(json_oncotreeurl)
expected_errors = (
"Sample Clinical File: Please double check your "
"AGE_AT_SEQ_REPORT. It must be an integer or 'Unknown'.\n"
"Sample Clinical File: Please double check your SAMPLE_TYPE "
"column. This column must only be these values: 1, 2, 3, 4, 99\n"
"Patient Clinical File: Please double check your BIRTH_YEAR "
"column, it must be an integer in YYYY format > {year} or "
"'Unknown'.\n"
"Patient Clinical File: Please double check your YEAR_DEATH "
"column, it must be an integer in YYYY format, 'Unknown', "
"'Not Applicable' or 'Not Collected'.\n"
"Patient Clinical File: Please double check your YEAR_CONTACT "
"column, it must be an integer in YYYY format, 'Unknown' or "
"'Not Collected'.\n"
"Patient Clinical File: Please double check your INT_CONTACT "
"column, it must be an integer, '>32485', '<6570', 'Unknown' "
"or 'Not Collected'.\n"
"Patient Clinical File: Please double check your INT_DOD "
"column, it must be an integer, '>32485', '<6570', 'Unknown', "
"'Not Collected' or 'Not Applicable'.\n"
"Patient Clinical File: Please double check your DEAD column, "
"it must be True, False, 'Unknown' or 'Not Collected'.\n"
"Patient Clinical File: Please double check your PRIMARY_RACE "
"column. This column must only be these values: 1, 2, 3, 4, 99\n"
"Patient Clinical File: Please double check your SECONDARY_RACE "
"column. This column must only be these values: 1, 2, 3, 4, 99\n"
"Patient Clinical File: Please double check your TERTIARY_RACE "
"column. This column must only be these values: 1, 2, 3, 4, 99\n"
"Patient Clinical File: Please double check your SEX column. "
"This column must only be these values: 1, 2, 99\n"
"Patient Clinical File: Please double check your ETHNICITY "
"column. This column must only be these values: 1, 2, 3, 4, 99\n")
assert error == expected_errors.format(year=datetime.datetime.utcnow().year)
assert warning == ""
def test_missingcols__validate():
'''
Test for missing column errors
'''
clinicaldf = pd.DataFrame()
with mock.patch(
"genie.process_functions.get_oncotree_code_mappings",
return_value=onco_map_dict) as mock_get_onco_map:
error, warning = clin_class._validate(clinicaldf, json_oncotreeurl)
mock_get_onco_map.called_once_with(json_oncotreeurl)
expected_errors = (
"Sample Clinical File: Must have SAMPLE_ID column.\n"
"Patient Clinical File: Must have PATIENT_ID column.\n"
"Sample Clinical File: Must have AGE_AT_SEQ_REPORT column.\n"
"Sample Clinical File: Must have ONCOTREE_CODE column.\n"
"Sample Clinical File: Must have SAMPLE_TYPE column.\n"
"Sample Clinical File: Must have SEQ_ASSAY_ID column.\n"
"Sample Clinical File: Must have SEQ_DATE column.\n"
"Patient Clinical File: Must have BIRTH_YEAR column.\n"
"Patient Clinical File: Must have YEAR_DEATH column.\n"
"Patient Clinical File: Must have YEAR_CONTACT column.\n"
"Patient Clinical File: Must have INT_CONTACT column.\n"
"Patient Clinical File: Must have INT_DOD column.\n"
"Patient Clinical File: Must have DEAD column.\n"
"Patient Clinical File: Must have SEX column.\n")
expected_warnings = (
"Patient Clinical File: Doesn't have PRIMARY_RACE column. "
"This column will be added\n"
"Patient Clinical File: Doesn't have SECONDARY_RACE column. "
"This column will be added\n"
"Patient Clinical File: Doesn't have TERTIARY_RACE column. "
"This column will be added\n"
"Patient Clinical File: Doesn't have ETHNICITY column. "
"This column will be added\n")
print(error)
assert error == expected_errors
assert warning == expected_warnings
def test_errors__validate():
'''
Test for validation errors
'''
sampleDf = pd.DataFrame(dict(
SAMPLE_ID=[float('nan'), "ID2-1", "ID3-1", "ID4-1", "ID5-1"],
PATIENT_ID=["ID6", "ID2", "ID3", float('nan'), "ID5"],
AGE_AT_SEQ_REPORT=[10, 100000, "doo", 100000, 100000],
ONCOTREE_CODE=['AMPCAD', 'TESTIS', 'AMPCA', 'AMPCA', 'UCEC'],
SAMPLE_TYPE=[1, 2, 3, 4, 6],
SEQ_ASSAY_ID=[float('nan'), 'Sage-1', 'SAGE-1', 'S-SAGE-1', 'SAGE-1'],
SEQ_DATE=['Jane-2013', 'Jan-2013', 'Jan-2013', 'Jan-2013', 'Jan-2013'],
YEAR_DEATH=["Unknown", "Not Collected", "Not Applicable", 19930, 1990],
YEAR_CONTACT=["Unknown", "Not Collected", 1990, 1990, 19940],
INT_CONTACT=['>32485', '<6570', 'Unknown', 'Not Collected', "foobar"],
INT_DOD=['>32485', '<6570', 'Unknown', 'Not Collected', 'dense'],
DEAD=[1, False, 'Unknown', 'Not Collected', 'Not Applicable']))
patientDf = pd.DataFrame(dict(
PATIENT_ID=["ID6", "ID2", "ID3", float("nan"), "ID5"],
SEX=[1, 2, 1, 5, float('nan')],
PRIMARY_RACE=[1, 2, 3, 6, float('nan')],
SECONDARY_RACE=[1, 2, 3, 6, float('nan')],
TERTIARY_RACE=[1, 2, 3, 6, float('nan')],
ETHNICITY=[1, 2, 3, 6, float('nan')],
BIRTH_YEAR=[1990, 1990, datetime.datetime.utcnow().year + 1,
1990, 1990],
CENTER=["FOO", "FOO", "FOO", "FOO", "FOO"]))
clinicalDf = patientDf.merge(sampleDf, on="PATIENT_ID")
with mock.patch(
"genie.process_functions.get_oncotree_code_mappings",
return_value=onco_map_dict) as mock_get_onco_map:
error, warning = clin_class._validate(clinicalDf, json_oncotreeurl)
mock_get_onco_map.called_once_with(json_oncotreeurl)
expectedErrors = (
"Sample Clinical File: PATIENT_ID's much be contained in the "
"SAMPLE_ID's (ex. SAGE-1 <-> SAGE-1-2)\n"
"Patient Clinical File: All samples must have associated patient "
"information and no null patient ids allowed. "
"These samples are missing patient data: ID4-1\n"
"Sample Clinical File: Please double check your "
"AGE_AT_SEQ_REPORT. It must be an integer or 'Unknown'.\n"
"Sample Clinical File: Please double check that all your "
"ONCOTREE CODES exist in the mapping. You have 1 samples that "
"don't map. These are the codes that don't map: AMPCAD\n"
"Sample Clinical File: Please double check your SAMPLE_TYPE "
"column. This column must only be these values: 1, 2, 3, 4, 99\n"
"Sample Clinical File: Please double check your SEQ_ASSAY_ID "
"columns, there are empty rows.\n"
"Sample Clinical File: Please make sure your SEQ_ASSAY_IDs start "
"with your center abbreviation: S-SAGE-1.\n"
"Sample Clinical File: SEQ_DATE must be one of five values- "
"For Jan-March: use Jan-YEAR. "
"For Apr-June: use Apr-YEAR. "
"For July-Sep: use Jul-YEAR. "
"For Oct-Dec: use Oct-YEAR. (ie. Apr-2017) "
"For values that don't have SEQ_DATES that you want "
"released use 'release'.\n"
"Patient Clinical File: Please double check your BIRTH_YEAR "
"column, it must be an integer in YYYY format > {year} or "
"'Unknown'.\n"
"Patient Clinical File: Please double check your YEAR_DEATH "
"column, it must be an integer in YYYY format, 'Unknown', "
"'Not Applicable' or 'Not Collected'.\n"
"Patient Clinical File: Please double check your YEAR_CONTACT "
"column, it must be an integer in YYYY format, 'Unknown' or "
"'Not Collected'.\n"
"Patient Clinical File: Please double check your INT_CONTACT "
"column, it must be an integer, '>32485', '<6570', 'Unknown' or "
"'Not Collected'.\n"
"Patient Clinical File: Please double check your INT_DOD column, "
"it must be an integer, '>32485', '<6570', 'Unknown', "
"'Not Collected' or 'Not Applicable'.\n"
"Patient Clinical File: Please double check your DEAD column, "
"it must be True, False, 'Unknown' or 'Not Collected'.\n"
"Patient Clinical File: Please double check your PRIMARY_RACE "
"column. This column must only be these values: 1, 2, 3, 4, 99\n"
"Patient Clinical File: Please double check your SECONDARY_RACE "
"column. This column must only be these values: 1, 2, 3, 4, 99\n"
"Patient Clinical File: Please double check your TERTIARY_RACE "
"column. This column must only be these values: 1, 2, 3, 4, 99\n"
"Patient Clinical File: Please double check your SEX column. "
"This column must only be these values: 1, 2, 99\n"
"Patient Clinical File: Please double check your ETHNICITY "
"column. This column must only be these values: 1, 2, 3, 4, 99\n")
expectedWarnings = (
"Sample Clinical File: All patients must have associated sample "
"information. These patients are missing sample data: ID6\n"
"Sample Clinical File: Some SAMPLE_IDs have conflicting SEX and "
"ONCOTREE_CODES: ID2-1,ID5-1\n")
assert error == expectedErrors.format(
year=datetime.datetime.utcnow().year)
print(warning)
assert warning == expectedWarnings
def test_duplicated__validate():
'''
Test for the duplicated SAMPLE_ID error and, in the case that
both sample and patient files are uploaded,
the possible duplicated PATIENT_ID error
'''
patientDf = pd.DataFrame(dict(
PATIENT_ID=["ID1", "ID1", "ID3", "ID4", "ID5"],
SEX=[1, 2, 1, 2, 99],
PRIMARY_RACE=[1, 2, 3, 4, 99],
SECONDARY_RACE=[1, 2, 3, 4, 99],
TERTIARY_RACE=[1, 2, 3, 4, 99],
ETHNICITY=[1, 2, 3, 4, 99],
BIRTH_YEAR=["Unknown", 1990, 1990, 1990, 1990],
CENTER=["FOO", "FOO", "FOO", "FOO", "FOO"],
YEAR_DEATH=["Unknown", "Not Collected", "Not Applicable",
1990, 1990],
YEAR_CONTACT=["Unknown", "Not Collected", 1990, 1990, 1990],
INT_CONTACT=['>32485', '<6570', 'Unknown', 'Not Collected', 2000],
INT_DOD=['>32485', '<6570', 'Unknown', 'Not Collected',
'Not Applicable'],
DEAD=[True, False, 'Unknown', 'Not Collected', True]))
sampleDf = pd.DataFrame(dict(
SAMPLE_ID=["ID1-1", "ID3-1", "ID4-1", "ID5-1"],
PATIENT_ID=["ID1", "ID3", "ID4", "ID5"],
AGE_AT_SEQ_REPORT=[100000, 100000, 100000, 100000],
ONCOTREE_CODE=['AMPCA', 'UNKNOWN', 'AMPCA', 'AMPCA'],
SAMPLE_TYPE=[1, 3, 4, 4],
SEQ_ASSAY_ID=['SAGE-1-1', 'SAGE-1', 'SAGE-1', 'SAGE-1'],
SEQ_DATE=['Jan-2013', 'Jul-2013', 'Oct-2013', 'release']))
clinicalDf = patientDf.merge(sampleDf, on="PATIENT_ID")
with mock.patch(
"genie.process_functions.get_oncotree_code_mappings",
return_value=onco_map_dict) as mock_get_onco_map:
error, warning = clin_class._validate(clinicalDf, json_oncotreeurl)
mock_get_onco_map.called_once_with(json_oncotreeurl)
expectedErrors = (
"Sample Clinical File: No duplicated SAMPLE_ID allowed.\n"
"If there are no duplicated SAMPLE_IDs, and both sample and "
"patient files are uploaded, then please check to make sure no "
"duplicated PATIENT_IDs exist in the patient clinical file.\n")
assert error == expectedErrors
assert warning == ""
class fake_oncotree():
import json
text = json.dumps({
'TISSUE': {
'children': {
'AMPCA': {
'level': 1,
'mainType': 'Ampullary Cancer',
'name': 'Ampullary Carcinoma',
'children': {
'TESTIS': {
'level': 2,
'mainType': 'Testicular Cancer, NOS',
'name': 'Testis',
'children': []},
'UCEC': {
'level': 2,
'mainType': 'Endometrial Cancer',
'name': 'Endometrial Carcinoma',
'children': []}}}}}})
expected_onco_mapping = {
'AMPCA': {
'CANCER_TYPE': 'Ampullary Cancer',
'CANCER_TYPE_DETAILED': 'Ampullary Carcinoma',
'ONCOTREE_PRIMARY_NODE': 'AMPCA',
'ONCOTREE_SECONDARY_NODE': ''},
'TESTIS': {
'CANCER_TYPE': 'Testicular Cancer, NOS',
'CANCER_TYPE_DETAILED': 'Testis',
'ONCOTREE_PRIMARY_NODE': 'AMPCA',
'ONCOTREE_SECONDARY_NODE': 'TESTIS'},
'UCEC': {
'CANCER_TYPE': 'Endometrial Cancer',
'CANCER_TYPE_DETAILED': 'Endometrial Carcinoma',
'ONCOTREE_PRIMARY_NODE': 'AMPCA',
'ONCOTREE_SECONDARY_NODE': 'UCEC'}}
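# Editor's note on the expected mapping above: in fake_oncotree, AMPCA is a
# level-1 node, so it maps to itself as ONCOTREE_PRIMARY_NODE with an empty
# secondary node, while TESTIS and UCEC are level-2 children of AMPCA and
# therefore keep AMPCA as their primary node and themselves as the secondary
# node; CANCER_TYPE/CANCER_TYPE_DETAILED come from each node's mainType/name.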
def test_get_oncotree_code_mappings():
from genie import process_functions
with mock.patch(
"genie.process_functions.retry_get_url",
return_value=fake_oncotree) as retry_get_url:
onco_mapping = \
process_functions.get_oncotree_code_mappings(json_oncotreeurl)
retry_get_url.called_once_with(json_oncotreeurl)
assert onco_mapping == expected_onco_mapping
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2018-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A local Partial Component Analysis.
This module defines a PCA to make projections locally or
embedded into your application without needing to send requests to
BigML.io.
This module can help you enormously to
reduce the latency for each prediction and let you use your PCAs offline.
Example usage (assuming that you have previously set up the BIGML_USERNAME
and BIGML_API_KEY environment variables and that you own the
pca/id below):
from bigml.api import BigML
from bigml.pca import PCA
api = BigML()
pca = PCA(
'pca/5026965515526876630001b2')
pca.projection({"petal length": 3, "petal width": 1,
"sepal length": 1, "sepal width": 0.5})
"""
import logging
import math
from bigml.api import FINISHED
from bigml.api import get_status, get_api_connection, get_pca_id
from bigml.util import cast, use_cache, load, NUMERIC
from bigml.basemodel import get_resource_dict
from bigml.modelfields import ModelFields
try:
from bigml.laminar.numpy_ops import dot
except ImportError:
from bigml.laminar.math_ops import dot
LOGGER = logging.getLogger('BigML')
EXPANSION_ATTRIBUTES = {"categorical": "categories", "text": "tag_clouds",
"items": "items"}
CATEGORICAL = "categorical"
def get_terms_array(terms, unique_terms, field, field_id):
""" Returns an array that represents the frequency of terms as ordered
in the reference `terms` parameter.
"""
input_terms = unique_terms.get(field_id, [])
terms_array = [0] * len(terms)
if field['optype'] == CATEGORICAL and \
field["summary"].get("missing_count", 0) > 0:
terms_array.append(int(field_id not in unique_terms))
try:
for term, frequency in input_terms:
index = terms.index(term)
terms_array[index] = frequency
except ValueError:
pass
return terms_array
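# Worked example (editor's illustration, hypothetical field id): with
# terms = ["a", "b", "c"] and unique_terms = {"000001": [("b", 2)]} for a
# text field, get_terms_array(terms, unique_terms, field, "000001") returns
# [0, 2, 0]; for a categorical field whose summary reports missing_count > 0,
# one extra trailing element is appended, set to 1 only when the field id is
# absent from unique_terms.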
class PCA(ModelFields):
""" A lightweight wrapper around a PCA.
Uses a BigML remote PCA to build a local version
that can be used to generate projections locally.
"""
def __init__(self, pca, api=None, cache_get=None):
if use_cache(cache_get):
# using a cache to store the model attributes
self.__dict__ = load(get_pca_id(pca), cache_get)
return
self.resource_id = None
self.input_fields = []
self.default_numeric_value = None
self.term_forms = {}
self.tag_clouds = {}
self.dataset_field_types = {}
self.term_analysis = {}
self.categories = {}
self.categories_probabilities = {}
self.items = {}
self.fields = {}
self.item_analysis = {}
self.standardize = None
self.famd_j = 1
api = get_api_connection(api)
self.resource_id, pca = get_resource_dict( \
pca, "pca", api=api)
if 'object' in pca and \
isinstance(pca['object'], dict):
pca = pca['object']
try:
self.input_fields = pca.get("input_fields", [])
self.default_numeric_value = pca.get("default_numeric_value")
self.dataset_field_types = pca.get("dataset_field_types", {})
self.famd_j = 1 if (self.dataset_field_types['categorical'] != \
self.dataset_field_types['total']) else \
self.dataset_field_types['categorical']
except KeyError:
raise ValueError("Failed to find the pca expected "
"JSON structure. Check your arguments.")
if 'pca' in pca and \
isinstance(pca['pca'], dict):
status = get_status(pca)
if 'code' in status and status['code'] == FINISHED:
pca_info = pca[ \
'pca']
fields = pca_info.get('fields', {})
self.fields = fields
if not self.input_fields:
self.input_fields = [ \
field_id for field_id, _ in
sorted(list(self.fields.items()),
key=lambda x: x[1].get("column_number"))]
missing_tokens = pca_info.get("missing_tokens")
for field_id, field in fields.items():
if field["optype"] == "categorical":
probabilities = [probability for _, probability in \
field["summary"]["categories"]]
if field["summary"].get("missing_count", 0) > 0:
probabilities.append(
field["summary"]["missing_count"])
total = float(sum(probabilities))
if total > 0:
probabilities = [probability / total for probability \
in probabilities]
self.categories_probabilities[field_id] = probabilities
ModelFields.__init__(
self, fields,
objective_id=None, categories=True,
numerics=False, missing_tokens=missing_tokens)
self.components = pca_info.get('components')
self.eigenvectors = pca_info.get('eigenvectors')
self.cumulative_variance = pca_info.get('cumulative_variance')
self.text_stats = pca_info.get('text_stats')
self.standardized = pca_info.get('standardized')
self.variance = pca_info.get('variance')
else:
raise Exception("The pca isn't finished yet")
else:
raise Exception("Cannot create the PCA instance."
" Could not find the 'pca' key"
" in the resource:\n\n%s" %
pca)
def projection(self, input_data, max_components=None,
variance_threshold=None, full=False):
"""Returns the projection of input data in the new components
input_data: Input data to be projected
"""
norm_input_data = self.filter_input_data( \
input_data,
add_unused_fields=False)
# Strips affixes for numeric values and casts to the final field type
cast(norm_input_data, self.fields)
# Computes text and categorical field expansion into an input array of
# terms and frequencies
unique_terms = self.get_unique_terms(norm_input_data)
# Creates an input vector with the values for all expanded fields.
# The input mask marks the non-missing or categorical fields
# The `missings` variable is a boolean indicating whether there are
# non-categorical fields missing
input_array, missings, input_mask = self.expand_input(norm_input_data,
unique_terms)
components = self.eigenvectors[:]
if max_components is not None:
components = components[0: max_components]
if variance_threshold is not None:
for index, cumulative in enumerate(self.cumulative_variance):
if cumulative > variance_threshold:
components = components[0: index + 1]
result = [value[0] for value in dot(components, [input_array])]
# If non-categorical field values are missing in the input data,
# an additional normalization is applied
if missings:
missing_sums = self.missing_factors(input_mask)
for index, value in enumerate(result):
result[index] = value / missing_sums[index] \
if missing_sums[index] > 0 else value
if full:
result = dict(list(zip(["PC%s" % index \
for index in range(1, len(components) + 1)], result)))
return result
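# Editor's note: a minimal usage sketch for the method above (values are
# illustrative). With full=False the result is a plain list of projections
# on the retained components; with full=True it is keyed by component name:
#     pca.projection({"petal length": 3}, max_components=2, full=True)
#     # -> {"PC1": ..., "PC2": ...}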
def missing_factors(self, input_mask):
"""Returns the factors to divide the PCA values when input
data has missings
"""
sum_eigenvectors = []
for row in self.eigenvectors:
eigenvector = [a * b for a, b in zip(input_mask, row)]
sum_eigenvectors.append(dot([eigenvector], [eigenvector])[0][0])
return sum_eigenvectors
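# Worked example (editor's illustration): for an eigenvector row [a, b, c]
# and input_mask [1, 0, 1], the masked row is [a, 0, c] and the corresponding
# factor is its squared norm a**2 + c**2, i.e. dot([masked], [masked])[0][0];
# projection() divides each component by this factor when numeric inputs are
# missing.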
def _get_mean_stdev(self, field, field_id=None, index=None):
"""Returns the quantities to be used as mean and stddev to normalize
"""
if field['optype'] == CATEGORICAL and index is not None:
mean = self.categories_probabilities[field_id][index]
stdev = self.famd_j * math.sqrt(mean * self.famd_j)
return mean, stdev
if field['optype'] == NUMERIC:
return field["summary"]["mean"], \
field["summary"]["standard_deviation"]
return self.text_stats[field_id]['means'][index], \
self.text_stats[field_id]['standard_deviations'][index]
def expand_input(self, input_data, unique_terms):
""" Creates an input array with the values in input_data and
unique_terms and the following rules:
- fields are ordered as input_fields
- numeric fields contain the value or 0 if missing
- categorical fields are one-hot encoded and classes are sorted as
they appear in the field summary. If missing_count > 0, a last
missing element is added, set to 1 if the field is missing and 0
otherwise
- text and items fields are expanded into their elements as found
in the corresponding summary information and their values treated
as numerics.
"""
input_array = []
input_mask = []
missings = False
for field_id in self.input_fields:
field = self.fields[field_id]
optype = field["optype"]
if optype == NUMERIC:
input_mask.append(int(field_id in input_data))
if field_id in input_data:
value = input_data.get(field_id, 0)
if self.standardized:
mean, stdev = self._get_mean_stdev(field)
value -= mean
if stdev > 0:
value /= stdev
else:
missings = True
value = 0
input_array.append(value)
else:
terms = getattr(self, EXPANSION_ATTRIBUTES[optype])[field_id]
if field_id in unique_terms:
new_inputs = get_terms_array( \
terms, unique_terms, field, field_id)
input_mask.extend( \
[1] * len(new_inputs))
else:
new_inputs = [0] * len(terms)
if optype != CATEGORICAL:
missings = True
input_mask.extend([0] * len(terms))
else:
input_mask.extend([1] * len(terms))
if field["summary"]["missing_count"] > 0:
new_inputs.append(1)
input_mask.append(1)
if self.standardized:
for index2, frequency in enumerate(new_inputs):
mean, stdev = self._get_mean_stdev( \
field, field_id, index2)
new_inputs[index2] = frequency - mean
if stdev > 0:
new_inputs[index2] /= stdev
# indexes of non-missing values
input_array.extend(new_inputs)
return input_array, missings, input_mask
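# Editor's note: a tiny worked example of the expansion rules above
# (hypothetical fields, standardization ignored). With input_fields being
# [numeric "000000", categorical "000001" whose categories are
# ["red", "green"] and missing_count == 0], the input {"000000": 5,
# "000001": "green"} expands to input_array == [5, 0, 1] (the numeric value
# followed by the one-hot block), input_mask == [1, 1, 1] and
# missings == False.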
|
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <[email protected]>
"""
import datetime
import math
from distutils import version
import pytz
import numpy
import common
import strategy_test
import position_test
from pyalgotrade.barfeed import ninjatraderfeed
from pyalgotrade.barfeed import csvfeed
from pyalgotrade.stratanalyzer import trades
from pyalgotrade import broker
from pyalgotrade.broker import backtesting
def buildUTCDateTime(year, month, day, hour, minute):
ret = datetime.datetime(year, month, day, hour, minute)
ret = pytz.utc.localize(ret)
return ret
class TradesAnalyzerTestCase(common.TestCase):
TestInstrument = "spy"
def __loadBarFeed(self):
ret = ninjatraderfeed.Feed(ninjatraderfeed.Frequency.MINUTE)
barFilter = csvfeed.USEquitiesRTH()
ret.setBarFilter(barFilter)
ret.addBarsFromCSV(TradesAnalyzerTestCase.TestInstrument, common.get_data_file_path("nt-spy-minute-2011.csv"))
return ret
def __createStrategy(self):
barFeed = self.__loadBarFeed()
return strategy_test.TestStrategy(barFeed, 1000)
def __createPositionStrategy(self):
barFeed = self.__loadBarFeed()
return position_test.TestStrategy(barFeed, TradesAnalyzerTestCase.TestInstrument, 1000)
def testNoTrades(self):
strat = self.__createStrategy()
stratAnalyzer = trades.Trades()
strat.attachAnalyzer(stratAnalyzer)
strat.run()
self.assertTrue(strat.getBroker().getCash() == 1000)
self.assertTrue(stratAnalyzer.getCount() == 0)
self.assertTrue(stratAnalyzer.getEvenCount() == 0)
self.assertTrue(stratAnalyzer.getProfitableCount() == 0)
self.assertTrue(stratAnalyzer.getUnprofitableCount() == 0)
def testSomeTrades_Position(self):
strat = self.__createPositionStrategy()
stratAnalyzer = trades.Trades()
strat.attachAnalyzer(stratAnalyzer)
# Winning trade
strat.addPosEntry(buildUTCDateTime(2011, 1, 3, 15, 0), strat.enterLong, TradesAnalyzerTestCase.TestInstrument, 1) # 127.14
strat.addPosExit(buildUTCDateTime(2011, 1, 3, 15, 16)) # 127.16
# Losing trade
strat.addPosEntry(buildUTCDateTime(2011, 1, 3, 15, 30), strat.enterLong, TradesAnalyzerTestCase.TestInstrument, 1) # 127.2
strat.addPosExit(buildUTCDateTime(2011, 1, 3, 15, 31)) # 127.16
# Winning trade
strat.addPosEntry(buildUTCDateTime(2011, 1, 3, 15, 38), strat.enterLong, TradesAnalyzerTestCase.TestInstrument, 1) # 127.16
strat.addPosExit(buildUTCDateTime(2011, 1, 3, 15, 42)) # 127.26
# Open trade (not closed)
strat.addPosEntry(buildUTCDateTime(2011, 1, 3, 15, 47), strat.enterLong, TradesAnalyzerTestCase.TestInstrument, 1) # 127.34
strat.run()
self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.16 - 127.14) + (127.16 - 127.2) + (127.26 - 127.16) - 127.34, 2))
self.assertTrue(stratAnalyzer.getCount() == 3)
self.assertTrue(stratAnalyzer.getEvenCount() == 0)
self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == 0.03)
self.assertTrue(round(stratAnalyzer.getAll().std(ddof=1), 2) == 0.07)
self.assertTrue(round(stratAnalyzer.getAll().std(ddof=0), 2) == 0.06)
self.assertTrue(stratAnalyzer.getProfitableCount() == 2)
self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.06)
self.assertTrue(round(stratAnalyzer.getProfits().std(ddof=1), 2) == 0.06)
self.assertTrue(round(stratAnalyzer.getProfits().std(ddof=0), 2) == 0.04)
self.assertEqual(stratAnalyzer.getPositiveReturns()[0], (127.16 - 127.14) / 127.14)
self.assertEqual(stratAnalyzer.getPositiveReturns()[1], (127.26 - 127.16) / 127.16)
self.assertTrue(stratAnalyzer.getUnprofitableCount() == 1)
self.assertTrue(round(stratAnalyzer.getLosses().mean(), 2) == -0.04)
if version.LooseVersion(numpy.__version__) >= version.LooseVersion("1.6.2"):
self.assertTrue(math.isnan(stratAnalyzer.getLosses().std(ddof=1)))
else:
self.assertTrue(stratAnalyzer.getLosses().std(ddof=1) == 0)
self.assertTrue(stratAnalyzer.getLosses().std(ddof=0) == 0)
self.assertEqual(stratAnalyzer.getNegativeReturns()[0], (127.16 - 127.2) / 127.2)
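# Editor's note: the figures asserted above follow from the three closed
# trades 0.02, -0.04 and 0.10: mean = 0.08/3 ~= 0.03, sample std (ddof=1)
# ~= 0.07, population std (ddof=0) ~= 0.06; the two profits 0.02 and 0.10
# give mean 0.06 with std ~= 0.06 (ddof=1) and 0.04 (ddof=0), and the single
# loss -0.04 has an undefined sample std, hence the NaN check.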
def testSomeTrades(self):
strat = self.__createStrategy()
stratAnalyzer = trades.Trades()
strat.attachAnalyzer(stratAnalyzer)
# Winning trade
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1) # 127.14
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1) # 127.16
# Losing trade
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1) # 127.2
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 31), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1) # 127.16
# Winning trade
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 38), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1) # 127.16
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 42), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1) # 127.26
# Open trade.
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 47), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1) # 127.34
strat.run()
self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.16 - 127.14) + (127.16 - 127.2) + (127.26 - 127.16) - 127.34, 2))
self.assertTrue(stratAnalyzer.getCount() == 3)
self.assertTrue(stratAnalyzer.getEvenCount() == 0)
self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == 0.03)
self.assertTrue(round(stratAnalyzer.getAll().std(ddof=1), 2) == 0.07)
self.assertTrue(round(stratAnalyzer.getAll().std(ddof=0), 2) == 0.06)
self.assertTrue(stratAnalyzer.getProfitableCount() == 2)
self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.06)
self.assertTrue(round(stratAnalyzer.getProfits().std(ddof=1), 2) == 0.06)
self.assertTrue(round(stratAnalyzer.getProfits().std(ddof=0), 2) == 0.04)
self.assertTrue(stratAnalyzer.getUnprofitableCount() == 1)
self.assertTrue(round(stratAnalyzer.getLosses().mean(), 2) == -0.04)
if version.LooseVersion(numpy.__version__) >= version.LooseVersion("1.6.2"):
self.assertTrue(math.isnan(stratAnalyzer.getLosses().std(ddof=1)))
else:
self.assertTrue(stratAnalyzer.getLosses().std(ddof=1) == 0)
self.assertTrue(stratAnalyzer.getLosses().std(ddof=0) == 0)
def testSomeTradesWithCommissions(self):
strat = self.__createStrategy()
strat.getBroker().setCommission(backtesting.FixedPerTrade(0.01))
stratAnalyzer = trades.Trades()
strat.attachAnalyzer(stratAnalyzer)
# Losing trade
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1) # 127.2
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 31), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1) # 127.16
# Winning trade
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 38), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1) # 127.16
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 42), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1) # 127.26
# Open trade.
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 47), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1) # 127.34
strat.run()
self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.16 - 127.2) + (127.26 - 127.16) - 127.34 - 0.01*5, 2))
self.assertTrue(numpy.array_equal(stratAnalyzer.getCommissionsForAllTrades(), numpy.array([0.02, 0.02])))
self.assertTrue(numpy.array_equal(stratAnalyzer.getCommissionsForProfitableTrades(), numpy.array([0.02])))
self.assertTrue(numpy.array_equal(stratAnalyzer.getCommissionsForUnprofitableTrades(), numpy.array([0.02])))
self.assertTrue(numpy.array_equal(stratAnalyzer.getCommissionsForEvenTrades(), numpy.array([])))
def testProportionalCommissionBug(self):
# Regression test for a bug reported by 'Jackson Sam' on 30/Aug/2013.
strat = self.__createStrategy()
strat.getBroker().setCommission(backtesting.FixedPerTrade(0.01))
stratAnalyzer = trades.Trades()
strat.attachAnalyzer(stratAnalyzer)
# There are 3 trades here:
# Trade 1 (Long)
# Buy 1 @ 127.16 Commission: 0.01
# Sell 1 @ 127.26 Commission: 0.005
# Trade 2 (Short)
# Sell 1 @ 127.26 Commission: 0.005
# Buy 1 @ 127.37 Commission: 0.005
# Trade 3 (Long)
# Buy 1 @ 127.37 Commission: 0.005
# Sell 1 @ 127.4 Commission: 0.01
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 38), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1) # Fill at 127.16
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 42), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 2) # Fill at 127.26
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 53), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 2) # Fill at 127.37
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 58), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1) # Fill at 127.4
strat.run()
allReturns = stratAnalyzer.getAllReturns()
self.assertEqual(round(allReturns[0], 6), 0.000668)
self.assertEqual(round(allReturns[1], 6), -0.000943)
self.assertEqual(round(allReturns[2], 6), 0.000118)
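# Editor's note: one way to reproduce the figures above from the trade
# breakdown in the comment: trade 1 nets 127.26 - 127.16 - (0.01 + 0.005)
# = 0.085, and 0.085 / 127.16 ~= 0.000668; trade 2 nets 127.26 - 127.37
# - (0.005 + 0.005) = -0.12, and -0.12 / 127.26 ~= -0.000943; trade 3 nets
# 127.40 - 127.37 - (0.005 + 0.01) = 0.015, and 0.015 / 127.37 ~= 0.000118.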
def testLongShort(self):
strat = self.__createStrategy()
stratAnalyzer = trades.Trades()
strat.attachAnalyzer(stratAnalyzer)
# Enter long
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1) # 127.14
# Exit long and enter short
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 2) # 127.16
# Exit short
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.BUY_TO_COVER, TradesAnalyzerTestCase.TestInstrument, 1) # 127.2
strat.run()
self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.16 - 127.14) + (127.16 - 127.2), 2))
self.assertTrue(stratAnalyzer.getCount() == 2)
self.assertTrue(stratAnalyzer.getEvenCount() == 0)
self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == -0.01)
self.assertTrue(round(stratAnalyzer.getAll().std(ddof=1), 4) == 0.0424)
self.assertTrue(stratAnalyzer.getProfitableCount() == 1)
self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.02)
self.assertTrue(stratAnalyzer.getUnprofitableCount() == 1)
self.assertTrue(round(stratAnalyzer.getLosses().mean(), 2) == -0.04)
def testLongShort2(self):
strat = self.__createStrategy()
stratAnalyzer = trades.Trades()
strat.attachAnalyzer(stratAnalyzer)
# Enter long
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1) # 127.14
# Exit long
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1) # 127.16
# Enter short
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.SELL_SHORT, TradesAnalyzerTestCase.TestInstrument, 1) # 127.16
# Exit short
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.BUY_TO_COVER, TradesAnalyzerTestCase.TestInstrument, 1) # 127.2
strat.run()
self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.16 - 127.14) + (127.16 - 127.2), 2))
self.assertTrue(stratAnalyzer.getCount() == 2)
self.assertTrue(stratAnalyzer.getEvenCount() == 0)
self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == -0.01)
self.assertTrue(round(stratAnalyzer.getAll().std(ddof=1), 4) == 0.0424)
self.assertTrue(stratAnalyzer.getProfitableCount() == 1)
self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.02)
self.assertTrue(stratAnalyzer.getUnprofitableCount() == 1)
self.assertTrue(round(stratAnalyzer.getLosses().mean(), 2) == -0.04)
def testShortLong(self):
strat = self.__createStrategy()
stratAnalyzer = trades.Trades()
strat.attachAnalyzer(stratAnalyzer)
# Enter short
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.SELL_SHORT, TradesAnalyzerTestCase.TestInstrument, 1) # 127.14
# Exit short and enter long
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.BUY_TO_COVER, TradesAnalyzerTestCase.TestInstrument, 2) # 127.16
# Exit long
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1) # 127.2
strat.run()
self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.14 - 127.16) + (127.2 - 127.16), 2))
self.assertTrue(stratAnalyzer.getCount() == 2)
self.assertTrue(stratAnalyzer.getEvenCount() == 0)
self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == 0.01)
self.assertTrue(round(stratAnalyzer.getAll().std(ddof=1), 4) == 0.0424)
self.assertTrue(stratAnalyzer.getProfitableCount() == 1)
self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.04)
self.assertTrue(stratAnalyzer.getUnprofitableCount() == 1)
self.assertTrue(round(stratAnalyzer.getLosses().mean(), 2) == -0.02)
def testShortLong2(self):
strat = self.__createStrategy()
stratAnalyzer = trades.Trades()
strat.attachAnalyzer(stratAnalyzer)
# Enter short
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.SELL_SHORT, TradesAnalyzerTestCase.TestInstrument, 1) # 127.14
# Exit short
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.BUY_TO_COVER, TradesAnalyzerTestCase.TestInstrument, 1) # 127.16
# Enter long
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1) # 127.16
# Exit long
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1) # 127.2
strat.run()
self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.14 - 127.16) + (127.2 - 127.16), 2))
self.assertTrue(stratAnalyzer.getCount() == 2)
self.assertTrue(stratAnalyzer.getEvenCount() == 0)
self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == 0.01)
self.assertTrue(round(stratAnalyzer.getAll().std(ddof=1), 4) == 0.0424)
self.assertTrue(stratAnalyzer.getProfitableCount() == 1)
self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.04)
self.assertTrue(stratAnalyzer.getUnprofitableCount() == 1)
self.assertTrue(round(stratAnalyzer.getLosses().mean(), 2) == -0.02)
def testLong2(self):
strat = self.__createStrategy()
stratAnalyzer = trades.Trades()
strat.attachAnalyzer(stratAnalyzer)
# Enter long
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1) # 127.14
# Extend long position
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1) # 127.16
# Exit long
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 2) # 127.2
strat.run()
self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.2 - 127.14) + (127.2 - 127.16), 2))
self.assertTrue(stratAnalyzer.getCount() == 1)
self.assertTrue(stratAnalyzer.getEvenCount() == 0)
self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == 0.1)
self.assertTrue(stratAnalyzer.getProfitableCount() == 1)
self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.1)
self.assertTrue(stratAnalyzer.getUnprofitableCount() == 0)
def testLong3(self):
strat = self.__createStrategy()
stratAnalyzer = trades.Trades()
strat.attachAnalyzer(stratAnalyzer)
# Enter long
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 2) # 127.14
# Decrease long position
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1) # 127.16
# Exit long
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1) # 127.2
strat.run()
self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.2 - 127.14) + (127.16 - 127.14), 2))
self.assertTrue(stratAnalyzer.getCount() == 1)
self.assertTrue(stratAnalyzer.getEvenCount() == 0)
self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == 0.08)
self.assertTrue(stratAnalyzer.getProfitableCount() == 1)
self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.08)
self.assertTrue(stratAnalyzer.getUnprofitableCount() == 0)
def testShort2(self):
strat = self.__createStrategy()
stratAnalyzer = trades.Trades()
strat.attachAnalyzer(stratAnalyzer)
# Enter short
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.SELL_SHORT, TradesAnalyzerTestCase.TestInstrument, 1) # 127.14
# Extend short position
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.SELL_SHORT, TradesAnalyzerTestCase.TestInstrument, 1) # 127.16
# Exit short
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.BUY_TO_COVER, TradesAnalyzerTestCase.TestInstrument, 2) # 127.2
strat.run()
self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.14 - 127.2) + (127.16 - 127.2), 2))
self.assertTrue(stratAnalyzer.getCount() == 1)
self.assertTrue(stratAnalyzer.getEvenCount() == 0)
self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == -0.1)
self.assertTrue(stratAnalyzer.getUnprofitableCount() == 1)
self.assertTrue(round(stratAnalyzer.getLosses().mean(), 2) == -0.1)
self.assertTrue(stratAnalyzer.getProfitableCount() == 0)
def testShort3(self):
strat = self.__createStrategy()
stratAnalyzer = trades.Trades()
strat.attachAnalyzer(stratAnalyzer)
# Enter short
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.SELL_SHORT, TradesAnalyzerTestCase.TestInstrument, 2) # 127.14
# Decrease short position
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.BUY_TO_COVER, TradesAnalyzerTestCase.TestInstrument, 1) # 127.16
# Exit short
strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.BUY_TO_COVER, TradesAnalyzerTestCase.TestInstrument, 1) # 127.2
strat.run()
self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.14 - 127.16) + (127.14 - 127.2), 2))
self.assertTrue(stratAnalyzer.getCount() == 1)
self.assertTrue(stratAnalyzer.getEvenCount() == 0)
self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == -0.08)
self.assertTrue(stratAnalyzer.getUnprofitableCount() == 1)
self.assertTrue(round(stratAnalyzer.getLosses().mean(), 2) == -0.08)
self.assertTrue(stratAnalyzer.getProfitableCount() == 0)
|
|
# Standard library
from math import log
import os
import sys
# Third-party
from astropy.io import fits
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import logsumexp
from scipy.integrate import simps
from scipy.stats import norm
from tqdm import tqdm
import emcee
from emcee.utils import MPIPool
# Project
from gwb.coords import get_tangent_basis
from gwb.data import TGASData
pc_mas_yr_per_km_s = (1*u.km/u.s).to(u.pc*u.mas/u.yr, u.dimensionless_angles()).value
km_s_per_pc_mas_yr = 1/pc_mas_yr_per_km_s
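# For reference: multiplying a proper motion [mas/yr] by a distance [pc] and
# by km_s_per_pc_mas_yr gives a tangential velocity in km/s. Numerically,
# pc_mas_yr_per_km_s is ~210.95, i.e. the familiar v_t = 4.74 * mu[arcsec/yr]
# * d[pc] relation expressed per mas/yr.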
def get_icrs_samples(data, size=1, seed=None):
if seed is not None:
np.random.seed(seed)
all_samples = []
for i in range(len(data)):
star = data[i]
y = np.array([star.parallax.value, star.pmra.value, star.pmdec.value,
star.rv.to(u.km/u.s).value])
Cov = star.get_cov()[2:,2:]
Cov[-1,-1] = star.rv_err.to(u.km/u.s).value**2
all_samples.append(np.random.multivariate_normal(y, Cov, size=size))
all_samples = np.array(all_samples)
ra = np.repeat(data.ra.to(u.radian).value[:,None], size, axis=1)
dec = np.repeat(data.dec.to(u.radian).value[:,None], size, axis=1)
# ra : radian
# dec : radian
# parallax : mas
# pm_ra_cosdec : mas/yr
# pm_dec : mas/yr
# rv : km/s
return np.dstack((ra[...,None], dec[...,None], all_samples))
def ln_dv_pdf(x, sigma):
return 2*np.log(x) - x**2/(4*sigma**2) - 1.2655121234846456 - 3*np.log(sigma)
def comoving_ln_pdf(x, sigma):
""" Normal distribution truncated at 0 (x >= 0) """
x = np.abs(x)
return -0.5*(x/sigma)**2 - 0.22579135264472741 - np.log(sigma)
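# A note on the hard-coded constants in the two log-PDFs above (worked out
# from the normalizations):
#   ln_dv_pdf is a Maxwell distribution in |dv| with scale sqrt(2)*sigma:
#     ln p = 2*ln(x) - x**2/(4*sigma**2) + 0.5*ln(2/pi) - 1.5*ln(2) - 3*ln(sigma)
#     where 0.5*ln(2/pi) - 1.5*ln(2) = -1.2655121234846456
#   comoving_ln_pdf is a half-normal (a normal truncated at zero):
#     ln p = -0.5*(x/sigma)**2 + 0.5*ln(2/pi) - ln(sigma)
#     where 0.5*ln(2/pi) = -0.22579135264472741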
class MixtureModel:
def __init__(self, data1, data2, n_dv_samples=512, n_dist_grid=5,
field_vdisp=18.*u.km/u.s, name=''):
"""
Parameters
----------
data1 : `~gwb.data.TGASData`
Data for one star in each pair.
data2 : `~gwb.data.TGASData`
Data for the other star in each pair.
"""
assert len(data1) == len(data2)
self.n_data = len(data1)
self.n_dv_samples = int(n_dv_samples)
self.n_dist_grid = int(n_dist_grid)
self.name = name
self._field_vdisp = field_vdisp.to(u.km/u.s).value
self._icrs1 = get_icrs_samples(data1, n_dv_samples)
self._icrs2 = get_icrs_samples(data2, n_dv_samples)
# We can pre-compute the tangent basis matrices given the sky positions
# of each star. We transpose it using swapaxes()
self.M1 = np.swapaxes(get_tangent_basis(self._icrs1[:,0,0],
self._icrs1[:,0,1]), 1, 2)
self.M2 = np.swapaxes(get_tangent_basis(self._icrs2[:,0,0],
self._icrs2[:,0,1]), 1, 2)
# We can also pre-compute sample vectors in proper motion components
# and in radial velocity and make units consistent. To get velocity
# samples we just need to multiply in the distances
self._v1_samples = np.array([self._icrs1[...,3].T * km_s_per_pc_mas_yr,
self._icrs1[...,4].T * km_s_per_pc_mas_yr,
self._icrs1[...,5].T])
self._v2_samples = np.array([self._icrs2[...,3].T * km_s_per_pc_mas_yr,
self._icrs2[...,4].T * km_s_per_pc_mas_yr,
self._icrs2[...,5].T])
self._plx1 = data1.parallax.to(u.mas).value
self._plx1_err = data1.parallax_error.to(u.mas).value
self._plx2 = data2.parallax.to(u.mas).value
self._plx2_err = data2.parallax_error.to(u.mas).value
# The last thing we can cache are the distance grids
self.d1_grids = self._get_d_grids(self._plx1, self._plx1_err,
n_dist_grid)
self.d2_grids = self._get_d_grids(self._plx2, self._plx2_err,
n_dist_grid)
def _get_d_grids(self, plx, plx_err, size=1):
# distance grid ends in pc
d_min = 1000 / (plx + 3*plx_err)
d_max = 1000 / (plx - 3*plx_err)
return np.array([np.linspace(d_min[i], d_max[i], size)
for i in range(self.n_data)])
def get_dv_samples(self, d1, d2):
v1_tmp = self._v1_samples * np.vstack((d1, d1, np.ones_like(d1)))[:,None]
v2_tmp = self._v2_samples * np.vstack((d2, d2, np.ones_like(d2)))[:,None]
v1_samples = np.array([self.M1[n].dot(v1_tmp[...,n])
for n in range(self.n_data)])
v2_samples = np.array([self.M2[n].dot(v2_tmp[...,n])
for n in range(self.n_data)])
return np.linalg.norm(v1_samples - v2_samples, axis=1).T
def ln_likelihood_at_d1d2(self, p, d1, d2):
f = p[0]
dv_samples = self.get_dv_samples(d1, d2)
term1 = comoving_ln_pdf(dv_samples, 1.) + log(f)
term2 = ln_dv_pdf(dv_samples, self._field_vdisp) + log(1-f)
return (logsumexp(term1, axis=0) - log(self.n_dv_samples),
logsumexp(term2, axis=0) - log(self.n_dv_samples))
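# The two terms above are per-pair Monte Carlo estimates of the mixture
# components at fixed distances: |delta-v| samples (drawn from the proper
# motion / RV covariances in get_icrs_samples) are scored under the comoving
# model (term1, weight f) and the field model (term2, weight 1-f), and
# logsumexp minus log(n_dv_samples) averages the sample likelihoods in log
# space.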
def ln_likelihood(self, p):
ll_grid1 = np.zeros((self.n_data, self.n_dist_grid, self.n_dist_grid))
ll_grid2 = np.zeros((self.n_data, self.n_dist_grid, self.n_dist_grid))
for i in range(self.n_dist_grid):
for j in range(self.n_dist_grid):
terms = self.ln_likelihood_at_d1d2(p,
self.d1_grids[:,i],
self.d2_grids[:,j])
log_d_pdf = (norm.logpdf(1000 / self.d1_grids[:,i],
self._plx1, self._plx1_err) +
norm.logpdf(1000 / self.d2_grids[:,j],
self._plx2, self._plx2_err))
ll_grid1[:,i,j] = terms[0] + log_d_pdf
ll_grid2[:,i,j] = terms[1] + log_d_pdf
l_grid1 = np.exp(ll_grid1)
lls1 = np.log([simps(simps(l_grid1[n], self.d2_grids[n]),
self.d1_grids[n])
for n in range(self.n_data)])
l_grid2 = np.exp(ll_grid2)
lls2 = np.log([simps(simps(l_grid2[n], self.d2_grids[n]),
self.d1_grids[n])
for n in range(self.n_data)])
return np.logaddexp(lls1, lls2), (lls1, lls2)
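# ln_likelihood marginalizes both mixture components over the uncertain
# distances: each component is evaluated on an n_dist_grid x n_dist_grid grid
# spanning roughly +/- 3 sigma in parallax per star, weighted by the parallax
# likelihoods, integrated with Simpson's rule (inner integral over d2, outer
# over d1), and the components are then combined per pair with logaddexp.
# The per-component values are returned as blobs for the membership
# probabilities computed later in analyze_chain().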
def ln_prior(self, p):
f = p[0]
if f <= 0 or f >= 1:
return -np.inf
return 0.
def ln_posterior(self, p):
lp = self.ln_prior(p)
if not np.isfinite(lp):
return -np.inf, None
ll, blobs = self.ln_likelihood(p)
if np.any(np.logical_not(np.isfinite(ll))):
return -np.inf, None
return lp + ll.sum(), blobs
def __call__(self, p):
# print(p[0])
return self.ln_posterior(p)
def plot_posterior(mm):
# Test: plot the posterior curve
lls = []
fs = np.linspace(0.15, 0.7, 32)
for f in tqdm(fs):
ll = mm.ln_likelihood([f])[0].sum()
print(f, ll)
lls.append(ll)
lls = np.array(lls)
plt.plot(fs, np.exp(lls - lls.max()))
plt.show()
def run_emcee(model, pool, chain_file, blobs_file):
n_walkers = 28
n_steps = 1024
n_batches = 8
p0 = np.random.normal(0.5, 1E-3, size=(n_walkers, 1))
sampler = emcee.EnsembleSampler(n_walkers, 1, model, pool=pool)
for batch in range(n_batches):
print("Batch: {0}".format(batch))
pos, *_ = sampler.run_mcmc(p0, n_steps // n_batches)
p0 = pos
np.save('../data/sampler_chain{0}_{1}.npy'.format(batch, model.name),
sampler.chain)
np.save('../data/sampler_blobs{0}_{1}.npy'.format(batch, model.name),
sampler.blobs)
sampler.reset()
# Now collect all the individual files into one...
chains = []
blobs = []
for batch in range(n_batches):
chains.append(np.load('../data/sampler_chain{0}_{1}.npy'
.format(batch, model.name)))
blobs.append(np.load('../data/sampler_blobs{0}_{1}.npy'
.format(batch, model.name)))
chain = np.hstack(chains)
blobs = np.vstack(blobs)
np.save(chain_file, chain)
np.save(blobs_file, blobs)
# Now clean up / delete the files
for batch in range(n_batches):
os.remove('../data/sampler_chain{0}_{1}.npy'.format(batch, model.name))
os.remove('../data/sampler_blobs{0}_{1}.npy'.format(batch, model.name))
def analyze_chain(chain, blobs, probs_file):
# MAGIC NUMBER: index after which walkers are converged
ix = 256
trim_chain = chain[:,ix:]
trim_blobs = blobs[ix:]
# Downsample chains because correlation
flat_f = np.vstack(trim_chain[:,::8])[:,0]
med_f = np.median(flat_f)
std_f = 1.5 * np.median(np.abs(flat_f - med_f))
print('f = {0:.2f} +/- {1:.2f}'.format(med_f, std_f))
# Now we compute the per-pair probability
norm = 0.0
post_prob = np.zeros(blobs.shape[-1])
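# For each retained posterior sample the per-pair probability of belonging to
# the comoving component is exp(ll_fg - logaddexp(ll_fg, ll_bg)); the loop
# below averages this over all (step, walker) samples.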
for i in range(trim_chain.shape[1]): # steps
for j in range(trim_chain.shape[0]): # walkers
ll_fg, ll_bg = trim_blobs[i][j]
post_prob += np.exp(ll_fg - np.logaddexp(ll_fg, ll_bg))
norm += 1
post_prob /= norm
np.save(probs_file, post_prob)
if __name__ == "__main__":
import schwimmbad
from argparse import ArgumentParser
# Define parser object
parser = ArgumentParser(description="")
parser.add_argument('--mpi', action='store_true', default=False,
dest='mpi')
parser.add_argument('--sim', action='store_true', default=False,
dest='simulated_data')
parser.add_argument('--name', required=True, dest='name',
help='Name of the data - can be "apw" or "rave"')
args = parser.parse_args()
if args.mpi:
pool = MPIPool()
if not pool.is_master():
pool.wait()
sys.exit(0)
else:
pool = schwimmbad.SerialPool()
if args.simulated_data:
print("Loading simulated data")
# Load simulated data
_tbl1 = fits.getdata('../notebooks/data1.fits')
data1 = TGASData(_tbl1, rv=_tbl1['RV']*u.km/u.s,
rv_err=_tbl1['RV_err']*u.km/u.s)
_tbl2 = fits.getdata('../notebooks/data2.fits')
data2 = TGASData(_tbl2, rv=_tbl2['RV']*u.km/u.s,
rv_err=_tbl2['RV_err']*u.km/u.s)
else:
print("Loading real data")
if args.name not in ['apw', 'rave']:
raise ValueError("Invalid name '{0}'".format(args.name))
# Load real data
_tbl1 = fits.getdata('../data/tgas_{0}1.fits'.format(args.name))
data1 = TGASData(_tbl1, rv=_tbl1['RV']*u.km/u.s,
rv_err=_tbl1['RV_err']*u.km/u.s)
_tbl2 = fits.getdata('../data/tgas_{0}2.fits'.format(args.name))
data2 = TGASData(_tbl2, rv=_tbl2['RV']*u.km/u.s,
rv_err=_tbl2['RV_err']*u.km/u.s)
print("Data loaded, creating model...")
mm = MixtureModel(data1, data2, name=args.name, field_vdisp=25.*u.km/u.s)
print("Model created")
# plot_posterior(mm)
chain_file = '../data/sampler_chain_{0}.npy'.format(args.name)
blobs_file = '../data/sampler_blobs_{0}.npy'.format(args.name)
if not os.path.exists(chain_file):
print("Couldn't find cached chain file - starting sampling")
run_emcee(mm, pool, chain_file=chain_file, blobs_file=blobs_file)
pool.close()
analyze_chain(np.load(chain_file),
np.load(blobs_file),
'../data/pair_probs_{0}.npy'.format(args.name))
sys.exit(0)
|
|
import os
import sys
import re
import glob
import copy
import subprocess
"""
args:
- parallel: max number of parallel sessions mobatch will use. default=10.
- bin_path: path, if moshell/mobatch binaries are installed in a
non-standard location.
"""
class Amos:
def __init__(self, **kwargs):
self.bin_path = None
self.moshellbin = None
self.mobatchbin = None
self.parallel = 10
allowed = ('parallel', 'bin_path')
for k, v in kwargs.items():
if k not in allowed:
raise KeyError("Invalid option-key: %s" % k)
setattr(self, k, v)
if not self.moshellbin:
try:
self.moshellbin = self.__amos_location(self.bin_path)
except:
raise RuntimeError('amos or moshell program not found')
if not self.mobatchbin:
try:
self.mobatchbin = self.__amosbatch_location(self.bin_path)
except:
raise RuntimeError('amosbatch or mobatch program not found')
"""
moshell()
send amos command to node, and get results
params:
node name (or ip address)
command string
optional keyword-args (valid amos optional variables only)
returns:
tuple (return-code[0 ok|1 fail], stdout text, stderr text)
"""
def moshell(self, node, cmd, **kwargs):
opts = self.__parse_kwargs(kwargs)
return self.__amos_runner(node, cmd, opts)
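# Minimal usage sketch (node name and MO command below are hypothetical, and
# assume the moshell/amos binaries are installed and the node is reachable):
#
#   amos = Amos(parallel=5)
#   rval, out, err = amos.moshell('RBS_TEST_01', 'lt all; get 0 administrativeState')
#   if rval == 0:
#       print(out)
#   else:
#       print(err)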
"""
mobatch()
send amosbatch(mobatch) commands to nodes, and get result logs.
WARNING! mobatch commands can take a very, very long time to complete,
depending on the number of nodes and commands to be run. Commands run against
thousands of nodes may take 6-10 hours (or more) to complete!
Also, using over 30 parallel sessions is not recommended.
params:
node list (or path to existing sitefile)
command string (or path to existing mos command file)
optional keyword-args (valid amos optional variables only)
returns:
a list-of-tuples. Each result tuple contains the following:
(node-name, exit-code, path-to-logfile)
"""
def mobatch(self, nodes, cmd, **kwargs):
opts = self.__parse_kwargs(kwargs)
sitefile = None
cmdfile = None
rmv_sitefile = False
rmv_cmdfile = False
if len(nodes) == 1:
# only one node? seems odd. possibly it is a sitefile?
if os.path.isfile(nodes[0]):
sitefile = nodes[0]
# write the sitefile if required
if not sitefile:
rmv_sitefile = True
sitefile = '/tmp/pymobatch.' + str(os.getpid()) + '.sitefile'
fh = open(sitefile, 'w')
for n in nodes:
fh.write(n + "\n")
fh.close()
# write amos commands to a file
if os.path.isfile(cmd):
cmdfile = cmd
else:
rmv_cmdfile = True
cmdfile = '/tmp/pymobatch.' + str(os.getpid()) + '.mos'
fh = open(cmdfile, 'w')
atoms = cmd.split(';')
for a in atoms:
fh.write(a.strip() + "\n")
fh.close()
results = self.__amosbatch_runner(sitefile, cmdfile, opts)
if rmv_sitefile:
os.unlink(sitefile)
if rmv_cmdfile:
os.unlink(cmdfile)
return results
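# Minimal usage sketch (node names and command are hypothetical; see the
# warning above about runtimes on large node lists):
#
#   amos = Amos(parallel=10)
#   results = amos.mobatch(['RBS_TEST_01', 'RBS_TEST_02'], 'lt all; st cell')
#   for node, rval, logfile in results:
#       print(node, rval, logfile)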
"""
__amos_location()
PRIVATE
get full path to either the amos or moshell binary
params:
path to search(optional)
returns:
full path to binary | None
"""
def __amos_location(self, path):
loc = self.__find_possibles(('amos','moshell'), path)
if not loc:
raise
else:
return loc
"""
__amosbatch_location()
PRIVATE
get full path to either the amosbatch or mobatch binary
params:
path to search(optional)
returns:
full path to binary | None
"""
def __amosbatch_location(self, path):
loc = self.__find_possibles(('amosbatch','mobatch'), path)
if not loc:
raise
else:
return loc
"""
__find_possibles()
PRIVATE
return the first binary found from a list of possibles
params:
a list of binary names
a search path (optional)
returns:
full path to binary | None
"""
def __find_possibles(self, possibles, path):
if not possibles or len(possibles) < 1:
return None
if not path:
for p in possibles:
target = self.__which(p)
if target:
return target
else:
for p in possibles:
target = path + "/" + p
if os.path.isfile(target) and os.access(target, os.X_OK):
return target
return None
"""
__which()
PRIVATE
duplicates function of unix 'which' command to find a program in the path
params:
a program name
returns:
full path to program | None
"""
def __which(self, program):
fpath, fname = os.path.split(program)
if fpath:
if os.path.isfile(program) and os.access(program, os.X_OK):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
return exe_file
return None
"""
__parse_kwargs()
PRIVATE
parse any amos options that were passed in, and reject invalid options.
See Ericsson Advanced Moshell Scripting user guide for variable information.
params:
a dict
returns:
a dict
"""
def __parse_kwargs(self, kwargs):
if not kwargs:
return None
opts = copy.copy(kwargs)
valid = (
'amos_debug',
'ask_for_attribute_type',
'bldebset_confirmation',
'credential',
'commandlog_path',
'corba_class',
'csnotiflist',
'default_mom',
'del_confirmation',
'dontfollowlist',
'editor',
'fast_lh_threshold',
'fast_cab_threshold',
'ftp_port',
'followlist',
'ftp_timeout',
'http_port',
'inactivity_timeout',
'include_nonpm',
'ip_connection_timeout',
'ip_database',
'ip_inactivity_timeout',
'java_settings_high',
'java_settings_low',
'java_settings_medium',
'keepLmList',
'lt_confirmation',
'loginfo_print',
'logdir', # custom option, not E/// supported. see documentation
'muteFactor',
'nm_credential',
'node_login',
'print_lmid',
'PrintProxyLDN',
'PrintProxySilent',
'prompt_highlight',
'pm_wait',
'pm_logdir',
'sa_credential',
'sa_password',
'secure_ftp',
'secure_port',
'secure_shell',
'set_window_title',
'show_timestamp',
'telnet_port',
'transaction_timeout',
'username',
'xmlmomlist', )
for k, v in opts.items():
if k not in valid:
raise KeyError("Invalid option-key: %s" % k)
return opts
"""
__amos_runner()
PRIVATE
run a moshell/amos command subprocess against a specific node
params:
1. a node name or ipaddress
2. a command string
3. an option dict (optional)
returns:
A tuple. two elements.
(return-code(0=ok, 1=fail), stdout, stderr)
"""
def __amos_runner(self, node, cmd, opts=None):
v = None
script = [self.moshellbin]
logdir = None
if opts:
atoms = []
for k, v in opts.items():
if k == 'logdir':
logdir = v
continue
else:
atoms.append("=".join((k, str(v))))
v = "-v"
v += ",".join(atoms)
script.append(v)
if logdir:
script.append('-o')
script.append(logdir)
script.append(node)
script.append(cmd)
child = subprocess.Popen(script,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE )
output, errors = child.communicate()
return (child.returncode, output, errors)
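# For illustration (node name and paths are hypothetical), a call like
# moshell('NODE01', 'lt all', ip_database='/tmp/ipdb', logdir='/tmp/logs')
# builds an argv roughly like this (option order follows dict iteration):
#   ['<moshell path>', '-vip_database=/tmp/ipdb', '-o', '/tmp/logs', 'NODE01', 'lt all']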
"""
__amosbatch_runner()
PRIVATE
run a moshell/amos command against a several nodes in parallel.
the results for a node is the path to the logfile containing the
amos results for that node.
params:
1. a path to a sitefile
2. a command string
3. an option dict (optional)
returns:
A list of tuples:
[(node, rval, results-file), (node, rval, results-file)... ]
On error, returns an empty list
"""
def __amosbatch_runner(self, sitefile, cmdfile, opts=None):
v = None
logdir = None
script = [self.mobatchbin]
script.append('-p')
script.append(str(self.parallel))
if opts:
atoms = []
for k, v in opts.items():
if k == 'logdir':
logdir = v
continue
else:
atoms.append("=".join((k, str(v))))
v = "-v"
v += ",".join(atoms)
script.append(v)
if logdir:
script.append('-o')
script.append(logdir)
script.append(sitefile)
script.append(cmdfile)
child = subprocess.Popen(script,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE )
output, errors = child.communicate()
if child.returncode:
sys.stderr.write(errors)
return []
# find results of all the logfiles
for line in output.splitlines():
match = re.match(r'Logfiles stored in\s+(.+)', line)
if match:
return self.__amosbatch_result_parser(match.group(1))
raise RuntimeError('could not find amosbatch result path from results')
"""
__amosbatch_result_parser()
PRIVATE
Parse the directory contents of an amosbatch results dir
params:
a path to the amosbatch log dir
returns:
A list of tuples:
[(node, rval, results), (node, rval, results)... ]
"""
def __amosbatch_result_parser(self, path):
results = []
# find results text log, and pull out any nodes that failed to connect
rlogs = glob.glob(path + '/*result.txt')
if not rlogs:
raise RuntimeError('amosbatch results file not found in ' + path)
rlog = rlogs[0]
nocontact = self.__amosbatch_nocontact_nodes(rlog)
for n in nocontact:
results.append((n, 1, 'no contact'))
# store the path for each node output log
logs = glob.glob(path + '/*log')
for log in logs:
nodename = None
match = re.match(r'^.+/(\S+)\.log', log)
if match:
node = match.group(1)
if node in nocontact:
continue
results.append((node, 0, log))
return results
"""
__amosbatch_nocontact_nodes()
PRIVATE
Parse amosbatch results.txt log for any nodes that could not be reached
params:
a path to the amosbatch results text file
returns:
A list of node names.
An empty list is returned if nothing is found
"""
def __amosbatch_nocontact_nodes(self, fname):
results = []
"""
Look for lines like this:
OK 0m13s PSLEeNB04
OK 0m13s PSLEeNB02
no contact 0m15s PSLEeNB01
"""
fh = open(fname, 'r+')
for line in fh.readlines():
match = re.match(r'^\s*no contact\s+\S+\s+(\S+)\s*$', line)
if match:
results.append(match.group(1))
fh.close()
return results
|
|
# coding=utf-8
"""
test
"""
import logging
import math
from struct import unpack, pack, calcsize
from pycolo import PROTOCOL_VERSION as v
from pycolo.codes import options as refOptions, opt_i, msgType
from pycolo.codes import codes as refCodes
from pycolo.codes import msgType as refType
from pycolo.request import request
class Message:
"""
The Class Message provides the object representation of a CoAP message.
The class is responsible for parsing and serializing the objects from/to
byte arrays.
:param VERSION_BITS:
number of bits used for the encoding of the CoAP version field
:param TYPE_BITS:
number of bits used for the encoding of the message type field
:param OPTION_COUNT_BITS:
number of bits used for the encoding of the option count field
:param CODE_BITS:
number of bits used for the encoding of the request method/response code field
:param ID_BITS:
number of bits used for the encoding of the transaction ID
:param OPTION_DELTA_BITS:
number of bits used for the encoding of the option delta
:param OPTION_LENGTH_BASE_BITS:
number of bits used for the encoding of the base option length field
if all bits in this field are set to one, the extended option length
field is additionally used to encode the option length
:param OPTION_LENGTH_EXTENDED_BITS:
number of bits used for the encoding of the extended option length field
this field is used when all bits in the base option length field
are set to one
:param MAX_OPTION_DELTA:
maximum option delta that can be encoded without using fencepost
options
:param MAX_OPTION_LENGTH_BASE:
maximum option length that can be encoded using the base option
length field only
:param code: Message code
"""
VERSION_BITS = 2
TYPE_BITS = 2
OPTION_COUNT_BITS = 4
CODE_BITS = 8
ID_BITS = 16
OPTION_DELTA_BITS = 4
OPTION_LENGTH_BASE_BITS = 4
OPTION_LENGTH_EXTENDED_BITS = 8
MAX_OPTION_DELTA = (1 << OPTION_DELTA_BITS) - 1
MAX_OPTION_LENGTH_BASE = (1 << OPTION_LENGTH_BASE_BITS) - 2
# The receiver for this message.
peerAddress = None
# URI
uri = None
# indicates if the message requires a token
# this is required to handle implicit empty tokens (default value)
requiresToken = True
requiresBlockwise = False
def __init__(self,
msg_type=refType.con,
status_code=refCodes.empty,
payload=None,
peerAddress=None,
timestamp=0,
message_id=None,
options=None,
version=1):
"""
Constructor for a new CoAP message
:param msg_type: the type of the CoAP message (CON, NON, ACK or RST)
:param status_code: the code of the CoAP message (see class CodeRegistry)
:param payload: the payload of the CoAP message
:param peerAddress: the address of the peer this message is exchanged with
:param message_id: the 16-bit message ID of the CoAP message
:param options: the dict of header options set for the message
:param version: the CoAP version used. For now, this must be set to 1.
"""
self.msg_type = msg_type
self.version = version
self.options = options if options is not None else {}
self.status_code = status_code
self.message_id = message_id
self.payload = payload
self.peerAddress= peerAddress
# self.retransmissioned = False
# self.retransmissioned = 0
# # A time stamp associated with the message.
# self.timestamp = timestamp # TODO: Attention aux initialisations.
def is_reply(self):
"""
:return: True if this message is an ACK or an RST.
"""
return self.msg_type == refType.ack or self.msg_type == refType.rst
def is_emptyACK(self):
"""
:return: True if this message is an ACK carrying the empty code.
"""
return self.msg_type == refType.ack and self.status_code == refCodes.empty
def new_accept(self):
"""
Creates a new ACK message with peer address and MID matching to this message.
:return: A new ACK message
"""
return Message(
peerAddress=self.peerAddress,
msg_type=refType.ack,
status_code=refCodes.empty,
message_id=self.message_id)
def new_reject(self):
"""
Creates a new RST message with peer address and MID matching to this
message.
:return: A new RST message
"""
return Message(
msg_type=refType.rst,
status_code=refCodes.empty,
message_id=self.message_id,
peerAddress=self.peerAddress)
def new_reply(self, ack):
"""
This method creates a matching reply for requests. It is addressed to
the peer and has the same message ID and token.
:param ack: set True to reply with an ACK, otherwise an RST is sent
"""
reply = Message(
message_id=self.message_id,
status_code=refCodes.empty,
peerAddress=self.peerAddress
)
if self.msg_type == refType.con:
reply.msg_type = refType.ack if ack else refType.rst
else:
reply.msg_type = refType.non
return reply
def _encode_header(self):
header_format = "!BBH"
token_format = ""
if hasattr(self, "token"):
tkl, token = len(self.token), self.token
token_format = tkl * "B"
else:
tkl, token = 0, b""
msg_type = self.msg_type if hasattr(self, "msg_type") else 0
version_msgType_tkl = v << 6 & 192 | msg_type << 4 & 48 | tkl & 15
header = [pack(header_format, version_msgType_tkl, self.status_code, self.message_id)]
if token_format:
header.append(pack("!" + token_format, *bytearray(token)))
return b"".join(header)
def _encode_options(self):
"""
This function is used to dump byte array representation of
a options dictionary.
:return: Encoded bytes array representing options
"""
lastOptionNumber = 0
list_encoded_options = []
for option_number in sorted(self.options):
delta = self.options[option_number]["num"] - lastOptionNumber
list_encoded_options.append(
self.options[option_number]["encoder"](delta, self.options[option_number]))
lastOptionNumber = self.options[option_number]["num"]
return b"".join(list_encoded_options)
def _encode_payload(self):
if hasattr(self, "payload"):
if hasattr(self.payload, "encode"):
return b"\xff" + self.payload.encode("utf-8")
else:
return b""
else:
return b""
def to_raw(self):
"""
Encodes the message into its raw binary representation
as specified in the IETF CoAP draft
:return: A byte array containing the CoAP encoding of the message
"""
return b"".join([self._encode_header(), self._encode_options(), self._encode_payload()])
def from_raw(self, raw):
"""
Decodes the message from its binary representation
:param raw: CoAP binary form message
"""
PAYLOAD_MARKER = b"\xff"
pointer, last_option = 0, 0
# Header decoding
ver_t_tkl_pattern = "!B"
ver_t_tkl = unpack(ver_t_tkl_pattern, raw[pointer: pointer + calcsize(ver_t_tkl_pattern)])
ver_t_tkl = ver_t_tkl[0]
self.version = (ver_t_tkl & 192) >> 6
self.msg_type = (ver_t_tkl & 48) >> 4
tkl = ver_t_tkl & 15
pointer += calcsize(ver_t_tkl_pattern)
code_pattern = "!B"
code = unpack(code_pattern, raw[pointer: pointer + calcsize(code_pattern)])
self.status_code = code[0]
pointer += calcsize(code_pattern)
message_id_pattern = "!H"
message_id = unpack(message_id_pattern, raw[pointer: pointer + calcsize(message_id_pattern)])
self.message_id = message_id[0]
pointer += calcsize(message_id_pattern)
# Token decoding
if tkl:
token_pattern = "!" + (tkl * "B")
token = unpack(token_pattern, raw[pointer: pointer + calcsize(token_pattern)])
self.token = bytes(bytearray(token))
pointer += calcsize(token_pattern)
# Options decoding
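# Option format reminder (RFC 7252): each option starts with a byte whose high
# nibble is the delta (difference to the previous option number) and whose low
# nibble is the value length. A nibble of 13 means an extra byte follows
# carrying (value - 13), 14 means an extra 16-bit integer follows carrying
# (value - 269), and 15 is reserved (payload marker / message format error).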
payload_marker_pattern = "!B"
while raw[pointer: pointer + calcsize(payload_marker_pattern)] != PAYLOAD_MARKER and len(raw[pointer:]):
common_option_pattern = "!B"
option_header = unpack(common_option_pattern, raw[pointer: pointer + calcsize(common_option_pattern)])[0]
raw_delta, raw_length = (option_header & 240) >> 4, option_header & 15
pointer += calcsize(common_option_pattern)
# Delta decoding
if 0 <= raw_delta <= 12:
option_num = raw_delta + last_option
last_option = option_num
elif raw_delta == 13:
delta_pattern_1byte = "!B"
option_num = last_option + unpack(delta_pattern_1byte, raw[pointer:pointer + calcsize(delta_pattern_1byte)])[0] + 13
last_option = option_num
pointer += calcsize(delta_pattern_1byte)
elif raw_delta == 14:
delta_pattern_2bytes = "!H"
option_num = last_option + unpack(delta_pattern_2bytes, raw[pointer:pointer + calcsize(delta_pattern_2bytes)])[0] + 269
last_option = option_num
pointer += calcsize(delta_pattern_2bytes)
elif raw_delta == 15:
logging.error("Message delta encoding : 15. Reserved for future use.")
return None
# Length decoding
if 0 <= raw_length <= 12:
length = raw_length
elif raw_length == 13:
length_pattern_1byte = "!B"
length = unpack(length_pattern_1byte, raw[pointer:pointer + calcsize(length_pattern_1byte)])[0] + 13
pointer += calcsize(length_pattern_1byte)
elif raw_length == 14:
length_pattern_2bytes = "!H"
length = unpack(length_pattern_2bytes, raw[pointer:pointer + calcsize(length_pattern_2bytes)])[0] + 269
pointer += calcsize(length_pattern_2bytes)
elif raw_length == 15:
logging.error("Message Length encoding : 15. Reserved for future use.")
return None
if length not in opt_i[option_num]["range"]:
logging.error("Option too big. Encoding error")
return None
if not opt_i[option_num]["repeat"]:
self.options[opt_i[option_num]] = opt_i[option_num]["decoder"](raw[pointer:pointer + length])
else:
self.options.setdefault(opt_i[option_num], [])\
.append(opt_i[option_num]["decoder"](raw[pointer:pointer + length]))
pointer += length
# Payload decoding
if len(raw[pointer:]) and raw[pointer] == b"\xff"[0]:
self.payload = raw[pointer + 1:].decode("utf-8")
else:
self.payload = None
return self
def send(self):
"""
:raise:
"""
raise NotImplementedError
# try:
# Communicator.getInstance().sendMessage(self)
# except IOException as e:
# logging.severe("Could not respond to message: %s%s", key(), e.getMessage())
def accept(self):
"""
Accepts this message with an empty ACK. Use this method only at
application level, as the ACK will be sent through the whole stack.
Within the stack use {@link #newAccept()} and send it through the
corresponding {@link UpperLayer#sendMessageOverLowerLayer(Message)}.
"""
raise NotImplementedError
# if self.isConfirmable():
# Message ack = newAccept()
# ack.send()
def key(self):
"""
Returns a string that is assumed to uniquely identify a message.
:return: A string identifying the message
"""
return "%s|%d|%s" % (
self.peerAddress if self.peerAddress else "local",
self.message_id,
self.msg_type)
def transactionKey(self):
"""
Returns a string that is assumed to uniquely identify a transaction.
A transaction matches two buddies that have the same message ID between
one this and the peer endpoint.
:return: A string identifying the transaction
"""
return "%s|%d" % (
self.peerAddress if self.peerAddress else "local",
self.message_id
)
def sequenceKey(self):
"""
Returns a string that is assumed to uniquely identify a transfer. A
transfer exceeds matching message IDs, as multiple transactions are
involved, e.g., for separate responses or blockwise transfers.
The transfer matching is done using the token (including the empty
default token).
:return: A string identifying the transfer
"""
return "%s#%s" % (
self.peerAddress if self.peerAddress else "local",
self.token)
def __str__(self):
header = "==[ CoAP Message ]================================="
info = {
"address": self.peerAddress,
"message ID": self.message_id,
"msg type": self.msg_type,
"status code": self.status_code,
}
# options pprint.pformat(options) <= from pprint import pformat
# Known options will be displayed with their common name attributes.
# for opt in self.options:
# logging.info("%s: %s (%d Bytes)", opt.name, str(opt), len(opt))
#
# logging.info("Payload: %d Bytes", self.payloadSize)
# if payload and isPrintable(self.contentType):
# logging.info(getPayloadString())
footer = "======================================================="
return "".join([header, "\n", str(info), "\n", footer])
|
|
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tool to restart erroneously downed virtual machines.
This program and set of classes implement a watchdog to restart
virtual machines in a Ganeti cluster that have crashed or been killed
by a node reboot. Run from cron or similar.
"""
import os
import os.path
import sys
import signal
import time
import logging
import errno
from optparse import OptionParser
from ganeti import utils
from ganeti import wconfd
from ganeti import constants
from ganeti import compat
from ganeti import errors
from ganeti import opcodes
from ganeti import cli
import ganeti.rpc.errors as rpcerr
from ganeti import rapi
from ganeti import netutils
from ganeti import qlang
from ganeti import ssconf
from ganeti import ht
from ganeti import pathutils
import ganeti.rapi.client # pylint: disable=W0611
from ganeti.rapi.client import UsesRapiClient
from ganeti.watcher import nodemaint
from ganeti.watcher import state
MAXTRIES = 5
BAD_STATES = compat.UniqueFrozenset([
constants.INSTST_ERRORDOWN,
])
HELPLESS_STATES = compat.UniqueFrozenset([
constants.INSTST_NODEDOWN,
constants.INSTST_NODEOFFLINE,
])
NOTICE = "NOTICE"
ERROR = "ERROR"
#: Number of seconds to wait between starting child processes for node groups
CHILD_PROCESS_DELAY = 1.0
#: How many seconds to wait for instance status file lock
INSTANCE_STATUS_LOCK_TIMEOUT = 10.0
class NotMasterError(errors.GenericError):
"""Exception raised when this host is not the master."""
def ShouldPause():
"""Check whether we should pause.
"""
return bool(utils.ReadWatcherPauseFile(pathutils.WATCHER_PAUSEFILE))
def StartNodeDaemons():
"""Start all the daemons that should be running on all nodes.
"""
# on master or not, try to start the node daemon
utils.EnsureDaemon(constants.NODED)
# start confd as well. On non candidates it will be in disabled mode.
utils.EnsureDaemon(constants.CONFD)
# start mond as well: all nodes need monitoring
if constants.ENABLE_MOND:
utils.EnsureDaemon(constants.MOND)
# start kvmd, which will quit if not needed to run
utils.EnsureDaemon(constants.KVMD)
def RunWatcherHooks():
"""Run the watcher hooks.
"""
hooks_dir = utils.PathJoin(pathutils.HOOKS_BASE_DIR,
constants.HOOKS_NAME_WATCHER)
if not os.path.isdir(hooks_dir):
return
try:
results = utils.RunParts(hooks_dir)
except Exception, err: # pylint: disable=W0703
logging.exception("RunParts %s failed: %s", hooks_dir, err)
return
for (relname, status, runresult) in results:
if status == constants.RUNPARTS_SKIP:
logging.debug("Watcher hook %s: skipped", relname)
elif status == constants.RUNPARTS_ERR:
logging.warning("Watcher hook %s: error (%s)", relname, runresult)
elif status == constants.RUNPARTS_RUN:
if runresult.failed:
logging.warning("Watcher hook %s: failed (exit: %d) (output: %s)",
relname, runresult.exit_code, runresult.output)
else:
logging.debug("Watcher hook %s: success (output: %s)", relname,
runresult.output)
else:
raise errors.ProgrammerError("Unknown status %s returned by RunParts",
status)
class Instance(object):
"""Abstraction for a Virtual Machine instance.
"""
def __init__(self, name, status, config_state, config_state_source,
disks_active, snodes, disk_template):
self.name = name
self.status = status
self.config_state = config_state
self.config_state_source = config_state_source
self.disks_active = disks_active
self.snodes = snodes
self.disk_template = disk_template
def Restart(self, cl):
"""Encapsulates the start of an instance.
"""
op = opcodes.OpInstanceStartup(instance_name=self.name, force=False)
op.reason = [(constants.OPCODE_REASON_SRC_WATCHER,
"Restarting instance %s" % self.name,
utils.EpochNano())]
cli.SubmitOpCode(op, cl=cl)
def ActivateDisks(self, cl):
"""Encapsulates the activation of all disks of an instance.
"""
op = opcodes.OpInstanceActivateDisks(instance_name=self.name)
op.reason = [(constants.OPCODE_REASON_SRC_WATCHER,
"Activating disks for instance %s" % self.name,
utils.EpochNano())]
cli.SubmitOpCode(op, cl=cl)
def NeedsCleanup(self):
"""Determines whether the instance needs cleanup.
Determines whether the instance needs cleanup after having been
shutdown by the user.
@rtype: bool
@return: True if the instance needs cleanup, False otherwise.
"""
return self.status == constants.INSTST_USERDOWN and \
self.config_state != constants.ADMINST_DOWN
class Node(object):
"""Data container representing cluster node.
"""
def __init__(self, name, bootid, offline, secondaries):
"""Initializes this class.
"""
self.name = name
self.bootid = bootid
self.offline = offline
self.secondaries = secondaries
def _CleanupInstance(cl, notepad, inst, locks):
n = notepad.NumberOfCleanupAttempts(inst.name)
if inst.name in locks:
logging.info("Not cleaning up instance '%s', instance is locked",
inst.name)
return
if n > MAXTRIES:
logging.warning("Not cleaning up instance '%s', retries exhausted",
inst.name)
return
logging.info("Instance '%s' was shutdown by the user, cleaning up instance",
inst.name)
op = opcodes.OpInstanceShutdown(instance_name=inst.name,
admin_state_source=constants.USER_SOURCE)
op.reason = [(constants.OPCODE_REASON_SRC_WATCHER,
"Cleaning up instance %s" % inst.name,
utils.EpochNano())]
try:
cli.SubmitOpCode(op, cl=cl)
if notepad.NumberOfCleanupAttempts(inst.name):
notepad.RemoveInstance(inst.name)
except Exception: # pylint: disable=W0703
logging.exception("Error while cleaning up instance '%s'", inst.name)
notepad.RecordCleanupAttempt(inst.name)
def _CheckInstances(cl, notepad, instances, locks):
"""Make a pass over the list of instances, restarting downed ones.
"""
notepad.MaintainInstanceList(instances.keys())
started = set()
for inst in instances.values():
if inst.NeedsCleanup():
_CleanupInstance(cl, notepad, inst, locks)
elif inst.status in BAD_STATES:
n = notepad.NumberOfRestartAttempts(inst.name)
if n > MAXTRIES:
logging.warning("Not restarting instance '%s', retries exhausted",
inst.name)
continue
if n == MAXTRIES:
notepad.RecordRestartAttempt(inst.name)
logging.error("Could not restart instance '%s' after %s attempts,"
" giving up", inst.name, MAXTRIES)
continue
try:
logging.info("Restarting instance '%s' (attempt #%s)",
inst.name, n + 1)
inst.Restart(cl)
except Exception: # pylint: disable=W0703
logging.exception("Error while restarting instance '%s'", inst.name)
else:
started.add(inst.name)
notepad.RecordRestartAttempt(inst.name)
else:
if notepad.NumberOfRestartAttempts(inst.name):
notepad.RemoveInstance(inst.name)
if inst.status not in HELPLESS_STATES:
logging.info("Restart of instance '%s' succeeded", inst.name)
return started
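# Restart policy, for reference: instances in BAD_STATES get their restart
# attempts recorded in the watcher state file; the attempt that reaches
# MAXTRIES logs a final "giving up" error, anything beyond that only logs a
# "retries exhausted" warning, and instances that come back healthy have
# their counter cleared via RemoveInstance.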
def _CheckDisks(cl, notepad, nodes, instances, started):
"""Check all nodes for restarted ones.
"""
check_nodes = []
for node in nodes.values():
old = notepad.GetNodeBootID(node.name)
if not node.bootid:
# Bad node, not returning a boot id
if not node.offline:
logging.debug("Node '%s' missing boot ID, skipping secondary checks",
node.name)
continue
if old != node.bootid:
# Node's boot ID has changed, probably through a reboot
check_nodes.append(node)
if check_nodes:
# Activate disks for all instances with any of the checked nodes as a
# secondary node.
for node in check_nodes:
for instance_name in node.secondaries:
try:
inst = instances[instance_name]
except KeyError:
logging.info("Can't find instance '%s', maybe it was ignored",
instance_name)
continue
if not inst.disks_active:
logging.info("Skipping disk activation for instance with not"
" activated disks '%s'", inst.name)
continue
if inst.name in started:
# we already tried to start the instance, which should have
# activated its drives (if they can be at all)
logging.debug("Skipping disk activation for instance '%s' as"
" it was already started", inst.name)
continue
try:
logging.info("Activating disks for instance '%s'", inst.name)
inst.ActivateDisks(cl)
except Exception: # pylint: disable=W0703
logging.exception("Error while activating disks for instance '%s'",
inst.name)
# Keep changed boot IDs
for node in check_nodes:
notepad.SetNodeBootID(node.name, node.bootid)
def _CheckForOfflineNodes(nodes, instance):
"""Checks if given instances has any secondary in offline status.
@param instance: The instance object
@return: True if any of the secondary is offline, False otherwise
"""
return compat.any(nodes[node_name].offline for node_name in instance.snodes)
def _GetPendingVerifyDisks(cl, uuid):
"""Checks if there are any currently running or pending group verify jobs and
if so, returns their id.
"""
qfilter = qlang.MakeSimpleFilter("status",
frozenset([constants.JOB_STATUS_RUNNING,
constants.JOB_STATUS_QUEUED,
constants.JOB_STATUS_WAITING]))
qresult = cl.Query(constants.QR_JOB, ["id", "summary"], qfilter)
ids = [jobid for ((_, jobid), (_, (job, ))) in qresult.data
if job == ("GROUP_VERIFY_DISKS(%s)" % uuid)]
return ids
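# Note on the query result shape used above: each row of qresult.data holds a
# (status, value) pair per requested field, hence the destructuring
# ((_, jobid), (_, (job, ))) that drops the per-field status flags and unpacks
# the one-element summary list.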
def _VerifyDisks(cl, uuid, nodes, instances, is_strict):
"""Run a per-group "gnt-cluster verify-disks".
"""
existing_jobs = _GetPendingVerifyDisks(cl, uuid)
if existing_jobs:
logging.info("There are verify disks jobs already pending (%s), skipping "
"VerifyDisks step for %s.",
utils.CommaJoin(existing_jobs), uuid)
return
op = opcodes.OpGroupVerifyDisks(
group_name=uuid, priority=constants.OP_PRIO_LOW, is_strict=is_strict)
op.reason = [(constants.OPCODE_REASON_SRC_WATCHER,
"Verifying disks of group %s" % uuid,
utils.EpochNano())]
job_id = cl.SubmitJob([op])
((_, offline_disk_instances, _), ) = \
cli.PollJob(job_id, cl=cl, feedback_fn=logging.debug)
cl.ArchiveJob(job_id)
if not offline_disk_instances:
# nothing to do
logging.debug("Verify-disks reported no offline disks, nothing to do")
return
logging.debug("Will activate disks for instance(s) %s",
utils.CommaJoin(offline_disk_instances))
# We submit only one job, and wait for it. Not optimal, but this puts less
# load on the job queue.
job = []
for name in offline_disk_instances:
try:
inst = instances[name]
except KeyError:
logging.info("Can't find instance '%s', maybe it was ignored", name)
continue
if inst.status in HELPLESS_STATES or _CheckForOfflineNodes(nodes, inst):
logging.info("Skipping instance '%s' because it is in a helpless state"
" or has offline secondaries", name)
continue
op = opcodes.OpInstanceActivateDisks(instance_name=name)
op.reason = [(constants.OPCODE_REASON_SRC_WATCHER,
"Activating disks for instance %s" % name,
utils.EpochNano())]
job.append(op)
if job:
job_id = cli.SendJob(job, cl=cl)
try:
cli.PollJob(job_id, cl=cl, feedback_fn=logging.debug)
except Exception: # pylint: disable=W0703
logging.exception("Error while activating disks")
def IsRapiResponding(hostname):
"""Connects to RAPI port and does a simple test.
Connects to RAPI port of hostname and does a simple test. At this time, the
test is GetVersion.
If RAPI responds with error code "401 Unauthorized", the test is successful,
because the aim of this function is to assess whether RAPI is responding, not
if it is accessible.
@type hostname: string
@param hostname: hostname of the node to connect to.
@rtype: bool
@return: Whether RAPI is working properly
"""
curl_config = rapi.client.GenericCurlConfig()
rapi_client = rapi.client.GanetiRapiClient(hostname,
curl_config_fn=curl_config)
try:
master_version = rapi_client.GetVersion()
except rapi.client.CertificateError, err:
logging.warning("RAPI certificate error: %s", err)
return False
except rapi.client.GanetiApiError, err:
if err.code == 401:
# Unauthorized, but RAPI is alive and responding
return True
else:
logging.warning("RAPI error: %s", err)
return False
else:
logging.debug("Reported RAPI version %s", master_version)
return master_version == constants.RAPI_VERSION
def IsWconfdResponding():
"""Probes an echo RPC to WConfD.
"""
probe_string = "ganeti watcher probe %d" % time.time()
try:
result = wconfd.Client().Echo(probe_string)
except Exception, err: # pylint: disable=W0703
logging.warning("WConfd connection error: %s", err)
return False
if result != probe_string:
logging.warning("WConfd echo('%s') returned '%s'", probe_string, result)
return False
return True
def ParseOptions():
"""Parse the command line options.
@return: (options, args) as from OptionParser.parse_args()
"""
parser = OptionParser(description="Ganeti cluster watcher",
usage="%prog [-d]",
version="%%prog (ganeti) %s" %
constants.RELEASE_VERSION)
parser.add_option(cli.DEBUG_OPT)
parser.add_option(cli.NODEGROUP_OPT)
parser.add_option("-A", "--job-age", dest="job_age", default=6 * 3600,
help="Autoarchive jobs older than this age (default"
" 6 hours)")
parser.add_option("--ignore-pause", dest="ignore_pause", default=False,
action="store_true", help="Ignore cluster pause setting")
parser.add_option("--wait-children", dest="wait_children",
action="store_true", help="Wait for child processes")
parser.add_option("--no-wait-children", dest="wait_children",
action="store_false",
help="Don't wait for child processes")
parser.add_option("--no-verify-disks", dest="no_verify_disks", default=False,
action="store_true", help="Do not verify disk status")
parser.add_option("--no-strict", dest="no_strict",
default=False, action="store_true",
help="Do not run group verify in strict mode")
parser.add_option("--rapi-ip", dest="rapi_ip",
default=constants.IP4_ADDRESS_LOCALHOST,
help="Use this IP to talk to RAPI.")
# See optparse documentation for why default values are not set by options
parser.set_defaults(wait_children=True)
options, args = parser.parse_args()
options.job_age = cli.ParseTimespec(options.job_age)
if args:
parser.error("No arguments expected")
return (options, args)
def _WriteInstanceStatus(filename, data):
"""Writes the per-group instance status file.
The entries are sorted.
@type filename: string
@param filename: Path to instance status file
@type data: list of tuple; (instance name as string, status as string)
@param data: Instance name and status
"""
logging.debug("Updating instance status file '%s' with %s instances",
filename, len(data))
utils.WriteFile(filename,
data="\n".join("%s %s" % (n, s) for (n, s) in sorted(data)))
def _UpdateInstanceStatus(filename, instances):
"""Writes an instance status file from L{Instance} objects.
@type filename: string
@param filename: Path to status file
@type instances: list of L{Instance}
"""
_WriteInstanceStatus(filename, [(inst.name, inst.status)
for inst in instances])
def _ReadInstanceStatus(filename):
"""Reads an instance status file.
@type filename: string
@param filename: Path to status file
@rtype: tuple; (None or number, list of lists containing instance name and
status)
@return: File's mtime and instance status contained in the file; mtime is
C{None} if file can't be read
"""
logging.debug("Reading per-group instance status from '%s'", filename)
statcb = utils.FileStatHelper()
try:
content = utils.ReadFile(filename, preread=statcb)
except EnvironmentError, err:
if err.errno == errno.ENOENT:
logging.error("Can't read '%s', does not exist (yet)", filename)
else:
logging.exception("Unable to read '%s', ignoring", filename)
return (None, None)
else:
return (statcb.st.st_mtime, [line.split(None, 1)
for line in content.splitlines()])
def _MergeInstanceStatus(filename, pergroup_filename, groups):
"""Merges all per-group instance status files into a global one.
@type filename: string
@param filename: Path to global instance status file
@type pergroup_filename: string
@param pergroup_filename: Path to per-group status files, must contain "%s"
to be replaced with group UUID
@type groups: sequence
@param groups: UUIDs of known groups
"""
# Lock global status file in exclusive mode
lock = utils.FileLock.Open(filename)
try:
lock.Exclusive(blocking=True, timeout=INSTANCE_STATUS_LOCK_TIMEOUT)
except errors.LockError, err:
# All per-group processes will lock and update the file. None of them
# should take longer than 10 seconds (the value of
# INSTANCE_STATUS_LOCK_TIMEOUT).
logging.error("Can't acquire lock on instance status file '%s', not"
" updating: %s", filename, err)
return
logging.debug("Acquired exclusive lock on '%s'", filename)
data = {}
# Load instance status from all groups
for group_uuid in groups:
(mtime, instdata) = _ReadInstanceStatus(pergroup_filename % group_uuid)
if mtime is not None:
for (instance_name, status) in instdata:
data.setdefault(instance_name, []).append((mtime, status))
# Select last update based on file mtime
inststatus = [(instance_name, sorted(status, reverse=True)[0][1])
for (instance_name, status) in data.items()]
# Write the global status file. Don't touch file after it's been
# updated--there is no lock anymore.
_WriteInstanceStatus(filename, inststatus)
def GetLuxiClient(try_restart):
"""Tries to connect to the luxi daemon.
@type try_restart: bool
@param try_restart: Whether to attempt to restart the master daemon
"""
try:
return cli.GetClient()
except errors.OpPrereqError, err:
# this is, from cli.GetClient, a not-master case
raise NotMasterError("Not on master node (%s)" % err)
except (rpcerr.NoMasterError, rpcerr.TimeoutError), err:
if not try_restart:
raise
logging.warning("Luxi daemon seems to be down (%s), trying to restart",
err)
if not utils.EnsureDaemon(constants.LUXID):
raise errors.GenericError("Can't start the master daemon")
# Retry the connection
return cli.GetClient()
def _StartGroupChildren(cl, wait):
"""Starts a new instance of the watcher for every node group.
"""
assert not compat.any(arg.startswith(cli.NODEGROUP_OPT_NAME)
for arg in sys.argv)
result = cl.QueryGroups([], ["name", "uuid"], False)
children = []
for (idx, (name, uuid)) in enumerate(result):
if idx > 0:
# Let's not kill the system
time.sleep(CHILD_PROCESS_DELAY)
logging.debug("Spawning child for group %r (%s).", name, uuid)
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
try:
pid = os.fork()
except OSError:
logging.exception("Failed to fork for group %r (%s)", name, uuid)
if pid == 0:
(options, _) = ParseOptions()
options.nodegroup = uuid
_GroupWatcher(options)
return
else:
logging.debug("Started with PID %s", pid)
children.append(pid)
if wait:
for child in children:
logging.debug("Waiting for child PID %s", child)
try:
result = utils.RetryOnSignal(os.waitpid, child, 0)
except EnvironmentError, err:
result = str(err)
logging.debug("Child PID %s exited with status %s", child, result)
def _ArchiveJobs(cl, age):
"""Archives old jobs.
"""
(arch_count, left_count) = cl.AutoArchiveJobs(age)
logging.debug("Archived %s jobs, left %s", arch_count, left_count)
def _CheckMaster(cl):
"""Ensures current host is master node.
"""
(master, ) = cl.QueryConfigValues(["master_node"])
if master != netutils.Hostname.GetSysName():
raise NotMasterError("This is not the master node")
@UsesRapiClient
def _GlobalWatcher(opts):
"""Main function for global watcher.
At the end child processes are spawned for every node group.
"""
StartNodeDaemons()
RunWatcherHooks()
# Run node maintenance in all cases, even if master, so that old masters can
# be properly cleaned up
if nodemaint.NodeMaintenance.ShouldRun(): # pylint: disable=E0602
nodemaint.NodeMaintenance().Exec() # pylint: disable=E0602
try:
client = GetLuxiClient(True)
except NotMasterError:
# Don't proceed on non-master nodes
return constants.EXIT_SUCCESS
# we are on master now
utils.EnsureDaemon(constants.RAPI)
utils.EnsureDaemon(constants.WCONFD)
utils.EnsureDaemon(constants.MAINTD)
# If RAPI isn't responding to queries, try one restart
logging.debug("Attempting to talk to remote API on %s",
opts.rapi_ip)
if not IsRapiResponding(opts.rapi_ip):
logging.warning("Couldn't get answer from remote API, restaring daemon")
utils.StopDaemon(constants.RAPI)
utils.EnsureDaemon(constants.RAPI)
logging.debug("Second attempt to talk to remote API")
if not IsRapiResponding(opts.rapi_ip):
logging.fatal("RAPI is not responding")
logging.debug("Successfully talked to remote API")
# If WConfD isn't responding to queries, try one restart
logging.debug("Attempting to talk to WConfD")
if not IsWconfdResponding():
logging.warning("WConfD not responsive, restarting daemon")
utils.StopDaemon(constants.WCONFD)
utils.EnsureDaemon(constants.WCONFD)
logging.debug("Second attempt to talk to WConfD")
if not IsWconfdResponding():
logging.fatal("WConfD is not responding")
_CheckMaster(client)
_ArchiveJobs(client, opts.job_age)
# Spawn child processes for all node groups
_StartGroupChildren(client, opts.wait_children)
return constants.EXIT_SUCCESS
def _GetGroupData(qcl, uuid):
"""Retrieves instances and nodes per node group.
"""
locks = qcl.Query(constants.QR_LOCK, ["name", "mode"], None)
prefix = "instance/"
prefix_len = len(prefix)
locked_instances = set()
for [[_, name], [_, lock]] in locks.data:
if name.startswith(prefix) and lock:
locked_instances.add(name[prefix_len:])
queries = [
(constants.QR_INSTANCE,
["name", "status", "admin_state", "admin_state_source", "disks_active",
"snodes", "pnode.group.uuid", "snodes.group.uuid", "disk_template"],
[qlang.OP_EQUAL, "pnode.group.uuid", uuid]),
(constants.QR_NODE,
["name", "bootid", "offline"],
[qlang.OP_EQUAL, "group.uuid", uuid]),
]
results_data = [
qcl.Query(what, field, qfilter).data
for (what, field, qfilter) in queries
]
# Ensure results are tuples with two values
assert compat.all(
ht.TListOf(ht.TListOf(ht.TIsLength(2)))(d) for d in results_data)
# Extract values ignoring result status
(raw_instances, raw_nodes) = [[map(compat.snd, values)
for values in res]
for res in results_data]
secondaries = {}
instances = []
# Load all instances
for (name, status, config_state, config_state_source, disks_active, snodes,
pnode_group_uuid, snodes_group_uuid, disk_template) in raw_instances:
if snodes and set([pnode_group_uuid]) != set(snodes_group_uuid):
logging.error("Ignoring split instance '%s', primary group %s, secondary"
" groups %s", name, pnode_group_uuid,
utils.CommaJoin(snodes_group_uuid))
else:
instances.append(Instance(name, status, config_state, config_state_source,
disks_active, snodes, disk_template))
for node in snodes:
secondaries.setdefault(node, set()).add(name)
# Load all nodes
nodes = [Node(name, bootid, offline, secondaries.get(name, set()))
for (name, bootid, offline) in raw_nodes]
return (dict((node.name, node) for node in nodes),
dict((inst.name, inst) for inst in instances),
locked_instances)
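# A hedged sketch (not part of the original module) of the shapes returned by
# _GetGroupData above: two dicts keyed by name plus the set of locked instance
# names, matching how _GroupWatcher unpacks the result.
#
#   (nodes, instances, locked) = _GetGroupData(qcl, group_uuid)
#   # nodes     -> {"node1.example.com": Node(name, bootid, offline, secondaries)}
#   # instances -> {"inst1.example.com": Instance(name, status, ...)}
#   # locked    -> set(["inst1.example.com"])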
def _LoadKnownGroups():
"""Returns a list of all node groups known by L{ssconf}.
"""
groups = ssconf.SimpleStore().GetNodegroupList()
result = list(line.split(None, 1)[0] for line in groups
if line.strip())
if not compat.all(utils.UUID_RE.match(r) for r in result):
raise errors.GenericError("Ssconf contains invalid group UUID")
return result
def _GroupWatcher(opts):
"""Main function for per-group watcher process.
"""
group_uuid = opts.nodegroup.lower()
if not utils.UUID_RE.match(group_uuid):
raise errors.GenericError("Node group parameter (%s) must be given a UUID,"
" got '%s'" %
(cli.NODEGROUP_OPT_NAME, group_uuid))
logging.info("Watcher for node group '%s'", group_uuid)
known_groups = _LoadKnownGroups()
# Check if node group is known
if group_uuid not in known_groups:
raise errors.GenericError("Node group '%s' is not known by ssconf" %
group_uuid)
# Group UUID has been verified and should not contain any dangerous
# characters
state_path = pathutils.WATCHER_GROUP_STATE_FILE % group_uuid
inst_status_path = pathutils.WATCHER_GROUP_INSTANCE_STATUS_FILE % group_uuid
logging.debug("Using state file %s", state_path)
# Group watcher file lock
statefile = state.OpenStateFile(state_path) # pylint: disable=E0602
if not statefile:
return constants.EXIT_FAILURE
notepad = state.WatcherState(statefile) # pylint: disable=E0602
try:
# Connect to master daemon
client = GetLuxiClient(False)
_CheckMaster(client)
(nodes, instances, locks) = _GetGroupData(client, group_uuid)
# Update per-group instance status file
_UpdateInstanceStatus(inst_status_path, instances.values())
_MergeInstanceStatus(pathutils.INSTANCE_STATUS_FILE,
pathutils.WATCHER_GROUP_INSTANCE_STATUS_FILE,
known_groups)
started = _CheckInstances(client, notepad, instances, locks)
_CheckDisks(client, notepad, nodes, instances, started)
except Exception, err:
logging.info("Not updating status file due to failure: %s", err)
raise
else:
# Save changes for next run
notepad.Save(state_path)
notepad.Close()
# Check if the nodegroup only has ext storage type
only_ext = compat.all(i.disk_template == constants.DT_EXT
for i in instances.values())
  # We skip verification of the current node group if it contains only
  # external storage devices. An interface exists for external storage
  # providers to implement disk verification, but the current
  # ExtStorageDevice does not provide an API for this yet.
#
# This check needs to be revisited if ES_ACTION_VERIFY on ExtStorageDevice
# is implemented.
if not opts.no_verify_disks and not only_ext:
is_strict = not opts.no_strict
_VerifyDisks(client, group_uuid, nodes, instances, is_strict=is_strict)
return constants.EXIT_SUCCESS
def Main():
"""Main function.
"""
(options, _) = ParseOptions()
utils.SetupLogging(pathutils.LOG_WATCHER, sys.argv[0],
debug=options.debug, stderr_logging=options.debug)
if ShouldPause() and not options.ignore_pause:
logging.debug("Pause has been set, exiting")
return constants.EXIT_SUCCESS
# Try to acquire global watcher lock in shared mode.
# In case we are in the global watcher process, this lock will be held by all
# children processes (one for each nodegroup) and will only be released when
# all of them have finished running.
lock = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
try:
lock.Shared(blocking=False)
except (EnvironmentError, errors.LockError), err:
logging.error("Can't acquire lock on %s: %s",
pathutils.WATCHER_LOCK_FILE, err)
return constants.EXIT_SUCCESS
if options.nodegroup is None:
fn = _GlobalWatcher
else:
# Per-nodegroup watcher
fn = _GroupWatcher
try:
return fn(options)
except (SystemExit, KeyboardInterrupt):
raise
except NotMasterError:
logging.debug("Not master, exiting")
return constants.EXIT_NOTMASTER
except errors.ResolverError, err:
logging.error("Cannot resolve hostname '%s', exiting", err.args[0])
return constants.EXIT_NODESETUP_ERROR
except errors.JobQueueFull:
logging.error("Job queue is full, can't query cluster state")
except errors.JobQueueDrainError:
logging.error("Job queue is drained, can't maintain cluster state")
except Exception, err: # pylint: disable=W0703
logging.exception(str(err))
return constants.EXIT_FAILURE
return constants.EXIT_SUCCESS
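# A minimal sketch (an assumption, not part of the original module) of how
# Main() would typically be wired up as a script entry point:
#
#   if __name__ == "__main__":
#       sys.exit(Main())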
|
|
from pipes import quote as shell_quote
from characteristic import attributes
from eliot import Message, MessageType, Field
from effect import (
sync_performer, TypeDispatcher, ComposedDispatcher, Effect,
)
from effect.twisted import (
make_twisted_dispatcher,
)
from effect.twisted import (
perform, deferred_performer)
from twisted.conch.endpoints import (
SSHCommandClientEndpoint,
# https://twistedmatrix.com/trac/ticket/7861
_NewConnectionHelper,
# https://twistedmatrix.com/trac/ticket/7862
_ReadFile, ConsoleUI,
)
from twisted.conch.client.knownhosts import KnownHostsFile
from twisted.internet import reactor
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.internet.endpoints import UNIXClientEndpoint, connectProtocol
from twisted.internet.error import ConnectionDone
from twisted.protocols.basic import LineOnlyReceiver
from twisted.python.filepath import FilePath
import os
from flocker.testtools import loop_until
from ._model import Run, Sudo, Put, Comment, RunRemotely
from .._effect import dispatcher as base_dispatcher
from ._monkeypatch import patch_twisted_7672
RUN_OUTPUT_MESSAGE = MessageType(
message_type="flocker.provision.ssh:run:output",
fields=[
Field.for_types(u"line", [bytes], u"The output."),
],
description=u"A line of command output.",
)
def extReceived(self, type, data):
from twisted.conch.ssh.connection import EXTENDED_DATA_STDERR
if type == EXTENDED_DATA_STDERR:
self.dataReceived(data)
@attributes([
"deferred",
"context",
])
class CommandProtocol(LineOnlyReceiver, object):
"""
Protocol that logs the lines of a remote command.
    :ivar Deferred deferred: Deferred to fire when the command finishes.
        If the command finished successfully, will fire with ``None``.
        Otherwise, errbacks with the reason.
:ivar Message context: The eliot message context to log.
"""
delimiter = b'\n'
def connectionMade(self):
from functools import partial
self.transport.disconnecting = False
# SSHCommandClientEndpoint doesn't support capturing stderr.
# We patch the SSHChannel to interleave it.
# https://twistedmatrix.com/trac/ticket/7893
self.transport.extReceived = partial(extReceived, self)
def connectionLost(self, reason):
if reason.check(ConnectionDone):
self.deferred.callback(None)
else:
self.deferred.errback(reason)
def lineReceived(self, line):
self.context.bind(
message_type="flocker.provision.ssh:run:output",
line=line,
).write()
@sync_performer
def perform_sudo(dispatcher, intent):
"""
See :py:class:`Sudo`.
"""
return Effect(Run(command='sudo ' + intent.command))
@sync_performer
def perform_put(dispatcher, intent):
"""
See :py:class:`Put`.
"""
return Effect(Run(command='printf -- %s > %s'
% (shell_quote(intent.content),
shell_quote(intent.path))))
@sync_performer
def perform_comment(dispatcher, intent):
    """
    See :py:class:`Comment`. Comments are purely informational, so performing
    one is deliberately a no-op.
    """
def get_ssh_dispatcher(connection, context):
"""
:param Message context: The eliot message context to log.
:param connection: The SSH connection run commands on.
"""
@deferred_performer
def perform_run(dispatcher, intent):
context.bind(
message_type="flocker.provision.ssh:run",
command=intent.command,
).write()
endpoint = SSHCommandClientEndpoint.existingConnection(
connection, intent.command)
d = Deferred()
connectProtocol(endpoint, CommandProtocol(
deferred=d, context=context))
return d
return TypeDispatcher({
Run: perform_run,
Sudo: perform_sudo,
Put: perform_put,
Comment: perform_comment,
})
def get_connection_helper(address, username, port):
"""
Get a :class:`twisted.conch.endpoints._ISSHConnectionCreator` to connect to
the given remote.
:param bytes address: The address of the remote host to connect to.
:param bytes username: The user to connect as.
:param int port: The port of the ssh server to connect to.
:return _ISSHConnectionCreator:
"""
try:
agentEndpoint = UNIXClientEndpoint(
reactor, os.environ["SSH_AUTH_SOCK"])
except KeyError:
agentEndpoint = None
return _NewConnectionHelper(
reactor, address, port, None, username,
keys=None,
password=None,
agentEndpoint=agentEndpoint,
knownHosts=KnownHostsFile.fromPath(FilePath("/dev/null")),
ui=ConsoleUI(lambda: _ReadFile(b"yes")))
@deferred_performer
@inlineCallbacks
def perform_run_remotely(base_dispatcher, intent):
connection_helper = get_connection_helper(
username=intent.username, address=intent.address, port=intent.port)
context = Message.new(
username=intent.username, address=intent.address, port=intent.port)
def connect():
connection = connection_helper.secureConnection()
connection.addErrback(lambda _: False)
return connection
connection = yield loop_until(connect)
dispatcher = ComposedDispatcher([
get_ssh_dispatcher(
connection=connection,
context=context,
),
base_dispatcher,
])
yield perform(dispatcher, intent.commands)
yield connection_helper.cleanupConnection(
connection, False)
def make_dispatcher(reactor):
patch_twisted_7672()
return ComposedDispatcher([
TypeDispatcher({
RunRemotely: perform_run_remotely,
}),
make_twisted_dispatcher(reactor),
base_dispatcher,
])
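# A hedged usage sketch (not part of the original module), assuming RunRemotely
# accepts the fields read by perform_run_remotely above (username, address,
# port, commands) and that the commands are themselves Effects such as Run.
# Effect and perform are the module-level imports used above.
#
#   def run_uname(reactor):
#       dispatcher = make_dispatcher(reactor)
#       intent = RunRemotely(
#           username=b"root", address=b"203.0.113.10", port=22,
#           commands=Effect(Run(command="uname -a")))
#       return perform(dispatcher, Effect(intent))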
|
|
"""Async gunicorn worker for aiohttp.web"""
import asyncio
import logging
import os
import re
import signal
import ssl
import sys
import gunicorn.workers.base as base
from gunicorn.config import AccessLogFormat as GunicornAccessLogFormat
from aiohttp.helpers import AccessLogger, ensure_future
__all__ = ('GunicornWebWorker', 'GunicornUVLoopWebWorker')
class GunicornWebWorker(base.Worker):
DEFAULT_AIOHTTP_LOG_FORMAT = AccessLogger.LOG_FORMAT
DEFAULT_GUNICORN_LOG_FORMAT = GunicornAccessLogFormat.default
def __init__(self, *args, **kw): # pragma: no cover
super().__init__(*args, **kw)
self.servers = {}
self.exit_code = 0
def init_process(self):
# create new event_loop after fork
asyncio.get_event_loop().close()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
super().init_process()
def run(self):
self.loop.run_until_complete(self.wsgi.startup())
self._runner = ensure_future(self._run(), loop=self.loop)
try:
self.loop.run_until_complete(self._runner)
finally:
self.loop.close()
sys.exit(self.exit_code)
def make_handler(self, app):
if hasattr(self.cfg, 'debug'):
is_debug = self.cfg.debug
else:
is_debug = self.log.loglevel == logging.DEBUG
return app.make_handler(
logger=self.log,
debug=is_debug,
slow_request_timeout=self.cfg.timeout,
keepalive_timeout=self.cfg.keepalive,
access_log=self.log.access_log,
access_log_format=self._get_valid_log_format(
self.cfg.access_log_format))
@asyncio.coroutine
def close(self):
if self.servers:
servers = self.servers
self.servers = None
# stop accepting connections
for server, handler in servers.items():
self.log.info("Stopping server: %s, connections: %s",
self.pid, len(handler.connections))
server.close()
yield from server.wait_closed()
# send on_shutdown event
yield from self.wsgi.shutdown()
# stop alive connections
tasks = [
handler.finish_connections(
timeout=self.cfg.graceful_timeout / 100 * 95)
for handler in servers.values()]
yield from asyncio.gather(*tasks, loop=self.loop)
# cleanup application
yield from self.wsgi.cleanup()
@asyncio.coroutine
def _run(self):
ctx = self._create_ssl_context(self.cfg) if self.cfg.is_ssl else None
for sock in self.sockets:
handler = self.make_handler(self.wsgi)
srv = yield from self.loop.create_server(handler, sock=sock.sock,
ssl=ctx)
self.servers[srv] = handler
# If our parent changed then we shut down.
pid = os.getpid()
try:
while self.alive:
self.notify()
cnt = sum(handler.requests_count
for handler in self.servers.values())
if self.cfg.max_requests and cnt > self.cfg.max_requests:
self.alive = False
self.log.info("Max requests, shutting down: %s", self)
elif pid == os.getpid() and self.ppid != os.getppid():
self.alive = False
self.log.info("Parent changed, shutting down: %s", self)
else:
yield from asyncio.sleep(1.0, loop=self.loop)
except BaseException:
pass
yield from self.close()
def init_signals(self):
# Set up signals through the event loop API.
self.loop.add_signal_handler(signal.SIGQUIT, self.handle_quit,
signal.SIGQUIT, None)
self.loop.add_signal_handler(signal.SIGTERM, self.handle_exit,
signal.SIGTERM, None)
self.loop.add_signal_handler(signal.SIGINT, self.handle_quit,
signal.SIGINT, None)
self.loop.add_signal_handler(signal.SIGWINCH, self.handle_winch,
signal.SIGWINCH, None)
self.loop.add_signal_handler(signal.SIGUSR1, self.handle_usr1,
signal.SIGUSR1, None)
self.loop.add_signal_handler(signal.SIGABRT, self.handle_abort,
signal.SIGABRT, None)
# Don't let SIGTERM and SIGUSR1 disturb active requests
# by interrupting system calls
signal.siginterrupt(signal.SIGTERM, False)
signal.siginterrupt(signal.SIGUSR1, False)
def handle_quit(self, sig, frame):
self.alive = False
def handle_abort(self, sig, frame):
self.alive = False
self.exit_code = 1
@staticmethod
def _create_ssl_context(cfg):
""" Creates SSLContext instance for usage in asyncio.create_server.
See ssl.SSLSocket.__init__ for more details.
"""
ctx = ssl.SSLContext(cfg.ssl_version)
ctx.load_cert_chain(cfg.certfile, cfg.keyfile)
ctx.verify_mode = cfg.cert_reqs
if cfg.ca_certs:
ctx.load_verify_locations(cfg.ca_certs)
if cfg.ciphers:
ctx.set_ciphers(cfg.ciphers)
return ctx
def _get_valid_log_format(self, source_format):
if source_format == self.DEFAULT_GUNICORN_LOG_FORMAT:
return self.DEFAULT_AIOHTTP_LOG_FORMAT
elif re.search(r'%\([^\)]+\)', source_format):
raise ValueError(
"Gunicorn's style options in form of `%(name)s` are not "
"supported for the log formatting. Please use aiohttp's "
"format specification to configure access log formatting: "
"http://aiohttp.readthedocs.io/en/stable/logging.html"
"#format-specification"
)
else:
return source_format
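    # A hedged example (an assumption, not from the original module) of an
    # aiohttp-style access_log_format string that _get_valid_log_format above
    # would accept unchanged:
    #
    #   access_log_format = '%a %t "%r" %s %b "%{Referer}i" "%{User-Agent}i"'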
class GunicornUVLoopWebWorker(GunicornWebWorker):
def init_process(self):
import uvloop
# Close any existing event loop before setting a
# new policy.
asyncio.get_event_loop().close()
# Setup uvloop policy, so that every
# asyncio.get_event_loop() will create an instance
# of uvloop event loop.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
super().init_process()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualMachineScaleSetVMsOperations(object):
"""VirtualMachineScaleSetVMsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2017-12-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-12-01"
self.config = config
def _reimage_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.reimage.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def reimage(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Reimages (upgrade the operating system) a specific virtual machine in a
VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns OperationStatusResponse
or ClientRawResponse<OperationStatusResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._reimage_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
reimage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimage'}
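    # A hedged usage sketch (not part of the generated code): this operations
    # class is normally reached through a ComputeManagementClient; the
    # credentials and subscription_id objects are assumptions for illustration.
    #
    #   client = ComputeManagementClient(credentials, subscription_id)
    #   poller = client.virtual_machine_scale_set_vms.reimage(
    #       resource_group_name="my-rg", vm_scale_set_name="my-vmss",
    #       instance_id="0")
    #   status = poller.result()  # OperationStatusResponse once the LRO completes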
def _reimage_all_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.reimage_all.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def reimage_all(
            self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, polling=True, **operation_config):
        """Allows you to re-image all the disks (including data disks) in a VM
        scale set instance. This operation is only supported for managed
        disks.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns OperationStatusResponse
or ClientRawResponse<OperationStatusResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._reimage_all_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
reimage_all.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimageall'}
def _deallocate_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.deallocate.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def deallocate(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deallocates a specific virtual machine in a VM scale set. Shuts down
the virtual machine and releases the compute resources it uses. You are
not billed for the compute resources of this virtual machine once it is
deallocated.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns OperationStatusResponse
or ClientRawResponse<OperationStatusResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._deallocate_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
deallocate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate'}
def _update_initial(
self, resource_group_name, vm_scale_set_name, instance_id, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VirtualMachineScaleSetVM')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineScaleSetVM', response)
if response.status_code == 202:
deserialized = self._deserialize('VirtualMachineScaleSetVM', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, vm_scale_set_name, instance_id, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a virtual machine of a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param parameters: Parameters supplied to the Update Virtual Machine
Scale Sets VM operation.
:type parameters:
~azure.mgmt.compute.v2017_12_01.models.VirtualMachineScaleSetVM
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
VirtualMachineScaleSetVM or
ClientRawResponse<VirtualMachineScaleSetVM> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_12_01.models.VirtualMachineScaleSetVM]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2017_12_01.models.VirtualMachineScaleSetVM]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualMachineScaleSetVM', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}'}
def _delete_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes a virtual machine from a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns OperationStatusResponse
or ClientRawResponse<OperationStatusResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}'}
def get(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
"""Gets a virtual machine from a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualMachineScaleSetVM or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.compute.v2017_12_01.models.VirtualMachineScaleSetVM or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineScaleSetVM', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}'}
def get_instance_view(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
"""Gets the status of a virtual machine from a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualMachineScaleSetVMInstanceView or ClientRawResponse if
raw=true
:rtype:
~azure.mgmt.compute.v2017_12_01.models.VirtualMachineScaleSetVMInstanceView
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get_instance_view.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineScaleSetVMInstanceView', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_instance_view.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/instanceView'}
def list(
            self, resource_group_name, virtual_machine_scale_set_name, filter=None, select=None, expand=None, custom_headers=None, raw=False, **operation_config):
        """Gets a list of all virtual machines in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the VM scale set.
:type virtual_machine_scale_set_name: str
:param filter: The filter to apply to the operation.
:type filter: str
:param select: The list parameters.
:type select: str
:param expand: The expand expression to apply to the operation.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualMachineScaleSetVM
:rtype:
~azure.mgmt.compute.v2017_12_01.models.VirtualMachineScaleSetVMPaged[~azure.mgmt.compute.v2017_12_01.models.VirtualMachineScaleSetVM]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualMachineScaleSetVMPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualMachineScaleSetVMPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines'}
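    # A hedged sketch (not part of the generated code) of consuming the paged
    # result; the client object is the same assumption as in the reimage
    # example above.
    #
    #   for vm in client.virtual_machine_scale_set_vms.list("my-rg", "my-vmss"):
    #       print(vm.instance_id, vm.name)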
def _power_off_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.power_off.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def power_off(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Power off (stop) a virtual machine in a VM scale set. Note that
resources are still attached and you are getting charged for the
resources. Instead, use deallocate to release resources and avoid
charges.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns OperationStatusResponse
or ClientRawResponse<OperationStatusResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._power_off_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
power_off.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/poweroff'}
def _restart_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.restart.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def restart(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Restarts a virtual machine in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns OperationStatusResponse
or ClientRawResponse<OperationStatusResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._restart_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/restart'}
def _start_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.start.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def start(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Starts a virtual machine in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns OperationStatusResponse
or ClientRawResponse<OperationStatusResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._start_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/start'}
def _redeploy_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.redeploy.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def redeploy(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Redeploys a virtual machine in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns OperationStatusResponse
or ClientRawResponse<OperationStatusResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._redeploy_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
redeploy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/redeploy'}
def _perform_maintenance_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.perform_maintenance.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def perform_maintenance(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Performs maintenance on a virtual machine in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns OperationStatusResponse
or ClientRawResponse<OperationStatusResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2017_12_01.models.OperationStatusResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._perform_maintenance_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
perform_maintenance.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/performMaintenance'}
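

# --- Usage sketch (editor's addition, not part of the generated client) ------
# A minimal, hedged example of driving the long-running operations above
# through ComputeManagementClient. The credential object, resource names and
# subscription id are hypothetical placeholders; the call shapes (operation
# group name, LROPoller.result(), polling=False) follow the docstrings in
# this module.
def _example_start_vmss_instance():
    from azure.common.credentials import ServicePrincipalCredentials
    from azure.mgmt.compute import ComputeManagementClient

    credentials = ServicePrincipalCredentials(
        client_id='<client-id>', secret='<secret>', tenant='<tenant-id>')
    client = ComputeManagementClient(credentials, '<subscription-id>')

    # Default polling=True uses ARMPolling; .result() blocks until the
    # operation completes and returns the deserialized OperationStatusResponse.
    poller = client.virtual_machine_scale_set_vms.start(
        '<resource-group>', '<vmss-name>', '0')
    status = poller.result()

    # polling=False (NoPolling) hands back control right after the initial
    # 200/202 response; the caller decides how to track completion.
    raw_poller = client.virtual_machine_scale_set_vms.restart(
        '<resource-group>', '<vmss-name>', '0', polling=False)
    return status, raw_poller.result()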
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
import mox
from heat.engine import environment
from heat.tests.v1_1 import fakes
from heat.common import exception
from heat.common import template_format
from heat.engine import parser
from heat.engine import resource
from heat.engine import scheduler
from heat.engine.resources import instance as instances
from heat.engine.resources import network_interface
from heat.engine.resources import nova_utils
from heat.openstack.common import uuidutils
from heat.tests.common import HeatTestCase
from heat.tests import utils
from neutronclient.v2_0 import client as neutronclient
wp_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "WordPress",
"Parameters" : {
"KeyName" : {
"Description" : "KeyName",
"Type" : "String",
"Default" : "test"
}
},
"Resources" : {
"WebServer": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "F17-x86_64-gold",
"InstanceType" : "m1.large",
"KeyName" : "test",
"UserData" : "wordpress"
}
}
}
}
'''
class InstancesTest(HeatTestCase):
def setUp(self):
super(InstancesTest, self).setUp()
self.fc = fakes.FakeClient()
utils.setup_dummy_db()
def _setup_test_stack(self, stack_name):
t = template_format.parse(wp_template)
template = parser.Template(t)
stack = parser.Stack(utils.dummy_context(), stack_name, template,
environment.Environment({'KeyName': 'test'}),
stack_id=str(uuid.uuid4()))
return (t, stack)
def _setup_test_instance(self, return_server, name, image_id=None,
stub_create=True):
stack_name = '%s_s' % name
(t, stack) = self._setup_test_stack(stack_name)
t['Resources']['WebServer']['Properties']['ImageId'] = \
image_id or 'CentOS 5.2'
t['Resources']['WebServer']['Properties']['InstanceType'] = \
'256 MB Server'
instance = instances.Instance(name, t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(instance, 'nova')
instance.nova().MultipleTimes().AndReturn(self.fc)
instance.t = instance.stack.resolve_runtime_data(instance.t)
if stub_create:
# need to resolve the template functions
server_userdata = nova_utils.build_userdata(
instance,
instance.t['Properties']['UserData'])
self.m.StubOutWithMock(nova_utils, 'build_userdata')
nova_utils.build_userdata(
instance,
instance.t['Properties']['UserData']).AndReturn(
server_userdata)
self.m.StubOutWithMock(self.fc.servers, 'create')
self.fc.servers.create(
image=1, flavor=1, key_name='test',
name=utils.PhysName(
stack_name,
instance.name,
limit=instance.physical_resource_name_limit),
security_groups=None,
userdata=server_userdata, scheduler_hints=None,
meta=None, nics=None, availability_zone=None).AndReturn(
return_server)
return instance
def _create_test_instance(self, return_server, name, stub_create=True):
instance = self._setup_test_instance(return_server, name,
stub_create=stub_create)
self.m.ReplayAll()
scheduler.TaskRunner(instance.create)()
return instance
def test_instance_create(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'in_create')
# this makes sure the auto increment worked on instance creation
self.assertTrue(instance.id > 0)
expected_ip = return_server.networks['public'][0]
self.assertEqual(expected_ip, instance.FnGetAtt('PublicIp'))
self.assertEqual(expected_ip, instance.FnGetAtt('PrivateIp'))
        self.assertEqual(expected_ip, instance.FnGetAtt('PublicDnsName'))
        self.assertEqual(expected_ip, instance.FnGetAtt('PrivateDnsName'))
self.m.VerifyAll()
def test_instance_create_with_image_id(self):
return_server = self.fc.servers.list()[1]
instance = self._setup_test_instance(return_server,
'in_create_imgid',
image_id='1')
self.m.StubOutWithMock(uuidutils, "is_uuid_like")
uuidutils.is_uuid_like('1').AndReturn(True)
self.m.ReplayAll()
scheduler.TaskRunner(instance.create)()
# this makes sure the auto increment worked on instance creation
self.assertTrue(instance.id > 0)
expected_ip = return_server.networks['public'][0]
self.assertEqual(expected_ip, instance.FnGetAtt('PublicIp'))
self.assertEqual(expected_ip, instance.FnGetAtt('PrivateIp'))
self.assertEqual(expected_ip, instance.FnGetAtt('PublicDnsName'))
self.assertEqual(expected_ip, instance.FnGetAtt('PrivateDnsName'))
self.m.VerifyAll()
def test_instance_create_image_name_err(self):
stack_name = 'test_instance_create_image_name_err_stack'
(t, stack) = self._setup_test_stack(stack_name)
        # create an instance with a non-existent image name
t['Resources']['WebServer']['Properties']['ImageId'] = 'Slackware'
instance = instances.Instance('instance_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(instance, 'nova')
instance.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
self.assertRaises(exception.ImageNotFound, instance.handle_create)
self.m.VerifyAll()
def test_instance_create_duplicate_image_name_err(self):
stack_name = 'test_instance_create_image_name_err_stack'
(t, stack) = self._setup_test_stack(stack_name)
        # create an instance with a non-unique image name
t['Resources']['WebServer']['Properties']['ImageId'] = 'CentOS 5.2'
instance = instances.Instance('instance_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(instance, 'nova')
instance.nova().MultipleTimes().AndReturn(self.fc)
self.m.StubOutWithMock(self.fc.client, "get_images_detail")
self.fc.client.get_images_detail().AndReturn((
200, {'images': [{'id': 1, 'name': 'CentOS 5.2'},
{'id': 4, 'name': 'CentOS 5.2'}]}))
self.m.ReplayAll()
self.assertRaises(exception.PhysicalResourceNameAmbiguity,
instance.handle_create)
self.m.VerifyAll()
def test_instance_create_image_id_err(self):
stack_name = 'test_instance_create_image_id_err_stack'
(t, stack) = self._setup_test_stack(stack_name)
        # create an instance with a non-existent image ID
t['Resources']['WebServer']['Properties']['ImageId'] = '1'
instance = instances.Instance('instance_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(instance, 'nova')
instance.nova().MultipleTimes().AndReturn(self.fc)
self.m.StubOutWithMock(uuidutils, "is_uuid_like")
uuidutils.is_uuid_like('1').AndReturn(True)
self.m.StubOutWithMock(self.fc.client, "get_images_1")
self.fc.client.get_images_1().AndRaise(
instances.clients.novaclient.exceptions.NotFound(404))
self.m.ReplayAll()
self.assertRaises(exception.ImageNotFound, instance.handle_create)
self.m.VerifyAll()
    class FakeVolumeAttach(object):
def started(self):
return False
def test_instance_create_unexpected_status(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'test_instance_create')
return_server.get = lambda: None
return_server.status = 'BOGUS'
self.assertRaises(exception.Error,
instance.check_create_complete,
(return_server, self.FakeVolumeAttach()))
def test_instance_create_error_status(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'test_instance_create')
return_server.status = 'ERROR'
return_server.fault = {
'message': 'NoValidHost',
'code': 500,
'created': '2013-08-14T03:12:10Z'
}
self.m.StubOutWithMock(return_server, 'get')
return_server.get()
self.m.ReplayAll()
self.assertRaises(exception.Error,
instance.check_create_complete,
(return_server, self.FakeVolumeAttach()))
self.m.VerifyAll()
def test_instance_create_error_no_fault(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'in_create')
return_server.status = 'ERROR'
self.m.StubOutWithMock(return_server, 'get')
return_server.get()
self.m.ReplayAll()
e = self.assertRaises(
exception.Error, instance.check_create_complete,
(return_server, self.FakeVolumeAttach()))
self.assertEqual(
'Creation of server sample-server2 failed: Unknown (500)',
str(e))
self.m.VerifyAll()
def test_instance_validate(self):
stack_name = 'test_instance_validate_stack'
(t, stack) = self._setup_test_stack(stack_name)
        # create an instance with a non-existent image ID
t['Resources']['WebServer']['Properties']['ImageId'] = '1'
instance = instances.Instance('instance_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(instance, 'nova')
instance.nova().MultipleTimes().AndReturn(self.fc)
self.m.StubOutWithMock(uuidutils, "is_uuid_like")
uuidutils.is_uuid_like('1').AndReturn(True)
self.m.ReplayAll()
self.assertIsNone(instance.validate())
self.m.VerifyAll()
def test_instance_create_delete(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'in_cr_del')
instance.resource_id = 1234
# this makes sure the auto increment worked on instance creation
self.assertTrue(instance.id > 0)
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndRaise(instances.clients.novaclient.exceptions.NotFound(404))
mox.Replay(get)
scheduler.TaskRunner(instance.delete)()
self.assertIsNone(instance.resource_id)
self.assertEqual((instance.DELETE, instance.COMPLETE), instance.state)
self.m.VerifyAll()
def test_instance_update_metadata(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'ud_md')
update_template = copy.deepcopy(instance.t)
update_template['Metadata'] = {'test': 123}
scheduler.TaskRunner(instance.update, update_template)()
self.assertEqual({'test': 123}, instance.metadata)
def test_instance_update_instance_type(self):
"""
        Instance.handle_update supports changing the InstanceType, and makes
        the change by issuing a resize API call against Nova.
"""
return_server = self.fc.servers.list()[1]
return_server.id = 1234
instance = self._create_test_instance(return_server,
'ud_type')
update_template = copy.deepcopy(instance.t)
update_template['Properties']['InstanceType'] = 'm1.small'
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get(1234).AndReturn(return_server)
def activate_status(server):
server.status = 'VERIFY_RESIZE'
return_server.get = activate_status.__get__(return_server)
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
self.fc.client.post_servers_1234_action(
body={'resize': {'flavorRef': 2}}).AndReturn((202, None))
self.fc.client.post_servers_1234_action(
body={'confirmResize': None}).AndReturn((202, None))
self.m.ReplayAll()
scheduler.TaskRunner(instance.update, update_template)()
self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)
self.m.VerifyAll()
def test_instance_update_instance_type_failed(self):
"""
If the status after a resize is not VERIFY_RESIZE, it means the resize
call failed, so we raise an explicit error.
"""
return_server = self.fc.servers.list()[1]
return_server.id = 1234
instance = self._create_test_instance(return_server,
'ud_type_f')
update_template = copy.deepcopy(instance.t)
update_template['Properties']['InstanceType'] = 'm1.small'
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get(1234).AndReturn(return_server)
def activate_status(server):
server.status = 'ACTIVE'
return_server.get = activate_status.__get__(return_server)
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
self.fc.client.post_servers_1234_action(
body={'resize': {'flavorRef': 2}}).AndReturn((202, None))
self.m.ReplayAll()
updater = scheduler.TaskRunner(instance.update, update_template)
error = self.assertRaises(exception.ResourceFailure, updater)
self.assertEqual(
"Error: Resizing to 'm1.small' failed, status 'ACTIVE'",
str(error))
self.assertEqual((instance.UPDATE, instance.FAILED), instance.state)
self.m.VerifyAll()
def test_instance_update_replace(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'in_update1')
update_template = copy.deepcopy(instance.t)
update_template['Notallowed'] = {'test': 123}
updater = scheduler.TaskRunner(instance.update, update_template)
self.assertRaises(resource.UpdateReplace, updater)
def test_instance_update_properties(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'in_update2')
update_template = copy.deepcopy(instance.t)
update_template['Properties']['KeyName'] = 'mustreplace'
updater = scheduler.TaskRunner(instance.update, update_template)
self.assertRaises(resource.UpdateReplace, updater)
def test_instance_status_build(self):
return_server = self.fc.servers.list()[0]
instance = self._setup_test_instance(return_server,
'in_sts_build')
instance.resource_id = 1234
# Bind fake get method which Instance.check_create_complete will call
def activate_status(server):
server.status = 'ACTIVE'
return_server.get = activate_status.__get__(return_server)
self.m.ReplayAll()
scheduler.TaskRunner(instance.create)()
self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
def test_instance_status_suspend_immediate(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'in_suspend')
instance.resource_id = 1234
self.m.ReplayAll()
# Override the get_servers_1234 handler status to SUSPENDED
d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d['server']['status'] = 'SUSPENDED'
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndReturn((200, d))
mox.Replay(get)
scheduler.TaskRunner(instance.suspend)()
self.assertEqual((instance.SUSPEND, instance.COMPLETE), instance.state)
self.m.VerifyAll()
def test_instance_status_resume_immediate(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'in_resume')
instance.resource_id = 1234
self.m.ReplayAll()
        # Override the get_servers_1234 handler status to ACTIVE
d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d['server']['status'] = 'ACTIVE'
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndReturn((200, d))
mox.Replay(get)
instance.state_set(instance.SUSPEND, instance.COMPLETE)
scheduler.TaskRunner(instance.resume)()
self.assertEqual((instance.RESUME, instance.COMPLETE), instance.state)
self.m.VerifyAll()
def test_instance_status_suspend_wait(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'in_suspend_wait')
instance.resource_id = 1234
self.m.ReplayAll()
# Override the get_servers_1234 handler status to SUSPENDED, but
# return the ACTIVE state first (twice, so we sleep)
d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d2 = copy.deepcopy(d1)
d1['server']['status'] = 'ACTIVE'
d2['server']['status'] = 'SUSPENDED'
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndReturn((200, d1))
get().AndReturn((200, d1))
get().AndReturn((200, d2))
self.m.ReplayAll()
scheduler.TaskRunner(instance.suspend)()
self.assertEqual((instance.SUSPEND, instance.COMPLETE), instance.state)
self.m.VerifyAll()
def test_instance_status_resume_wait(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'in_resume_wait')
instance.resource_id = 1234
self.m.ReplayAll()
# Override the get_servers_1234 handler status to ACTIVE, but
# return the SUSPENDED state first (twice, so we sleep)
d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d2 = copy.deepcopy(d1)
d1['server']['status'] = 'SUSPENDED'
d2['server']['status'] = 'ACTIVE'
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndReturn((200, d1))
get().AndReturn((200, d1))
get().AndReturn((200, d2))
self.m.ReplayAll()
instance.state_set(instance.SUSPEND, instance.COMPLETE)
scheduler.TaskRunner(instance.resume)()
self.assertEqual((instance.RESUME, instance.COMPLETE), instance.state)
self.m.VerifyAll()
def test_instance_suspend_volumes_step(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'in_suspend_vol')
instance.resource_id = 1234
self.m.ReplayAll()
# Override the get_servers_1234 handler status to SUSPENDED
d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d['server']['status'] = 'SUSPENDED'
# Return a dummy PollingTaskGroup to make check_suspend_complete step
def dummy_detach():
yield
dummy_tg = scheduler.PollingTaskGroup([dummy_detach, dummy_detach])
self.m.StubOutWithMock(instance, '_detach_volumes_task')
instance._detach_volumes_task().AndReturn(dummy_tg)
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndReturn((200, d))
self.m.ReplayAll()
scheduler.TaskRunner(instance.suspend)()
self.assertEqual((instance.SUSPEND, instance.COMPLETE), instance.state)
self.m.VerifyAll()
def test_instance_resume_volumes_step(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'in_resume_vol')
instance.resource_id = 1234
self.m.ReplayAll()
# Override the get_servers_1234 handler status to ACTIVE
d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d['server']['status'] = 'ACTIVE'
# Return a dummy PollingTaskGroup to make check_resume_complete step
def dummy_attach():
yield
dummy_tg = scheduler.PollingTaskGroup([dummy_attach, dummy_attach])
self.m.StubOutWithMock(instance, '_attach_volumes_task')
instance._attach_volumes_task().AndReturn(dummy_tg)
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndReturn((200, d))
self.m.ReplayAll()
instance.state_set(instance.SUSPEND, instance.COMPLETE)
scheduler.TaskRunner(instance.resume)()
self.assertEqual((instance.RESUME, instance.COMPLETE), instance.state)
self.m.VerifyAll()
def test_instance_status_build_spawning(self):
self._test_instance_status_not_build_active('BUILD(SPAWNING)')
def test_instance_status_hard_reboot(self):
self._test_instance_status_not_build_active('HARD_REBOOT')
def test_instance_status_password(self):
self._test_instance_status_not_build_active('PASSWORD')
def test_instance_status_reboot(self):
self._test_instance_status_not_build_active('REBOOT')
def test_instance_status_rescue(self):
self._test_instance_status_not_build_active('RESCUE')
def test_instance_status_resize(self):
self._test_instance_status_not_build_active('RESIZE')
def test_instance_status_revert_resize(self):
self._test_instance_status_not_build_active('REVERT_RESIZE')
def test_instance_status_shutoff(self):
self._test_instance_status_not_build_active('SHUTOFF')
def test_instance_status_suspended(self):
self._test_instance_status_not_build_active('SUSPENDED')
def test_instance_status_verify_resize(self):
self._test_instance_status_not_build_active('VERIFY_RESIZE')
def _test_instance_status_not_build_active(self, uncommon_status):
return_server = self.fc.servers.list()[0]
instance = self._setup_test_instance(return_server,
'in_sts_bld')
instance.resource_id = 1234
# Bind fake get method which Instance.check_create_complete will call
def activate_status(server):
if hasattr(server, '_test_check_iterations'):
server._test_check_iterations += 1
else:
server._test_check_iterations = 1
if server._test_check_iterations == 1:
server.status = uncommon_status
if server._test_check_iterations > 2:
server.status = 'ACTIVE'
return_server.get = activate_status.__get__(return_server)
self.m.ReplayAll()
scheduler.TaskRunner(instance.create)()
self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
self.m.VerifyAll()
def test_build_nics(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'build_nics')
self.assertIsNone(instance._build_nics([]))
self.assertIsNone(instance._build_nics(None))
self.assertEqual([
{'port-id': 'id3'}, {'port-id': 'id1'}, {'port-id': 'id2'}],
instance._build_nics([
'id3', 'id1', 'id2']))
self.assertEqual(
[{'port-id': 'id1'},
{'port-id': 'id2'},
{'port-id': 'id3'}],
instance._build_nics([
{'NetworkInterfaceId': 'id3', 'DeviceIndex': '3'},
{'NetworkInterfaceId': 'id1', 'DeviceIndex': '1'},
{'NetworkInterfaceId': 'id2', 'DeviceIndex': 2},
]))
self.assertEqual(
[{'port-id': 'id1'},
{'port-id': 'id2'},
{'port-id': 'id3'},
{'port-id': 'id4'},
{'port-id': 'id5'}],
instance._build_nics([
{'NetworkInterfaceId': 'id3', 'DeviceIndex': '3'},
{'NetworkInterfaceId': 'id1', 'DeviceIndex': '1'},
{'NetworkInterfaceId': 'id2', 'DeviceIndex': 2},
'id4',
'id5']
))
def test_build_nics_with_security_groups(self):
"""
        Test that security groups defined in the heat template can be
        associated with a newly created port.
"""
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'build_nics2')
security_groups = ['security_group_1']
self._test_security_groups(instance, security_groups)
security_groups = ['0389f747-7785-4757-b7bb-2ab07e4b09c3']
self._test_security_groups(instance, security_groups, all_uuids=True)
security_groups = ['0389f747-7785-4757-b7bb-2ab07e4b09c3',
'384ccd91-447c-4d83-832c-06974a7d3d05']
self._test_security_groups(instance, security_groups,
sg='two', all_uuids=True)
security_groups = ['security_group_1',
'384ccd91-447c-4d83-832c-06974a7d3d05']
self._test_security_groups(instance, security_groups, sg='two')
security_groups = ['wrong_group_name']
self._test_security_groups(
instance,
security_groups,
sg='zero',
get_secgroup_raises=exception.PhysicalResourceNotFound)
security_groups = ['wrong_group_name',
'0389f747-7785-4757-b7bb-2ab07e4b09c3']
self._test_security_groups(
instance,
security_groups,
get_secgroup_raises=exception.PhysicalResourceNotFound)
security_groups = ['wrong_group_name', 'security_group_1']
self._test_security_groups(
instance,
security_groups,
get_secgroup_raises=exception.PhysicalResourceNotFound)
security_groups = ['duplicate_group_name', 'security_group_1']
self._test_security_groups(
instance,
security_groups,
get_secgroup_raises=exception.PhysicalResourceNameAmbiguity)
def _test_security_groups(self, instance, security_groups, sg='one',
all_uuids=False, get_secgroup_raises=None):
fake_groups_list, props = self._get_fake_properties(sg)
nclient = neutronclient.Client()
self.m.StubOutWithMock(instance, 'neutron')
instance.neutron().MultipleTimes().AndReturn(nclient)
if not all_uuids:
# list_security_groups only gets called when none of the requested
# groups look like UUIDs.
self.m.StubOutWithMock(
neutronclient.Client, 'list_security_groups')
neutronclient.Client.list_security_groups().AndReturn(
fake_groups_list)
net_interface = network_interface.NetworkInterface
self.m.StubOutWithMock(net_interface, 'network_id_from_subnet_id')
net_interface.network_id_from_subnet_id(
nclient,
'fake_subnet_id').MultipleTimes().AndReturn('fake_network_id')
if not get_secgroup_raises:
self.m.StubOutWithMock(neutronclient.Client, 'create_port')
neutronclient.Client.create_port(
{'port': props}).MultipleTimes().AndReturn(
{'port': {'id': 'fake_port_id'}})
self.m.ReplayAll()
if get_secgroup_raises:
self.assertRaises(get_secgroup_raises, instance._build_nics, None,
security_groups=security_groups,
subnet_id='fake_subnet_id')
else:
self.assertEqual(
[{'port-id': 'fake_port_id'}],
instance._build_nics(None,
security_groups=security_groups,
subnet_id='fake_subnet_id'))
self.m.VerifyAll()
self.m.UnsetStubs()
def _get_fake_properties(self, sg='one'):
fake_groups_list = {
'security_groups': [
{
'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3',
'name': 'security_group_1',
'security_group_rules': [],
'description': 'no protocol'
},
{
'id': '384ccd91-447c-4d83-832c-06974a7d3d05',
'name': 'security_group_2',
'security_group_rules': [],
'description': 'no protocol'
},
{
'id': 'e91a0007-06a6-4a4a-8edb-1d91315eb0ef',
'name': 'duplicate_group_name',
'security_group_rules': [],
'description': 'no protocol'
},
{
'id': '8be37f3c-176d-4826-aa17-77d1d9df7b2e',
'name': 'duplicate_group_name',
'security_group_rules': [],
'description': 'no protocol'
}
]
}
fixed_ip = {'subnet_id': 'fake_subnet_id'}
props = {
'admin_state_up': True,
'network_id': 'fake_network_id',
'fixed_ips': [fixed_ip],
'security_groups': ['0389f747-7785-4757-b7bb-2ab07e4b09c3']
}
if sg == 'zero':
props['security_groups'] = []
elif sg == 'one':
props['security_groups'] = ['0389f747-7785-4757-b7bb-2ab07e4b09c3']
elif sg == 'two':
props['security_groups'] = ['0389f747-7785-4757-b7bb-2ab07e4b09c3',
'384ccd91-447c-4d83-832c-06974a7d3d05']
return fake_groups_list, props
def test_instance_without_ip_address(self):
return_server = self.fc.servers.list()[3]
instance = self._create_test_instance(return_server,
'wo_ipaddr')
self.assertEqual('0.0.0.0', instance.FnGetAtt('PrivateIp'))
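

# --- Editor's note: a minimal sketch of the mox stub/replay/verify cycle -----
# The tests above all follow the same pattern: stub a collaborator with
# StubOutWithMock, record the expected calls, switch to replay mode, exercise
# the code under test, then verify. The toy Greeter class below is
# hypothetical and only illustrates that lifecycle.
def _example_mox_cycle():
    import mox

    class Greeter(object):
        def greet(self, name):
            return 'hello %s' % name

    m = mox.Mox()
    greeter = Greeter()
    m.StubOutWithMock(greeter, 'greet')
    greeter.greet('heat').AndReturn('stubbed')   # record the expectation
    m.ReplayAll()                                # switch to replay mode
    assert greeter.greet('heat') == 'stubbed'    # exercise the code under test
    m.VerifyAll()                                # all expectations were met
    m.UnsetStubs()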
|
|
# Copyright (C) 2012 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import webkitpy.thirdparty.unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner, Sharder, TestRunInterruptedException
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_run_results import TestRunResults
from webkitpy.layout_tests.models.test_input import TestInput
from webkitpy.layout_tests.models.test_results import TestResult
from webkitpy.layout_tests.port.test import TestPort
TestExpectations = test_expectations.TestExpectations
class FakePrinter(object):
num_completed = 0
num_tests = 0
def print_expected(self, run_results, get_tests_with_result_type):
pass
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
pass
def print_started_test(self, test_name):
pass
def print_finished_test(self, result, expected, exp_str, got_str):
pass
def write(self, msg):
pass
def write_update(self, msg):
pass
def flush(self):
pass
class LockCheckingRunner(LayoutTestRunner):
def __init__(self, port, options, printer, tester, http_lock):
super(LockCheckingRunner, self).__init__(options, port, printer, port.results_directory(), lambda test_name: False)
self._finished_list_called = False
self._tester = tester
self._should_have_http_lock = http_lock
def handle_finished_list(self, source, list_name, num_tests, elapsed_time):
if not self._finished_list_called:
self._tester.assertEqual(list_name, 'locked_tests')
self._tester.assertTrue(self._remaining_locked_shards)
self._tester.assertTrue(self._has_http_lock is self._should_have_http_lock)
super(LockCheckingRunner, self).handle_finished_list(source, list_name, num_tests, elapsed_time)
if not self._finished_list_called:
self._tester.assertEqual(self._remaining_locked_shards, [])
self._tester.assertFalse(self._has_http_lock)
self._finished_list_called = True
class LayoutTestRunnerTests(unittest.TestCase):
def _runner(self, port=None):
# FIXME: we shouldn't have to use run_webkit_tests.py to get the options we need.
options = run_webkit_tests.parse_args(['--platform', 'test-mac-snowleopard'])[0]
options.child_processes = '1'
host = MockHost()
port = port or host.port_factory.get(options.platform, options=options)
return LockCheckingRunner(port, options, FakePrinter(), self, True)
def _run_tests(self, runner, tests):
test_inputs = [TestInput(test, 6000) for test in tests]
expectations = TestExpectations(runner._port, tests)
runner.run_tests(expectations, test_inputs, set(), num_workers=1, retrying=False)
def test_interrupt_if_at_failure_limits(self):
runner = self._runner()
runner._options.exit_after_n_failures = None
        runner._options.exit_after_n_crashes_or_timeouts = None
test_names = ['passes/text.html', 'passes/image.html']
runner._test_inputs = [TestInput(test_name, 6000) for test_name in test_names]
run_results = TestRunResults(TestExpectations(runner._port, test_names), len(test_names))
run_results.unexpected_failures = 100
run_results.unexpected_crashes = 50
run_results.unexpected_timeouts = 50
# No exception when the exit_after* options are None.
runner._interrupt_if_at_failure_limits(run_results)
# No exception when we haven't hit the limit yet.
runner._options.exit_after_n_failures = 101
runner._options.exit_after_n_crashes_or_timeouts = 101
runner._interrupt_if_at_failure_limits(run_results)
# Interrupt if we've exceeded either limit:
runner._options.exit_after_n_crashes_or_timeouts = 10
self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)
self.assertEqual(run_results.results_by_name['passes/text.html'].type, test_expectations.SKIP)
self.assertEqual(run_results.results_by_name['passes/image.html'].type, test_expectations.SKIP)
runner._options.exit_after_n_crashes_or_timeouts = None
runner._options.exit_after_n_failures = 10
exception = self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)
def test_update_summary_with_result(self):
# Reftests expected to be image mismatch should be respected when pixel_tests=False.
runner = self._runner()
runner._options.pixel_tests = False
test = 'failures/expected/reftest.html'
expectations = TestExpectations(runner._port, tests=[test])
runner._expectations = expectations
run_results = TestRunResults(expectations, 1)
result = TestResult(test_name=test, failures=[test_failures.FailureReftestMismatchDidNotOccur()], reftest_type=['!='])
runner._update_summary_with_result(run_results, result)
self.assertEqual(1, run_results.expected)
self.assertEqual(0, run_results.unexpected)
run_results = TestRunResults(expectations, 1)
result = TestResult(test_name=test, failures=[], reftest_type=['=='])
runner._update_summary_with_result(run_results, result)
self.assertEqual(0, run_results.expected)
self.assertEqual(1, run_results.unexpected)
class SharderTests(unittest.TestCase):
test_list = [
"http/tests/websocket/tests/unicode.htm",
"animations/keyframes.html",
"http/tests/security/view-source-no-refresh.html",
"http/tests/websocket/tests/websocket-protocol-ignored.html",
"fast/css/display-none-inline-style-change-crash.html",
"http/tests/xmlhttprequest/supported-xml-content-types.html",
"dom/html/level2/html/HTMLAnchorElement03.html",
"ietestcenter/Javascript/11.1.5_4-4-c-1.html",
"dom/html/level2/html/HTMLAnchorElement06.html",
"perf/object-keys.html",
"virtual/threaded/dir/test.html",
"virtual/threaded/fast/foo/test.html",
]
def get_test_input(self, test_file):
return TestInput(test_file, requires_lock=(test_file.startswith('http') or test_file.startswith('perf')))
def get_shards(self, num_workers, fully_parallel, test_list=None, max_locked_shards=1):
port = TestPort(MockSystemHost())
self.sharder = Sharder(port.split_test, max_locked_shards)
test_list = test_list or self.test_list
return self.sharder.shard_tests([self.get_test_input(test) for test in test_list], num_workers, fully_parallel)
def assert_shards(self, actual_shards, expected_shard_names):
self.assertEqual(len(actual_shards), len(expected_shard_names))
for i, shard in enumerate(actual_shards):
expected_shard_name, expected_test_names = expected_shard_names[i]
self.assertEqual(shard.name, expected_shard_name)
self.assertEqual([test_input.test_name for test_input in shard.test_inputs],
expected_test_names)
def test_shard_by_dir(self):
locked, unlocked = self.get_shards(num_workers=2, fully_parallel=False)
# Note that although there are tests in multiple dirs that need locks,
# they are crammed into a single shard in order to reduce the # of
# workers hitting the server at once.
self.assert_shards(locked,
[('locked_shard_1',
['http/tests/security/view-source-no-refresh.html',
'http/tests/websocket/tests/unicode.htm',
'http/tests/websocket/tests/websocket-protocol-ignored.html',
'http/tests/xmlhttprequest/supported-xml-content-types.html',
'perf/object-keys.html'])])
self.assert_shards(unlocked,
[('virtual/threaded/dir', ['virtual/threaded/dir/test.html']),
('virtual/threaded/fast/foo', ['virtual/threaded/fast/foo/test.html']),
('animations', ['animations/keyframes.html']),
('dom/html/level2/html', ['dom/html/level2/html/HTMLAnchorElement03.html',
'dom/html/level2/html/HTMLAnchorElement06.html']),
('fast/css', ['fast/css/display-none-inline-style-change-crash.html']),
('ietestcenter/Javascript', ['ietestcenter/Javascript/11.1.5_4-4-c-1.html'])])
def test_shard_every_file(self):
locked, unlocked = self.get_shards(num_workers=2, fully_parallel=True, max_locked_shards=2)
self.assert_shards(locked,
[('locked_shard_1',
['http/tests/websocket/tests/unicode.htm',
'http/tests/security/view-source-no-refresh.html',
'http/tests/websocket/tests/websocket-protocol-ignored.html']),
('locked_shard_2',
['http/tests/xmlhttprequest/supported-xml-content-types.html',
             'perf/object-keys.html'])])
self.assert_shards(unlocked,
[('virtual/threaded/dir', ['virtual/threaded/dir/test.html']),
('virtual/threaded/fast/foo', ['virtual/threaded/fast/foo/test.html']),
('.', ['animations/keyframes.html']),
('.', ['fast/css/display-none-inline-style-change-crash.html']),
('.', ['dom/html/level2/html/HTMLAnchorElement03.html']),
('.', ['ietestcenter/Javascript/11.1.5_4-4-c-1.html']),
('.', ['dom/html/level2/html/HTMLAnchorElement06.html'])])
def test_shard_in_two(self):
locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False)
self.assert_shards(locked,
[('locked_tests',
['http/tests/websocket/tests/unicode.htm',
'http/tests/security/view-source-no-refresh.html',
'http/tests/websocket/tests/websocket-protocol-ignored.html',
'http/tests/xmlhttprequest/supported-xml-content-types.html',
'perf/object-keys.html'])])
self.assert_shards(unlocked,
[('unlocked_tests',
['animations/keyframes.html',
'fast/css/display-none-inline-style-change-crash.html',
'dom/html/level2/html/HTMLAnchorElement03.html',
'ietestcenter/Javascript/11.1.5_4-4-c-1.html',
'dom/html/level2/html/HTMLAnchorElement06.html',
'virtual/threaded/dir/test.html',
'virtual/threaded/fast/foo/test.html'])])
def test_shard_in_two_has_no_locked_shards(self):
locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False,
test_list=['animations/keyframe.html'])
self.assertEqual(len(locked), 0)
self.assertEqual(len(unlocked), 1)
def test_shard_in_two_has_no_unlocked_shards(self):
locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False,
test_list=['http/tests/websocket/tests/unicode.htm'])
self.assertEqual(len(locked), 1)
self.assertEqual(len(unlocked), 0)
def test_multiple_locked_shards(self):
locked, unlocked = self.get_shards(num_workers=4, fully_parallel=False, max_locked_shards=2)
self.assert_shards(locked,
[('locked_shard_1',
['http/tests/security/view-source-no-refresh.html',
'http/tests/websocket/tests/unicode.htm',
'http/tests/websocket/tests/websocket-protocol-ignored.html']),
('locked_shard_2',
['http/tests/xmlhttprequest/supported-xml-content-types.html',
'perf/object-keys.html'])])
locked, unlocked = self.get_shards(num_workers=4, fully_parallel=False)
self.assert_shards(locked,
[('locked_shard_1',
['http/tests/security/view-source-no-refresh.html',
'http/tests/websocket/tests/unicode.htm',
'http/tests/websocket/tests/websocket-protocol-ignored.html',
'http/tests/xmlhttprequest/supported-xml-content-types.html',
'perf/object-keys.html'])])
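

# --- Editor's note: standalone sharding sketch --------------------------------
# The helpers above show the full Sharder call shape; this condenses the same
# flow into one function for reference. The test names are hypothetical.
def _example_shard_tests():
    port = TestPort(MockSystemHost())
    sharder = Sharder(port.split_test, 1)
    inputs = [TestInput(name, requires_lock=name.startswith('http'))
              for name in ['http/tests/a.html', 'fast/css/b.html']]
    # Inputs with requires_lock=True are grouped into the locked shards so
    # only a bounded number of workers hit the HTTP server; everything else
    # is sharded per directory (or per file when fully_parallel is True).
    locked, unlocked = sharder.shard_tests(inputs, 2, False)
    return locked, unlocked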
|
|
from unittest2 import TestCase
from whitepages.location import WhitePagesLocation
class TestWhitePagesLocation(TestCase):
def setUp(self):
self.basic_input = {
"results": [
{
"id": {
"key": "Location.efe46385-b057-40c3-8b67-f5a5278e0710.Durable",
"url": "https://proapi.whitepages.com/2.1/entity/"
"Location.efe46385-b057-40c3-8b67-f5a5278e0710.Durable.json?api_key=KEYVAL",
"type": "Location",
"uuid": "efe46385-b057-40c3-8b67-f5a5278e0710",
"durability": "Durable"
},
"type": "Address",
"legal_entities_at": [
{
"id": {
"key": "Person.cf0993f8-ea1a-4fe6-9bae-cbca443a09f2.Durable",
"url": "https://proapi.whitepages.com/2.1/entity/"
"Person.cf0993f8-ea1a-4fe6-9bae-cbca443a09f2.Durable.json?api_key=KEYVAL",
"type": "Person",
"uuid": "cf0993f8-ea1a-4fe6-9bae-cbca443a09f2",
"durability": "Durable"
},
"valid_for": {
"start": {
"year": 2011,
"month": 6,
"day": 28
},
"stop": None
},
"type": "Full",
"names": [
{
"salutation": None,
"first_name": "Roey",
"middle_name": "F",
"last_name": "Horns",
"suffix": None,
"valid_for": None
}
],
"age_range": {
"start": 45,
"end": 49
},
"gender": None,
"locations": [
{
"id": {
"key": "Location.efe46385-b057-40c3-8b67-f5a5278e0710.Durable",
"url": "https://proapi.whitepages.com/2.1/"
"entity/Location.efe46385-b057-40c3-8b67-f5a5278e0710.Durable.json?"
"api_key=KEYVAL",
"type": "Location",
"uuid": "efe46385-b057-40c3-8b67-f5a5278e0710",
"durability": "Durable"
},
"contact_type": "Home",
"type": "Address",
"legal_entities_at": None,
"city": "Seattle",
"postal_code": "98119",
"zip4": "2043",
"state_code": "WA",
"country_code": "US",
"address": "413 W Howe St, Seattle WA 98119-2043",
"house": "413",
"street_name": "Howe",
"street_type": "St",
"apt_type": None,
"is_receiving_mail": True,
"not_receiving_mail_reason": None,
"usage": "Residential",
"delivery_point": "SingleUnit",
"box_type": None,
"address_type": "Street",
"lat_long": {
"latitude": 47.636105,
"longitude": -122.362549,
"accuracy": "RoofTop"
},
"is_deliverable": True,
"contained_by_locations": None
}
],
"phones": [],
"best_name": "Roey F Horns"
},
{
"id": {
"key": "Person.3c812ed6-4319-44de-b573-c458e4346c9c.Durable",
"url": "https://proapi.whitepages.com/2.1/"
"entity/Person.3c812ed6-4319-44de-b573-c458e4346c9c.Durable.json?api_key=KEYVAL",
"type": "Person",
"uuid": "3c812ed6-4319-44de-b573-c458e4346c9c",
"durability": "Durable"
},
"valid_for": {
"start": {
"year": 2011,
"month": 6,
"day": 28
},
"stop": None
},
"type": "Full",
"names": [
{
"salutation": None,
"first_name": "Andrea",
"middle_name": None,
"last_name": "Horns",
"suffix": None,
"valid_for": None
}
],
"age_range": {
"start": 45,
"end": 49
},
"gender": "Female",
"locations": [
{
"id": {
"key": "Location.efe46385-b057-40c3-8b67-f5a5278e0710.Durable",
"url": "https://proapi.whitepages.com/2.1/entity"
"/Location.efe46385-b057-40c3-8b67-f5a5278e0710.Durable.json?"
"api_key=KEYVAL",
"type": "Location",
"uuid": "efe46385-b057-40c3-8b67-f5a5278e0710",
"durability": "Durable"
},
"contact_type": "Home",
"type": "Address",
"legal_entities_at": None,
"city": "Seattle",
"postal_code": "98119",
"zip4": "3045",
"state_code": "WA",
"country_code": "US",
"address": "402 W Howe St, Seattle WA 98119-3045",
"house": "402",
"street_name": "Howe",
"street_type": "St",
"apt_type": None,
"is_receiving_mail": True,
"not_receiving_mail_reason": None,
"usage": "Residential",
"delivery_point": "SingleUnit",
"box_type": None,
"address_type": "Street",
"lat_long": {
"latitude": 47.636105,
"longitude": -122.362549,
"accuracy": "RoofTop"
},
"is_deliverable": True,
"contained_by_locations": None
}
],
"phones": [],
"best_name": "Andrea Horns"
},
{
"id": {
"key": "Business.d2a27cbc-4760-49f5-99f3-65cac7911716.Durable",
"url": "https://proapi.whitepages.com/2.1/entity"
"/Business.d2a27cbc-4760-49f5-99f3-65cac7911716.Durable.json?api_key=KEYVAL",
"type": "Business",
"uuid": "d2a27cbc-4760-49f5-99f3-65cac7911716",
"durability": "Durable"
},
"valid_for": None,
"name": "Andrea Horns Photography",
"locations": [
{
"id": {
"key": "Location.efe46385-b057-40c3-8b67-f5a5278e0710.Durable",
"url": "https://proapi.whitepages.com/2.1/entity/"
"Location.efe46385-b057-40c3-8b67-f5a5278e0710.Durable.json?"
"api_key=KEYVAL",
"type": "Location",
"uuid": "efe46385-b057-40c3-8b67-f5a5278e0710",
"durability": "Durable"
},
"contact_type": "Business",
"type": "Address",
"legal_entities_at": None,
"city": "Seattle",
"postal_code": "98119",
"zip4": "3045",
"state_code": "WA",
"country_code": "US",
"address": "402 W Howe St, Seattle WA 98119-3045",
"house": "402",
"street_name": "Howe",
"street_type": "St",
"apt_type": None,
"is_receiving_mail": True,
"not_receiving_mail_reason": None,
"usage": "Residential",
"delivery_point": "SingleUnit",
"box_type": None,
"address_type": "Street",
"lat_long": {
"latitude": 47.636105,
"longitude": -122.362549,
"accuracy": "RoofTop"
},
"is_deliverable": True,
"contained_by_locations": None
}
],
"phones": [
{
"id": {
"key": "Phone.c81d6fef-a2df-4b08-cfe3-bc7128b6f5f1.Durable",
"url": "https://proapi.whitepages.com/2.1/entity/"
"Phone.c81d6fef-a2df-4b08-cfe3-bc7128b6f5f1.Durable.json?api_key=KEYVAL",
"type": "Phone",
"uuid": "c81d6fef-a2df-4b08-cfe3-bc7128b6f5f1",
"durability": "Durable"
},
"contact_type": "Business",
"line_type": "Landline",
"belongs_to": None,
"associated_locations": None,
"is_valid": None,
"phone_number": "2063131662",
"country_calling_code": "1",
"extension": None,
"carrier": None,
"do_not_call": None,
"reputation": None,
"is_prepaid": None,
"best_location": None
}
]
}
],
"city": "Seattle",
"postal_code": "98119",
"zip4": "2043",
"state_code": "WA",
"country_code": "US",
"address": "413 W Howe St, Seattle WA 98119-2043",
"house": "413",
"street_name": "Howe",
"street_type": "St",
"apt_type": None,
"is_receiving_mail": True,
"not_receiving_mail_reason": None,
"usage": "Residential",
"delivery_point": "SingleUnit",
"box_type": None,
"address_type": "Street",
"lat_long": {
"latitude": 47.636105,
"longitude": -122.362549,
"accuracy": "RoofTop"
},
"is_deliverable": True
}
],
"messages": []
}
def test_location(self):
        location_test = [WhitePagesLocation(location)
                         for location in self.basic_input['results']]
self.assertEqual(location_test[0].city, 'Seattle')
self.assertEqual(location_test[0].is_deliverable, True)
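

# --- Editor's note: usage sketch for WhitePagesLocation -----------------------
# A hedged example of how a raw API payload shaped like basic_input above maps
# onto WhitePagesLocation objects; only the attributes exercised by the test
# (city, is_deliverable) are assumed here.
def _example_parse_locations(response):
    locations = [WhitePagesLocation(loc) for loc in response['results']]
    return [(loc.city, loc.is_deliverable) for loc in locations]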
|
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <[email protected]>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import netscaler
netscaler.__salt__ = {}
class MockJson(Exception):
'''
Mock Json class
'''
@staticmethod
def loads(content):
'''
Mock loads method
'''
return content
@staticmethod
def dumps(dumps):
'''
Mock dumps method
'''
return dumps
class MockNSNitroError(Exception):
'''
Mock NSNitroError class
'''
def __init__(self, message='error'):
self._message = message
super(MockNSNitroError, self).__init__(self.message)
def _get_message(self):
'''
get_message method
'''
return self._message
def _set_message(self, message):
'''
set_message method
'''
self._message = message
message = property(_get_message, _set_message)
class MockNSNitro(object):
'''
Mock NSNitro class
'''
flag = None
def __init__(self, host, user, passwd, bol):
pass
@staticmethod
def login():
'''
Mock login method
'''
return True
@staticmethod
def logout():
'''
Mock logout method
'''
return True
class MockNSServiceGroup(object):
'''
Mock NSServiceGroup class
'''
def __init__(self):
self.sg_name = None
def set_servicegroupname(self, sg_name):
'''
Mock set_servicegroupname method
'''
self.sg_name = sg_name
return MockNSServiceGroup()
@staticmethod
def get(obj, servicegroup):
'''
Mock get method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return MockNSServiceGroup()
@staticmethod
def add(obj, servicegroup):
'''
Mock add method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return MockNSServiceGroup()
@staticmethod
def delete(obj, servicegroup):
'''
Mock delete method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return MockNSServiceGroup()
@staticmethod
def get_servers(obj, servicegroup):
'''
Mock get_servers method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return [MockNSServiceGroup()]
@staticmethod
def enable_server(obj, servicegroup):
'''
Mock enable_server method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return MockNSServiceGroup()
@staticmethod
def disable_server(obj, servicegroup):
'''
Mock disable_server method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return MockNSServiceGroup()
@staticmethod
def get_servername():
'''
Mock get_servername method
'''
return 'serviceGroupName'
@staticmethod
def get_state():
'''
Mock get_state method
'''
return 'ENABLED'
@staticmethod
def get_servicetype():
'''
Mock get_servicetype method
'''
return ''
@staticmethod
def set_servicetype(bol):
'''
Mock set_servicetype method
'''
return bol
class MockNSServiceGroupServerBinding(object):
'''
Mock NSServiceGroupServerBinding class
'''
def __init__(self):
self.sg_name = None
def set_servername(self, sg_name):
'''
Mock set_servername method
'''
self.sg_name = sg_name
return MockNSServiceGroupServerBinding()
def set_servicegroupname(self, sg_name):
'''
Mock set_servicegroupname method
'''
self.sg_name = sg_name
return MockNSServiceGroupServerBinding()
def set_port(self, sg_name):
'''
Mock set_port method
'''
self.sg_name = sg_name
return MockNSServiceGroupServerBinding()
@staticmethod
def add(obj, servicegroup):
'''
Mock add method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return MockNSServiceGroupServerBinding()
@staticmethod
def delete(obj, servicegroup):
'''
Mock delete method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return MockNSServiceGroupServerBinding()
class MockNSService(object):
'''
Mock NSService class
'''
def __init__(self):
self.sg_name = None
def set_name(self, sg_name):
'''
Mock set_name method
'''
self.sg_name = sg_name
return MockNSService()
@staticmethod
def get(obj, servicegroup):
'''
Mock get method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return MockNSService()
@staticmethod
def enable(obj, servicegroup):
'''
Mock enable method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return MockNSService()
@staticmethod
def disable(obj, servicegroup):
'''
Mock disable method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return MockNSService()
@staticmethod
def get_svrstate():
'''
Mock get_svrstate method
'''
return 'UP'
class MockNSServer(object):
'''
Mock NSServer class
'''
flag = None
def __init__(self):
self.sg_name = None
def set_name(self, sg_name):
'''
Mock set_name method
'''
self.sg_name = sg_name
return MockNSServer()
@staticmethod
def get(obj, servicegroup):
'''
Mock get method
'''
return MockNSServer()
@staticmethod
def add(obj, servicegroup):
'''
Mock add method
'''
return MockNSServer()
@staticmethod
def delete(obj, servicegroup):
'''
Mock delete method
'''
return MockNSServer()
@staticmethod
def update(obj, servicegroup):
'''
Mock update method
'''
return MockNSServer()
@staticmethod
def enable(obj, servicegroup):
'''
Mock enable method
'''
return MockNSServer()
@staticmethod
def disable(obj, servicegroup):
'''
Mock disable method
'''
return MockNSServer()
@staticmethod
def get_ipaddress():
'''
Mock get_ipaddress method
'''
return ''
@staticmethod
def set_ipaddress(s_ip):
'''
Mock set_ipaddress method
'''
return s_ip
def get_state(self):
'''
Mock get_state method
'''
if self.flag == 1:
return ''
elif self.flag == 2:
return 'DISABLED'
return 'ENABLED'
class MockNSLBVServer(object):
'''
Mock NSLBVServer class
'''
def __init__(self):
self.sg_name = None
def set_name(self, sg_name):
'''
Mock set_name method
'''
self.sg_name = sg_name
return MockNSLBVServer()
@staticmethod
def get(obj, servicegroup):
'''
Mock get method
'''
return MockNSLBVServer()
@staticmethod
def set_ipv46(v_ip):
'''
Mock set_ipv46 method
'''
return v_ip
@staticmethod
def set_port(v_port):
'''
Mock set_port method
'''
return v_port
@staticmethod
def set_servicetype(v_type):
'''
Mock set_servicetype method
'''
return v_type
@staticmethod
def get_ipv46():
'''
Mock get_ipv46 method
'''
return ''
@staticmethod
def get_port():
'''
Mock get_port method
'''
return ''
@staticmethod
def get_servicetype():
'''
Mock get_servicetype method
'''
return ''
@staticmethod
def add(obj, servicegroup):
'''
Mock add method
'''
return MockNSLBVServer()
@staticmethod
def delete(obj, servicegroup):
'''
Mock delete method
'''
return MockNSLBVServer()
class MockNSLBVServerServiceGroupBinding(object):
'''
Mock NSLBVServerServiceGroupBinding class
'''
flag = None
def __init__(self):
self.sg_name = None
def set_name(self, sg_name):
'''
Mock set_name method
'''
self.sg_name = sg_name
return MockNSLBVServerServiceGroupBinding()
@staticmethod
def get(obj, servicegroup):
'''
Mock get method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return [MockNSLBVServerServiceGroupBinding()]
@staticmethod
def get_servicegroupname():
'''
Mock get_servicegroupname method
'''
return 'serviceGroupName'
def set_servicegroupname(self, sg_name):
'''
Mock set_servicegroupname method
'''
self.sg_name = sg_name
if self.flag:
return None
return MockNSLBVServerServiceGroupBinding()
@staticmethod
def add(obj, servicegroup):
'''
Mock add method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return MockNSLBVServerServiceGroupBinding()
@staticmethod
def delete(obj, servicegroup):
'''
Mock delete method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return MockNSLBVServerServiceGroupBinding()
class MockNSSSLVServerSSLCertKeyBinding(object):
'''
Mock NSSSLVServerSSLCertKeyBinding class
'''
def __init__(self):
self.sg_name = None
def set_vservername(self, sg_name):
'''
Mock set_vservername method
'''
self.sg_name = sg_name
return MockNSSSLVServerSSLCertKeyBinding()
@staticmethod
def get(obj, servicegroup):
'''
Mock get method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return [MockNSSSLVServerSSLCertKeyBinding()]
@staticmethod
def get_certkeyname():
'''
Mock get_certkeyname method
'''
return 'serviceGroupName'
def set_certkeyname(self, sg_name):
'''
Mock set_certkeyname method
'''
self.sg_name = sg_name
return MockNSSSLVServerSSLCertKeyBinding()
@staticmethod
def add(obj, servicegroup):
'''
Mock add method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return MockNSSSLVServerSSLCertKeyBinding()
@staticmethod
def delete(obj, servicegroup):
'''
Mock delete method
'''
if MockNSNitro.flag:
raise MockNSNitroError
return MockNSSSLVServerSSLCertKeyBinding()
netscaler.NSNitro = MockNSNitro
netscaler.NSServiceGroup = MockNSServiceGroup
netscaler.NSServiceGroupServerBinding = MockNSServiceGroupServerBinding
netscaler.NSLBVServerServiceGroupBinding = MockNSLBVServerServiceGroupBinding
netscaler.NSService = MockNSService
netscaler.NSServer = MockNSServer
netscaler.NSLBVServer = MockNSLBVServer
netscaler.NSNitroError = MockNSNitroError
netscaler.NSSSLVServerSSLCertKeyBinding = MockNSSSLVServerSSLCertKeyBinding
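# NOTE (added for clarity): the tests below drive these mocks through two
# class-level flags: MockNSNitro.flag (None lets the mocked Nitro calls
# succeed, a truthy value makes them raise MockNSNitroError) and
# MockNSServer.flag (None, 1 and 2 make get_state() return 'ENABLED', ''
# and 'DISABLED' respectively).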
@skipIf(NO_MOCK, NO_MOCK_REASON)
class NetscalerTestCase(TestCase):
'''
TestCase for salt.modules.netscaler
'''
# 'servicegroup_exists' function tests: 1
def test_servicegroup_exists(self):
'''
Tests if it checks if a service group exists
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
MockNSNitro.flag = None
self.assertTrue(netscaler.servicegroup_exists('serviceGrpName'))
self.assertFalse(netscaler.servicegroup_exists('serviceGrpName',
sg_type='HTTP'))
MockNSNitro.flag = True
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.servicegroup_exists('serGrpNme'))
# 'servicegroup_add' function tests: 1
def test_servicegroup_add(self):
'''
Tests if it adds a new service group
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertFalse(netscaler.servicegroup_add('serviceGroupName'))
MockNSNitro.flag = True
self.assertFalse(netscaler.servicegroup_add('serviceGroupName'))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.servicegroup_add('serveGrpName'))
# 'servicegroup_delete' function tests: 1
def test_servicegroup_delete(self):
'''
Tests if it deletes a service group
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
MockNSNitro.flag = None
self.assertTrue(netscaler.servicegroup_delete('serviceGrpName'))
mock = MagicMock(side_effect=[None, MockNSServiceGroup()])
with patch.object(netscaler, '_servicegroup_get', mock):
MockNSNitro.flag = True
self.assertFalse(netscaler.servicegroup_delete('srGrpName'))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.servicegroup_delete('sGNam'))
# 'servicegroup_server_exists' function tests: 1
def test_servicegroup_server_exists(self):
'''
Tests if it checks whether a server:port combination
is a member of a servicegroup
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertFalse(netscaler.servicegroup_server_exists
('serviceGrpName', 'serverName', 'serverPort'))
# 'servicegroup_server_up' function tests: 1
def test_servicegroup_server_up(self):
'''
Tests if it checks whether a server:port member
of a servicegroup is in state UP
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertFalse(netscaler.servicegroup_server_up
('serviceGrpName', 'serverName', 'serverPort'))
# 'servicegroup_server_enable' function tests: 1
def test_servicegroup_server_enable(self):
'''
Tests if it enables a server:port member of a servicegroup
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertFalse(netscaler.servicegroup_server_enable
('serviceGrpName', 'serverName', 'serverPort'))
with patch.object(netscaler, '_servicegroup_get_server',
MagicMock(return_value=MockNSServiceGroup())):
MockNSNitro.flag = None
self.assertTrue(netscaler.servicegroup_server_enable
('servGrpName', 'serverName', 'serPort'))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.servicegroup_server_enable
('serGrpName', 'serverName', 'sPort'))
# 'servicegroup_server_disable' function tests: 1
def test_sergrp_server_disable(self):
'''
Tests if it disables a server:port member of a servicegroup
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertFalse(netscaler.servicegroup_server_disable
('serviceGrpName', 'serverName', 'serverPort'))
with patch.object(netscaler, '_servicegroup_get_server',
MagicMock(return_value=MockNSServiceGroup())):
MockNSNitro.flag = None
self.assertTrue(netscaler.servicegroup_server_disable
('serveGrpName', 'serverName', 'serPort'))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.servicegroup_server_disable
('servGrpName', 'serverName', 'sPort'))
# 'servicegroup_server_add' function tests: 1
def test_servicegroup_server_add(self):
'''
Tests if it adds a server:port member to a servicegroup
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.servicegroup_server_add
('serGrpName', 'serverName', 'sPort'))
MockNSNitro.flag = None
self.assertTrue(netscaler.servicegroup_server_add
('serGrpName', 'serverName', 'serverPort'))
mock = MagicMock(return_value=
MockNSServiceGroupServerBinding())
with patch.object(netscaler, '_servicegroup_get_server',
mock):
MockNSNitro.flag = True
self.assertFalse(netscaler.servicegroup_server_add
('serviceGroupName', 'serverName',
'serPort'))
# 'servicegroup_server_delete' function tests: 1
def test_servicegroup_server_delete(self):
'''
Tests if it removes a server:port member from a servicegroup
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.servicegroup_server_delete
('servGrpName', 'serverName', 'sPort'))
self.assertFalse(netscaler.servicegroup_server_delete
('serviceGroupName', 'serverName',
'serverPort'))
mock = MagicMock(return_value=
MockNSServiceGroupServerBinding())
with patch.object(netscaler, '_servicegroup_get_server',
mock):
MockNSNitro.flag = None
self.assertTrue(netscaler.servicegroup_server_delete
('serviceGroupName', 'serverName',
'serPort'))
# 'service_up' function tests: 1
def test_service_up(self):
'''
Tests if it checks if a service is UP
'''
mock = MagicMock(return_value=MockNSService())
with patch.object(netscaler, '_service_get', mock):
self.assertTrue(netscaler.service_up('serviceGrpName'))
# 'service_exists' function tests: 1
def test_service_exists(self):
'''
Tests if it checks if a service exists
'''
mock = MagicMock(return_value=MockNSService())
with patch.object(netscaler, '_service_get', mock):
self.assertTrue(netscaler.service_exists('serviceGrpName'))
# 'service_enable' function tests: 1
def test_service_enable(self):
'''
Tests if it enables a service
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertTrue(netscaler.service_enable('serviceGrpName'))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.service_enable('serviceGrpName'))
mock = MagicMock(return_value=MockNSService())
with patch.object(netscaler, '_service_get', mock):
self.assertFalse(netscaler.service_enable('serGrpName'))
# 'service_disable' function tests: 1
def test_service_disable(self):
'''
Tests if it disables a service
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertTrue(netscaler.service_disable('serviceGrpName'))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.service_disable('serceGrpName'))
mock = MagicMock(return_value=MockNSService())
with patch.object(netscaler, '_service_get', mock):
self.assertFalse(netscaler.service_disable('seGrpName'))
# 'server_exists' function tests: 1
def test_server_exists(self):
'''
Tests if it checks if a server exists
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertTrue(netscaler.server_exists('serviceGrpName'))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.server_exists('serviceGrpName'))
self.assertFalse(netscaler.server_exists('serviceGrpName',
ip='1.0.0.1'))
self.assertFalse(netscaler.server_exists('serviceGrpName',
s_state='serverName'))
# 'server_add' function tests: 1
def test_server_add(self):
'''
Tests if it adds a server
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertFalse(netscaler.server_add('servGrpName', '1.0.0.1'))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.server_add('serviceGrpName',
'1.0.0.1'))
mock = MagicMock(return_value=False)
with patch.object(netscaler, 'server_exists', mock):
self.assertTrue(netscaler.server_add('serviceGrpName',
'1.0.0.1'))
# 'server_delete' function tests: 1
def test_server_delete(self):
'''
Tests if it deletes a server
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertTrue(netscaler.server_delete('serviceGrpName'))
mock = MagicMock(side_effect=[MockNSServer(), None])
with patch.object(netscaler, '_server_get', mock):
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.server_delete('serGrpName'))
self.assertFalse(netscaler.server_delete('serviceGrpName'))
# 'server_update' function tests: 1
def test_server_update(self):
'''
Tests if it updates a server's attributes
'''
mock = MagicMock(side_effect=[None, MockNSServer(), MockNSServer(),
MockNSServer()])
with patch.object(netscaler, '_server_get', mock):
self.assertFalse(netscaler.server_update('seGrName', '1.0.0.1'))
self.assertFalse(netscaler.server_update('serGrpName', ''))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.server_update('serGrpName',
'1.0.0.1'))
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertTrue(netscaler.server_update('serGrpName',
'1.0.0.1'))
# 'server_enabled' function tests: 1
def test_server_enabled(self):
'''
Tests if it checks if a server is enabled globally
'''
mock = MagicMock(return_value=MockNSServer())
with patch.object(netscaler, '_server_get', mock):
MockNSServer.flag = None
self.assertTrue(netscaler.server_enabled('serGrpName'))
# 'server_enable' function tests: 1
def test_server_enable(self):
'''
Tests if it enables a server globally
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertTrue(netscaler.server_enable('serGrpName'))
MockNSServer.flag = 1
self.assertTrue(netscaler.server_enable('serGrpName'))
mock = MagicMock(side_effect=[MockNSServer(), None])
with patch.object(netscaler, '_server_get', mock):
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.server_enable('serGrpName'))
self.assertFalse(netscaler.server_enable('serGrpName'))
# 'server_disable' function tests: 1
def test_server_disable(self):
'''
Tests if it disables a server globally
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertTrue(netscaler.server_disable('serGrpName'))
MockNSServer.flag = 2
self.assertTrue(netscaler.server_disable('serGrpName'))
MockNSServer.flag = None
mock = MagicMock(side_effect=[None, MockNSServer()])
with patch.object(netscaler, '_server_get', mock):
self.assertFalse(netscaler.server_disable('serGrpName'))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.server_disable('serGrpName'))
# 'vserver_exists' function tests: 1
def test_vserver_exists(self):
'''
Tests if it checks if a vserver exists
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertTrue(netscaler.vserver_exists('vserverName'))
self.assertFalse(netscaler.vserver_exists('vserverName',
v_ip='1.0.0.1'))
self.assertFalse(netscaler.vserver_exists('vserrName', v_ip='',
v_port='vserverPort'))
self.assertFalse(netscaler.vserver_exists('vserrName', v_ip='',
v_port='',
v_type='vserverType'))
mock = MagicMock(return_value=None)
with patch.object(netscaler, '_vserver_get', mock):
self.assertFalse(netscaler.vserver_exists('vserverName'))
# 'vserver_add' function tests: 1
def test_vserver_add(self):
'''
Tests if it adds a new lb vserver
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertFalse(netscaler.vserver_add('alex.patate.chaude.443',
'1.2.3.4', '443', 'SSL'))
mock = MagicMock(return_value=False)
with patch.object(netscaler, 'vserver_exists', mock):
self.assertTrue(netscaler.vserver_add('alex.pae.chaude.443',
'1.2.3.4', '443',
'SSL'))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.vserver_add('alex.chde.443',
'1.2.3.4', '443',
'SSL'))
# 'vserver_delete' function tests: 1
def test_vserver_delete(self):
'''
Tests if it deletes an lb vserver
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertTrue(netscaler.vserver_delete('alex.pe.chaude.443'))
mock = MagicMock(side_effect=[None, MockNSLBVServer()])
with patch.object(netscaler, '_vserver_get', mock):
self.assertFalse(netscaler.vserver_delete('alex.chade.443'))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.vserver_delete('al.cha.443'))
# 'vserver_servicegroup_exists' function tests: 1
def test_vser_sergrp_exists(self):
'''
Tests if it checks if a servicegroup is tied to a vserver
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertTrue(netscaler.vserver_servicegroup_exists
('vserverName', 'serviceGroupName'))
# 'vserver_servicegroup_add' function tests: 1
def test_vserver_servicegroup_add(self):
'''
Tests if it binds a servicegroup to a vserver
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
MockNSNitro.flag = None
self.assertTrue(netscaler.vserver_servicegroup_add
('vserverName', 'serGroupName'))
mock = MagicMock(side_effect=
[MockNSLBVServerServiceGroupBinding(), None])
with patch.object(netscaler, 'vserver_servicegroup_exists',
mock):
self.assertFalse(netscaler.vserver_servicegroup_add
('vserName', 'serGroupName'))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.vserver_servicegroup_add
('vName', 'serGroupName'))
# 'vserver_servicegroup_delete' function tests: 1
def test_vser_sergrp_delete(self):
'''
Tests if it unbinds a servicegroup from a vserver
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertFalse(netscaler.vserver_servicegroup_delete
('vservName', 'serGroupName'))
mock = MagicMock(return_value=
MockNSLBVServerServiceGroupBinding())
with patch.object(netscaler, 'vserver_servicegroup_exists',
mock):
MockNSNitro.flag = None
self.assertTrue(netscaler.vserver_servicegroup_delete
('vName', 'serGroupName'))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.vserver_servicegroup_delete
('vserverName', 'serGroupName'))
# 'vserver_sslcert_exists' function tests: 1
def test_vserver_sslcert_exists(self):
'''
Tests if it checks if an SSL certificate is tied to a vserver
'''
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
self.assertTrue(netscaler.vserver_sslcert_exists
('vserverName', 'serviceGroupName'))
# 'vserver_sslcert_add' function tests: 1
def test_vserver_sslcert_add(self):
'''
Tests if it binds an SSL certificate to a vserver
'''
mock = MagicMock(side_effect=[MockNSSSLVServerSSLCertKeyBinding(),
None, None])
with patch.object(netscaler, 'vserver_sslcert_exists', mock):
self.assertFalse(netscaler.vserver_sslcert_add
('vserName', 'serGroupName'))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.vserver_sslcert_add
('vName', 'serGrName'))
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
MockNSNitro.flag = None
self.assertTrue(netscaler.vserver_sslcert_add
('vserverName', 'serGroupName'))
# 'vserver_sslcert_delete' function tests: 1
def test_vserver_sslcert_delete(self):
'''
Tests if it unbinds an SSL certificate from a vserver
'''
mock = MagicMock(side_effect=[None,
MockNSSSLVServerSSLCertKeyBinding(),
MockNSSSLVServerSSLCertKeyBinding()])
with patch.object(netscaler, 'vserver_sslcert_exists', mock):
self.assertFalse(netscaler.vserver_sslcert_delete('vName',
'serGrpName'))
mock = MagicMock(return_value='')
with patch.dict(netscaler.__salt__, {'config.option': mock}):
MockNSNitro.flag = None
self.assertTrue(netscaler.vserver_sslcert_delete
('vservName', 'serGroupName'))
with patch.object(netscaler, '_connect',
MagicMock(return_value=None)):
self.assertFalse(netscaler.vserver_sslcert_delete
('vserverName', 'serGroupName'))
if __name__ == '__main__':
from integration import run_tests
run_tests(NetscalerTestCase, needs_daemon=False)
#
#
# Copyright (C) 2007, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Instance related QA tests.
"""
import os
import re
import time
from ganeti import utils
from ganeti import constants
from ganeti import pathutils
from ganeti import query
from ganeti.netutils import IP4Address
import qa_config
import qa_daemon
import qa_utils
import qa_error
from qa_filters import stdout_of
from qa_utils import AssertCommand, AssertEqual, AssertIn
from qa_utils import InstanceCheck, INST_DOWN, INST_UP, FIRST_ARG, RETURN_VALUE
from qa_instance_utils import CheckSsconfInstanceList, \
CreateInstanceDrbd8, \
CreateInstanceByDiskTemplate, \
CreateInstanceByDiskTemplateOneNode, \
GetGenericAddParameters
def _GetDiskStatePath(disk):
return "/sys/block/%s/device/state" % disk
def GetInstanceInfo(instance):
"""Return information about the actual state of an instance.
@type instance: string
@param instance: the instance name
@return: a dictionary with the following keys:
- "nodes": instance nodes, a list of strings
- "volumes": instance volume IDs, a list of strings
- "drbd-minors": DRBD minors used by the instance, a dictionary where
keys are nodes, and values are lists of integers (or an empty
dictionary for non-DRBD instances)
- "disk-template": instance disk template
- "storage-type": storage type associated with the instance disk template
"""
node_elem = r"([^,()]+)(?:\s+\([^)]+\))?"
# re_nodelist matches a list of nodes returned by gnt-instance info, e.g.:
# node1.fqdn
# node2.fqdn,node3.fqdn
# node4.fqdn (group mygroup, group UUID 01234567-abcd-0123-4567-0123456789ab)
# FIXME This works with no more than 2 secondaries
re_nodelist = re.compile(node_elem + "(?:," + node_elem + ")?$")
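# For the hypothetical examples above, matching "node2.fqdn,node3.fqdn" yields
# groups ("node2.fqdn", "node3.fqdn") and matching "node1.fqdn" yields
# ("node1.fqdn", None); the None entries are filtered out below.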
info = qa_utils.GetObjectInfo(["gnt-instance", "info", instance])[0]
nodes = []
for nodeinfo in info["Nodes"]:
if "primary" in nodeinfo:
nodes.append(nodeinfo["primary"])
elif "secondaries" in nodeinfo:
nodestr = nodeinfo["secondaries"]
if nodestr:
m = re_nodelist.match(nodestr)
if m:
nodes.extend(filter(None, m.groups()))
else:
nodes.append(nodestr)
re_drbdnode = re.compile(r"^([^\s,]+),\s+minor=([0-9]+)$")
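# re_drbdnode matches strings such as "node1.fqdn, minor=0" (node name is
# illustrative), capturing the node name and the DRBD minor number.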
vols = []
drbd_min = {}
dtypes = []
for (count, diskinfo) in enumerate(info["Disks"]):
(dtype, _) = diskinfo["disk/%s" % count].split(",", 1)
dtypes.append(dtype)
if dtype == constants.DT_DRBD8:
for child in diskinfo["child devices"]:
vols.append(child["logical_id"])
for key in ["nodeA", "nodeB"]:
m = re_drbdnode.match(diskinfo[key])
if not m:
raise qa_error.Error("Cannot parse DRBD info: %s" % diskinfo[key])
node = m.group(1)
minor = int(m.group(2))
minorlist = drbd_min.setdefault(node, [])
minorlist.append(minor)
elif dtype == constants.DT_PLAIN:
vols.append(diskinfo["logical_id"])
# TODO remove and modify calling sites
disk_template = utils.GetDiskTemplateString(dtypes)
storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]
assert nodes
assert len(nodes) < 2 or vols
return {
"nodes": nodes,
"volumes": vols,
"drbd-minors": drbd_min,
"disk-template": disk_template,
"storage-type": storage_type,
}
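# A sketch of the returned structure for a hypothetical two-node DRBD
# instance (all names and values below are illustrative only):
# {
#   "nodes": ["node1.fqdn", "node2.fqdn"],
#   "volumes": ["xenvg/inst1-disk0_data", "xenvg/inst1-disk0_meta"],
#   "drbd-minors": {"node1.fqdn": [0], "node2.fqdn": [0]},
#   "disk-template": constants.DT_DRBD8,
#   "storage-type": constants.ST_LVM_VG,
# }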
def _DestroyInstanceDisks(instance):
"""Remove all the backend disks of an instance.
This is used to simulate HW errors (dead nodes, broken disks...); the
configuration of the instance is not affected.
@type instance: dictionary
@param instance: the instance
"""
info = GetInstanceInfo(instance.name)
# FIXME: destruction/removal should be part of the disk class
if info["storage-type"] == constants.ST_LVM_VG:
vols = info["volumes"]
for node in info["nodes"]:
AssertCommand(["lvremove", "-f"] + vols, node=node)
elif info["storage-type"] in (constants.ST_FILE, constants.ST_SHARED_FILE):
# Note that this works for both file and sharedfile, and this is intended.
storage_dir = qa_config.get("file-storage-dir",
pathutils.DEFAULT_FILE_STORAGE_DIR)
idir = os.path.join(storage_dir, instance.name)
for node in info["nodes"]:
AssertCommand(["rm", "-rf", idir], node=node)
elif info["storage-type"] == constants.ST_DISKLESS:
pass
def _GetInstanceFields(instance, fields):
"""Get the value of one or more fields of an instance.
@type instance: string
@param instance: instance name
@type fields: list of string
@param fields: names of the fields
@rtype: tuple of strings
@return: values of the fields
"""
master = qa_config.GetMasterNode()
infocmd = utils.ShellQuoteArgs(["gnt-instance", "list", "--no-headers",
"--separator=:", "--units", "m", "-o",
",".join(fields), instance])
return tuple(qa_utils.GetCommandOutput(master.primary, infocmd)
.strip()
.split(":"))
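# Example usage (hypothetical instance name and output): calling
# _GetInstanceFields("inst1.fqdn", ["be/vcpus", "status"]) runs
#   gnt-instance list --no-headers --separator=: --units m -o be/vcpus,status inst1.fqdn
# on the master node and might return something like ("1", "running").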
def _GetInstanceField(instance, field):
"""Get the value of a field of an instance.
@type instance: string
@param instance: Instance name
@type field: string
@param field: Name of the field
@rtype: string
"""
return _GetInstanceFields(instance, [field])[0]
def _GetBoolInstanceField(instance, field):
"""Get the Boolean value of a field of an instance.
@type instance: string
@param instance: Instance name
@type field: string
@param field: Name of the field
@rtype: bool
"""
info_out = _GetInstanceField(instance, field)
if info_out == "Y":
return True
elif info_out == "N":
return False
else:
raise qa_error.Error("Field %s of instance %s has a non-Boolean value:"
" %s" % (field, instance, info_out))
def _GetNumInstanceField(instance, field):
"""Get a numeric value of a field of an instance.
@type instance: string
@param instance: Instance name
@type field: string
@param field: Name of the field
@rtype: int or float
"""
info_out = _GetInstanceField(instance, field)
try:
ret = int(info_out)
except ValueError:
try:
ret = float(info_out)
except ValueError:
raise qa_error.Error("Field %s of instance %s has a non-numeric value:"
" %s" % (field, instance, info_out))
return ret
def GetInstanceSpec(instance, spec):
"""Return the current spec for the given parameter.
@type instance: string
@param instance: Instance name
@type spec: string
@param spec: one of the supported parameters: "memory-size", "cpu-count",
"disk-count", "disk-size", "nic-count"
@rtype: tuple
@return: (minspec, maxspec); minspec and maxspec can be different only for
memory and disk size
"""
specmap = {
"memory-size": ["be/minmem", "be/maxmem"],
"cpu-count": ["vcpus"],
"disk-count": ["disk.count"],
"disk-size": ["disk.size/ "],
"nic-count": ["nic.count"],
}
# For disks, first we need the number of disks
if spec == "disk-size":
(numdisk, _) = GetInstanceSpec(instance, "disk-count")
fields = ["disk.size/%s" % k for k in range(0, numdisk)]
else:
assert spec in specmap, "%s not in %s" % (spec, specmap)
fields = specmap[spec]
values = [_GetNumInstanceField(instance, f) for f in fields]
return (min(values), max(values))
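# Example (hypothetical values): for an instance with be/minmem=128 and
# be/maxmem=256, GetInstanceSpec(name, "memory-size") returns (128, 256);
# for single-valued specs such as "cpu-count", min and max are equal.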
def IsFailoverSupported(instance):
return instance.disk_template in constants.DTS_MIRRORED
def IsMigrationSupported(instance):
return instance.disk_template in constants.DTS_MIRRORED
def IsDiskReplacingSupported(instance):
return instance.disk_template == constants.DT_DRBD8
def IsDiskSupported(instance):
return instance.disk_template != constants.DT_DISKLESS
def TestInstanceAddWithPlainDisk(nodes, fail=False):
"""gnt-instance add -t plain"""
if constants.DT_PLAIN in qa_config.GetEnabledDiskTemplates():
instance = CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_PLAIN,
fail=fail)
if not fail:
qa_utils.RunInstanceCheck(instance, True)
return instance
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddWithDrbdDisk(nodes):
"""gnt-instance add -t drbd"""
if constants.DT_DRBD8 in qa_config.GetEnabledDiskTemplates():
return CreateInstanceDrbd8(nodes)
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddFile(nodes):
"""gnt-instance add -t file"""
assert len(nodes) == 1
if constants.DT_FILE in qa_config.GetEnabledDiskTemplates():
return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_FILE)
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddSharedFile(nodes):
"""gnt-instance add -t sharedfile"""
assert len(nodes) == 1
if constants.DT_SHARED_FILE in qa_config.GetEnabledDiskTemplates():
return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_SHARED_FILE)
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddDiskless(nodes):
"""gnt-instance add -t diskless"""
assert len(nodes) == 1
if constants.DT_DISKLESS in qa_config.GetEnabledDiskTemplates():
return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_DISKLESS)
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddRADOSBlockDevice(nodes):
"""gnt-instance add -t rbd"""
assert len(nodes) == 1
if constants.DT_RBD in qa_config.GetEnabledDiskTemplates():
return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_RBD)
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddGluster(nodes):
"""gnt-instance add -t gluster"""
assert len(nodes) == 1
if constants.DT_GLUSTER in qa_config.GetEnabledDiskTemplates():
return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_GLUSTER)
@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestInstanceRemove(instance):
"""gnt-instance remove"""
AssertCommand(["gnt-instance", "remove", "-f", instance.name])
@InstanceCheck(INST_DOWN, INST_UP, FIRST_ARG)
def TestInstanceStartup(instance):
"""gnt-instance startup"""
AssertCommand(["gnt-instance", "startup", instance.name])
@InstanceCheck(INST_UP, INST_DOWN, FIRST_ARG)
def TestInstanceShutdown(instance):
"""gnt-instance shutdown"""
AssertCommand(["gnt-instance", "shutdown", instance.name])
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceReboot(instance):
"""gnt-instance reboot"""
options = qa_config.get("options", {})
reboot_types = options.get("reboot-types", constants.REBOOT_TYPES)
name = instance.name
for rtype in reboot_types:
AssertCommand(["gnt-instance", "reboot", "--type=%s" % rtype, name])
AssertCommand(["gnt-instance", "shutdown", name])
qa_utils.RunInstanceCheck(instance, False)
AssertCommand(["gnt-instance", "reboot", name])
master = qa_config.GetMasterNode()
cmd = ["gnt-instance", "list", "--no-headers", "-o", "status", name]
result_output = qa_utils.GetCommandOutput(master.primary,
utils.ShellQuoteArgs(cmd))
AssertEqual(result_output.strip(), constants.INSTST_RUNNING)
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceReinstall(instance):
"""gnt-instance reinstall"""
if instance.disk_template == constants.DT_DISKLESS:
print qa_utils.FormatInfo("Test not supported for diskless instances")
return
qa_storage = qa_config.get("qa-storage")
if qa_storage is None:
print qa_utils.FormatInfo("Test not supported because the additional QA"
" storage is not available")
else:
# Reinstall with OS image from QA storage
url = "%s/busybox.img" % qa_storage
AssertCommand(["gnt-instance", "reinstall",
"--os-parameters", "os-image=" + url,
"-f", instance.name])
# Reinstall with OS image as local file on the node
pnode = _GetInstanceField(instance.name, "pnode")
cmd = ("wget -O busybox.img %s &> /dev/null &&"
" echo $(pwd)/busybox.img") % url
image = qa_utils.GetCommandOutput(pnode, cmd).strip()
AssertCommand(["gnt-instance", "reinstall",
"--os-parameters", "os-image=" + image,
"-f", instance.name])
# Reinstall with a non-existent local file
AssertCommand(["gnt-instance", "reinstall",
"--os-parameters", "os-image=NonExistantOsForQa",
"-f", instance.name], fail=True)
# Reinstall with a non-existent URL
AssertCommand(["gnt-instance", "reinstall",
"--os-parameters", "os-image=http://NonExistantOsForQa",
"-f", instance.name], fail=True)
# Reinstall using OS scripts
AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])
# Test with a non-existent OS definition
AssertCommand(["gnt-instance", "reinstall", "-f",
"--os-type=NonExistantOsForQa",
instance.name],
fail=True)
# Test with existing OS but invalid variant
AssertCommand(["gnt-instance", "reinstall", "-f", "-o", "debootstrap+ola",
instance.name],
fail=True)
# Test with existing OS but invalid variant
AssertCommand(["gnt-instance", "reinstall", "-f", "-o", "debian-image+ola",
instance.name],
fail=True)
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceRenameAndBack(rename_source, rename_target):
"""gnt-instance rename
This must leave the instance with the original name, not the target
name.
"""
CheckSsconfInstanceList(rename_source)
# first do a rename to a different actual name, expecting it to fail
qa_utils.AddToEtcHosts(["meeeeh-not-exists", rename_target])
try:
AssertCommand(["gnt-instance", "rename", rename_source, rename_target],
fail=True)
CheckSsconfInstanceList(rename_source)
finally:
qa_utils.RemoveFromEtcHosts(["meeeeh-not-exists", rename_target])
info = GetInstanceInfo(rename_source)
# Check that the instance volume tags are correctly updated. Note that this
# check is LVM-specific, so we skip it for non-LVM-based instances.
# FIXME: This will need updating when instances will be able to have
# different disks living on storage pools with heterogeneous storage types.
# FIXME: This check should be put inside the disk/storage class themselves,
# rather than explicitly called here.
if info["storage-type"] == constants.ST_LVM_VG:
# In the lvm world we can check for tags on the logical volume
tags_cmd = ("lvs -o tags --noheadings %s | grep " %
(" ".join(info["volumes"]), ))
else:
# Other storage types don't have tags, so we use an always failing command,
# to make sure it never gets executed
tags_cmd = "false"
# and now rename instance to rename_target...
AssertCommand(["gnt-instance", "rename", rename_source, rename_target])
CheckSsconfInstanceList(rename_target)
qa_utils.RunInstanceCheck(rename_source, False)
qa_utils.RunInstanceCheck(rename_target, False)
# NOTE: tags might not be exactly the same as the instance name, due to
# charset restrictions; hence the test might be flaky
if (rename_source != rename_target and
info["storage-type"] == constants.ST_LVM_VG):
for node in info["nodes"]:
AssertCommand(tags_cmd + rename_source, node=node, fail=True)
AssertCommand(tags_cmd + rename_target, node=node, fail=False)
# and back
AssertCommand(["gnt-instance", "rename", rename_target, rename_source])
CheckSsconfInstanceList(rename_source)
qa_utils.RunInstanceCheck(rename_target, False)
if (rename_source != rename_target and
info["storage-type"] == constants.ST_LVM_VG):
for node in info["nodes"]:
AssertCommand(tags_cmd + rename_source, node=node, fail=False)
AssertCommand(tags_cmd + rename_target, node=node, fail=True)
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceFailover(instance):
"""gnt-instance failover"""
if not IsFailoverSupported(instance):
print qa_utils.FormatInfo("Instance doesn't support failover, skipping"
" test")
return
cmd = ["gnt-instance", "failover", "--force", instance.name]
# failover ...
AssertCommand(cmd)
qa_utils.RunInstanceCheck(instance, True)
# ... and back
AssertCommand(cmd)
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceMigrate(instance, toggle_always_failover=True):
"""gnt-instance migrate"""
if not IsMigrationSupported(instance):
print qa_utils.FormatInfo("Instance doesn't support migration, skipping"
" test")
return
cmd = ["gnt-instance", "migrate", "--force", instance.name]
af_par = constants.BE_ALWAYS_FAILOVER
af_field = "be/" + constants.BE_ALWAYS_FAILOVER
af_init_val = _GetBoolInstanceField(instance.name, af_field)
# migrate ...
AssertCommand(cmd)
# TODO: Verify the choice between failover and migration
qa_utils.RunInstanceCheck(instance, True)
# ... and back (possibly with always_failover toggled)
if toggle_always_failover:
AssertCommand(["gnt-instance", "modify", "-B",
("%s=%s" % (af_par, not af_init_val)),
instance.name])
AssertCommand(cmd)
# TODO: Verify the choice between failover and migration
qa_utils.RunInstanceCheck(instance, True)
if toggle_always_failover:
AssertCommand(["gnt-instance", "modify", "-B",
("%s=%s" % (af_par, af_init_val)), instance.name])
# TODO: Split into multiple tests
AssertCommand(["gnt-instance", "shutdown", instance.name])
qa_utils.RunInstanceCheck(instance, False)
AssertCommand(cmd, fail=True)
AssertCommand(["gnt-instance", "migrate", "--force", "--allow-failover",
instance.name])
AssertCommand(["gnt-instance", "start", instance.name])
AssertCommand(cmd)
# @InstanceCheck enforces the check that the instance is running
qa_utils.RunInstanceCheck(instance, True)
AssertCommand(["gnt-instance", "modify", "-B",
("%s=%s" %
(constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)),
instance.name])
AssertCommand(cmd)
qa_utils.RunInstanceCheck(instance, True)
# TODO: Verify that a failover has been done instead of a migration
# TODO: Verify whether the default value is restored here (not hardcoded)
AssertCommand(["gnt-instance", "modify", "-B",
("%s=%s" %
(constants.BE_ALWAYS_FAILOVER, constants.VALUE_FALSE)),
instance.name])
AssertCommand(cmd)
qa_utils.RunInstanceCheck(instance, True)
def TestInstanceInfo(instance):
"""gnt-instance info"""
AssertCommand(["gnt-instance", "info", instance.name])
def _TestKVMHotplug(instance):
"""Tests hotplug modification commands, noting that they
"""
args_to_try = [
["--net", "-1:add", "--hotplug"],
["--net", "-1:modify,mac=aa:bb:cc:dd:ee:ff", "--hotplug", "--force"],
["--net", "-1:remove", "--hotplug"],
["--disk", "-1:add,size=1G", "--hotplug"],
["--disk", "-1:remove", "--hotplug"],
]
for alist in args_to_try:
_, stdout, stderr = \
AssertCommand(["gnt-instance", "modify"] + alist + [instance.name])
if "failed" in stdout or "failed" in stderr:
raise qa_error.Error("Hotplugging command failed; please check output"
" for further information")
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceModify(instance):
"""gnt-instance modify"""
default_hv = qa_config.GetDefaultHypervisor()
# Assume /sbin/init exists on all systems
test_kernel = "/sbin/init"
test_initrd = test_kernel
orig_maxmem = qa_config.get(constants.BE_MAXMEM)
orig_minmem = qa_config.get(constants.BE_MINMEM)
#orig_bridge = qa_config.get("bridge", "xen-br0")
args = [
["-B", "%s=128" % constants.BE_MINMEM],
["-B", "%s=128" % constants.BE_MAXMEM],
["-B", "%s=%s,%s=%s" % (constants.BE_MINMEM, orig_minmem,
constants.BE_MAXMEM, orig_maxmem)],
["-B", "%s=2" % constants.BE_VCPUS],
["-B", "%s=1" % constants.BE_VCPUS],
["-B", "%s=%s" % (constants.BE_VCPUS, constants.VALUE_DEFAULT)],
["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)],
["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_DEFAULT)],
# TODO: bridge tests
#["--bridge", "xen-br1"],
#["--bridge", orig_bridge],
]
# Not all hypervisors support kernel_path (e.g., LXC)
if default_hv in (constants.HT_XEN_PVM,
constants.HT_XEN_HVM,
constants.HT_KVM):
args.extend([
["-H", "%s=%s" % (constants.HV_KERNEL_PATH, test_kernel)],
["-H", "%s=%s" % (constants.HV_KERNEL_PATH, constants.VALUE_DEFAULT)],
])
if default_hv == constants.HT_XEN_PVM:
args.extend([
["-H", "%s=%s" % (constants.HV_INITRD_PATH, test_initrd)],
["-H", "no_%s" % (constants.HV_INITRD_PATH, )],
["-H", "%s=%s" % (constants.HV_INITRD_PATH, constants.VALUE_DEFAULT)],
])
elif default_hv == constants.HT_XEN_HVM:
args.extend([
["-H", "%s=acn" % constants.HV_BOOT_ORDER],
["-H", "%s=%s" % (constants.HV_BOOT_ORDER, constants.VALUE_DEFAULT)],
])
elif default_hv == constants.HT_KVM and \
qa_config.TestEnabled("instance-device-hotplug"):
_TestKVMHotplug(instance)
elif default_hv == constants.HT_LXC:
args.extend([
["-H", "%s=0" % constants.HV_CPU_MASK],
["-H", "%s=%s" % (constants.HV_CPU_MASK, constants.VALUE_DEFAULT)],
["-H", "%s=0" % constants.HV_LXC_NUM_TTYS],
["-H", "%s=%s" % (constants.HV_LXC_NUM_TTYS, constants.VALUE_DEFAULT)],
])
url = "http://example.com/busybox.img"
args.extend([
["--os-parameters", "os-image=" + url],
["--os-parameters", "os-image=default"]
])
for alist in args:
AssertCommand(["gnt-instance", "modify"] + alist + [instance.name])
# check no-modify
AssertCommand(["gnt-instance", "modify", instance.name], fail=True)
# Marking offline while instance is running must fail...
AssertCommand(["gnt-instance", "modify", "--offline", instance.name],
fail=True)
# ...while making it online fails too (needs to be offline first)
AssertCommand(["gnt-instance", "modify", "--online", instance.name],
fail=True)
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceModifyPrimaryAndBack(instance, currentnode, othernode):
"""gnt-instance modify --new-primary
This will leave the instance on its original primary node, not on the other node.
"""
if instance.disk_template != constants.DT_FILE:
print qa_utils.FormatInfo("Test only supported for the file disk template")
return
cluster_name = qa_config.get("name")
name = instance.name
current = currentnode.primary
other = othernode.primary
filestorage = qa_config.get("file-storage-dir",
pathutils.DEFAULT_FILE_STORAGE_DIR)
disk = os.path.join(filestorage, name)
AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name],
fail=True)
AssertCommand(["gnt-instance", "shutdown", name])
AssertCommand(["scp", "-oGlobalKnownHostsFile=%s" %
pathutils.SSH_KNOWN_HOSTS_FILE,
"-oCheckHostIp=no", "-oStrictHostKeyChecking=yes",
"-oHashKnownHosts=no", "-oHostKeyAlias=%s" % cluster_name,
"-r", disk, "%s:%s" % (other, filestorage)], node=current)
AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name])
AssertCommand(["gnt-instance", "startup", name])
# and back
AssertCommand(["gnt-instance", "shutdown", name])
AssertCommand(["rm", "-rf", disk], node=other)
AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % current, name])
AssertCommand(["gnt-instance", "startup", name])
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceStoppedModify(instance):
"""gnt-instance modify (stopped instance)"""
name = instance.name
# Instance was not marked offline; try marking it online once more
AssertCommand(["gnt-instance", "modify", "--online", name])
# Mark instance as offline
AssertCommand(["gnt-instance", "modify", "--offline", name])
# When the instance is offline shutdown should only work with --force,
# while start should never work
AssertCommand(["gnt-instance", "shutdown", name], fail=True)
AssertCommand(["gnt-instance", "shutdown", "--force", name])
AssertCommand(["gnt-instance", "start", name], fail=True)
AssertCommand(["gnt-instance", "start", "--force", name], fail=True)
# Also do offline to offline
AssertCommand(["gnt-instance", "modify", "--offline", name])
# And online again
AssertCommand(["gnt-instance", "modify", "--online", name])
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceConvertDiskTemplate(instance, requested_conversions):
"""gnt-instance modify -t"""
def _BuildConvertCommand(disk_template, node):
cmd = ["gnt-instance", "modify", "-t", disk_template]
if disk_template == constants.DT_DRBD8:
cmd.extend(["-n", node])
cmd.append(name)
return cmd
if len(requested_conversions) < 2:
print qa_utils.FormatInfo("You must specify more than one convertible"
" disk templates in order to test the conversion"
" feature")
return
name = instance.name
template = instance.disk_template
if template in constants.DTS_NOT_CONVERTIBLE_FROM:
print qa_utils.FormatInfo("Unsupported template %s, skipping conversion"
" test" % template)
return
inodes = qa_config.AcquireManyNodes(2)
master = qa_config.GetMasterNode()
snode = inodes[0].primary
if master.primary == snode:
snode = inodes[1].primary
enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
for templ in requested_conversions:
if (templ == template or
templ not in enabled_disk_templates or
templ in constants.DTS_NOT_CONVERTIBLE_TO):
continue
AssertCommand(_BuildConvertCommand(templ, snode))
# Before we return, convert to the original template
AssertCommand(_BuildConvertCommand(template, snode))
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceModifyDisks(instance):
"""gnt-instance modify --disk"""
if not IsDiskSupported(instance):
print qa_utils.FormatInfo("Instance doesn't support disks, skipping test")
return
disk_conf = qa_config.GetDiskOptions()[-1]
size = disk_conf.get("size")
name = instance.name
build_cmd = lambda arg: ["gnt-instance", "modify", "--disk", arg, name]
if qa_config.AreSpindlesSupported():
spindles = disk_conf.get("spindles")
spindles_supported = True
else:
# Any number is good for spindles in this case
spindles = 1
spindles_supported = False
AssertCommand(build_cmd("add:size=%s,spindles=%s" % (size, spindles)),
fail=not spindles_supported)
AssertCommand(build_cmd("add:size=%s" % size),
fail=spindles_supported)
# Exactly one of the above commands has succeeded, so we need one remove
AssertCommand(build_cmd("remove"))
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceGrowDisk(instance):
"""gnt-instance grow-disk"""
if instance.disk_template == constants.DT_DISKLESS:
print qa_utils.FormatInfo("Test not supported for diskless instances")
return
name = instance.name
disks = qa_config.GetDiskOptions()
all_size = [d.get("size") for d in disks]
all_grow = [d.get("growth") for d in disks]
if not all_grow:
# growth values are missing but instance grow-disk has been enabled,
# so let's set a fixed/nominal growth
all_grow = ["128M" for _ in all_size]
for idx, (size, grow) in enumerate(zip(all_size, all_grow)):
# succeed in growing by the given amount
AssertCommand(["gnt-instance", "grow-disk", name, str(idx), grow])
# fail to grow back to the old size
AssertCommand(["gnt-instance", "grow-disk", "--absolute", name, str(idx),
size], fail=True)
# succeed in growing to old size + 2 * growth
int_size = utils.ParseUnit(size)
int_grow = utils.ParseUnit(grow)
AssertCommand(["gnt-instance", "grow-disk", "--absolute", name, str(idx),
str(int_size + 2 * int_grow)])
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceDeviceNames(instance):
if instance.disk_template == constants.DT_DISKLESS:
print qa_utils.FormatInfo("Test not supported for diskless instances")
return
name = instance.name
for dev_type in ["disk", "net"]:
if dev_type == "disk":
options = ",size=512M"
if qa_config.AreSpindlesSupported():
options += ",spindles=1"
else:
options = ""
# succeed in adding a device named 'test_device'
AssertCommand(["gnt-instance", "modify",
"--%s=-1:add,name=test_device%s" % (dev_type, options),
name])
# succeed in removing the 'test_device'
AssertCommand(["gnt-instance", "modify",
"--%s=test_device:remove" % dev_type,
name])
# fail to add two devices with the same name
AssertCommand(["gnt-instance", "modify",
"--%s=-1:add,name=test_device%s" % (dev_type, options),
"--%s=-1:add,name=test_device%s" % (dev_type, options),
name], fail=True)
# fail to add a device with invalid name
AssertCommand(["gnt-instance", "modify",
"--%s=-1:add,name=2%s" % (dev_type, options),
name], fail=True)
# Rename disks
disks = qa_config.GetDiskOptions()
disk_names = [d.get("name") for d in disks]
for idx, disk_name in enumerate(disk_names):
# Refer to disk by idx
AssertCommand(["gnt-instance", "modify",
"--disk=%s:modify,name=renamed" % idx,
name])
# Refer to by name and rename to original name
AssertCommand(["gnt-instance", "modify",
"--disk=renamed:modify,name=%s" % disk_name,
name])
if len(disks) >= 2:
# fail to rename two disks to the same name
AssertCommand(["gnt-instance", "modify",
"--disk=0:modify,name=same_name",
"--disk=1:modify,name=same_name",
name], fail=True)
def TestInstanceList():
"""gnt-instance list"""
qa_utils.GenericQueryTest("gnt-instance", query.INSTANCE_FIELDS.keys())
def TestInstanceListFields():
"""gnt-instance list-fields"""
qa_utils.GenericQueryFieldsTest("gnt-instance", query.INSTANCE_FIELDS.keys())
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceConsole(instance):
"""gnt-instance console"""
AssertCommand(["gnt-instance", "console", "--show-cmd", instance.name])
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestReplaceDisks(instance, curr_nodes, other_nodes):
"""gnt-instance replace-disks"""
def buildcmd(args):
cmd = ["gnt-instance", "replace-disks"]
cmd.extend(args)
cmd.append(instance.name)
return cmd
if not IsDiskReplacingSupported(instance):
print qa_utils.FormatInfo("Instance doesn't support disk replacing,"
" skipping test")
return
# Currently all supported templates have one primary and one secondary node
assert len(curr_nodes) == 2
snode = curr_nodes[1]
assert len(other_nodes) == 1
othernode = other_nodes[0]
options = qa_config.get("options", {})
use_ialloc = options.get("use-iallocators", True)
for data in [
["-p"],
["-s"],
# A placeholder; the actual command choice depends on use_ialloc
None,
# Restore the original secondary
["--new-secondary=%s" % snode.primary],
]:
if data is None:
if use_ialloc:
data = ["-I", constants.DEFAULT_IALLOCATOR_SHORTCUT]
else:
data = ["--new-secondary=%s" % othernode.primary]
AssertCommand(buildcmd(data))
AssertCommand(buildcmd(["-a"]))
AssertCommand(["gnt-instance", "stop", instance.name])
AssertCommand(buildcmd(["-a"]), fail=True)
AssertCommand(["gnt-instance", "activate-disks", instance.name])
AssertCommand(["gnt-instance", "activate-disks", "--wait-for-sync",
instance.name])
AssertCommand(buildcmd(["-a"]))
AssertCommand(["gnt-instance", "start", instance.name])
def _AssertRecreateDisks(cmdargs, instance, fail=False, check=True,
destroy=True):
"""Execute gnt-instance recreate-disks and check the result
@param cmdargs: Arguments (instance name excluded)
@param instance: Instance to operate on
@param fail: True if the command is expected to fail
@param check: If True and fail is False, check that the disks work
@param destroy: If True, destroy the old disks first
"""
if destroy:
_DestroyInstanceDisks(instance)
AssertCommand((["gnt-instance", "recreate-disks"] + cmdargs +
[instance.name]), fail)
if not fail and check:
# Quick check that the disks are there
AssertCommand(["gnt-instance", "activate-disks", instance.name])
AssertCommand(["gnt-instance", "activate-disks", "--wait-for-sync",
instance.name])
AssertCommand(["gnt-instance", "deactivate-disks", instance.name])
def _BuildRecreateDisksOpts(en_disks, with_spindles, with_growth,
spindles_supported):
if with_spindles:
if spindles_supported:
if with_growth:
build_spindles_opt = (lambda disk:
",spindles=%s" %
(disk["spindles"] + disk["spindles-growth"]))
else:
build_spindles_opt = (lambda disk:
",spindles=%s" % disk["spindles"])
else:
build_spindles_opt = (lambda _: ",spindles=1")
else:
build_spindles_opt = (lambda _: "")
if with_growth:
build_size_opt = (lambda disk:
"size=%s" % (utils.ParseUnit(disk["size"]) +
utils.ParseUnit(disk["growth"])))
else:
build_size_opt = (lambda disk: "size=%s" % disk["size"])
build_disk_opt = (lambda (idx, disk):
"--disk=%s:%s%s" % (idx, build_size_opt(disk),
build_spindles_opt(disk)))
return map(build_disk_opt, en_disks)
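# Worked example (hypothetical disk config; assumes utils.ParseUnit yields
# mebibytes): for en_disks = [(0, {"size": "1G", "growth": "512M",
# "spindles": 1, "spindles-growth": 1})] with with_spindles=True,
# with_growth=True and spindles_supported=True, this returns
# ["--disk=0:size=1536,spindles=2"].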
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestRecreateDisks(instance, inodes, othernodes):
"""gnt-instance recreate-disks
@param instance: Instance to work on
@param inodes: List of the current nodes of the instance
@param othernodes: list/tuple of nodes where to temporarily recreate disks
"""
options = qa_config.get("options", {})
use_ialloc = options.get("use-iallocators", True)
other_seq = ":".join([n.primary for n in othernodes])
orig_seq = ":".join([n.primary for n in inodes])
# These fail because the instance is running
_AssertRecreateDisks(["-n", other_seq], instance, fail=True, destroy=False)
if use_ialloc:
_AssertRecreateDisks(["-I", "hail"], instance, fail=True, destroy=False)
else:
_AssertRecreateDisks(["-n", other_seq], instance, fail=True, destroy=False)
AssertCommand(["gnt-instance", "stop", instance.name])
# Disks exist: this should fail
_AssertRecreateDisks([], instance, fail=True, destroy=False)
# Unsupported spindles parameters: fail
if not qa_config.AreSpindlesSupported():
_AssertRecreateDisks(["--disk=0:spindles=2"], instance,
fail=True, destroy=False)
# Recreate disks in place
_AssertRecreateDisks([], instance)
# Move disks away
if use_ialloc:
_AssertRecreateDisks(["-I", "hail"], instance)
# Move disks somewhere else
_AssertRecreateDisks(["-I", constants.DEFAULT_IALLOCATOR_SHORTCUT],
instance)
else:
_AssertRecreateDisks(["-n", other_seq], instance)
# Move disks back
_AssertRecreateDisks(["-n", orig_seq], instance)
# Recreate resized disks
# One of the two commands fails because spindles are either given when they
# should not be, or omitted when they are required
alldisks = qa_config.GetDiskOptions()
spindles_supported = qa_config.AreSpindlesSupported()
disk_opts = _BuildRecreateDisksOpts(enumerate(alldisks), True, True,
spindles_supported)
_AssertRecreateDisks(disk_opts, instance, destroy=True,
fail=not spindles_supported)
disk_opts = _BuildRecreateDisksOpts(enumerate(alldisks), False, True,
spindles_supported)
_AssertRecreateDisks(disk_opts, instance, destroy=False,
fail=spindles_supported)
# Recreate the disks one by one (with the original size)
for (idx, disk) in enumerate(alldisks):
# Only the first call should destroy all the disks
destroy = (idx == 0)
# Again, one of the two commands is expected to fail
disk_opts = _BuildRecreateDisksOpts([(idx, disk)], True, False,
spindles_supported)
_AssertRecreateDisks(disk_opts, instance, destroy=destroy, check=False,
fail=not spindles_supported)
disk_opts = _BuildRecreateDisksOpts([(idx, disk)], False, False,
spindles_supported)
_AssertRecreateDisks(disk_opts, instance, destroy=False, check=False,
fail=spindles_supported)
# This and InstanceCheck decoration check that the disks are working
AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])
AssertCommand(["gnt-instance", "start", instance.name])
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceExport(instance, node):
"""gnt-backup export -n ..."""
name = instance.name
options = ["gnt-backup", "export", "-n", node.primary]
# For files and shared files, the --long-sleep option should be used
if instance.disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]:
options.append("--long-sleep")
AssertCommand(options + [name])
return qa_utils.ResolveInstanceName(name)
@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestInstanceExportWithRemove(instance, node):
"""gnt-backup export --remove-instance"""
AssertCommand(["gnt-backup", "export", "-n", node.primary,
"--remove-instance", instance.name])
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceExportNoTarget(instance):
"""gnt-backup export (without target node, should fail)"""
AssertCommand(["gnt-backup", "export", instance.name], fail=True)
@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestInstanceImport(newinst, node, expnode, name):
"""gnt-backup import"""
templ = constants.DT_PLAIN
if not qa_config.IsTemplateSupported(templ):
return
cmd = (["gnt-backup", "import",
"--disk-template=%s" % templ,
"--no-ip-check",
"--src-node=%s" % expnode.primary,
"--src-dir=%s/%s" % (pathutils.EXPORT_DIR, name),
"--node=%s" % node.primary] +
GetGenericAddParameters(newinst, templ,
force_mac=constants.VALUE_GENERATE))
cmd.append(newinst.name)
AssertCommand(cmd)
newinst.SetDiskTemplate(templ)
def TestBackupList(expnode):
"""gnt-backup list"""
AssertCommand(["gnt-backup", "list", "--node=%s" % expnode.primary])
qa_utils.GenericQueryTest("gnt-backup", query.EXPORT_FIELDS.keys(),
namefield=None, test_unknown=False)
def TestBackupListFields():
"""gnt-backup list-fields"""
qa_utils.GenericQueryFieldsTest("gnt-backup", query.EXPORT_FIELDS.keys())
def TestRemoveInstanceOfflineNode(instance, snode, set_offline, set_online):
"""gnt-instance remove with an off-line node
@param instance: instance
@param snode: secondary node, to be set offline
@param set_offline: function to call to set the node off-line
@param set_online: function to call to set the node on-line
"""
info = GetInstanceInfo(instance.name)
set_offline(snode)
try:
TestInstanceRemove(instance)
finally:
set_online(snode)
# Clean up the disks on the offline node, if necessary
if instance.disk_template not in constants.DTS_EXT_MIRROR:
# FIXME: abstract the cleanup inside the disks
if info["storage-type"] == constants.ST_LVM_VG:
for minor in info["drbd-minors"][snode.primary]:
        # DRBD 8.3 syntax comes first, then DRBD 8.4 syntax. The 8.4 syntax
        # relies on the fact that we always create a resource for each minor,
        # and that this resource is always named resource{minor}.
# As 'drbdsetup 0 down' does return success (even though that's invalid
# syntax), we always have to perform both commands and ignore the
# output.
drbd_shutdown_cmd = \
"(drbdsetup %d down >/dev/null 2>&1;" \
" drbdsetup down resource%d >/dev/null 2>&1) || /bin/true" % \
(minor, minor)
AssertCommand(drbd_shutdown_cmd, node=snode)
AssertCommand(["lvremove", "-f"] + info["volumes"], node=snode)
elif info["storage-type"] == constants.ST_FILE:
filestorage = qa_config.get("file-storage-dir",
pathutils.DEFAULT_FILE_STORAGE_DIR)
disk = os.path.join(filestorage, instance.name)
AssertCommand(["rm", "-rf", disk], node=snode)
def TestInstanceCreationRestrictedByDiskTemplates():
"""Test adding instances for disabled disk templates."""
if qa_config.TestEnabled("cluster-exclusive-storage"):
# These tests are valid only for non-exclusive storage
return
enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
nodes = qa_config.AcquireManyNodes(2)
# Setup the cluster with the enabled_disk_templates
AssertCommand(
["gnt-cluster", "modify",
"--enabled-disk-templates=%s" % ",".join(enabled_disk_templates),
"--ipolicy-disk-templates=%s" % ",".join(enabled_disk_templates)],
fail=False)
# Test instance creation for enabled disk templates
for disk_template in enabled_disk_templates:
instance = CreateInstanceByDiskTemplate(nodes, disk_template, fail=False)
TestInstanceRemove(instance)
instance.Release()
# Test that instance creation fails for disabled disk templates
disabled_disk_templates = list(constants.DISK_TEMPLATES
- set(enabled_disk_templates))
for disk_template in disabled_disk_templates:
instance = CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)
  # Test instance creation after disabling previously enabled disk templates
if (len(enabled_disk_templates) > 1):
    # Partition the disk templates, enable them separately and check that the
    # disabled ones cannot be used by instances.
middle = len(enabled_disk_templates) / 2
templates1 = enabled_disk_templates[:middle]
templates2 = enabled_disk_templates[middle:]
for (enabled, disabled) in [(templates1, templates2),
(templates2, templates1)]:
AssertCommand(["gnt-cluster", "modify",
"--enabled-disk-templates=%s" % ",".join(enabled),
"--ipolicy-disk-templates=%s" % ",".join(enabled)],
fail=False)
for disk_template in disabled:
CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)
elif (len(enabled_disk_templates) == 1):
    # If only one disk template is enabled in the QA config, we have to enable
    # some other templates in order to test whether disabling the only enabled
    # disk template prevents creating instances of that template.
other_disk_templates = list(
set([constants.DT_DISKLESS, constants.DT_BLOCK]) -
set(enabled_disk_templates))
AssertCommand(["gnt-cluster", "modify",
"--enabled-disk-templates=%s" %
",".join(other_disk_templates),
"--ipolicy-disk-templates=%s" %
",".join(other_disk_templates)],
fail=False)
CreateInstanceByDiskTemplate(nodes, enabled_disk_templates[0], fail=True)
else:
raise qa_error.Error("Please enable at least one disk template"
" in your QA setup.")
# Restore initially enabled disk templates
AssertCommand(["gnt-cluster", "modify",
"--enabled-disk-templates=%s" %
",".join(enabled_disk_templates),
"--ipolicy-disk-templates=%s" %
",".join(enabled_disk_templates)],
fail=False)
def _AssertInstance(instance, status, admin_state, admin_state_source):
x, y, z = \
_GetInstanceFields(instance.name,
["status", "admin_state", "admin_state_source"])
AssertEqual(x, status)
AssertEqual(y, admin_state)
AssertEqual(z, admin_state_source)
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def _TestInstanceUserDown(instance, hv_shutdown_fn):
"""Test different combinations of user shutdown"""
# 1. User shutdown
# 2. Instance start
hv_shutdown_fn()
_AssertInstance(instance,
constants.INSTST_USERDOWN,
constants.ADMINST_UP,
constants.ADMIN_SOURCE)
AssertCommand(["gnt-instance", "start", instance.name])
_AssertInstance(instance,
constants.INSTST_RUNNING,
constants.ADMINST_UP,
constants.ADMIN_SOURCE)
# 1. User shutdown
# 2. Watcher cleanup
# 3. Instance start
hv_shutdown_fn()
_AssertInstance(instance,
constants.INSTST_USERDOWN,
constants.ADMINST_UP,
constants.ADMIN_SOURCE)
qa_daemon.RunWatcherDaemon()
_AssertInstance(instance,
constants.INSTST_USERDOWN,
constants.ADMINST_DOWN,
constants.USER_SOURCE)
AssertCommand(["gnt-instance", "start", instance.name])
_AssertInstance(instance,
constants.INSTST_RUNNING,
constants.ADMINST_UP,
constants.ADMIN_SOURCE)
# 1. User shutdown
# 2. Watcher cleanup
# 3. Instance stop
# 4. Instance start
hv_shutdown_fn()
_AssertInstance(instance,
constants.INSTST_USERDOWN,
constants.ADMINST_UP,
constants.ADMIN_SOURCE)
qa_daemon.RunWatcherDaemon()
_AssertInstance(instance,
constants.INSTST_USERDOWN,
constants.ADMINST_DOWN,
constants.USER_SOURCE)
AssertCommand(["gnt-instance", "shutdown", instance.name])
_AssertInstance(instance,
constants.INSTST_ADMINDOWN,
constants.ADMINST_DOWN,
constants.ADMIN_SOURCE)
AssertCommand(["gnt-instance", "start", instance.name])
_AssertInstance(instance,
constants.INSTST_RUNNING,
constants.ADMINST_UP,
constants.ADMIN_SOURCE)
# 1. User shutdown
# 2. Instance stop
# 3. Instance start
hv_shutdown_fn()
_AssertInstance(instance,
constants.INSTST_USERDOWN,
constants.ADMINST_UP,
constants.ADMIN_SOURCE)
AssertCommand(["gnt-instance", "shutdown", instance.name])
_AssertInstance(instance,
constants.INSTST_ADMINDOWN,
constants.ADMINST_DOWN,
constants.ADMIN_SOURCE)
AssertCommand(["gnt-instance", "start", instance.name])
_AssertInstance(instance,
constants.INSTST_RUNNING,
constants.ADMINST_UP,
constants.ADMIN_SOURCE)
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def _TestInstanceUserDownXen(instance):
primary = _GetInstanceField(instance.name, "pnode")
fn = lambda: AssertCommand(["xm", "shutdown", "-w", instance.name],
node=primary)
AssertCommand(["gnt-cluster", "modify", "--user-shutdown=true"])
_TestInstanceUserDown(instance, fn)
AssertCommand(["gnt-cluster", "modify", "--user-shutdown=false"])
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def _TestInstanceUserDownKvm(instance):
def _StopKVMInstance():
AssertCommand("pkill -f \"\\-name %s\"" % instance.name, node=primary)
time.sleep(10)
AssertCommand(["gnt-cluster", "modify", "--user-shutdown=true"])
AssertCommand(["gnt-instance", "modify", "-H", "user_shutdown=true",
instance.name])
# The instance needs to reboot not because the 'user_shutdown'
  # parameter was modified but because the KVM daemon needs to be
# started, given that the instance was first created with user
# shutdown disabled.
AssertCommand(["gnt-instance", "reboot", instance.name])
primary = _GetInstanceField(instance.name, "pnode")
_TestInstanceUserDown(instance, _StopKVMInstance)
AssertCommand(["gnt-instance", "modify", "-H", "user_shutdown=false",
instance.name])
AssertCommand(["gnt-cluster", "modify", "--user-shutdown=false"])
def TestInstanceUserDown(instance):
"""Tests user shutdown"""
enabled_hypervisors = qa_config.GetEnabledHypervisors()
for (hv, fn) in [(constants.HT_XEN_PVM, _TestInstanceUserDownXen),
(constants.HT_XEN_HVM, _TestInstanceUserDownXen),
(constants.HT_KVM, _TestInstanceUserDownKvm)]:
if hv in enabled_hypervisors:
qa_daemon.TestPauseWatcher()
fn(instance)
qa_daemon.TestResumeWatcher()
else:
print "%s hypervisor is not enabled, skipping test for this hypervisor" \
% hv
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceCommunication(instance, master):
"""Tests instance communication via 'gnt-instance modify'"""
# Enable instance communication network at the cluster level
network_name = "mynetwork"
cmd = ["gnt-cluster", "modify",
"--instance-communication-network=%s" % network_name]
result_output = qa_utils.GetCommandOutput(master.primary,
utils.ShellQuoteArgs(cmd))
print result_output
# Enable instance communication mechanism for this instance
AssertCommand(["gnt-instance", "modify", "-c", "yes", instance.name])
# Reboot instance for changes to NIC to take effect
AssertCommand(["gnt-instance", "reboot", instance.name])
# Check if the instance is properly configured for instance
# communication.
nic_name = "%s%s" % (constants.INSTANCE_COMMUNICATION_NIC_PREFIX,
instance.name)
## Check the output of 'gnt-instance list'
nic_names = _GetInstanceField(instance.name, "nic.names")
nic_names = map(lambda x: x.strip(" '"), nic_names.strip("[]").split(","))
AssertIn(nic_name, nic_names,
msg="Looking for instance communication TAP interface")
nic_n = nic_names.index(nic_name)
nic_ip = _GetInstanceField(instance.name, "nic.ip/%d" % nic_n)
nic_network = _GetInstanceField(instance.name, "nic.network.name/%d" % nic_n)
nic_mode = _GetInstanceField(instance.name, "nic.mode/%d" % nic_n)
AssertEqual(IP4Address.InNetwork(constants.INSTANCE_COMMUNICATION_NETWORK4,
nic_ip),
True,
msg="Checking if NIC's IP if part of the expected network")
AssertEqual(network_name, nic_network,
msg="Checking if NIC's network name matches the expected value")
AssertEqual(constants.INSTANCE_COMMUNICATION_NETWORK_MODE, nic_mode,
msg="Checking if NIC's mode name matches the expected value")
## Check the output of 'ip route'
cmd = ["ip", "route", "show", nic_ip]
result_output = qa_utils.GetCommandOutput(master.primary,
utils.ShellQuoteArgs(cmd))
result = result_output.split()
AssertEqual(len(result), 5, msg="Checking if the IP route is established")
route_ip = result[0]
route_dev = result[1]
route_tap = result[2]
route_scope = result[3]
route_link = result[4]
AssertEqual(route_ip, nic_ip,
msg="Checking if IP route shows the expected IP")
AssertEqual(route_dev, "dev",
msg="Checking if IP route shows the expected device")
AssertEqual(route_scope, "scope",
msg="Checking if IP route shows the expected scope")
AssertEqual(route_link, "link",
msg="Checking if IP route shows the expected link-level scope")
## Check the output of 'ip address'
cmd = ["ip", "address", "show", "dev", route_tap]
result_output = qa_utils.GetCommandOutput(master.primary,
utils.ShellQuoteArgs(cmd))
result = result_output.splitlines()
AssertEqual(len(result), 3,
msg="Checking if the IP address is established")
result = result.pop().split()
AssertEqual(len(result), 7,
msg="Checking if the IP address has the expected value")
address_ip = result[1]
address_netmask = result[3]
AssertEqual(address_ip, "169.254.169.254/32",
msg="Checking if the TAP interface has the expected IP")
AssertEqual(address_netmask, "169.254.255.255",
msg="Checking if the TAP interface has the expected netmask")
# Disable instance communication mechanism for this instance
AssertCommand(["gnt-instance", "modify", "-c", "no", instance.name])
# Reboot instance for changes to NIC to take effect
AssertCommand(["gnt-instance", "reboot", instance.name])
# Disable instance communication network at cluster level
cmd = ["gnt-cluster", "modify",
"--instance-communication-network=%s" % network_name]
result_output = qa_utils.GetCommandOutput(master.primary,
utils.ShellQuoteArgs(cmd))
print result_output
def _TestRedactionOfSecretOsParams(node, cmd, secret_keys):
"""Tests redaction of secret os parameters
"""
AssertCommand(["gnt-cluster", "modify", "--max-running-jobs", "1"])
debug_delay_id = int(stdout_of(["gnt-debug", "delay", "--print-jobid",
"--submit", "300"]))
cmd_jid = int(stdout_of(cmd))
job_file_abspath = "%s/job-%s" % (pathutils.QUEUE_DIR, cmd_jid)
job_file = qa_utils.MakeNodePath(node, job_file_abspath)
for k in secret_keys:
grep_cmd = ["grep", "\"%s\":\"<redacted>\"" % k, job_file]
AssertCommand(grep_cmd)
AssertCommand(["gnt-job", "cancel", "--kill", "--yes-do-it",
str(debug_delay_id)])
AssertCommand(["gnt-cluster", "modify", "--max-running-jobs", "20"])
AssertCommand(["gnt-job", "wait", str(cmd_jid)])
def TestInstanceAddOsParams():
"""Tests instance add with secret os parameters"""
if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
return
master = qa_config.GetMasterNode()
instance = qa_config.AcquireInstance()
secret_keys = ["param1", "param2"]
cmd = (["gnt-instance", "add",
"--os-type=%s" % qa_config.get("os"),
"--disk-template=%s" % constants.DT_PLAIN,
"--os-parameters-secret",
"param1=secret1,param2=secret2",
"--node=%s" % master.primary] +
GetGenericAddParameters(instance, constants.DT_PLAIN))
cmd.append("--submit")
cmd.append("--print-jobid")
cmd.append(instance.name)
_TestRedactionOfSecretOsParams(master.primary, cmd, secret_keys)
TestInstanceRemove(instance)
instance.Release()
def TestSecretOsParams():
"""Tests secret os parameter transmission"""
master = qa_config.GetMasterNode()
secret_keys = ["param1", "param2"]
cmd = (["gnt-debug", "test-osparams", "--os-parameters-secret",
"param1=secret1,param2=secret2", "--submit", "--print-jobid"])
_TestRedactionOfSecretOsParams(master.primary, cmd, secret_keys)
cmd_output = stdout_of(["gnt-debug", "test-osparams",
"--os-parameters-secret",
"param1=secret1,param2=secret2"])
AssertIn("\'param1\': \'secret1\'", cmd_output)
AssertIn("\'param2\': \'secret2\'", cmd_output)
available_instance_tests = [
("instance-add-plain-disk", constants.DT_PLAIN,
TestInstanceAddWithPlainDisk, 1),
("instance-add-drbd-disk", constants.DT_DRBD8,
TestInstanceAddWithDrbdDisk, 2),
("instance-add-diskless", constants.DT_DISKLESS,
TestInstanceAddDiskless, 1),
("instance-add-file", constants.DT_FILE,
TestInstanceAddFile, 1),
("instance-add-shared-file", constants.DT_SHARED_FILE,
TestInstanceAddSharedFile, 1),
("instance-add-rbd", constants.DT_RBD,
TestInstanceAddRADOSBlockDevice, 1),
("instance-add-gluster", constants.DT_GLUSTER,
TestInstanceAddGluster, 1),
]
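# Illustrative sketch only (not part of the original QA suite): one way the
# table above could be consumed. The dispatch helper below is hypothetical;
# it merely shows how each entry pairs a QA test switch with a disk template,
# a test function and the number of nodes that function expects.
def _RunEnabledInstanceTests(nodes):
  """Run every enabled entry of 'available_instance_tests' (sketch only)."""
  for (test_name, templ, fn, num_nodes) in available_instance_tests:
    if (qa_config.TestEnabled(test_name) and
        qa_config.IsTemplateSupported(templ)):
      fn(nodes[:num_nodes])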
|
|
#!/usr/bin/env python
#
import unittest
import os
import femagtools.bch
from io import open
import numpy as np
class BchReaderTest(unittest.TestCase):
def read_bch(self, filename):
testPath = os.path.join(os.path.split(__file__)[0], 'data')
if len(testPath) == 0:
testPath = os.path.join(os.path.abspath('.'), 'data')
r = femagtools.bch.Reader()
with open('{0}/{1}'.format(testPath, filename),
encoding='latin1') as f:
r.read(f)
return r
def test_read_cogging(self):
bch = self.read_bch('cogging.BATCH')
self.assertEqual(bch.version, '7.9.147 November 2012')
self.assertEqual(bch.nodes, 2315)
self.assertEqual(bch.elements, 3305)
self.assertEqual(bch.quality, 100.0)
self.assertEqual(len(bch.torque_fft), 1)
self.assertEqual(len(bch.torque_fft[0]), 5)
self.assertTrue('order' in bch.torque_fft[0])
self.assertTrue('torque' in bch.torque_fft[0])
self.assertEqual(len(bch.torque_fft[0]['torque']), 5)
self.assertEqual(bch.torque_fft[0]['order'], [4, 12, 24, 36, 48])
self.assertEqual(sorted(bch.flux.keys()), ['1', '2', '3'])
self.assertEqual(sorted(bch.flux['1'][0].keys()),
sorted(['displ', 'voltage_four',
'current_k', 'flux_k',
'voltage_ir', 'displunit',
'voltage_dpsi']))
self.assertEqual(len(bch.flux['1'][0]['flux_k']), 61)
self.assertEqual(bch.flux_fft['1'][0]['order'], [1, 3, 5, 7, 9, 11])
self.assertEqual(len(bch.torque), 1)
self.assertEqual(sorted(bch.torque[0].keys()),
sorted(['angle', 'force_y', 'force_x', 'torque',
'current_1', 'ripple', 't_idpsi']))
self.assertEqual(len(bch.torque[0]['torque']), 61)
self.assertAlmostEqual(bch.losses[0]['winding'], 0.0, 1)
self.assertAlmostEqual(bch.losses[0]['stajo'], 0.458, 2)
self.assertAlmostEqual(bch.losses[0]['staza'], 0.344, 3)
self.assertAlmostEqual(bch.losses[0]['magnetJ'], 0.006, 3)
# self.assertAlmostEqual(bch.losses[0]['rotfe'], 0.000, 3)
self.assertAlmostEqual(bch.lossPar['fo'][0], 50.0, 1)
self.assertAlmostEqual(bch.lossPar['fo'][1], 50.0, 1)
self.assertEqual(bch.get(('machine', 'p')), 2)
np.testing.assert_almost_equal(bch.inertia, [0.230195e-3, 0.011774e-3])
def test_read_sctest(self):
bch = self.read_bch('sctest.BATCH')
self.assertEqual(len(bch.torque_fft), 1)
self.assertEqual(len(bch.scData['ia']), 134)
self.assertAlmostEqual(bch.scData['ikd'], 0.0, 1)
self.assertAlmostEqual(bch.scData['iks'], 1263.581, 2)
self.assertAlmostEqual(bch.scData['tks'], 1469.736, 2)
def test_read_pmsim(self):
bch = self.read_bch('pmsim.BATCH')
self.assertEqual(len(bch.torque_fft), 2)
self.assertTrue('order' in bch.torque_fft[0])
self.assertTrue('torque' in bch.torque_fft[0])
self.assertEqual(len(bch.torque_fft[0]['torque']), 7)
self.assertEqual(bch.torque_fft[1]['order'], [0, 12, 24, 30, 36, 42])
self.assertEqual(sorted(bch.flux['1'][0].keys()),
sorted(['displ', 'voltage_four',
'current_k', 'flux_k',
'voltage_ir', 'displunit',
'voltage_dpsi']))
self.assertEqual(len(bch.flux['1'][0]['flux_k']), 46)
self.assertEqual(len(bch.torque), 2)
self.assertTrue('torque' in bch.torque[1])
self.assertEqual(len(bch.torque[1]['torque']), 46)
self.assertTrue('ld' in bch.dqPar)
self.assertAlmostEqual(bch.dqPar['i1'][1], 49.992, 3)
self.assertAlmostEqual(bch.dqPar['ld'][0], 9.9e-3, 6)
self.assertAlmostEqual(bch.dqPar['ld'][0], 9.9e-3, 6)
self.assertAlmostEqual(bch.dqPar['u1'][1], 358.38, 2)
self.assertAlmostEqual(bch.dqPar['torque'][0], 65.3, 1)
self.assertAlmostEqual(bch.machine['i1'], 50.0)
self.assertAlmostEqual(bch.lossPar['fo'][0], 50.0, 1)
np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['order_el'],
[1, 3, 5, 7, 9, 11, 13, 15])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['freq'],
[100.0, 300.0, 500.0, 700.0, 900.0,
1100.0, 1300.0, 1500.0])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['hyst'],
[10.33, 9.391, 9.391, 9.391, 3.348,
2.971, 1.476, 0.882])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['eddy'],
[15.804, 142.234, 395.094, 774.383,
455.591, 603.881, 419.063, 333.395])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['order_el'],
[1, 3, 5, 7, 9, 11, 13, 15])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['freq'],
[100.0, 300.0, 500.0, 700.0, 900.0, 1100.0, 1300.0, 1500.0])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['hyst'],
[8.641, 7.774, 7.774, 7.748, 3.679, 2.915, 1.303, 0.626])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['eddy'],
[13.065, 117.587, 326.631, 637.999, 500.663, 592.805, 370.023, 236.594])
def test_read_pmsim_9(self):
bch = self.read_bch('pmsim-9.BATCH')
self.assertAlmostEqual(bch.machine['plfe'][0], 2540.2, 1)
self.assertAlmostEqual(bch.machine['plfe'][1], 2020.5, 1)
self.assertAlmostEqual(bch.dqPar['up'][0], 259.4, 1)
np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['order_mech'],
[6, 18, 30, 42, 54, 90, 114])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['order_el'],
[1.0, 3.0, 5.0, 7.0, 9.0, 15.0, 19.0])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['freq'],
[400.0, 1200.0, 2000.0, 2800.0, 3600.0, 6000.0, 7600.0])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['hyst'],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['eddy'],
[1637.884, 225.861, 93.969, 19.904, 6.661, 3.043, 1.752])
assert [round(l*1e3, 4) for l in bch.dqPar['Lho']] == [0.5908, 0.6583]
def test_read_relsim(self):
bch = self.read_bch('relsim.BATCH')
self.assertEqual(len(bch.torque), 1)
self.assertTrue('torque' in bch.torque[0])
self.assertAlmostEqual(np.mean(bch.torque[0]['torque']), 5.656, 2)
self.assertAlmostEqual(bch.dqPar['u1'][1], 274.5, 1)
self.assertAlmostEqual(bch.dqPar['torque'][0], 5.775, 1)
np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['freq'],
[50.0])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['hyst'],
[0.152])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['hyst'],
[0.066])
def test_read_pmsim_external(self):
bch = self.read_bch('pmsim-external.BATCH')
self.assertTrue('ld' in bch.dqPar)
self.assertAlmostEqual(bch.dqPar['i1'][1], 49.992, 3)
self.assertAlmostEqual(bch.dqPar['ld'][0], 0.86688e-3, 6)
self.assertAlmostEqual(bch.dqPar['ld'][0], 0.86688e-3, 6)
self.assertAlmostEqual(bch.dqPar['u1'][1], 2409.142, 2)
self.assertAlmostEqual(bch.dqPar['torque'][0], 1137.92, 1)
np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['order_el'],
[1, 3])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['freq'],
[800.0, 2400.0])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['hyst'],
[2619.555, 49.438])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['eddy'],
[15512.529, 1186.523])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['order_el'],
[1, 3, 5])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['freq'],
[800.0, 2400.0, 4000.0])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['hyst'],
[5688.175, 296.19, 0.989])
np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['eddy'],
[43864.352, 7108.561, 39.563])
def test_read_psidq(self):
bch = self.read_bch('psidpsiq.BATCH')
self.assertEqual(len(bch.torque_fft), 10)
self.assertTrue('order' in bch.torque_fft[0])
self.assertTrue('torque' in bch.torque_fft[0])
self.assertEqual(len(bch.torque_fft[0]['torque']), 7)
self.assertEqual(bch.torque_fft[0]['order'],
[0, 4, 8, 12, 16, 20, 24])
self.assertEqual(sorted(bch.flux.keys()), ['1', '2', '3'])
self.assertEqual(len(bch.flux['1']), 10)
self.assertTrue('flux_k' in bch.flux['1'][0])
self.assertEqual(len(bch.flux['1'][0]['flux_k']), 16)
self.assertEqual(len(bch.torque), 10)
self.assertEqual(len(bch.torque[-1]['torque']), 16)
self.assertEqual(len(bch.psidq), 7)
self.assertEqual(len(bch.psidq_ldq), 6)
self.assertEqual(len(bch.psidq['psid']), 3)
self.assertEqual(len(bch.psidq_ldq['ld']), 3)
self.assertEqual(len(bch.psidq['losses']), 11)
self.assertEqual(len(bch.psidq['losses']['styoke']), 3)
self.assertTrue('id' in bch.airgapInduction)
self.assertEqual(bch.airgapInduction['id'],
[-200.0, -100.0, 0.0])
self.assertEqual(len(bch.airgapInduction['Ba']), 3)
self.assertEqual(len(bch.airgapInduction['Bm'][0]), 3)
def test_read_ldq(self):
bch = self.read_bch('ldq.BATCH')
self.assertEqual(len(bch.torque_fft), 13)
self.assertTrue('order' in bch.torque_fft[0])
self.assertTrue('torque' in bch.torque_fft[0])
self.assertEqual(len(bch.torque_fft[0]['torque']), 8)
self.assertEqual(bch.torque_fft[0]['order'], [12, 36, 48, 56, 60,
72, 76, 84])
self.assertEqual(sorted(bch.flux.keys()), ['1', '2', '3'])
self.assertEqual(len(bch.flux['1']), 13)
self.assertEqual(len(bch.flux['1'][0]), 7)
self.assertTrue('flux_k' in bch.flux['1'][0])
self.assertEqual(len(bch.flux['1'][0]['flux_k']), 46)
self.assertEqual(len(bch.torque), 13)
self.assertEqual(len(bch.torque[-1]['torque']), 46)
self.assertEqual(len(bch.ldq['losses']), 5)
self.assertEqual(len(bch.ldq['losses']['styoke']), 4)
self.assertTrue('i1' in bch.airgapInduction)
self.assertEqual(len(bch.airgapInduction['i1']), 3)
self.assertEqual(len(bch.airgapInduction['an']), 4)
self.assertEqual(len(bch.airgapInduction['an'][0]), 4)
def test_read_pmsim2(self):
bch = self.read_bch('PM_270_L8_001.BATCH')
self.assertAlmostEqual(bch.dqPar['i1'][1], 70.0, 1)
self.assertAlmostEqual(bch.dqPar['beta'][0], -38.0, 1)
def test_read_linearforce(self):
bch = self.read_bch('linearForce.BATCH')
self.assertEqual(len(bch.linearForce), 1)
self.assertEqual(len(bch.linearForce[0]['displ']), 26)
self.assertEqual(bch.linearForce[0]['displ'][5], 15.0)
self.assertEqual(bch.linearForce[0]['force_x'][7], -0.3439)
        self.assertEqual(bch.linearForce[0]['force_y'][2], 3107.0)
self.assertEqual(bch.linearForce[0]['magnet_1'][13], 10.0)
self.assertEqual(bch.linearForce_fft[0]['force'][0], 0.3483)
self.assertEqual(bch.linearForce_fft[1]['force'][0], 3157.)
self.assertEqual(len(bch.linearForce_fft), 2)
self.assertEqual(len(bch.flux_fft), 3)
def test_read_linmot_z(self):
bch = self.read_bch('linmot_z.BATCH')
self.assertEqual(len(bch.linearForce), 2)
self.assertEqual(max(bch.linearForce[1]['force_z']), 4074.0)
def test_dq(self):
bch = self.read_bch('dq.BATCH')
        assert bch.get(['torque', 'torque']) == []
        assert bch.get(['linearForce[-1]', 'ripple_x']) == 0.0
assert bch.get(['linearForce', 'ripple_z']) is None
# self.assertAlmostEqual(bch.dqPar['psid'][0], 2.7294321753800737, 5)
# self.assertAlmostEqual(bch.dqPar['psiq'][0], 1.0899999999999999, 5)
self.assertAlmostEqual(bch.dqPar['psid'][0], 1.93, 5)
self.assertAlmostEqual(bch.dqPar['psiq'][0], 0.77074639149333668, 5)
def test_read_felosses(self):
bch = self.read_bch('rel-felosses.BATCH')
self.assertEqual(len(bch.losses), 4)
self.assertEqual(bch.losses[-1]['stajo'], 4425.106)
self.assertEqual(bch.losses[-1]['staza'], 7504.659)
def test_read_pmsim_demag(self):
bch = self.read_bch('PMREL-4p-skewed.BATCH')
self.assertEqual(len(bch.demag), 9)
self.assertEqual([-370.92, -2241.79, -2236.31],
[d['H_max'] for d in bch.demag if d['segment'] == 3])
def test_read_characteristics(self):
bch = self.read_bch('char.BATCH')
self.assertEqual(len(bch.characteristics), 1)
self.assertEqual(len(bch.characteristics[0].keys()), 19)
self.assertEqual(len(bch.characteristics[0]['speed_torque']['n']), 16)
def test_read_asterisks(self):
bch = self.read_bch('PM-with-asterisks_001.BATCH')
self.assertTrue(np.isnan(bch.nodes))
self.assertAlmostEqual(bch.airgapInduction['an'][0][8][0], 0.0690, 1)
self.assertAlmostEqual(bch.airgapInduction['an'][0][9][0], -0.9915, 1)
def test_read_dist_leak(self):
bch = self.read_bch('PM-4p-distleak.BATCH')
self.assertTrue(bch.leak_dist_wind)
self.assertEqual(bch.leak_dist_wind['nseg'], 4)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Image cache manager.
The cache manager implements the specification at
http://wiki.openstack.org/nova-image-cache-management.
"""
import hashlib
import os
import re
import time
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo_concurrency import processutils
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
imagecache_opts = [
cfg.StrOpt('image_info_filename_pattern',
default='$instances_path/$image_cache_subdirectory_name/'
'%(image)s.info',
help='Allows image information files to be stored in '
'non-standard locations'),
cfg.BoolOpt('remove_unused_kernels',
default=False,
help='Should unused kernel images be removed? This is only '
'safe to enable if all compute nodes have been updated '
'to support this option. This will be enabled by default '
'in future.'),
cfg.IntOpt('remove_unused_resized_minimum_age_seconds',
default=3600,
help='Unused resized base images younger than this will not be '
'removed'),
cfg.BoolOpt('checksum_base_images',
default=False,
help='Write a checksum for files in _base to disk'),
cfg.IntOpt('checksum_interval_seconds',
default=3600,
help='How frequently to checksum base images'),
]
CONF = cfg.CONF
CONF.register_opts(imagecache_opts, 'libvirt')
CONF.import_opt('instances_path', 'nova.compute.manager')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
def get_cache_fname(images, key):
"""Return a filename based on the SHA1 hash of a given image ID.
Image files stored in the _base directory that match this pattern
are considered for cleanup by the image cache manager. The cache
manager considers the file to be in use if it matches an instance's
image_ref, kernel_id or ramdisk_id property.
However, in grizzly-3 and before, only the image_ref property was
considered. This means that it's unsafe to store kernel and ramdisk
images using this pattern until we're sure that all compute nodes
are running a cache manager newer than grizzly-3. For now, we
require admins to confirm that by setting the remove_unused_kernels
boolean but, at some point in the future, we'll be safely able to
assume this.
"""
image_id = str(images[key])
if ((not CONF.libvirt.remove_unused_kernels and
key in ['kernel_id', 'ramdisk_id'])):
return image_id
else:
return hashlib.sha1(image_id).hexdigest()
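# Hedged usage sketch (not part of the original module): for an instance whose
# metadata carries an image id, the cached base file name is simply the sha1
# hex digest of that id. The image id below is a made-up example.
def _example_cache_fname():
    images = {'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}
    return get_cache_fname(images, 'image_ref')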
def get_info_filename(base_path):
"""Construct a filename for storing additional information about a base
image.
Returns a filename.
"""
base_file = os.path.basename(base_path)
return (CONF.libvirt.image_info_filename_pattern
% {'image': base_file})
def is_valid_info_file(path):
"""Test if a given path matches the pattern for info files."""
digest_size = hashlib.sha1().digestsize * 2
regexp = (CONF.libvirt.image_info_filename_pattern
% {'image': ('([0-9a-f]{%(digest_size)d}|'
'[0-9a-f]{%(digest_size)d}_sm|'
'[0-9a-f]{%(digest_size)d}_[0-9]+)'
% {'digest_size': digest_size})})
m = re.match(regexp, path)
if m:
return True
return False
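# Hedged usage sketch (not part of the original module): round-tripping the
# info-file helpers above. The base file name is the sha1 digest of an empty
# string, chosen only as a plausible 40-character fingerprint; the check
# relies on CONF being initialised so the filename pattern can resolve.
def _example_info_filename_roundtrip():
    base_file = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
    info_file = get_info_filename(base_file)
    # A path produced by get_info_filename() should always be recognised.
    return is_valid_info_file(info_file)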
def _read_possible_json(serialized, info_file):
try:
d = jsonutils.loads(serialized)
except ValueError as e:
LOG.error(_LE('Error reading image info file %(filename)s: '
'%(error)s'),
{'filename': info_file,
'error': e})
d = {}
return d
def read_stored_info(target, field=None, timestamped=False):
"""Read information about an image.
Returns an empty dictionary if there is no info, just the field value if
a field is requested, or the entire dictionary otherwise.
"""
info_file = get_info_filename(target)
if not os.path.exists(info_file):
# NOTE(mikal): Special case to handle essex checksums being converted.
# There is an assumption here that target is a base image filename.
old_filename = target + '.sha1'
if field == 'sha1' and os.path.exists(old_filename):
hash_file = open(old_filename)
hash_value = hash_file.read()
hash_file.close()
write_stored_info(target, field=field, value=hash_value)
os.remove(old_filename)
d = {field: hash_value}
else:
d = {}
else:
lock_name = 'info-%s' % os.path.split(target)[-1]
lock_path = os.path.join(CONF.instances_path, 'locks')
@utils.synchronized(lock_name, external=True, lock_path=lock_path)
def read_file(info_file):
LOG.debug('Reading image info file: %s', info_file)
with open(info_file, 'r') as f:
return f.read().rstrip()
serialized = read_file(info_file)
d = _read_possible_json(serialized, info_file)
if field:
if timestamped:
return (d.get(field, None), d.get('%s-timestamp' % field, None))
else:
return d.get(field, None)
return d
def write_stored_info(target, field=None, value=None):
"""Write information about an image."""
if not field:
return
info_file = get_info_filename(target)
LOG.info(_LI('Writing stored info to %s'), info_file)
fileutils.ensure_tree(os.path.dirname(info_file))
lock_name = 'info-%s' % os.path.split(target)[-1]
lock_path = os.path.join(CONF.instances_path, 'locks')
@utils.synchronized(lock_name, external=True, lock_path=lock_path)
def write_file(info_file, field, value):
d = {}
if os.path.exists(info_file):
with open(info_file, 'r') as f:
d = _read_possible_json(f.read(), info_file)
d[field] = value
d['%s-timestamp' % field] = time.time()
with open(info_file, 'w') as f:
f.write(jsonutils.dumps(d))
write_file(info_file, field, value)
def _hash_file(filename):
"""Generate a hash for the contents of a file."""
checksum = hashlib.sha1()
with open(filename) as f:
for chunk in iter(lambda: f.read(32768), b''):
checksum.update(chunk)
return checksum.hexdigest()
def read_stored_checksum(target, timestamped=True):
"""Read the checksum.
Returns the checksum (as hex) or None.
"""
return read_stored_info(target, field='sha1', timestamped=timestamped)
def write_stored_checksum(target):
"""Write a checksum to disk for a file in _base."""
write_stored_info(target, field='sha1', value=_hash_file(target))
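# Hedged usage sketch (not part of the original module): checksumming a cached
# base file and reading the value back together with its timestamp. The path
# passed in is hypothetical and must exist on disk; CONF must also be loaded
# so the info-file pattern and lock path resolve.
def _example_checksum_roundtrip(base_file):
    write_stored_checksum(base_file)
    checksum, timestamp = read_stored_checksum(base_file, timestamped=True)
    return checksum, timestamp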
class ImageCacheManager(imagecache.ImageCacheManager):
def __init__(self):
super(ImageCacheManager, self).__init__()
self.lock_path = os.path.join(CONF.instances_path, 'locks')
self._reset_state()
def _reset_state(self):
"""Reset state variables used for each pass."""
self.used_images = {}
self.image_popularity = {}
self.instance_names = set()
self.back_swap_images = set()
self.used_swap_images = set()
self.active_base_files = []
self.corrupt_base_files = []
self.originals = []
self.removable_base_files = []
self.unexplained_images = []
def _store_image(self, base_dir, ent, original=False):
"""Store a base image for later examination."""
entpath = os.path.join(base_dir, ent)
if os.path.isfile(entpath):
self.unexplained_images.append(entpath)
if original:
self.originals.append(entpath)
def _store_swap_image(self, ent):
"""Store base swap images for later examination."""
names = ent.split('_')
if len(names) == 2 and names[0] == 'swap':
if len(names[1]) > 0 and names[1].isdigit():
LOG.debug('Adding %s into backend swap images', ent)
self.back_swap_images.add(ent)
def _list_base_images(self, base_dir):
"""Return a list of the images present in _base.
Determine what images we have on disk. There will be other files in
this directory so we only grab the ones which are the right length
to be disk images.
"""
digest_size = hashlib.sha1().digestsize * 2
for ent in os.listdir(base_dir):
if len(ent) == digest_size:
self._store_image(base_dir, ent, original=True)
elif (len(ent) > digest_size + 2 and
ent[digest_size] == '_' and
not is_valid_info_file(os.path.join(base_dir, ent))):
self._store_image(base_dir, ent, original=False)
else:
self._store_swap_image(ent)
return {'unexplained_images': self.unexplained_images,
'originals': self.originals}
def _list_backing_images(self):
"""List the backing images currently in use."""
inuse_images = []
for ent in os.listdir(CONF.instances_path):
if ent in self.instance_names:
LOG.debug('%s is a valid instance name', ent)
disk_path = os.path.join(CONF.instances_path, ent, 'disk')
if os.path.exists(disk_path):
LOG.debug('%s has a disk file', ent)
try:
backing_file = libvirt_utils.get_disk_backing_file(
disk_path)
except processutils.ProcessExecutionError:
# (for bug 1261442)
if not os.path.exists(disk_path):
LOG.debug('Failed to get disk backing file: %s',
disk_path)
continue
else:
raise
LOG.debug('Instance %(instance)s is backed by '
'%(backing)s',
{'instance': ent,
'backing': backing_file})
if backing_file:
backing_path = os.path.join(
CONF.instances_path,
CONF.image_cache_subdirectory_name,
backing_file)
if backing_path not in inuse_images:
inuse_images.append(backing_path)
if backing_path in self.unexplained_images:
LOG.warn(_LW('Instance %(instance)s is using a '
'backing file %(backing)s which '
'does not appear in the image '
'service'),
{'instance': ent,
'backing': backing_file})
self.unexplained_images.remove(backing_path)
return inuse_images
def _find_base_file(self, base_dir, fingerprint):
"""Find the base file matching this fingerprint.
Yields the name of the base file, a boolean which is True if the image
is "small", and a boolean which indicates if this is a resized image.
Note that it is possible for more than one yield to result from this
check.
If no base file is found, then nothing is yielded.
"""
# The original file from glance
base_file = os.path.join(base_dir, fingerprint)
if os.path.exists(base_file):
yield base_file, False, False
# An older naming style which can be removed sometime after Folsom
base_file = os.path.join(base_dir, fingerprint + '_sm')
if os.path.exists(base_file):
yield base_file, True, False
# Resized images
resize_re = re.compile('.*/%s_[0-9]+$' % fingerprint)
for img in self.unexplained_images:
m = resize_re.match(img)
if m:
yield img, False, True
def _verify_checksum(self, img_id, base_file, create_if_missing=True):
"""Compare the checksum stored on disk with the current file.
Note that if the checksum fails to verify this is logged, but no actual
action occurs. This is something sysadmins should monitor for and
handle manually when it occurs.
"""
if not CONF.libvirt.checksum_base_images:
return None
lock_name = 'hash-%s' % os.path.split(base_file)[-1]
# Protect against other nova-computes performing checksums at the same
# time if we are using shared storage
@utils.synchronized(lock_name, external=True, lock_path=self.lock_path)
def inner_verify_checksum():
(stored_checksum, stored_timestamp) = read_stored_checksum(
base_file, timestamped=True)
if stored_checksum:
# NOTE(mikal): Checksums are timestamped. If we have recently
# checksummed (possibly on another compute node if we are using
# shared storage), then we don't need to checksum again.
if (stored_timestamp and
time.time() - stored_timestamp <
CONF.libvirt.checksum_interval_seconds):
return True
# NOTE(mikal): If there is no timestamp, then the checksum was
# performed by a previous version of the code.
if not stored_timestamp:
write_stored_info(base_file, field='sha1',
value=stored_checksum)
current_checksum = _hash_file(base_file)
if current_checksum != stored_checksum:
LOG.error(_LE('image %(id)s at (%(base_file)s): image '
'verification failed'),
{'id': img_id,
'base_file': base_file})
return False
else:
return True
else:
LOG.info(_LI('image %(id)s at (%(base_file)s): image '
'verification skipped, no hash stored'),
{'id': img_id,
'base_file': base_file})
# NOTE(mikal): If the checksum file is missing, then we should
# create one. We don't create checksums when we download images
# from glance because that would delay VM startup.
if CONF.libvirt.checksum_base_images and create_if_missing:
LOG.info(_LI('%(id)s (%(base_file)s): generating '
'checksum'),
{'id': img_id,
'base_file': base_file})
write_stored_checksum(base_file)
return None
return inner_verify_checksum()
@staticmethod
def _get_age_of_file(base_file):
if not os.path.exists(base_file):
LOG.debug('Cannot remove %s, it does not exist', base_file)
return (False, 0)
mtime = os.path.getmtime(base_file)
age = time.time() - mtime
return (True, age)
def _remove_old_enough_file(self, base_file, maxage, remove_sig=True):
"""Remove a single swap or base file if it is old enough."""
exists, age = self._get_age_of_file(base_file)
if not exists:
return
if age < maxage:
LOG.info(_LI('Base or swap file too young to remove: %s'),
base_file)
else:
LOG.info(_LI('Removing base or swap file: %s'), base_file)
try:
os.remove(base_file)
if remove_sig:
signature = get_info_filename(base_file)
if os.path.exists(signature):
os.remove(signature)
except OSError as e:
LOG.error(_LE('Failed to remove %(base_file)s, '
'error was %(error)s'),
{'base_file': base_file,
'error': e})
def _remove_swap_file(self, base_file):
"""Remove a single swap base file if it is old enough."""
maxage = CONF.remove_unused_original_minimum_age_seconds
self._remove_old_enough_file(base_file, maxage, remove_sig=False)
def _remove_base_file(self, base_file):
"""Remove a single base file if it is old enough."""
maxage = CONF.libvirt.remove_unused_resized_minimum_age_seconds
if base_file in self.originals:
maxage = CONF.remove_unused_original_minimum_age_seconds
self._remove_old_enough_file(base_file, maxage)
def _handle_base_image(self, img_id, base_file):
"""Handle the checks for a single base image."""
image_bad = False
image_in_use = False
LOG.info(_LI('image %(id)s at (%(base_file)s): checking'),
{'id': img_id,
'base_file': base_file})
if base_file in self.unexplained_images:
self.unexplained_images.remove(base_file)
if (base_file and os.path.exists(base_file)
and os.path.isfile(base_file)):
# _verify_checksum returns True if the checksum is ok, and None if
# there is no checksum file
checksum_result = self._verify_checksum(img_id, base_file)
if checksum_result is not None:
image_bad = not checksum_result
# Give other threads a chance to run
time.sleep(0)
instances = []
if img_id in self.used_images:
local, remote, instances = self.used_images[img_id]
if local > 0 or remote > 0:
image_in_use = True
LOG.info(_LI('image %(id)s at (%(base_file)s): '
'in use: on this node %(local)d local, '
'%(remote)d on other nodes sharing this instance '
'storage'),
{'id': img_id,
'base_file': base_file,
'local': local,
'remote': remote})
self.active_base_files.append(base_file)
if not base_file:
LOG.warn(_LW('image %(id)s at (%(base_file)s): warning '
'-- an absent base file is in use! '
'instances: %(instance_list)s'),
{'id': img_id,
'base_file': base_file,
'instance_list': ' '.join(instances)})
if image_bad:
self.corrupt_base_files.append(base_file)
if base_file:
if not image_in_use:
LOG.debug('image %(id)s at (%(base_file)s): image is not in '
'use',
{'id': img_id,
'base_file': base_file})
self.removable_base_files.append(base_file)
else:
LOG.debug('image %(id)s at (%(base_file)s): image is in '
'use',
{'id': img_id,
'base_file': base_file})
if os.path.exists(base_file):
libvirt_utils.chown(base_file, os.getuid())
os.utime(base_file, None)
def _age_and_verify_swap_images(self, context, base_dir):
LOG.debug('Verify swap images')
for ent in self.back_swap_images:
base_file = os.path.join(base_dir, ent)
if ent in self.used_swap_images and os.path.exists(base_file):
libvirt_utils.chown(base_file, os.getuid())
os.utime(base_file, None)
elif self.remove_unused_base_images:
self._remove_swap_file(base_file)
error_images = self.used_swap_images - self.back_swap_images
for error_image in error_images:
            LOG.warn(_LW('%s swap image was used by an instance'
                         ' but no backing file exists!'), error_image)
def _age_and_verify_cached_images(self, context, all_instances, base_dir):
LOG.debug('Verify base images')
# Determine what images are on disk because they're in use
for img in self.used_images:
fingerprint = hashlib.sha1(img).hexdigest()
LOG.debug('Image id %(id)s yields fingerprint %(fingerprint)s',
{'id': img,
'fingerprint': fingerprint})
for result in self._find_base_file(base_dir, fingerprint):
base_file, image_small, image_resized = result
self._handle_base_image(img, base_file)
if not image_small and not image_resized:
self.originals.append(base_file)
# Elements remaining in unexplained_images might be in use
inuse_backing_images = self._list_backing_images()
for backing_path in inuse_backing_images:
if backing_path not in self.active_base_files:
self.active_base_files.append(backing_path)
# Anything left is an unknown base image
for img in self.unexplained_images:
LOG.warn(_LW('Unknown base file: %s'), img)
self.removable_base_files.append(img)
# Dump these lists
if self.active_base_files:
LOG.info(_LI('Active base files: %s'),
' '.join(self.active_base_files))
if self.corrupt_base_files:
LOG.info(_LI('Corrupt base files: %s'),
' '.join(self.corrupt_base_files))
if self.removable_base_files:
LOG.info(_LI('Removable base files: %s'),
' '.join(self.removable_base_files))
if self.remove_unused_base_images:
for base_file in self.removable_base_files:
self._remove_base_file(base_file)
# That's it
LOG.debug('Verification complete')
def _get_base(self):
# NOTE(mikal): The new scheme for base images is as follows -- an
# image is streamed from the image service to _base (filename is the
# sha1 hash of the image id). If CoW is enabled, that file is then
# resized to be the correct size for the instance (filename is the
# same as the original, but with an underscore and the resized size
# in bytes). This second file is then CoW'd to the instance disk. If
# CoW is disabled, the resize occurs as part of the copy from the
# cache to the instance directory. Files ending in _sm are no longer
# created, but may remain from previous versions.
base_dir = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name)
if not os.path.exists(base_dir):
LOG.debug('Skipping verification, no base directory at %s',
base_dir)
return
return base_dir
def update(self, context, all_instances):
base_dir = self._get_base()
if not base_dir:
return
# reset the local statistics
self._reset_state()
# read the cached images
self._list_base_images(base_dir)
# read running instances data
running = self._list_running_instances(context, all_instances)
self.used_images = running['used_images']
self.image_popularity = running['image_popularity']
self.instance_names = running['instance_names']
self.used_swap_images = running['used_swap_images']
# perform the aging and image verification
self._age_and_verify_cached_images(context, all_instances, base_dir)
self._age_and_verify_swap_images(context, base_dir)
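# Hedged illustration (not part of the original module) of the _base naming
# scheme described in _get_base(): the original download, a resized CoW
# variant, the legacy '_sm' form and the checksum/timestamp info file. The
# fingerprint and the 10 GB size below are made-up examples.
def _example_base_file_names(fingerprint='da39a3ee5e6b4b0d3255bfef95601890afd80709'):
    return [fingerprint,                     # image streamed from glance
            fingerprint + '_10737418240',    # resized copy used for CoW disks
            fingerprint + '_sm',             # legacy pre-Folsom "small" file
            get_info_filename(fingerprint)]  # metadata written by this module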
|
|
'''
Attempt to implement synchronous optimizers for Keras models. A synchronous
optimizer averages the gradients across devices, which should result in a more
consistent learning convergence rate. An alternative popular implementation is
via Horovod.
Note, the current implementation might not be working correctly.
'''
from __future__ import print_function
import sys
from keras import backend as K
from keras.optimizers import (
clip_norm, Optimizer, TFOptimizer,
Adagrad, Adadelta, Adam, Adamax, Nadam, RMSprop, SGD)
from keras_exp._mixin_common import mixedomatic
_DEBUG = False
if _DEBUG:
# import traceback
pass
if K.backend() == 'tensorflow':
import tensorflow as tf
try:
from tensorflow.contrib import nccl
HAVE_NCCL = True
print('NCCL support available', file=sys.stderr)
except ImportError:
HAVE_NCCL = False
print('WARNING: NCCL support not available', file=sys.stderr)
__all__ = (
'OptimizerMultiGPUMixin',
'AdagradMGPU', 'AdadeltaMGPU', 'AdamMGPU', 'AdamaxMGPU', 'NadamMGPU',
'RMSPropMGPU', 'SgdMGPU', 'TFOptimizerMGPU',)
def all_avg_gradients(
tower_gradvars, devices, param_server_device='/gpu:0', usenccl=True):
'''Take the average of gradients across devices'''
if len(devices) == 1:
return tower_gradvars
num_devices = len(devices)
avg_gradvars = []
for layer in zip(*tower_gradvars):
grads_on_devices, vars_on_devices = zip(*layer)
if HAVE_NCCL and usenccl:
# Note: These nccl ops _must_ be run on all devices, else deadlock
# print('ALL_AVG_GRADIENTS GRADS_ON_DEVICES:',
# grads_on_devices) # DEBUG
avg_grads_on_devices = nccl.all_sum(grads_on_devices)
for idev, device in enumerate(devices):
with tf.device(device):
avg_grads_on_devices[idev] *= 1. / num_devices
else:
with tf.device(param_server_device):
avg_grad = tf.reduce_mean(tf.stack(grads_on_devices), 0)
avg_grads_on_devices = [avg_grad] * num_devices
avg_gradvars_on_devices = zip(*(avg_grads_on_devices, vars_on_devices))
avg_gradvars.append(avg_gradvars_on_devices)
return list(zip(*avg_gradvars))
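# Minimal usage sketch (assumption, not part of the original module): average
# per-tower (gradient, variable) pairs across two hypothetical GPU devices,
# forcing the non-NCCL reduce_mean code path.
def _example_all_avg_gradients(tower_gradvars):
    gdev_list = ['/gpu:0', '/gpu:1']
    return all_avg_gradients(tower_gradvars, gdev_list,
                             param_server_device='/gpu:0', usenccl=False)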
class TFOptimizerMGPU(TFOptimizer):
'''Wrapper class for native TensorFlow optimizers.'''
def __init__(self, optimizer, gdev_list=None):
TFOptimizer.__init__(self, optimizer)
self._gdev_list = gdev_list
@property
def ismgpu(self):
'''Property to indicate this is a multigpu enabled optimizer.'''
return True
def get_updates(self, loss, params):
tower_gradvars = []
gdev_list = self._gdev_list
global_scope = tf.get_variable_scope()
for idev, device in enumerate(gdev_list):
with tf.device(device), \
tf.variable_scope(global_scope, reuse=idev > 0), \
tf.name_scope('tower_%i' % idev):
                # compute_gradients() already returns (gradient, variable)
                # pairs, so they can be collected per tower directly.
                gradvars = self.optimizer.compute_gradients(loss, params)
tower_gradvars.append(gradvars)
tower_gradvars = all_avg_gradients(tower_gradvars,
gdev_list,
usenccl=False)
self.updates = [K.update_add(self.iterations, 1)]
for device_num, device in enumerate(gdev_list):
with tf.device(device):
gradvars = tower_gradvars[device_num]
                opt_update = self.optimizer.apply_gradients(
                    gradvars, global_step=self.iterations)
self.updates.append(opt_update)
return self.updates
class OptimizerMultiGPUMixin(object):
'''
    Refer to classes below (such as SgdMGPU) for an example of how to use
this mixin.
'''
# :param baseopt: A base class keras optimizer such as SGD, RMSprop,...
def __init__(self, gdev_list=None, usenccl=True):
'''
:param list gdev_list: List of gpu devices i.e.
['/gpu:0', '/gpu:1', ...]. Use function get_available_gpus to get
the list of available gpus.
        :param bool usenccl: Use the contrib.nccl Tensorflow library for
            gradient averaging. Note that the model's usenccl option overrides
            the optimizer's usenccl option during the model compile stage.
'''
if len(self.__class__.__bases__) < 2 or \
not isinstance(self, Optimizer):
raise RuntimeError(
'A Keras Optimizer derived class required for mixin: {}.\nUse '
'multiple inheritance. Ex.:\n{}'.format(
'OptimizerMultiGPUMixin',
' @mixedomatic(ignore_kargs_spec=True)\n'
' class RMSPropMGPU(OptimizerMultiGPUMixin, RMSprop):\n'
' pass\n'
))
baseopt = super(OptimizerMultiGPUMixin, self)
# baseopt = self.__class__.__bases__[-1]
self._baseopt = baseopt
self._gdev_list = gdev_list
# This mixin class works fine for 1-gpu case.
# ngpus = len(gdev_list)
# if ngpus < 2:
# err_msg = 'Multi-GPU requires more than one gpu devices.'
# raise RuntimeError(err_msg)
self.__idev = 0 # SET STATE: DEVICE
self._tower_gradvars = None
self._usenccl = usenccl
@property
def ismgpu(self):
'''Property to indicate this is a multigpu enabled optimizer.'''
return True
@property
def usenccl(self):
'''Property to indicate if using the nccl contrib library.'''
return self._usenccl
@usenccl.setter
def usenccl(self, usenccl):
self._usenccl = usenccl
@property
def _device(self):
'''Device state currently used within get_gradients. This is a
protected/private property so use it as such i.e. an implementation
detail not a public property or interface.'''
return self.__idev
@_device.setter
def _device(self, device):
self.__idev = device
def _get_tower_gradvars(self, loss, params):
gdev_list = self._gdev_list
# tower parallelization
global_scope = tf.get_variable_scope()
tower_gradvars = []
for idev, device in enumerate(gdev_list):
# tf.variable_scope('GPU_%i' % idev), \
with tf.device(device), \
tf.variable_scope(global_scope, reuse=idev > 0), \
tf.name_scope('tower_%i' % idev):
# tf.gradients returns list of `sum(dy/dx)`. The gradients
# are aggregated by all_avg_gradients. Something doesn't seem
# right though. SOMEWHAT SLOW.
# TODO: Need to figure out how to efficiently aggregate.
colo = True if not self._usenccl else not HAVE_NCCL
# colo = True
grads = tf.gradients(
loss, params,
# # GATE_NONE faster??
# gate_gradients=tf.train.Optimizer.GATE_NONE,
colocate_gradients_with_ops=colo)
if hasattr(self, 'clipnorm') and self.clipnorm > 0:
norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
if hasattr(self, 'clipvalue') and self.clipvalue > 0:
grads = [K.clip(g, -self.clipvalue, self.clipvalue)
for g in grads]
gradvars = zip(grads, params)
tower_gradvars.append(gradvars)
tower_gradvars = all_avg_gradients(tower_gradvars, gdev_list,
usenccl=self._usenccl)
return tower_gradvars
def get_gradients(self, loss, params):
'''
:override get_gradients: Overrides the base Optimizer class/sub-class
get_gradients method to get gradients from tower grads.
'''
# READ STATE: TOWER GRADS
tower_gradvars = self._tower_gradvars \
if self._tower_gradvars is not None else \
self._get_tower_gradvars(loss, params)
idev = self._device # READ STATE: DEVICE
grads = [tg[0] for tg in tower_gradvars[idev]]
if _DEBUG:
# traceback.print_stack() # DEBUG
print('\nOptimizerMultiGPUMixin grads: {}'.format(grads)) # DEBUG
return grads
def get_updates(self, loss, params):
'''
:override get_updates: Overrides the base Optimizer class/sub-class
get_updates method to optionally use nccl for gradient aggregation.
'''
tower_gradvars = self._get_tower_gradvars(loss, params)
self._tower_gradvars = tower_gradvars # SET STATE: TOWER GRADS
gdev_list = self._gdev_list
# ngpus = len(gdev_list)
global_scope = tf.get_variable_scope()
updates = []
# IMPORTANT when using NCCL to get updates for all devices otherwise
# the nccl ops deadlock. Hence the loop below over all gpus.
for idev, dev in enumerate(gdev_list):
# Clear internal updates state. Aggregated and set after for-loop
self.updates = []
self._device = idev # SET STATE: DEVICE
# The self._baseopt.get_updates calls get_gradients method.
# The self._device state is set and the get_gradients uses this
# state to return the gradients for that device.
with tf.device(dev), \
tf.variable_scope(global_scope, reuse=idev > 0), \
tf.name_scope('tower_%i' % idev):
# updates_ = self._baseopt.get_updates(self, params,
# constraints, loss)
updates_ = self._baseopt.get_updates(loss, params)
updates += [up for up in updates_ if up not in updates]
if (not HAVE_NCCL or not self.usenccl) and idev == 0:
# no need to iterate over all devices
break
self._device = 0 # SET STATE: DEVICE
self.updates = updates
# if _DEBUG:
# print 'UPDATES:', _updates # DEBUG
return self.updates
# Note: The code used in keras Optims is in bad style.
# for k in kwargs:
# if k not in allowed_kwargs:
# raise TypeError('Unexpected keyword argument '
# 'passed to optimizer: ' + str(k))
# List the allowed kwargs in __init__ and don't use **kwargs if the intention
# is not to allow pass-through of unknown kwargs.
# Current workaround is to add ignore_kargs_spec=True to the decorator.
# Implementation without mixedomatic
# class RMSPropMGPU(OptimizerMultiGPUMixin, RMSprop):
# def __init__(self, **kwargs):
# gdev_list = kwargs.pop('gdev_list', [])
# RMSprop.__init__(self, **kwargs)
# OptimizerMultiGPUMixin.__init__(self, gdev_list=gdev_list)
@mixedomatic(ignore_kargs_spec=True)
class AdagradMGPU(OptimizerMultiGPUMixin, Adagrad):
'''Multigpu Adagrad'''
pass
@mixedomatic(ignore_kargs_spec=True)
class AdadeltaMGPU(OptimizerMultiGPUMixin, Adadelta):
'''Multigpu Adadelta'''
pass
@mixedomatic(ignore_kargs_spec=True)
class AdamMGPU(OptimizerMultiGPUMixin, Adam):
'''Multigpu Adam'''
pass
@mixedomatic(ignore_kargs_spec=True)
class AdamaxMGPU(OptimizerMultiGPUMixin, Adamax):
'''Multigpu Adamax'''
pass
@mixedomatic(ignore_kargs_spec=True)
class NadamMGPU(OptimizerMultiGPUMixin, Nadam):
'''Multigpu Nadam'''
pass
@mixedomatic(ignore_kargs_spec=True)
class RMSPropMGPU(OptimizerMultiGPUMixin, RMSprop):
'''Multigpu RMSprop'''
pass
@mixedomatic(ignore_kargs_spec=True)
class SgdMGPU(OptimizerMultiGPUMixin, SGD):
'''Multigpu SGD'''
pass
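# Hedged usage sketch (not part of the original module): constructing a
# multi-GPU SGD optimizer for two hypothetical devices. The kwargs are split
# by the mixedomatic decorator between SGD and OptimizerMultiGPUMixin.
def _example_build_sgd_mgpu():
    gdev_list = ['/gpu:0', '/gpu:1']
    # The result is then passed to Keras as usual, e.g.
    #   model.compile(loss='categorical_crossentropy', optimizer=opt)
    opt = SgdMGPU(lr=0.01, momentum=0.9, gdev_list=gdev_list, usenccl=False)
    return opt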
|
|
#Pyjsdl - Copyright (C) 2013 James Garnon <https://gatc.ca/>
#Released under the MIT License <https://opensource.org/licenses/MIT>
from pyjsdl.pyjsarray import BitSet
from pyjsdl.color import Color
import sys
if sys.version_info < (3,):
from pyjsdl.util import _range as range
__docformat__ = 'restructuredtext'
def from_surface(surface, threshold=127):
"""
**pyjsdl.mask.from_surface**
Return Mask derived from surface using alpha transparency.
Optional argument to set alpha threshold.
"""
mask = Mask((surface.width, surface.height))
if not mask.bit:
return None
pixels = surface.impl.getImageData(0, 0, surface.width, surface.height)
width, height = surface.width*4, surface.height
for y in range(0, height):
xpix = 0
i = (y*width)+3
for x in range(0, width, 4):
if surface._getPixel(pixels, i+x) > threshold:
mask.set_at((xpix,y))
xpix += 1
return mask
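# Hedged usage sketch (not part of the original module): build a mask of the
# mostly opaque pixels of a pyjsdl Surface and report how many bits were set.
# The 'surface' argument is assumed to be a pyjsdl Surface instance.
def _example_mask_from_surface(surface):
    mask = from_surface(surface, threshold=127)
    if mask is None:
        return 0
    return mask.count()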
def from_threshold(surface, color, threshold=(0,0,0,255)):
"""
**pyjsdl.mask.from_threshold**
Return Mask from surface using a given color.
Optional threshold argument to set color range and alpha threshold.
"""
mask = Mask((surface.width, surface.height))
if not mask.bit:
return None
pixels = surface.impl.getImageData(0, 0, surface.width, surface.height)
if threshold == (0,0,0,255):
color = Color(color)
color = (color.r,color.g,color.b)
width, height = surface.width*4, surface.height
for y in range(0, height):
xpix = 0
i = y*width
for x in range(0, width, 4):
ix = i+x
if (surface._getPixel(pixels, ix) == color[0] and
surface._getPixel(pixels, ix+1) == color[1] and
surface._getPixel(pixels, ix+2) == color[2] and
surface._getPixel(pixels, ix+3) >= threshold[3]):
mask.set_at((xpix,y))
xpix += 1
else:
color = Color(color)
col = {}
for i, c in enumerate(('r','g','b')):
if threshold[i]:
col[c+'1'] = color[i] - threshold[i] - 1
col[c+'2'] = color[i] + threshold[i] + 1
else:
col[c+'1'] = color[i] - 1
col[c+'2'] = color[i] + 1
col['a'] = threshold[3] - 1
width, height = surface.width*4, surface.height
for y in range(0, height):
xpix = 0
i = y*width
for x in range(0, width, 4):
ix = i+x
if ((col['r1'] < surface._getPixel(pixels, ix) < col['r2']) and
(col['g1'] < surface._getPixel(pixels, ix+1) < col['g2']) and
(col['b1'] < surface._getPixel(pixels, ix+2) < col['b2']) and
(surface._getPixel(pixels, ix+3) > col['a'])):
mask.set_at((xpix,y))
xpix += 1
return mask
class Mask(object):
"""
**pyjsdl.mask.Mask**
* Mask.get_size
* Mask.get_at
* Mask.set_at
* Mask.fill
* Mask.clear
* Mask.invert
* Mask.count
* Mask.overlap
* Mask.toString
"""
def __init__(self, size):
"""
Return a Mask object.
The size argument is (width, height) of the mask.
The mask is represented by a list of BitSet objects, one per row.
"""
self.width = int(size[0])
self.height = int(size[1])
self.bit = []
for bitset in range(self.height):
self.bit.append(BitSet(self.width))
def __str__(self):
return self.toString()
def __repr__(self):
return "%s(%r)" % (self.__class__, self.__dict__)
def get_size(self):
"""
Return width, height of mask.
"""
return (self.width, self.height)
def get_at(self, pos):
"""
Return bit setting for given pos.
"""
return self.bit[pos[1]].get(pos[0])
def set_at(self, pos, value=1):
"""
Set bit for given pos.
Optional value to set bit, either 1 or 0, defaults to 1.
"""
self.bit[pos[1]].set(pos[0], value)
return None
def fill(self):
"""
Fill mask.
"""
for bitset in self.bit:
bitset.fill()
return None
def clear(self):
"""
Clear mask.
"""
for bitset in self.bit:
bitset.clear()
return None
def invert(self):
"""
Invert bit value in mask.
"""
for bitset in self.bit:
bitset.flip(0,self.width)
return None
def count(self):
"""
Return count of true bits in mask.
"""
true_bits = 0
for bitset in self.bit:
true_bits += bitset.cardinality()
return true_bits
def overlap(self, mask, offset):
"""
Return True if the mask at the offset position overlaps with this mask.
"""
if offset[0] > 0:
x1 = offset[0]
x2 = 0
else:
x1 = 0
x2 = -offset[0]
if offset[1] > 0:
y1 = offset[1]
y2 = 0
else:
y1 = 0
y2 = -offset[1]
w = min(self.width-x1, mask.width-x2)
h = min(self.height-y1, mask.height-y2)
if w > 0 and h > 0:
for y in range(h):
if self.bit[y1+y].get(x1, x1+w).intersects(
mask.bit[y2+y].get(x2, x2+w)):
return True
return None
def toString(self, bit=('1','0')):
"""
Return string representation of mask.
Optional bit argument specifies the characters used for set and unset bits.
"""
cbit = {True:bit[0], False:bit[1]}
cbitset = []
for bitset in self.bit:
cbitset.append('\n')
cbitset.extend([cbit[bitset.get(i)]
for i in range(self.width)])
bitstr = ''.join(cbitset)
return bitstr
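# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical example of the Mask API defined above: set one bit
# in each of two masks and check Mask.overlap at different offsets. Only the
# class and methods in this module are assumed.
def _example_mask_overlap():
    m1 = Mask((4, 4))
    m1.set_at((2, 2))
    m2 = Mask((4, 4))
    m2.set_at((0, 0))
    # Offsetting m2 by (2, 2) lines its set bit up with the bit in m1, so the
    # masks overlap there; at offset (1, 0) they do not.
    assert m1.overlap(m2, (2, 2))
    assert not m1.overlap(m2, (1, 0))
    return m1.toString()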
|
|
# openstack_dashboard.local.dashboards.project_nci.vlconfig.forms
#
# Copyright (c) 2015, NCI, Australian National University.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
import json
import logging
#import pdb ## DEBUG
import re
import sys
import uuid
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.local.nci import crypto as ncicrypto
from openstack_dashboard.local.nci.constants import *
LOG = logging.getLogger(__name__)
SPECIAL_FIELDS_REGEX = r"(repo_key|eyaml)"
class VLConfigForm(forms.SelfHandlingForm):
puppet_action = forms.ChoiceField(
label=_("Default Puppet Action"),
required=True,
choices=[("auto", _("Automatic"))] + PUPPET_ACTION_CHOICES,
help_text=_("Default Puppet command to execute. This value can be overridden in the launch instance dialog."))
puppet_env = forms.RegexField(
label=_("Default Puppet Environment"),
required=True,
regex=REPO_BRANCH_REGEX,
help_text=_("Default Puppet configuration environment (or branch name). This value can be overridden in the launch instance dialog."))
repo_path = forms.RegexField(
label=_("Puppet Repository Path"),
required=True,
regex=REPO_PATH_REGEX,
help_text=_("Path component of the Puppet configuration repository URL."))
repo_key_public = forms.CharField(
widget=forms.Textarea(attrs={"readonly": True}),
label=_("Public Deployment Key"),
required=False)
repo_key_fp = forms.CharField(
widget=forms.TextInput(attrs={"readonly": True}),
label=_("Deployment Key Fingerprint"),
required=False)
repo_key_create = forms.BooleanField(
label=_("Create New Deployment Key"),
required=False,
initial=True,
help_text=_("Generates a new SSH key for deploying the Puppet configuration repository."))
eyaml_key_fp = forms.CharField(
widget=forms.TextInput(attrs={"readonly": True}),
label=_("Hiera eyaml Key Fingerprint"),
required=False)
eyaml_key_upload = forms.FileField(
label=_("Import Hiera eyaml Key"),
required=False)
eyaml_cert_fp = forms.CharField(
widget=forms.TextInput(attrs={"readonly": True}),
label=_("Hiera eyaml Certificate Fingerprint"),
required=False)
eyaml_cert_upload = forms.FileField(
label=_("Import Hiera eyaml Certificate"),
required=False)
eyaml_update = forms.ChoiceField(
label=_("Modify Hiera eyaml Certificate/Key Pair"),
required=False,
choices=[
("", _("No Change")),
("create", _("Create New")),
("import", _("Import")),
],
initial="create",
help_text=_("Create or import a certificate/key pair for encrypting data in Hiera."))
revision = forms.CharField(
widget=forms.HiddenInput(),
required=False)
def __init__(self, request, *args, **kwargs):
super(VLConfigForm, self).__init__(request, *args, **kwargs)
self.saved_params = {}
self.cfg_timestamp = None
self.stash = ncicrypto.CryptoStash(request)
obj = None
try:
LOG.debug("Checking if project configuration exists")
container = nci_private_container_name(request)
config_obj_name = nci_vl_project_config_name()
if api.swift.swift_object_exists(request, container, config_obj_name):
LOG.debug("Loading project configuration")
obj = api.swift.swift_get_object(request,
container,
config_obj_name,
resp_chunk_size=None)
self.cfg_timestamp = obj.timestamp
if self.cfg_timestamp is None:
# Work around a bug in Ceph which doesn't return the "X-Timestamp"
# header. This appears to be fixed in Ceph 0.87.1 (Giant).
# http://tracker.ceph.com/issues/8911
# https://github.com/ceph/ceph/commit/8c573c8826096d90dc7dfb9fd0126b9983bc15eb
metadata = api.swift.swift_api(request).head_object(container, config_obj_name)
try:
lastmod = metadata["last-modified"]
# https://github.com/ceph/ceph/blob/v0.80.6/src/rgw/rgw_rest.cc#L325
dt = datetime.datetime.strptime(lastmod, "%a, %d %b %Y %H:%M:%S %Z")
assert dt.utcoffset() is None
self.cfg_timestamp = dt.strftime("%Y-%m-%dT%H:%M:%SZ")
except Exception as e:
LOG.exception("Error getting project config timestamp: {0}".format(e))
except:
exceptions.handle(request)
# NB: Can't use "self.api_error()" here since form not yet validated.
msg = _("Failed to load configuration data.")
self.set_warning(msg)
return
try:
if obj and obj.data:
LOG.debug("Parsing project configuration")
self.saved_params = json.loads(obj.data)
except ValueError as e:
LOG.exception("Error parsing project configuration: {0}".format(e))
messages.error(request, str(e))
# NB: Can't use "self.api_error()" here since form not yet validated.
msg = _("Configuration data is corrupt and cannot be loaded.")
self.set_warning(msg)
return
if not self.saved_params:
if request.method == "GET":
msg = _("No existing project configuration found.")
self.set_warning(msg)
self.fields["puppet_action"].initial = "auto"
self.fields["puppet_env"].initial = "production"
self.fields["repo_path"].initial = "{0}/puppet.git".format(request.user.project_name)
return
for k, v in self.saved_params.iteritems():
if (k in self.fields) and not re.match(SPECIAL_FIELDS_REGEX, k):
self.fields[k].initial = v
partial_load = False
if self.saved_params.get("stash"):
try:
self.stash.init_params(self.saved_params["stash"])
except:
exceptions.handle(request)
partial_load = True
else:
if self.saved_params.get("repo_key"):
self.fields["repo_key_create"].initial = False
if request.method == "GET":
try:
key = self.stash.load_private_key(self.saved_params["repo_key"])
self.fields["repo_key_public"].initial = key.ssh_publickey()
self.fields["repo_key_fp"].initial = key.ssh_fingerprint()
except:
exceptions.handle(request)
partial_load = True
if self.saved_params.get("eyaml_key"):
self.fields["eyaml_update"].initial = ""
if request.method == "GET":
try:
key = self.stash.load_private_key(self.saved_params["eyaml_key"])
self.fields["eyaml_key_fp"].initial = key.fingerprint()
except:
exceptions.handle(request)
partial_load = True
if self.saved_params.get("eyaml_cert"):
self.fields["eyaml_update"].initial = ""
if request.method == "GET":
try:
cert = self.stash.load_x509_cert(self.saved_params["eyaml_cert"])
self.fields["eyaml_cert_fp"].initial = cert.fingerprint()
except:
exceptions.handle(request)
partial_load = True
if partial_load:
# NB: Can't use "self.api_error()" here since form not yet validated.
msg = _("The project configuration was only partially loaded.")
self.set_warning(msg)
def clean(self):
data = super(VLConfigForm, self).clean()
# Don't allow the form data to be saved if the revision stored in the
# form by the GET request doesn't match what we've just loaded while
# processing the POST request.
if data.get("revision", "") != self.saved_params.get("revision", ""):
if self.saved_params.get("revision"):
msg = _("Saved configuration has changed since form was loaded.")
else:
msg = _("Failed to retrieve existing configuration for update.")
raise forms.ValidationError(msg)
if data.get("puppet_action", "none") != "none":
if not (data.get("repo_key_create", False) or self.saved_params.get("repo_key")):
msg = _("The selected Puppet action requires a deployment key.")
self._errors["puppet_action"] = self.error_class([msg])
elif not (data.get("eyaml_update") or (self.saved_params.get("eyaml_key") and self.saved_params.get("eyaml_cert"))):
msg = _("The selected Puppet action requires a Hiera eyaml certificate/key pair.")
self._errors["puppet_action"] = self.error_class([msg])
if data.get("eyaml_update", "") == "import":
if not data.get("eyaml_key_upload"):
msg = _("No private key specified to import.")
self._errors["eyaml_key_upload"] = self.error_class([msg])
if not data.get("eyaml_cert_upload"):
msg = _("No certificate specified to import.")
self._errors["eyaml_cert_upload"] = self.error_class([msg])
return data
def handle(self, request, data):
new_params = self.saved_params.copy()
if "repo_branch" in new_params:
del new_params["repo_branch"]
new_params.update([(k, v) for k, v in data.iteritems() if not re.match(SPECIAL_FIELDS_REGEX, k)])
try:
# Make sure the container exists first.
container = nci_private_container_name(request)
if not api.swift.swift_container_exists(request, container):
api.swift.swift_create_container(request, container)
if not api.swift.swift_object_exists(request, container, "README"):
msg = "**WARNING** Don't delete, rename or modify this container or any objects herein."
api.swift.swift_api(request).put_object(container,
"README",
msg,
content_type="text/plain")
# And check that a temporary URL key is defined as we'll need it
# when launching new instances.
if not ncicrypto.swift_get_temp_url_key(request):
LOG.debug("Generating temp URL secret key")
ncicrypto.swift_create_temp_url_key(request)
messages.success(request, _("Temporary URL key generated successfully."))
except:
exceptions.handle(request)
msg = _("Failed to save configuration.")
self.api_error(msg)
return False
if not self.stash.initialised:
LOG.debug("Configuring crypto stash")
try:
self.stash.init_params()
new_params["stash"] = self.stash.params
except:
exceptions.handle(request)
msg = _("Failed to setup crypto stash.")
self.api_error(msg)
return False
new_repo_key = None
new_eyaml_key = None
new_eyaml_cert = None
try:
if data.get("repo_key_create", False):
LOG.debug("Generating new deployment key")
try:
new_repo_key = self.stash.create_private_key()
new_params["repo_key"] = new_repo_key.metadata()
except:
exceptions.handle(request)
msg = _("Failed to generate deployment key.")
self.api_error(msg)
return False
eyaml_update = data.get("eyaml_update", "")
if eyaml_update:
try:
if eyaml_update == "create":
LOG.debug("Generating new eyaml key")
new_eyaml_key = self.stash.create_private_key()
elif eyaml_update == "import":
LOG.debug("Importing eyaml key")
new_eyaml_key = self.stash.import_private_key(data.get("eyaml_key_upload"))
assert new_eyaml_key
new_params["eyaml_key"] = new_eyaml_key.metadata()
except:
exceptions.handle(request)
msg = _("Failed to update Hiera eyaml key.")
self.api_error(msg)
return False
try:
if eyaml_update == "create":
LOG.debug("Generating new eyaml certificate")
new_eyaml_cert = self.stash.create_x509_cert(new_eyaml_key,
"hiera-eyaml-{0}".format(request.user.project_name),
100 * 365)
elif eyaml_update == "import":
LOG.debug("Importing eyaml certificate")
new_eyaml_cert = self.stash.import_x509_cert(data.get("eyaml_cert_upload"))
assert new_eyaml_cert
new_params["eyaml_cert"] = new_eyaml_cert.metadata()
except:
exceptions.handle(request)
msg = _("Failed to update Hiera eyaml certificate.")
self.api_error(msg)
return False
try:
if not new_eyaml_cert.verify_key_pair(new_eyaml_key):
msg = _("Hiera eyaml certificate was not signed with the given key.")
self.api_error(msg)
return False
except:
exceptions.handle(request)
msg = _("Failed to verify Hiera eyaml certificate/key pair.")
self.api_error(msg)
return False
if new_params != self.saved_params:
new_params["revision"] = datetime.datetime.utcnow().isoformat()
obj_data = json.dumps(new_params)
try:
config_obj_name = nci_vl_project_config_name()
if self.cfg_timestamp:
backup_name = "{0}_{1}".format(config_obj_name,
self.cfg_timestamp)
if not api.swift.swift_object_exists(request, container, backup_name):
LOG.debug("Backing up current project configuration")
api.swift.swift_copy_object(request,
container,
config_obj_name,
container,
backup_name)
elif api.swift.swift_object_exists(request, container, config_obj_name):
msg = _("Couldn't backup previous configuration. No timestamp available.")
messages.warning(request, msg)
LOG.debug("Saving project configuration")
api.swift.swift_api(request).put_object(container,
config_obj_name,
obj_data,
content_type="application/json")
except:
exceptions.handle(request)
msg = _("Failed to save configuration.")
self.api_error(msg)
return False
new_repo_key = None
new_eyaml_key = None
new_eyaml_cert = None
self.saved_params = new_params
messages.success(request, _("Configuration saved."))
finally:
try:
if new_repo_key:
LOG.debug("Rolling back deployment key generation")
self.stash.delete(new_repo_key)
except Exception as e:
LOG.exception("Error deleting orphaned deployment key: {0}".format(e))
try:
if new_eyaml_key:
LOG.debug("Rolling back eyaml key generation")
self.stash.delete(new_eyaml_key)
except Exception as e:
LOG.exception("Error deleting orphaned eyaml key: {0}".format(e))
try:
if new_eyaml_cert:
LOG.debug("Rolling back eyaml certificate generation")
self.stash.delete(new_eyaml_cert)
except Exception as e:
LOG.exception("Error deleting orphaned eyaml certificate: {0}".format(e))
return True
# vim:ts=4 et sw=4 sts=4:
|
|
"""Tools to extract features."""
import logging
import time
from typing import Tuple, Dict, Any, List, Optional
import cv2
import numpy as np
from opensfm import context, pyfeatures
logger = logging.getLogger(__name__)
class SemanticData:
segmentation: np.ndarray
instances: Optional[np.ndarray]
labels: List[Dict[str, Any]]
def __init__(
self,
segmentation: np.ndarray,
instances: Optional[np.ndarray],
labels: List[Dict[str, Any]],
):
self.segmentation = segmentation
self.instances = instances
self.labels = labels
def has_instances(self) -> bool:
return self.instances is not None
def mask(self, mask: np.ndarray) -> "SemanticData":
try:
segmentation = self.segmentation[mask]
instances = self.instances
if instances is not None:
instances = instances[mask]
except IndexError:
logger.error(
f"Invalid mask array of dtype {mask.dtype}, shape {mask.shape}: {mask}"
)
raise
return SemanticData(segmentation, instances, self.labels)
class FeaturesData:
points: np.ndarray
descriptors: Optional[np.ndarray]
colors: np.ndarray
semantic: Optional[SemanticData]
FEATURES_VERSION: int = 2
FEATURES_HEADER: str = "OPENSFM_FEATURES_VERSION"
def __init__(
self,
points: np.ndarray,
descriptors: Optional[np.ndarray],
colors: np.ndarray,
semantic: Optional[SemanticData],
):
self.points = points
self.descriptors = descriptors
self.colors = colors
self.semantic = semantic
def get_segmentation(self) -> Optional[np.ndarray]:
semantic = self.semantic
if not semantic:
return None
if semantic.segmentation is not None:
return semantic.segmentation
return None
def has_instances(self) -> bool:
semantic = self.semantic
if not semantic:
return False
return semantic.instances is not None
def mask(self, mask: np.ndarray) -> "FeaturesData":
if self.semantic:
masked_semantic = self.semantic.mask(mask)
else:
masked_semantic = None
return FeaturesData(
self.points[mask],
self.descriptors[mask] if self.descriptors is not None else None,
self.colors[mask],
masked_semantic,
)
def save(self, fileobject: Any, config: Dict[str, Any]):
"""Save features from file (path like or file object like)"""
feature_type = config["feature_type"]
if (
(
feature_type == "AKAZE"
and config["akaze_descriptor"] in ["MLDB_UPRIGHT", "MLDB"]
)
or (feature_type == "HAHOG" and config["hahog_normalize_to_uchar"])
or (feature_type == "ORB")
):
feature_data_type = np.uint8
else:
feature_data_type = np.float32
descriptors = self.descriptors
if descriptors is None:
raise RuntimeError("No descriptors found, canot save features data.")
semantic = self.semantic
if semantic:
np.savez_compressed(
fileobject,
points=self.points.astype(np.float32),
descriptors=descriptors.astype(feature_data_type),
colors=self.colors,
segmentations=semantic.segmentation,
instances=semantic.instances,
segmentation_labels=semantic.labels,
OPENSFM_FEATURES_VERSION=self.FEATURES_VERSION,
allow_pickle=True,
)
else:
np.savez_compressed(
fileobject,
points=self.points.astype(np.float32),
descriptors=descriptors.astype(feature_data_type),
colors=self.colors,
segmentations=None,
instances=None,
segmentation_labels=None,
OPENSFM_FEATURES_VERSION=self.FEATURES_VERSION,
allow_pickle=True,
)
@classmethod
def from_file(cls, fileobject: Any, config: Dict[str, Any]) -> "FeaturesData":
"""Load features from file (path like or file object like)"""
s = np.load(fileobject, allow_pickle=True)
version = cls._features_file_version(s)
return getattr(cls, "_from_file_v%d" % version)(s, config)
@classmethod
def _features_file_version(cls, obj: Dict[str, Any]) -> int:
"""Retrieve features file version. Return 0 if none"""
if cls.FEATURES_HEADER in obj:
return obj[cls.FEATURES_HEADER]
else:
return 0
@classmethod
def _from_file_v0(
cls, data: Dict[str, np.ndarray], config: Dict[str, Any]
) -> "FeaturesData":
"""Base version of features file
Scale (desc[2]) set to reprojection_error_sd by default (legacy behaviour)
"""
feature_type = config["feature_type"]
if feature_type == "HAHOG" and config["hahog_normalize_to_uchar"]:
descriptors = data["descriptors"].astype(np.float32)
else:
descriptors = data["descriptors"]
points = data["points"]
points[:, 2:3] = config["reprojection_error_sd"]
return FeaturesData(points, descriptors, data["colors"].astype(float), None)
@classmethod
def _from_file_v1(
cls, data: Dict[str, np.ndarray], config: Dict[str, Any]
) -> "FeaturesData":
"""Version 1 of features file
Scale is now properly set higher in the pipeline; the default is gone.
"""
feature_type = config["feature_type"]
if feature_type == "HAHOG" and config["hahog_normalize_to_uchar"]:
descriptors = data["descriptors"].astype(np.float32)
else:
descriptors = data["descriptors"]
return FeaturesData(
data["points"], descriptors, data["colors"].astype(float), None
)
@classmethod
def _from_file_v2(
cls,
data: Dict[str, Any],
config: Dict[str, Any],
) -> "FeaturesData":
"""Version 2 of features file
Added segmentation and segmentation labels.
"""
feature_type = config["feature_type"]
if feature_type == "HAHOG" and config["hahog_normalize_to_uchar"]:
descriptors = data["descriptors"].astype(np.float32)
else:
descriptors = data["descriptors"]
has_segmentation = (data["segmentations"] != None).all()
has_instances = (data["instances"] != None).all()
if has_segmentation or has_instances:
semantic_data = SemanticData(
data["segmentations"] if has_segmentation else None,
data["instances"] if has_instances else None,
data["segmentation_labels"],
)
else:
semantic_data = None
return FeaturesData(
data["points"], descriptors, data["colors"].astype(float), semantic_data
)
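# --- Illustrative sketch (not part of the original module) ---
# A hypothetical round trip through the versioned feature file format above,
# writing to an in-memory buffer instead of a real path. The config keys are
# the ones read by FeaturesData.save; the values are made up for the example.
def _example_features_roundtrip():
    import io
    config = {"feature_type": "HAHOG", "hahog_normalize_to_uchar": False}
    data = FeaturesData(
        points=np.random.rand(10, 4).astype(np.float32),
        descriptors=np.random.rand(10, 128).astype(np.float32),
        colors=np.random.rand(10, 3),
        semantic=None,
    )
    buf = io.BytesIO()
    data.save(buf, config)
    buf.seek(0)
    # from_file dispatches on the stored OPENSFM_FEATURES_VERSION field.
    loaded = FeaturesData.from_file(buf, config)
    assert loaded.points.shape == (10, 4)
    return loaded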
def resized_image(image: np.ndarray, max_size: int) -> np.ndarray:
"""Resize image to feature_process_size."""
h, w = image.shape[:2]
size = max(w, h)
if 0 < max_size < size:
dsize = w * max_size // size, h * max_size // size
return cv2.resize(image, dsize=dsize, interpolation=cv2.INTER_AREA)
else:
return image
def root_feature(desc: np.ndarray, l2_normalization: bool = False) -> np.ndarray:
if l2_normalization:
s2 = np.linalg.norm(desc, axis=1)
desc = (desc.T / s2).T
s = np.sum(desc, 1)
desc = np.sqrt(desc.T / s).T
return desc
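# --- Illustrative sketch (not part of the original module) ---
# The mapping above is the "RootSIFT"-style trick: descriptors are
# L1-normalized and square-rooted, so that Euclidean distance on the result
# behaves like the Hellinger kernel on the originals. A hypothetical check,
# assuming only numpy and root_feature as defined above:
def _example_root_feature():
    desc = np.random.rand(5, 128).astype(np.float32)
    rooted = root_feature(desc)
    # After L1 normalization, sum(sqrt(x)**2) == sum(x) == 1 per row, so each
    # rooted descriptor ends up with unit L2 norm.
    assert np.allclose(np.linalg.norm(rooted, axis=1), 1.0, atol=1e-5)
    return rooted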
def root_feature_surf(
desc: np.ndarray, l2_normalization: bool = False, partial: bool = False
) -> np.ndarray:
"""
Experimental square root mapping of SURF-like features; only works for 64-dim SURF for now
"""
if desc.shape[1] == 64:
if l2_normalization:
s2 = np.linalg.norm(desc, axis=1)
desc = (desc.T / s2).T
if partial:
ii = np.array([i for i in range(64) if (i % 4 == 2 or i % 4 == 3)])
else:
ii = np.arange(64)
desc_sub = np.abs(desc[:, ii])
desc_sub_sign = np.sign(desc[:, ii])
# s_sub = np.sum(desc_sub, 1) # This partial normalization gives slightly better results for AKAZE surf
s_sub = np.sum(np.abs(desc), 1)
desc_sub = np.sqrt(desc_sub.T / s_sub).T
desc[:, ii] = desc_sub * desc_sub_sign
return desc
def normalized_image_coordinates(
pixel_coords: np.ndarray, width: int, height: int
) -> np.ndarray:
size = max(width, height)
p = np.empty((len(pixel_coords), 2))
p[:, 0] = (pixel_coords[:, 0] + 0.5 - width / 2.0) / size
p[:, 1] = (pixel_coords[:, 1] + 0.5 - height / 2.0) / size
return p
def denormalized_image_coordinates(
norm_coords: np.ndarray, width: int, height: int
) -> np.ndarray:
size = max(width, height)
p = np.empty((len(norm_coords), 2))
p[:, 0] = norm_coords[:, 0] * size - 0.5 + width / 2.0
p[:, 1] = norm_coords[:, 1] * size - 0.5 + height / 2.0
return p
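# --- Illustrative sketch (not part of the original module) ---
# A hypothetical round trip through the two helpers above: pixel coordinates
# are mapped so that the longer image side spans roughly [-0.5, 0.5] and then
# mapped back. Only numpy and the functions defined above are assumed.
def _example_coordinate_roundtrip():
    width, height = 640, 480
    pixels = np.array([[0.0, 0.0], [639.0, 479.0], [320.0, 240.0]])
    norm = normalized_image_coordinates(pixels, width, height)
    back = denormalized_image_coordinates(norm, width, height)
    assert np.allclose(pixels, back)
    return norm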
def normalize_features(
points: np.ndarray, desc: np.ndarray, colors: np.ndarray, width: int, height: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray,]:
"""Normalize feature coordinates and size."""
points[:, :2] = normalized_image_coordinates(points[:, :2], width, height)
points[:, 2:3] /= max(width, height)
return points, desc, colors
def _in_mask(point: np.ndarray, width: int, height: int, mask: np.ndarray) -> bool:
"""Check if a point is inside a binary mask."""
u = mask.shape[1] * (point[0] + 0.5) / width
v = mask.shape[0] * (point[1] + 0.5) / height
return mask[int(v), int(u)] != 0
def extract_features_sift(
image: np.ndarray, config: Dict[str, Any], features_count: int
) -> Tuple[np.ndarray, np.ndarray]:
sift_edge_threshold = config["sift_edge_threshold"]
sift_peak_threshold = float(config["sift_peak_threshold"])
# SIFT support is in cv2 main from version 4.4.0
if context.OPENCV44 or context.OPENCV5:
# OpenCV versions concerned /** 3.4.11, >= 4.4.0 **/ ==> SIFT has been patent-free since March 2020
detector = cv2.SIFT_create(
edgeThreshold=sift_edge_threshold, contrastThreshold=sift_peak_threshold
)
descriptor = detector
elif context.OPENCV3 or context.OPENCV4:
try:
# OpenCV versions concerned /** 3.2.x, 3.3.x, 3.4.0, 3.4.1, 3.4.2, 3.4.10, 4.3.0, 4.4.0 **/
detector = cv2.xfeatures2d.SIFT_create(
edgeThreshold=sift_edge_threshold, contrastThreshold=sift_peak_threshold
)
except AttributeError as ae:
# OpenCV versions concerned /** 3.4.3, 3.4.4, 3.4.5, 3.4.6, 3.4.7, 3.4.8, 3.4.9, 4.0.x, 4.1.x, 4.2.x **/
if "no attribute 'xfeatures2d'" in str(ae):
logger.error(
"OpenCV Contrib modules are required to extract SIFT features"
)
raise
descriptor = detector
else:
detector = cv2.FeatureDetector_create("SIFT")
descriptor = cv2.DescriptorExtractor_create("SIFT")
detector.setDouble("edgeThreshold", sift_edge_threshold)
while True:
logger.debug("Computing sift with threshold {0}".format(sift_peak_threshold))
t = time.time()
# SIFT support is in cv2 main from version 4.4.0
if context.OPENCV44 or context.OPENCV5:
detector = cv2.SIFT_create(
edgeThreshold=sift_edge_threshold, contrastThreshold=sift_peak_threshold
)
elif context.OPENCV3:
detector = cv2.xfeatures2d.SIFT_create(
edgeThreshold=sift_edge_threshold, contrastThreshold=sift_peak_threshold
)
else:
detector.setDouble("contrastThreshold", sift_peak_threshold)
points = detector.detect(image)
logger.debug("Found {0} points in {1}s".format(len(points), time.time() - t))
if len(points) < features_count and sift_peak_threshold > 0.0001:
sift_peak_threshold = (sift_peak_threshold * 2) / 3
logger.debug("reducing threshold")
else:
logger.debug("done")
break
points, desc = descriptor.compute(image, points)
if desc is not None:
if config["feature_root"]:
desc = root_feature(desc)
points = np.array([(i.pt[0], i.pt[1], i.size, i.angle) for i in points])
else:
points = np.array(np.zeros((0, 3)))
desc = np.array(np.zeros((0, 3)))
return points, desc
def extract_features_surf(
image: np.ndarray, config: Dict[str, Any], features_count: int
) -> Tuple[np.ndarray, np.ndarray]:
surf_hessian_threshold = config["surf_hessian_threshold"]
if context.OPENCV3:
try:
detector = cv2.xfeatures2d.SURF_create()
except AttributeError as ae:
if "no attribute 'xfeatures2d'" in str(ae):
logger.error(
"OpenCV Contrib modules are required to extract SURF features"
)
raise
descriptor = detector
detector.setHessianThreshold(surf_hessian_threshold)
detector.setNOctaves(config["surf_n_octaves"])
detector.setNOctaveLayers(config["surf_n_octavelayers"])
detector.setUpright(config["surf_upright"])
else:
detector = cv2.FeatureDetector_create("SURF")
descriptor = cv2.DescriptorExtractor_create("SURF")
detector.setDouble("hessianThreshold", surf_hessian_threshold)
detector.setDouble("nOctaves", config["surf_n_octaves"])
detector.setDouble("nOctaveLayers", config["surf_n_octavelayers"])
detector.setInt("upright", config["surf_upright"])
while True:
logger.debug("Computing surf with threshold {0}".format(surf_hessian_threshold))
t = time.time()
if context.OPENCV3:
detector.setHessianThreshold(surf_hessian_threshold)
else:
detector.setDouble(
"hessianThreshold", surf_hessian_threshold
) # default: 0.04
points = detector.detect(image)
logger.debug("Found {0} points in {1}s".format(len(points), time.time() - t))
if len(points) < features_count and surf_hessian_threshold > 0.0001:
surf_hessian_threshold = (surf_hessian_threshold * 2) / 3
logger.debug("reducing threshold")
else:
logger.debug("done")
break
points, desc = descriptor.compute(image, points)
if desc is not None:
if config["feature_root"]:
desc = root_feature(desc)
points = np.array([(i.pt[0], i.pt[1], i.size, i.angle) for i in points])
else:
points = np.array(np.zeros((0, 3)))
desc = np.array(np.zeros((0, 3)))
return points, desc
def akaze_descriptor_type(name: str) -> pyfeatures.AkazeDescriptorType:
d = pyfeatures.AkazeDescriptorType.__dict__
if name in d:
return d[name]
else:
logger.debug("Wrong akaze descriptor type")
return d["MSURF"]
def extract_features_akaze(
image: np.ndarray, config: Dict[str, Any], features_count: int
) -> Tuple[np.ndarray, np.ndarray]:
options = pyfeatures.AKAZEOptions()
options.omax = config["akaze_omax"]
akaze_descriptor_name = config["akaze_descriptor"]
options.descriptor = akaze_descriptor_type(akaze_descriptor_name)
options.descriptor_size = config["akaze_descriptor_size"]
options.descriptor_channels = config["akaze_descriptor_channels"]
options.dthreshold = config["akaze_dthreshold"]
options.kcontrast_percentile = config["akaze_kcontrast_percentile"]
options.use_isotropic_diffusion = config["akaze_use_isotropic_diffusion"]
options.target_num_features = features_count
options.use_adaptive_suppression = config["feature_use_adaptive_suppression"]
logger.debug("Computing AKAZE with threshold {0}".format(options.dthreshold))
t = time.time()
points, desc = pyfeatures.akaze(image, options)
logger.debug("Found {0} points in {1}s".format(len(points), time.time() - t))
if config["feature_root"]:
if akaze_descriptor_name in ["SURF_UPRIGHT", "MSURF_UPRIGHT"]:
desc = root_feature_surf(desc, partial=True)
elif akaze_descriptor_name in ["SURF", "MSURF"]:
desc = root_feature_surf(desc, partial=False)
points = points.astype(float)
return points, desc
def extract_features_hahog(
image: np.ndarray, config: Dict[str, Any], features_count: int
) -> Tuple[np.ndarray, np.ndarray]:
t = time.time()
points, desc = pyfeatures.hahog(
image.astype(np.float32) / 255, # VlFeat expects pixel values between 0, 1
peak_threshold=config["hahog_peak_threshold"],
edge_threshold=config["hahog_edge_threshold"],
target_num_features=features_count,
)
if config["feature_root"]:
desc = np.sqrt(desc)
uchar_scaling = 362 # x * 512 < 256 => sqrt(x) * 362 < 256
else:
uchar_scaling = 512
if config["hahog_normalize_to_uchar"]:
desc = (uchar_scaling * desc).clip(0, 255).round()
logger.debug("Found {0} points in {1}s".format(len(points), time.time() - t))
return points, desc
def extract_features_orb(
image: np.ndarray, config: Dict[str, Any], features_count: int
) -> Tuple[np.ndarray, np.ndarray]:
if context.OPENCV3:
detector = cv2.ORB_create(nfeatures=features_count)
descriptor = detector
else:
detector = cv2.FeatureDetector_create("ORB")
descriptor = cv2.DescriptorExtractor_create("ORB")
detector.setDouble("nFeatures", features_count)
logger.debug("Computing ORB")
t = time.time()
points = detector.detect(image)
points, desc = descriptor.compute(image, points)
if desc is not None:
points = np.array([(i.pt[0], i.pt[1], i.size, i.angle) for i in points])
else:
points = np.array(np.zeros((0, 3)))
desc = np.array(np.zeros((0, 3)))
logger.debug("Found {0} points in {1}s".format(len(points), time.time() - t))
return points, desc
def extract_features(
image: np.ndarray, config: Dict[str, Any], is_panorama: bool
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Detect features in a color or gray-scale image.
The type of feature detected is determined by the ``feature_type``
config option.
The coordinates of the detected points are returned in normalized
image coordinates.
Parameters:
- image: a color image with shape (h, w, 3) or
gray-scale image with (h, w) or (h, w, 1)
- config: the configuration structure
- is_panorama : if True, alternate settings are used for feature count and extraction size.
Returns:
tuple:
- points: ``x``, ``y``, ``size`` and ``angle`` for each feature
- descriptors: the descriptor of each feature
- colors: the color of the center of each feature
"""
extraction_size = (
config["feature_process_size_panorama"]
if is_panorama
else config["feature_process_size"]
)
features_count = (
config["feature_min_frames_panorama"]
if is_panorama
else config["feature_min_frames"]
)
assert len(image.shape) == 3 or len(image.shape) == 2
image = resized_image(image, extraction_size)
if len(image.shape) == 2: # convert (h, w) to (h, w, 1)
image = np.expand_dims(image, axis=2)
# convert color to gray-scale if necessary
if image.shape[2] == 3:
image_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
else:
image_gray = image
feature_type = config["feature_type"].upper()
if feature_type == "SIFT":
points, desc = extract_features_sift(image_gray, config, features_count)
elif feature_type == "SURF":
points, desc = extract_features_surf(image_gray, config, features_count)
elif feature_type == "AKAZE":
points, desc = extract_features_akaze(image_gray, config, features_count)
elif feature_type == "HAHOG":
points, desc = extract_features_hahog(image_gray, config, features_count)
elif feature_type == "ORB":
points, desc = extract_features_orb(image_gray, config, features_count)
else:
raise ValueError(
"Unknown feature type " "(must be SURF, SIFT, AKAZE, HAHOG or ORB)"
)
xs = points[:, 0].round().astype(int)
ys = points[:, 1].round().astype(int)
colors = image[ys, xs]
if image.shape[2] == 1:
colors = np.repeat(colors, 3).reshape((-1, 3))
return normalize_features(points, desc, colors, image.shape[1], image.shape[0])
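# --- Illustrative sketch (not part of the original module) ---
# A hypothetical call to extract_features. The config keys listed are the
# ones read in the code above; the values are invented for the example and
# the input is a random RGB array rather than a real photograph.
def _example_extract_features():
    config = {
        "feature_type": "HAHOG",
        "feature_root": True,
        "hahog_peak_threshold": 1e-5,
        "hahog_edge_threshold": 10,
        "hahog_normalize_to_uchar": False,
        "feature_min_frames": 4000,
        "feature_min_frames_panorama": 16000,
        "feature_process_size": 1024,
        "feature_process_size_panorama": 4096,
    }
    image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
    points, desc, colors = extract_features(image, config, is_panorama=False)
    # points holds normalized x, y plus size and angle; colors is one RGB
    # triplet per feature sampled from the (possibly resized) image.
    return points, desc, colors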
def build_flann_index(descriptors: np.ndarray, config: Dict[str, Any]) -> Any:
# FLANN_INDEX_LINEAR = 0
FLANN_INDEX_KDTREE = 1
FLANN_INDEX_KMEANS = 2
# FLANN_INDEX_COMPOSITE = 3
# FLANN_INDEX_KDTREE_SINGLE = 4
# FLANN_INDEX_HIERARCHICAL = 5
if descriptors.dtype.type is np.float32:
algorithm_type = config["flann_algorithm"].upper()
if algorithm_type == "KMEANS":
FLANN_INDEX_METHOD = FLANN_INDEX_KMEANS
elif algorithm_type == "KDTREE":
FLANN_INDEX_METHOD = FLANN_INDEX_KDTREE
else:
raise ValueError("Unknown flann algorithm type " "must be KMEANS, KDTREE")
flann_params = {
"algorithm": FLANN_INDEX_METHOD,
"branching": config["flann_branching"],
"iterations": config["flann_iterations"],
"tree": config["flann_tree"],
}
else:
raise ValueError(
"FLANN isn't supported for binary features because of poor-performance. Use BRUTEFORCE instead."
)
return context.flann_Index(descriptors, flann_params)
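# --- Illustrative sketch (not part of the original module) ---
# A hypothetical use of the index built above for approximate nearest-
# neighbour matching between two float32 descriptor sets. The "flann_*"
# config keys are the ones read by build_flann_index; "flann_checks" and the
# knnSearch call are assumptions about the cv2 FLANN wrapper, not part of
# this module.
def _example_flann_match(desc1, desc2, config):
    index = build_flann_index(desc1.astype(np.float32), config)
    search_params = dict(checks=config.get("flann_checks", 20))
    # Two nearest neighbours per query descriptor, e.g. for a ratio test.
    idx, dist = index.knnSearch(desc2.astype(np.float32), 2, params=search_params)
    return idx, dist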
|
|
from nose.tools import eq_
import hashlib
import json
import nose
from js_helper import _do_real_test_raw as _js_test
from validator.testcases.markup.markuptester import MarkupParser
import validator.testcases.jetpack as jetpack
from validator.errorbundler import ErrorBundle
from validator.xpi import XPIManager
def _do_test(xpi_package, allow_old_sdk=True):
err = ErrorBundle()
jetpack.inspect_jetpack(err, xpi_package, allow_old_sdk=allow_old_sdk)
return err
class MockXPI(object):
def __init__(self, resources):
self.resources = resources
def read(self, name):
if isinstance(self.resources[name], bool):
return ''
return self.resources[name]
def __iter__(self):
for name in self.resources.keys():
yield name
def __contains__(self, name):
return name in self.resources
def test_not_jetpack():
"""Test that add-ons which do not match the Jetpack pattern are ignored."""
err = _do_test(MockXPI({'foo': True, 'bar': True}))
assert not err.errors
assert not err.warnings
assert not err.notices
eq_(err.metadata.get('is_jetpack', False), False)
def test_package_json_jetpack():
"""Test that add-ons with the new package.json are treated as jetpack."""
err = _do_test(MockXPI({'bootstrap.js': '', 'package.json': ''}))
assert not err.errors
assert not err.warnings
assert not err.notices
eq_(err.metadata.get('is_jetpack'), True)
def test_bad_harnessoptions():
"""Test that a malformed harness-options.json file is warned against."""
err = _do_test(MockXPI({'bootstrap.js': True,
'components/harness.js': True,
'harness-options.json': 'foo bar'}))
assert err.failed()
assert err.warnings
print err.warnings
assert err.warnings[0]['id'][-1] == 'bad_harness-options.json'
def test_pass_jetpack():
"""Test that a minimalistic Jetpack setup will pass."""
harnessoptions = {'sdkVersion': '1.17',
'jetpackID': '',
'manifest': {}}
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}))
print err.print_summary(verbose=True)
assert not err.failed()
assert 'is_jetpack' in err.metadata and err.metadata['is_jetpack']
# Test that all files are marked as pretested.
pretested_files = err.get_resource('pretested_files')
assert pretested_files
assert 'bootstrap.js' in pretested_files
def test_package_json_pass_jetpack():
"""Test that a minimalistic package.json Jetpack setup will pass."""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'package.json': '{}'}))
print err.print_summary(verbose=True)
assert not err.failed()
assert 'is_jetpack' in err.metadata and err.metadata['is_jetpack']
# Test that all files are marked as pretested.
pretested_files = err.get_resource('pretested_files')
assert pretested_files
assert 'bootstrap.js' in pretested_files
def test_package_json_different_bootstrap():
"""Test that a minimalistic package.json Jetpack setup will pass."""
err = _do_test(MockXPI({'bootstrap.js': "var foo = 'bar';",
'package.json': '{}'}))
print err.print_summary(verbose=True)
assert not err.failed()
assert 'is_jetpack' in err.metadata and err.metadata['is_jetpack']
# Test that all files are not marked as pretested.
pretested_files = err.get_resource('pretested_files')
assert not pretested_files
assert 'bootstrap.js' not in pretested_files
def test_missing_elements():
"""Test that missing elements in harness-options will fail."""
harnessoptions = {'sdkVersion': '1.17',
'jetpackID': ''}
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}))
assert err.failed()
def test_skip_safe_files():
"""Test that missing elements in harness-options will fail."""
harnessoptions = {'sdkVersion': '1.17',
'jetpackID': '',
'manifest': {}}
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions),
'foo.png': True,
'bar.JpG': True,
'safe.GIF': True,
'icon.ico': True,
'foo/.DS_Store': True}))
assert not err.failed()
def test_pass_manifest_elements():
"""Test that proper elements in harness-options will pass."""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
bootstrap_hash = hashlib.sha256(bootstrap).hexdigest()
harnessoptions = {
'jetpackID': 'foobar',
'sdkVersion': '1.17',
'manifest': {
'bootstrap.js':
{'requirements': {},
'packageName': 'addon-kit',
'sectionName': 'lib',
'moduleName': 'drawing',
'jsSHA256': bootstrap_hash,
'docsSHA256': bootstrap_hash}}}
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions),
'resources/bootstrap.js': bootstrap}))
print err.print_summary(verbose=True)
assert not err.failed()
assert 'jetpack_loaded_modules' in err.metadata
nose.tools.eq_(err.metadata['jetpack_loaded_modules'],
['addon-kit-lib/drawing.js'])
assert 'jetpack_identified_files' in err.metadata
assert 'identified_files' in err.metadata
assert 'bootstrap.js' in err.metadata['jetpack_identified_files']
assert 'bootstrap.js' in err.metadata['identified_files']
assert 'jetpack_unknown_files' in err.metadata
assert not err.metadata['jetpack_unknown_files']
def test_ok_resource():
"""Test that resource:// URIs aren't flagged."""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
bootstrap_hash = hashlib.sha256(bootstrap).hexdigest()
harnessoptions = {
'jetpackID': 'foobar',
'sdkVersion': '1.17',
'manifest': {
'resource://bootstrap.js':
{'requirements': {},
'packageName': 'addon-kit',
'sectionName': 'lib',
'moduleName': 'drawing',
'jsSHA256': bootstrap_hash,
'docsSHA256': bootstrap_hash}}}
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'resources/bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}))
print err.print_summary(verbose=True)
assert not err.failed()
def test_bad_resource():
"""Test for failure on non-resource:// modules."""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
bootstrap_hash = hashlib.sha256(bootstrap).hexdigest()
harnessoptions = {
'sdkVersion': '1.17',
'jetpackID': 'foobar',
'manifest':
{'http://foo.com/bar/bootstrap.js':
{'requirements': {},
'packageName': 'addon-kit',
'sectionName': 'lib',
'moduleName': 'drawing',
'jsSHA256': bootstrap_hash,
'docsSHA256': bootstrap_hash}}}
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'resources/bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}))
print err.print_summary(verbose=True)
assert err.failed()
def test_missing_manifest_elements():
"""Test that missing manifest elements in harness-options will fail."""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
bootstrap_hash = hashlib.sha256(bootstrap).hexdigest()
harnessoptions = {
'sdkVersion': '1.17',
'jetpackID': 'foobar',
'manifest':
{'resource://bootstrap.js':
{'requirements': {},
'packageName': 'addon-kit',
'moduleName': 'drawing',
'jsSHA256': bootstrap_hash,
'docsSHA256': bootstrap_hash}}}
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'resources/bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}))
print err.print_summary(verbose=True)
assert err.failed()
def test_mismatched_hash():
"""
Test that failure occurs when the actual file hash doesn't match the hash
provided by harness-options.js.
"""
harnessoptions = {
'sdkVersion': '1.17',
'jetpackID': 'foobar',
'manifest':
{'resource://bootstrap.js':
{'requirements': {},
'packageName': 'addon-kit',
'moduleName': 'drawing',
'jsSHA256': '',
'docsSHA256': ''}}}
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'resources/bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}))
print err.print_summary(verbose=True)
assert err.failed()
def test_mismatched_db_hash():
"""
Test that failure occurs when the hash of a file doesn't exist in the
Jetpack known file database.
"""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
# Break the hash with this.
bootstrap = 'function() {}; %s' % bootstrap
bootstrap_hash = hashlib.sha256(bootstrap).hexdigest()
harnessoptions = {
'sdkVersion': '1.17',
'jetpackID': 'foobar',
'manifest':
{'resource://bootstrap.js':
{'requirements': {},
'packageName': 'addon-kit',
'moduleName': 'drawing',
'sectionName': 'lib',
'jsSHA256': bootstrap_hash,
'docsSHA256': bootstrap_hash}}}
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'resources/bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}))
print err.print_summary(verbose=True)
assert not err.failed()
assert 'jetpack_loaded_modules' in err.metadata
assert not err.metadata['jetpack_loaded_modules']
assert 'jetpack_identified_files' in err.metadata
assert 'jetpack_unknown_files' in err.metadata
unknown_files = err.metadata['jetpack_unknown_files']
nose.tools.eq_(len(unknown_files), 2)
nose.tools.ok_('bootstrap.js' in unknown_files)
nose.tools.ok_('resources/bootstrap.js' in unknown_files)
def test_mismatched_module_version():
"""
Tests that add-ons using modules from a version of the SDK
other than the version they claim are flagged.
"""
xpi = XPIManager('tests/resources/jetpack/jetpack-1.8-pretending-1.8.1.xpi')
err = _do_test(xpi)
assert err.failed()
assert any(w['id'][2] == 'mismatched_version' for w in err.warnings)
def test_new_module_location_spec():
"""
Tests that we don't fail for missing modules in add-ons generated with
newer versions of the SDK.
"""
xpi = XPIManager('tests/resources/jetpack/jetpack-1.14.xpi')
err = _do_test(xpi)
assert not any(w['id'][2] == 'missing_jetpack_module'
for w in err.warnings)
def test_components_flagged():
"""Test that `Components` is flagged in Jetpack."""
js = """
var x = Components.services.foo.bar;
"""
assert not _js_test(js).failed()
assert _js_test(js, jetpack=True).failed()
def test_safe_require():
"""Test that requiring an innocuous module does not add the
requires_chrome flag."""
def base_case():
err = _js_test("""var foo = require("bar");""",
jetpack=True)
eq_(err.metadata['requires_chrome'], False)
yield base_case
def test_unsafe_safe_require():
"""Test that requiring low-level modules does add the requires_chrome
flag."""
interfaces = ['chrome', 'window-utils', 'observer-service']
def interface_cases(interface):
err = _js_test("""var {cc, ci} = require("%s")""" % interface,
jetpack=True)
print err.print_summary(verbose=True)
first_message = err.warnings[0]['message']
assert 'non-SDK interface' in first_message, ('unexpected: %s' %
first_message)
assert 'requires_chrome' in err.metadata, \
'unexpected: "requires_chrome" should be in metadata'
eq_(err.metadata['requires_chrome'], True)
for case in interfaces:
yield interface_cases, case
def test_absolute_uris_in_js():
"""
Test that a warning is thrown for absolute URIs within JS files.
"""
bad_js = 'alert("resource://foo-data/bar/zap.png");'
assert not _js_test(bad_js).failed()
err = _js_test(bad_js, jetpack=True)
assert err.failed()
assert err.compat_summary['errors']
# Test that literals are inspected even if they're the result of an
# operation.
bad_js = 'alert("resou" + "rce://foo-" + "data/bar/zap.png");'
assert not _js_test(bad_js).failed()
err = _js_test(bad_js, jetpack=True)
assert err.failed()
assert err.compat_summary['errors']
def test_observer_service_flagged():
assert _js_test("""
var {Ci} = require("chrome");
thing.QueryInterface(Ci.nsIObserverService);
""", jetpack=True).failed()
assert not _js_test("""
thing.QueryInterface(Ci.nsIObserverService);
""").failed()
def test_absolute_uris_in_markup():
"""
Test that a warning is thrown for absolute URIs within markup files.
"""
err = ErrorBundle()
bad_html = '<foo><bar src="resource://foo-data/bar/zap.png" /></foo>'
parser = MarkupParser(err)
parser.process('foo.html', bad_html, 'html')
assert not err.failed()
err.metadata['is_jetpack'] = True
parser = MarkupParser(err)
parser.process('foo.html', bad_html, 'html')
assert err.failed()
assert err.compat_summary['errors']
def test_bad_sdkversion():
"""Test that a redacted SDK version is not used."""
harnessoptions = {'sdkVersion': '1.4',
'jetpackID': '',
'manifest': {}}
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
with open('jetpack/addon-sdk/lib/sdk/test/harness.js') as harness_file:
harness = harness_file.read()
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'components/harness.js': harness,
'harness-options.json':
json.dumps(harnessoptions)}))
assert err.failed() and err.errors
def test_outdated_sdkversion():
"""
Tests that add-ons using a version other than the latest release
are warned against, but module hashes are still recognized.
"""
xpi = XPIManager('tests/resources/jetpack/jetpack-1.8-outdated.xpi')
err = _do_test(xpi, allow_old_sdk=False)
assert err.failed()
# Make sure we don't have any version mismatch warnings
eq_(len(err.warnings), 1)
eq_(err.warnings[0]['id'][2], 'outdated_version')
def test_future_sdkversion():
"""
Test that if the developer is using a version of the SDK that's newer than
the latest recognized version, we don't throw an error.
"""
xpi = XPIManager('tests/resources/jetpack/jetpack-1.8-future.xpi')
err = _do_test(xpi, allow_old_sdk=False)
print err.print_summary(verbose=True)
assert not err.failed()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import compat
class VariablesTestCase(test.TestCase):
def testInitialization(self):
with self.test_session():
var0 = variables.Variable(0.0)
self.assertEqual("Variable:0", var0.name)
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.get_shape())
var1 = variables.Variable(1.1)
self.assertEqual("Variable_1:0", var1.name)
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.get_shape())
with self.assertRaisesOpError("Attempting to use uninitialized value"):
var0.eval()
with self.assertRaisesOpError("Attempting to use uninitialized value"):
var1.eval()
variables.global_variables_initializer().run()
self.assertAllClose(0.0, var0.eval())
self.assertAllClose(1.1, var1.eval())
def testInitializationOrder(self):
with self.test_session():
rnd = variables.Variable(random_ops.random_uniform([3, 6]), name="rnd")
self.assertEqual("rnd:0", rnd.name)
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.get_shape())
dep = variables.Variable(rnd.initialized_value(), name="dep")
self.assertEqual("dep:0", dep.name)
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.get_shape())
# Currently have to set the shape manually for Add.
added_val = rnd.initialized_value() + dep.initialized_value() + 2.0
added_val.set_shape(rnd.get_shape())
depdep = variables.Variable(added_val, name="depdep")
self.assertEqual("depdep:0", depdep.name)
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.get_shape())
variables.global_variables_initializer().run()
self.assertAllClose(rnd.eval(), dep.eval())
self.assertAllClose(rnd.eval() + dep.eval() + 2.0, depdep.eval())
def testIterable(self):
with self.assertRaisesRegexp(TypeError, "not iterable"):
for _ in variables.Variable(0.0):
pass
with self.assertRaisesRegexp(TypeError, "not iterable"):
for _ in variables.Variable([0.0, 1.0]):
pass
def testAssignments(self):
with self.test_session():
var = variables.Variable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
variables.global_variables_initializer().run()
self.assertAllClose(0.0, var.eval())
self.assertAllClose(1.0, plus_one.eval())
self.assertAllClose(1.0, var.eval())
self.assertAllClose(-1.0, minus_one.eval())
self.assertAllClose(-1.0, var.eval())
self.assertAllClose(4.0, four.eval())
self.assertAllClose(4.0, var.eval())
def _countUpToTest(self, dtype):
with self.test_session():
zero = constant_op.constant(0, dtype=dtype)
var = variables.Variable(zero)
count_up_to = var.count_up_to(3)
variables.global_variables_initializer().run()
self.assertEqual(0, var.eval())
self.assertEqual(0, count_up_to.eval())
self.assertEqual(1, var.eval())
self.assertEqual(1, count_up_to.eval())
self.assertEqual(2, var.eval())
self.assertEqual(2, count_up_to.eval())
self.assertEqual(3, var.eval())
with self.assertRaisesOpError("Reached limit of 3"):
count_up_to.eval()
self.assertEqual(3, var.eval())
with self.assertRaisesOpError("Reached limit of 3"):
count_up_to.eval()
self.assertEqual(3, var.eval())
def testCountUpToInt32(self):
self._countUpToTest(dtypes.int32)
def testCountUpToInt64(self):
self._countUpToTest(dtypes.int64)
def testControlDepsNone(self):
with self.test_session():
c = constant_op.constant(1.0)
with ops.control_dependencies([c]):
# d gets the control dep.
d = constant_op.constant(2.0)
# variables do not.
var_x = variables.Variable(2.0)
# initialized_value does not either.
inited_x = var_x.initialized_value()
self.assertEqual([c.op], d.op.control_inputs)
self.assertEqual([], var_x.initializer.control_inputs)
self.assertEqual([], var_x.value().op.control_inputs)
self.assertEqual([], var_x._ref().op.control_inputs) # pylint: disable=protected-access
self.assertEqual([var_x.initializer], inited_x.op.control_inputs)
def testControlFlow(self):
with self.test_session() as sess:
v0 = variables.Variable(0, name="v0")
var_dict = {}
# Call get_variable in each of the cond clauses.
def var_in_then_clause():
v1 = variables.Variable(1, name="v1")
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variables.Variable(2, name="v2")
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual([1], sess.run(v1))
sess.run(v2.initializer)
self.assertEqual([2], sess.run(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
sess.run(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
sess.run(add)
def testUseVariableAsTensor(self):
with self.test_session():
var_x = variables.Variable(2.0)
var_y = variables.Variable(3.0)
variables.global_variables_initializer().run()
self.assertAllClose(2.0, var_x.eval())
self.assertAllClose(3.0, var_y.eval())
self.assertAllClose(5.0, math_ops.add(var_x, var_y).eval())
def testZeroSizeVarSameAsConst(self):
with self.test_session():
zero_size_var = variables.Variable(array_ops.zeros([0, 2]))
zero_size_const = array_ops.ones([2, 0])
variable_mul = math_ops.matmul(zero_size_const, zero_size_var)
const_mul = math_ops.matmul(
zero_size_const, zero_size_const, transpose_b=True)
variables.global_variables_initializer().run()
variable_output = variable_mul.eval()
self.assertAllClose(const_mul.eval(), variable_output)
self.assertAllClose([[0., 0.], [0., 0.]], variable_output)
def testCachingDevice(self):
with self.test_session():
var = variables.Variable(2.0)
self.assertEqual(var.device, var.value().device)
self.assertEqual(var.device, var.initialized_value().device)
var_cached = variables.Variable(2.0, caching_device="/job:foo")
self.assertFalse(var_cached.device.startswith("/job:foo"))
self.assertTrue(var_cached.value().device.startswith("/job:foo"))
self.assertTrue(var_cached.initialized_value().device.startswith(
"/job:foo"))
def testCollections(self):
with self.test_session():
var_x = variables.Variable(2.0)
var_y = variables.Variable(2.0, trainable=False)
var_z = variables.Variable(2.0, trainable=True)
var_t = variables.Variable(
2.0,
trainable=True,
collections=[
ops.GraphKeys.TRAINABLE_VARIABLES, ops.GraphKeys.GLOBAL_VARIABLES
])
self.assertEqual([var_x, var_y, var_z, var_t],
variables.global_variables())
self.assertEqual([var_x, var_z, var_t], variables.trainable_variables())
def testOperators(self):
with self.test_session():
var_f = variables.Variable([2.0])
add = var_f + 0.0
radd = 1.0 + var_f
sub = var_f - 1.0
rsub = 1.0 - var_f
mul = var_f * 10.0
rmul = 10.0 * var_f
div = var_f / 10.0
rdiv = 10.0 / var_f
lt = var_f < 3.0
rlt = 3.0 < var_f
le = var_f <= 2.0
rle = 2.0 <= var_f
gt = var_f > 3.0
rgt = 3.0 > var_f
ge = var_f >= 2.0
rge = 2.0 >= var_f
neg = -var_f
abs_v = abs(var_f)
var_i = variables.Variable([20])
mod = var_i % 7
rmod = 103 % var_i
var_b = variables.Variable([True, False])
and_v = operator.and_(var_b, [True, True])
or_v = operator.or_(var_b, [False, True])
xor_v = operator.xor(var_b, [False, False])
invert_v = ~var_b
rnd = np.random.rand(4, 4).astype("f")
var_t = variables.Variable(rnd)
slice_v = var_t[2, 0:0]
variables.global_variables_initializer().run()
self.assertAllClose([2.0], add.eval())
self.assertAllClose([3.0], radd.eval())
self.assertAllClose([1.0], sub.eval())
self.assertAllClose([-1.0], rsub.eval())
self.assertAllClose([20.0], mul.eval())
self.assertAllClose([20.0], rmul.eval())
self.assertAllClose([0.2], div.eval())
self.assertAllClose([5.0], rdiv.eval())
self.assertAllClose([-2.0], neg.eval())
self.assertAllClose([2.0], abs_v.eval())
self.assertAllClose([True], lt.eval())
self.assertAllClose([False], rlt.eval())
self.assertAllClose([True], le.eval())
self.assertAllClose([True], rle.eval())
self.assertAllClose([False], gt.eval())
self.assertAllClose([True], rgt.eval())
self.assertAllClose([True], ge.eval())
self.assertAllClose([True], rge.eval())
self.assertAllClose([6], mod.eval())
self.assertAllClose([3], rmod.eval())
self.assertAllClose([True, False], and_v.eval())
self.assertAllClose([True, True], or_v.eval())
self.assertAllClose([True, False], xor_v.eval())
self.assertAllClose([False, True], invert_v.eval())
self.assertAllClose(rnd[2, 0:0], slice_v.eval())
def testSession(self):
with self.test_session() as sess:
var = variables.Variable([1, 12])
variables.global_variables_initializer().run()
self.assertAllClose([1, 12], sess.run(var))
def testDevicePlacement(self):
with self.test_session() as sess:
with ops.device("/cpu:0"):
var = variables.Variable([1, 12])
init_value = var.initialized_value()
init_op = variables.global_variables_initializer()
self.assertEqual(var.op.device, init_value.device)
self.assertEqual(var.op.device, init_op.device)
sess.run(init_op)
def testColocation(self):
with ops.device("/job:ps"):
var = variables.Variable(0, name="v")
with ops.device("/job:worker/task:7"):
assign_op = var.assign(1)
self.assertDeviceEqual("/job:ps", assign_op.device)
self.assertEqual([b"loc:@v"], assign_op.op.colocation_groups())
def testInitializerFunction(self):
value = [[-42], [133.7]]
shape = [2, 1]
with self.test_session():
initializer = lambda: constant_op.constant(value)
v1 = variables.Variable(initializer, dtype=dtypes.float32)
self.assertEqual(shape, v1.get_shape())
self.assertAllClose(value, v1.initial_value.eval())
with self.assertRaises(errors_impl.FailedPreconditionError):
v1.eval()
v2 = variables.Variable(
math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
self.assertEqual(v1.get_shape(), v2.get_shape())
self.assertAllClose(np.negative(value), v2.initial_value.eval())
# Once v2.initial_value.eval() has been called, v1 has effectively been
# initialized.
self.assertAllClose(value, v1.eval())
with self.assertRaises(errors_impl.FailedPreconditionError):
v2.eval()
variables.global_variables_initializer().run()
self.assertAllClose(np.negative(value), v2.eval())
def testInitializerFunctionDevicePlacement(self):
with self.test_session():
initializer = lambda: constant_op.constant(42.0)
with ops.device("/cpu:100"):
v1 = variables.Variable(initializer, dtype=dtypes.float32, name="v1")
expected_device = "/device:CPU:100"
expected_group_v1 = [b"loc:@v1"]
self.assertEqual(expected_device, v1.op.device)
self.assertEqual(expected_group_v1, v1.op.colocation_groups())
for i in v1.initializer.inputs:
self.assertEqual(expected_group_v1, i.op.colocation_groups())
v2 = variables.Variable(initializer, dtype=dtypes.float32, name="v2")
expected_group_v2 = [b"loc:@v2"]
self.assertEqual(expected_group_v2, v2.op.colocation_groups())
for i in v2.initializer.inputs:
self.assertEqual(expected_group_v2, i.op.colocation_groups())
def testLoad(self):
with self.test_session():
var = variables.Variable(np.zeros((5, 5), np.float32))
variables.global_variables_initializer().run()
var.load(np.ones((5, 5), np.float32))
self.assertAllClose(np.ones((5, 5), np.float32), var.eval())
class IsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default(), self.test_session() as sess:
uninited = variables.report_uninitialized_variables()
self.assertEqual(0, sess.run(uninited).size)
def testAssertVariablesInitialized(self):
with ops.Graph().as_default(), self.test_session() as sess:
v = variables.Variable([1, 2], name="v")
w = variables.Variable([3, 4], name="w")
_ = v, w
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), sess.run(uninited))
variables.global_variables_initializer().run()
self.assertEqual(0, sess.run(uninited).size)
def testVariableList(self):
with ops.Graph().as_default(), self.test_session() as sess:
v = variables.Variable([1, 2], name="v")
w = variables.Variable([3, 4], name="w")
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), sess.run(uninited))
sess.run(w.initializer)
self.assertAllEqual(np.array([b"v"]), sess.run(uninited))
v.initializer.run()
self.assertEqual(0, sess.run(uninited).size)
def testZeroSizeVarInitialized(self):
with ops.Graph().as_default(), self.test_session() as sess:
v = variables.Variable(array_ops.zeros([0, 2]), name="v")
uninited = variables.report_uninitialized_variables()
v.initializer.run() # not strictly necessary
self.assertEqual(0, sess.run(uninited).size)
def testTrainingWithZeroSizeVar(self):
with ops.Graph().as_default(), self.test_session() as sess:
a = variables.Variable(array_ops.zeros([0, 2]))
b = variables.Variable(array_ops.ones([2, 2]))
objective = math_ops.reduce_sum(b + math_ops.matmul(
a, a, transpose_a=True))
variables.global_variables_initializer().run()
do_opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
objective)
sess.run([do_opt])
self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], b.eval())
class ObsoleteIsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default():
self.assertEqual(None, variables.assert_variables_initialized())
def testVariables(self):
with ops.Graph().as_default(), self.test_session() as sess:
v = variables.Variable([1, 2])
w = variables.Variable([3, 4])
_ = v, w
inited = variables.assert_variables_initialized()
with self.assertRaisesOpError("Attempting to use uninitialized value"):
sess.run(inited)
variables.global_variables_initializer().run()
sess.run(inited)
def testVariableList(self):
with ops.Graph().as_default(), self.test_session() as sess:
v = variables.Variable([1, 2])
w = variables.Variable([3, 4])
inited = variables.assert_variables_initialized([v])
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
sess.run(w.initializer)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
v.initializer.run()
inited.op.run()
class PartitionedVariableTest(test.TestCase):
def testPartitionedVariable(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
# Pass variable_list as [v1, v0] to ensure they are properly
# re-sorted to [v0, v1] based on their slice info offsets.
partitioned_variable = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
concatenated = ops.convert_to_tensor(partitioned_variable)
num_partitions = len(partitioned_variable)
iterated_partitions = list(partitioned_variable)
self.assertEqual(2, num_partitions)
self.assertEqual([v0, v1], iterated_partitions)
self.assertEqual([2], concatenated.get_shape())
def testPartitionedVariableFailures(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "empty"):
variables.PartitionedVariable(
name="fail",
shape=2,
dtype=dtypes.int32,
variable_list=[],
partitions=[])
with self.assertRaisesRegexp(ValueError, "must have a save_slice_info"):
v0 = variables.Variable([0])
partitions = [1]
variables.PartitionedVariable(
name="two_vars",
shape=[1],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
with self.assertRaisesRegexp(ValueError, "full shapes must match"):
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
variables.PartitionedVariable(
name="two_vars",
shape=[3],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
with self.assertRaisesRegexp(ValueError, "must be positive"):
v0 = variables.Variable([0])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
partitions = [0]
variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
class VariableContainerTest(test.TestCase):
def testContainer(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
with ops.container("l1"):
v1 = variables.Variable([1])
with ops.container("l2"):
v2 = variables.Variable([2])
special_v = gen_state_ops._variable(
shape=[1],
dtype=dtypes.float32,
name="VariableInL3",
container="l3",
shared_name="")
v3 = variables.Variable([3])
v4 = variables.Variable([4])
self.assertEqual(compat.as_bytes(""), v0.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v1.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l2"), v2.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l3"), special_v.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v3.op.get_attr("container"))
self.assertEqual(compat.as_bytes(""), v4.op.get_attr("container"))
if __name__ == "__main__":
test.main()
|
|
import warnings
from contextlib import contextmanager
import pytest
import capnp
import os
import platform
import test_regression
import tempfile
import pickle
import mmap
import sys
this_dir = os.path.dirname(__file__)
@pytest.fixture
def all_types():
return capnp.load(os.path.join(this_dir, "all_types.capnp"))
def test_roundtrip_file(all_types):
f = tempfile.TemporaryFile()
msg = all_types.TestAllTypes.new_message()
test_regression.init_all_types(msg)
msg.write(f)
f.seek(0)
msg = all_types.TestAllTypes.read(f)
test_regression.check_all_types(msg)
def test_roundtrip_file_packed(all_types):
f = tempfile.TemporaryFile()
msg = all_types.TestAllTypes.new_message()
test_regression.init_all_types(msg)
msg.write_packed(f)
f.seek(0)
msg = all_types.TestAllTypes.read_packed(f)
test_regression.check_all_types(msg)
def test_roundtrip_bytes(all_types):
msg = all_types.TestAllTypes.new_message()
test_regression.init_all_types(msg)
message_bytes = msg.to_bytes()
msg = all_types.TestAllTypes.from_bytes(message_bytes)
test_regression.check_all_types(msg)
@pytest.mark.skipif(
platform.python_implementation() == "PyPy",
reason="TODO: Investigate why this works on CPython but fails on PyPy.",
)
def test_roundtrip_segments(all_types):
msg = all_types.TestAllTypes.new_message()
test_regression.init_all_types(msg)
segments = msg.to_segments()
msg = all_types.TestAllTypes.from_segments(segments)
test_regression.check_all_types(msg)
@pytest.mark.skipif(
sys.version_info[0] < 3,
reason="mmap doesn't implement the buffer interface under python 2.",
)
def test_roundtrip_bytes_mmap(all_types):
msg = all_types.TestAllTypes.new_message()
test_regression.init_all_types(msg)
with tempfile.TemporaryFile() as f:
msg.write(f)
length = f.tell()
f.seek(0)
memory = mmap.mmap(f.fileno(), length)
msg = all_types.TestAllTypes.from_bytes(memory)
test_regression.check_all_types(msg)
@pytest.mark.skipif(
sys.version_info[0] < 3, reason="memoryview is a builtin on Python 3"
)
def test_roundtrip_bytes_buffer(all_types):
msg = all_types.TestAllTypes.new_message()
test_regression.init_all_types(msg)
b = msg.to_bytes()
v = memoryview(b)
msg = all_types.TestAllTypes.from_bytes(v)
test_regression.check_all_types(msg)
def test_roundtrip_bytes_fail(all_types):
with pytest.raises(TypeError):
all_types.TestAllTypes.from_bytes(42)
@pytest.mark.skipif(
platform.python_implementation() == "PyPy",
reason="This works in PyPy 4.0.1 but travisci's version of PyPy has some bug that fails this test.",
)
def test_roundtrip_bytes_packed(all_types):
msg = all_types.TestAllTypes.new_message()
test_regression.init_all_types(msg)
message_bytes = msg.to_bytes_packed()
msg = all_types.TestAllTypes.from_bytes_packed(message_bytes)
test_regression.check_all_types(msg)
@contextmanager
def _warnings(
expected_count=2, expected_text="This message has already been written once."
):
with warnings.catch_warnings(record=True) as w:
yield
assert len(w) == expected_count
assert all(issubclass(x.category, UserWarning) for x in w), w
assert all(expected_text in str(x.message) for x in w), w
def test_roundtrip_file_multiple(all_types):
f = tempfile.TemporaryFile()
msg = all_types.TestAllTypes.new_message()
test_regression.init_all_types(msg)
msg.write(f)
with _warnings(2):
msg.write(f)
msg.write(f)
f.seek(0)
i = 0
for msg in all_types.TestAllTypes.read_multiple(f):
test_regression.check_all_types(msg)
i += 1
assert i == 3
def test_roundtrip_bytes_multiple(all_types):
msg = all_types.TestAllTypes.new_message()
test_regression.init_all_types(msg)
msgs = msg.to_bytes()
with _warnings(2):
msgs += msg.to_bytes()
msgs += msg.to_bytes()
i = 0
for msg in all_types.TestAllTypes.read_multiple_bytes(msgs):
test_regression.check_all_types(msg)
i += 1
assert i == 3
def test_roundtrip_file_multiple_packed(all_types):
f = tempfile.TemporaryFile()
msg = all_types.TestAllTypes.new_message()
test_regression.init_all_types(msg)
msg.write_packed(f)
with _warnings(2):
msg.write_packed(f)
msg.write_packed(f)
f.seek(0)
i = 0
for msg in all_types.TestAllTypes.read_multiple_packed(f):
test_regression.check_all_types(msg)
i += 1
assert i == 3
def test_roundtrip_bytes_multiple_packed(all_types):
msg = all_types.TestAllTypes.new_message()
test_regression.init_all_types(msg)
msgs = msg.to_bytes_packed()
with _warnings(2):
msgs += msg.to_bytes_packed()
msgs += msg.to_bytes_packed()
i = 0
for msg in all_types.TestAllTypes.read_multiple_bytes_packed(msgs):
test_regression.check_all_types(msg)
i += 1
assert i == 3
def test_file_and_bytes(all_types):
f = tempfile.TemporaryFile()
msg = all_types.TestAllTypes.new_message()
test_regression.init_all_types(msg)
msg.write(f)
f.seek(0)
with _warnings(1):
assert f.read() == msg.to_bytes()
def test_file_and_bytes_packed(all_types):
f = tempfile.TemporaryFile()
msg = all_types.TestAllTypes.new_message()
test_regression.init_all_types(msg)
msg.write_packed(f)
f.seek(0)
with _warnings(1):
assert f.read() == msg.to_bytes_packed()
def test_pickle(all_types):
msg = all_types.TestAllTypes.new_message()
test_regression.init_all_types(msg)
data = pickle.dumps(msg)
msg2 = pickle.loads(data)
test_regression.check_all_types(msg2)
def test_from_bytes_traversal_limit(all_types):
size = 1024
bld = all_types.TestAllTypes.new_message()
bld.init("structList", size)
data = bld.to_bytes()
msg = all_types.TestAllTypes.from_bytes(data)
with pytest.raises(capnp.KjException):
for i in range(0, size):
            msg.structList[i].uInt8Field == 0  # each access re-traverses the list, eventually exhausting the traversal limit
msg = all_types.TestAllTypes.from_bytes(data, traversal_limit_in_words=2 ** 62)
for i in range(0, size):
assert msg.structList[i].uInt8Field == 0
def test_from_bytes_packed_traversal_limit(all_types):
size = 1024
bld = all_types.TestAllTypes.new_message()
bld.init("structList", size)
data = bld.to_bytes_packed()
msg = all_types.TestAllTypes.from_bytes_packed(data)
with pytest.raises(capnp.KjException):
for i in range(0, size):
            msg.structList[i].uInt8Field == 0  # each access re-traverses the list, eventually exhausting the traversal limit
msg = all_types.TestAllTypes.from_bytes_packed(
data, traversal_limit_in_words=2 ** 62
)
for i in range(0, size):
assert msg.structList[i].uInt8Field == 0
|
|
"""Conversion tool from CTF to FIF
"""
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os
from os import path as op
import numpy as np
from ...utils import verbose, logger
from ...externals.six import string_types
from ..base import _BaseRaw
from ..utils import _mult_cal_one, _blk_read_lims
from .res4 import _read_res4, _make_ctf_name
from .hc import _read_hc
from .eeg import _read_eeg
from .trans import _make_ctf_coord_trans_set
from .info import _compose_meas_info
from .constants import CTF
def read_raw_ctf(directory, system_clock='truncate', preload=False,
verbose=None):
"""Raw object from CTF directory
Parameters
----------
directory : str
        Path to the CTF data directory (ending in ``'.ds'``).
system_clock : str
How to treat the system clock. Use "truncate" (default) to truncate
the data file when the system clock drops to zero, and use "ignore"
to ignore the system clock (e.g., if head positions are measured
multiple times during a recording).
preload : bool or str (default False)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
large amount of memory). If preload is a string, preload is the
file name of a memory-mapped file which is used to store the data
on the hard drive (slower, requires less memory).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
raw : instance of RawCTF
The raw data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
Notes
-----
.. versionadded:: 0.11
"""
return RawCTF(directory, system_clock, preload=preload, verbose=verbose)
class RawCTF(_BaseRaw):
"""Raw object from CTF directory
Parameters
----------
directory : str
        Path to the CTF data directory (ending in ``'.ds'``).
system_clock : str
How to treat the system clock. Use "truncate" (default) to truncate
the data file when the system clock drops to zero, and use "ignore"
to ignore the system clock (e.g., if head positions are measured
multiple times during a recording).
preload : bool or str (default False)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
large amount of memory). If preload is a string, preload is the
file name of a memory-mapped file which is used to store the data
on the hard drive (slower, requires less memory).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, directory, system_clock='truncate', preload=False,
verbose=None):
# adapted from mne_ctf2fiff.c
if not isinstance(directory, string_types) or \
not directory.endswith('.ds'):
raise TypeError('directory must be a directory ending with ".ds"')
if not op.isdir(directory):
raise ValueError('directory does not exist: "%s"' % directory)
known_types = ['ignore', 'truncate']
if not isinstance(system_clock, string_types) or \
system_clock not in known_types:
raise ValueError('system_clock must be one of %s, not %s'
% (known_types, system_clock))
logger.info('ds directory : %s' % directory)
res4 = _read_res4(directory) # Read the magical res4 file
coils = _read_hc(directory) # Read the coil locations
eeg = _read_eeg(directory) # Read the EEG electrode loc info
# Investigate the coil location data to get the coordinate trans
coord_trans = _make_ctf_coord_trans_set(res4, coils)
# Compose a structure which makes fiff writing a piece of cake
info = _compose_meas_info(res4, coils, coord_trans, eeg)
# Determine how our data is distributed across files
fnames = list()
last_samps = list()
raw_extras = list()
        while True:
suffix = 'meg4' if len(fnames) == 0 else ('%d_meg4' % len(fnames))
meg4_name = _make_ctf_name(directory, suffix, raise_error=False)
if meg4_name is None:
break
# check how much data is in the file
sample_info = _get_sample_info(meg4_name, res4, system_clock)
if sample_info['n_samp'] == 0:
break
if len(fnames) == 0:
info['buffer_size_sec'] = \
sample_info['block_size'] / info['sfreq']
info['filename'] = directory
fnames.append(meg4_name)
last_samps.append(sample_info['n_samp'] - 1)
raw_extras.append(sample_info)
super(RawCTF, self).__init__(
info, preload, last_samps=last_samps, filenames=fnames,
raw_extras=raw_extras, orig_format='int', verbose=verbose)
@verbose
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a chunk of raw data"""
si = self._raw_extras[fi]
offset = 0
trial_start_idx, r_lims, d_lims = _blk_read_lims(start, stop,
int(si['block_size']))
with open(self._filenames[fi], 'rb') as fid:
for bi in range(len(r_lims)):
samp_offset = (bi + trial_start_idx) * si['res4_nsamp']
n_read = min(si['n_samp'] - samp_offset, si['block_size'])
# read the chunk of data
pos = CTF.HEADER_SIZE
pos += samp_offset * si['n_chan'] * 4
fid.seek(pos, 0)
this_data = np.fromstring(
fid.read(si['n_chan'] * n_read * 4), '>i4')
this_data.shape = (si['n_chan'], n_read)
this_data = this_data[:, r_lims[bi, 0]:r_lims[bi, 1]]
data_view = data[:, d_lims[bi, 0]:d_lims[bi, 1]]
_mult_cal_one(data_view, this_data, idx, cals, mult)
offset += n_read
def _get_sample_info(fname, res4, system_clock):
"""Helper to determine the number of valid samples"""
logger.info('Finding samples for %s: ' % (fname,))
if CTF.SYSTEM_CLOCK_CH in res4['ch_names']:
clock_ch = res4['ch_names'].index(CTF.SYSTEM_CLOCK_CH)
else:
clock_ch = None
for k, ch in enumerate(res4['chs']):
if ch['ch_name'] == CTF.SYSTEM_CLOCK_CH:
clock_ch = k
break
with open(fname, 'rb') as fid:
fid.seek(0, os.SEEK_END)
st_size = fid.tell()
fid.seek(0, 0)
if (st_size - CTF.HEADER_SIZE) % (4 * res4['nsamp'] *
res4['nchan']) != 0:
raise RuntimeError('The number of samples is not an even multiple '
'of the trial size')
n_samp_tot = (st_size - CTF.HEADER_SIZE) // (4 * res4['nchan'])
n_trial = n_samp_tot // res4['nsamp']
n_samp = n_samp_tot
if clock_ch is None:
logger.info(' System clock channel is not available, assuming '
'all samples to be valid.')
elif system_clock == 'ignore':
logger.info(' System clock channel is available, but ignored.')
else: # use it
logger.info(' System clock channel is available, checking '
'which samples are valid.')
for t in range(n_trial):
# Skip to the correct trial
samp_offset = t * res4['nsamp']
offset = CTF.HEADER_SIZE + (samp_offset * res4['nchan'] +
(clock_ch * res4['nsamp'])) * 4
fid.seek(offset, 0)
this_data = np.fromstring(fid.read(4 * res4['nsamp']), '>i4')
if len(this_data) != res4['nsamp']:
raise RuntimeError('Cannot read data for trial %d'
% (t + 1))
end = np.where(this_data == 0)[0]
if len(end) > 0:
n_samp = samp_offset + end[0]
break
if n_samp < res4['nsamp']:
n_trial = 1
logger.info(' %d x %d = %d samples from %d chs'
% (n_trial, n_samp, n_samp, res4['nchan']))
else:
n_trial = n_samp // res4['nsamp']
n_omit = n_samp_tot - n_samp
n_samp = n_trial * res4['nsamp']
logger.info(' %d x %d = %d samples from %d chs'
% (n_trial, res4['nsamp'], n_samp, res4['nchan']))
if n_omit != 0:
logger.info(' %d samples omitted at the end' % n_omit)
return dict(n_samp=n_samp, n_samp_tot=n_samp_tot, block_size=res4['nsamp'],
n_trial=n_trial, res4_nsamp=res4['nsamp'],
n_chan=res4['nchan'])
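# A hedged usage sketch (not part of the original module): read a CTF ``.ds``
# directory with the options documented in ``read_raw_ctf`` above. The path is
# a placeholder assumption and must point at real CTF data for this to run.
if __name__ == '__main__':  # pragma: no cover - illustrative only
    example_raw = read_raw_ctf('/path/to/subject01.ds',
                               system_clock='truncate', preload=True)
    print(example_raw)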
|
|
import sklearn
import sklearn.ensemble
import sklearn.linear_model
import sklearn.tree
import gc
from sklearn.preprocessing import StandardScaler
import numpy as np
class KerasWrap(object):
""" A wrapper that allows us to set parameters in the constructor and do a reset before fitting.
"""
def __init__(self, model, epochs, flatten_output=False):
self.model = model
self.epochs = epochs
self.flatten_output = flatten_output
self.init_weights = None
self.scaler = StandardScaler()
def fit(self, X, y, verbose=0):
if self.init_weights is None:
self.init_weights = self.model.get_weights()
else:
self.model.set_weights(self.init_weights)
        self.scaler.fit(X)
        X = self.scaler.transform(X)  # scale inputs consistently with predict()
        return self.model.fit(X, y, epochs=self.epochs, verbose=verbose)
def predict(self, X):
X = self.scaler.transform(X)
if self.flatten_output:
return self.model.predict(X).flatten()
else:
return self.model.predict(X)
# These models are all tuned for the corrgroups60 dataset
def corrgroups60__lasso():
""" Lasso Regression
"""
return sklearn.linear_model.Lasso(alpha=0.1)
def corrgroups60__ridge():
""" Ridge Regression
"""
return sklearn.linear_model.Ridge(alpha=1.0)
def corrgroups60__decision_tree():
""" Decision Tree
"""
# max_depth was chosen to minimise test error
return sklearn.tree.DecisionTreeRegressor(random_state=0, max_depth=6)
def corrgroups60__random_forest():
""" Random Forest
"""
return sklearn.ensemble.RandomForestRegressor(100, random_state=0)
def corrgroups60__gbm():
""" Gradient Boosted Trees
"""
import xgboost
# max_depth and learning_rate were fixed then n_estimators was chosen using a train/test split
return xgboost.XGBRegressor(max_depth=6, n_estimators=50, learning_rate=0.1, n_jobs=8, random_state=0)
def corrgroups60__ffnn():
""" 4-Layer Neural Network
"""
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=60))
model.add(Dense(20, activation='relu'))
model.add(Dense(20, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam',
loss='mean_squared_error',
metrics=['mean_squared_error'])
return KerasWrap(model, 30, flatten_output=True)
def independentlinear60__lasso():
""" Lasso Regression
"""
return sklearn.linear_model.Lasso(alpha=0.1)
def independentlinear60__ridge():
""" Ridge Regression
"""
return sklearn.linear_model.Ridge(alpha=1.0)
def independentlinear60__decision_tree():
""" Decision Tree
"""
# max_depth was chosen to minimise test error
return sklearn.tree.DecisionTreeRegressor(random_state=0, max_depth=4)
def independentlinear60__random_forest():
""" Random Forest
"""
return sklearn.ensemble.RandomForestRegressor(100, random_state=0)
def independentlinear60__gbm():
""" Gradient Boosted Trees
"""
import xgboost
# max_depth and learning_rate were fixed then n_estimators was chosen using a train/test split
return xgboost.XGBRegressor(max_depth=6, n_estimators=100, learning_rate=0.1, n_jobs=8, random_state=0)
def independentlinear60__ffnn():
""" 4-Layer Neural Network
"""
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=60))
model.add(Dense(20, activation='relu'))
model.add(Dense(20, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam',
loss='mean_squared_error',
metrics=['mean_squared_error'])
return KerasWrap(model, 30, flatten_output=True)
def cric__lasso():
""" Lasso Regression
"""
model = sklearn.linear_model.LogisticRegression(penalty="l1", C=0.002)
    # we want to explain the raw probability outputs of the model
model.predict = lambda X: model.predict_proba(X)[:,1]
return model
def cric__ridge():
""" Ridge Regression
"""
model = sklearn.linear_model.LogisticRegression(penalty="l2")
    # we want to explain the raw probability outputs of the model
model.predict = lambda X: model.predict_proba(X)[:,1]
return model
def cric__decision_tree():
""" Decision Tree
"""
model = sklearn.tree.DecisionTreeClassifier(random_state=0, max_depth=4)
# we want to explain the raw probability outputs of the trees
model.predict = lambda X: model.predict_proba(X)[:,1]
return model
def cric__random_forest():
""" Random Forest
"""
model = sklearn.ensemble.RandomForestClassifier(100, random_state=0)
# we want to explain the raw probability outputs of the trees
model.predict = lambda X: model.predict_proba(X)[:,1]
return model
def cric__gbm():
""" Gradient Boosted Trees
"""
import xgboost
# max_depth and subsample match the params used for the full cric data in the paper
# learning_rate was set a bit higher to allow for faster runtimes
# n_estimators was chosen based on a train/test split of the data
model = xgboost.XGBClassifier(max_depth=5, n_estimators=400, learning_rate=0.01, subsample=0.2, n_jobs=8, random_state=0)
# we want to explain the margin, not the transformed probability outputs
model.__orig_predict = model.predict
model.predict = lambda X: model.__orig_predict(X, output_margin=True) # pylint: disable=E1123
return model
def cric__ffnn():
""" 4-Layer Neural Network
"""
from keras.models import Sequential
from keras.layers import Dense, Dropout
model = Sequential()
model.add(Dense(10, activation='relu', input_dim=336))
model.add(Dropout(0.5))
model.add(Dense(10, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
return KerasWrap(model, 30, flatten_output=True)
def human__decision_tree():
""" Decision Tree
"""
# build data
N = 1000000
M = 3
X = np.zeros((N,M))
X.shape
y = np.zeros(N)
X[0, 0] = 1
y[0] = 8
X[1, 1] = 1
y[1] = 8
X[2, 0:2] = 1
y[2] = 4
# fit model
xor_model = sklearn.tree.DecisionTreeRegressor(max_depth=2)
xor_model.fit(X, y)
return xor_model
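# A hedged usage sketch (not part of the original module): build one of the
# factory models above, fit it on a small synthetic regression problem (an
# assumption made purely for illustration) and report its in-sample score.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    rng = np.random.RandomState(0)
    X_demo = rng.randn(200, 60)
    y_demo = 2.0 * X_demo[:, 0] - X_demo[:, 1] + 0.1 * rng.randn(200)
    demo_model = corrgroups60__lasso()
    demo_model.fit(X_demo, y_demo)
    print("demo R^2: %.3f" % demo_model.score(X_demo, y_demo))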
|
|
# Copyright 2012, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""VTGateCursor, and StreamVTGateCursor."""
import itertools
import operator
import re
from vtdb import base_cursor
from vtdb import dbexceptions
write_sql_pattern = re.compile(r'\s*(insert|update|delete)', re.IGNORECASE)
def ascii_lower(string):
"""Lower-case, but only in the ASCII range."""
return string.encode('utf8').lower().decode('utf8')
class VTGateCursorMixin(object):
def connection_list(self):
return [self._conn]
def is_writable(self):
return self._writable
class VTGateCursor(base_cursor.BaseListCursor, VTGateCursorMixin):
"""A cursor for execute statements to VTGate.
Results are stored as a list.
"""
def __init__(
self, connection, tablet_type, keyspace=None,
shards=None, keyspace_ids=None, keyranges=None,
writable=False, as_transaction=False, single_db=False,
twopc=False):
"""Init VTGateCursor.
Args:
connection: A PEP0249 connection object.
tablet_type: Str tablet_type.
keyspace: Str keyspace or None if batch API will be used.
shards: List of strings.
keyspace_ids: Struct('!Q').packed keyspace IDs.
keyranges: Str keyranges.
writable: True if writable.
as_transaction: True if an executemany call is its own transaction.
single_db: True if single db transaction is needed.
twopc: True if 2-phase commit is needed.
"""
super(VTGateCursor, self).__init__(single_db=single_db, twopc=twopc)
self._conn = connection
self._writable = writable
self.description = None
self.index = None
self.keyspace = keyspace
self.shards = shards
self.keyspace_ids = keyspace_ids
self.keyranges = keyranges
self.lastrowid = None
self.results = None
self.routing = None
self.rowcount = 0
self.tablet_type = tablet_type
self.as_transaction = as_transaction
self._clear_batch_state()
# pass kwargs here in case higher level APIs need to push more data through
# for instance, a key value for shard mapping
def execute(self, sql, bind_variables, **kwargs):
"""Perform a query, return the number of rows affected."""
self._clear_list_state()
self._clear_batch_state()
if self._handle_transaction_sql(sql):
return
entity_keyspace_id_map = kwargs.pop('entity_keyspace_id_map', None)
entity_column_name = kwargs.pop('entity_column_name', None)
write_query = bool(write_sql_pattern.match(sql))
# NOTE: This check may also be done at higher layers but adding it
    # here for completeness.
if write_query:
if not self.is_writable():
raise dbexceptions.ProgrammingError('DML on a non-writable cursor', sql)
if entity_keyspace_id_map:
raise dbexceptions.ProgrammingError(
'entity_keyspace_id_map is not allowed for write queries')
# FIXME(alainjobart): the entity_keyspace_id_map should be in the
# cursor, same as keyspace_ids, shards, keyranges, to avoid this hack.
if entity_keyspace_id_map:
shards = None
keyspace_ids = None
keyranges = None
else:
shards = self.shards
keyspace_ids = self.keyspace_ids
keyranges = self.keyranges
self.results, self.rowcount, self.lastrowid, self.description = (
self.connection._execute( # pylint: disable=protected-access
sql,
bind_variables,
tablet_type=self.tablet_type,
keyspace_name=self.keyspace,
shards=shards,
keyspace_ids=keyspace_ids,
keyranges=keyranges,
entity_keyspace_id_map=entity_keyspace_id_map,
entity_column_name=entity_column_name,
not_in_transaction=not self.is_writable(),
effective_caller_id=self.effective_caller_id,
**kwargs))
return self.rowcount
def fetch_aggregate_function(self, func):
return func(row[0] for row in self.fetchall())
def fetch_aggregate(self, order_by_columns, limit):
"""Fetch from many shards, sort, then remove sort columns.
    A scatter query may return up to limit rows from each shard, so all
    results are sorted manually here and only the first limit rows are
    returned.
This is a special-use function.
Args:
order_by_columns: The ORDER BY clause. Each element is either a
column, [column, 'ASC'], or [column, 'DESC'].
limit: Int limit.
Returns:
Smallest rows, with up to limit items. First len(order_by_columns)
columns are stripped.
"""
sort_columns = []
desc_columns = []
for order_clause in order_by_columns:
if isinstance(order_clause, (tuple, list)):
sort_columns.append(order_clause[0])
if ascii_lower(order_clause[1]) == 'desc':
desc_columns.append(order_clause[0])
else:
sort_columns.append(order_clause)
# sort the rows and then trim off the prepended sort columns
if sort_columns:
sorted_rows = list(sort_row_list_by_columns(
self.fetchall(), sort_columns, desc_columns))[:limit]
else:
sorted_rows = itertools.islice(self.fetchall(), limit)
neutered_rows = [row[len(order_by_columns):] for row in sorted_rows]
return neutered_rows
def _clear_batch_state(self):
"""Clear state that allows traversal to next query's results."""
self.result_sets = []
self.result_set_index = None
def close(self):
super(VTGateCursor, self).close()
self._clear_batch_state()
def executemany(self, sql, params_list, **kwargs):
"""Execute multiple statements in one batch.
This adds len(params_list) result_sets to self.result_sets. Each
result_set is a (results, rowcount, lastrowid, fields) tuple.
Each call overwrites the old result_sets. After execution, nextset()
is called to move the fetch state to the start of the first
result set.
Args:
sql: The sql text, with %(format)s-style tokens. May be None.
params_list: A list of the keyword params that are normally sent
to execute. Either the sql arg or params['sql'] must be defined.
**kwargs: passed as is to connection._execute_batch.
"""
if sql:
sql_list = [sql] * len(params_list)
else:
sql_list = [params.get('sql') for params in params_list]
bind_variables_list = [params['bind_variables'] for params in params_list]
keyspace_list = [params['keyspace'] for params in params_list]
keyspace_ids_list = [params.get('keyspace_ids') for params in params_list]
shards_list = [params.get('shards') for params in params_list]
self._clear_batch_state()
# Find other _execute_batch calls in test code.
self.result_sets = self.connection._execute_batch( # pylint: disable=protected-access
sql_list, bind_variables_list, keyspace_list, keyspace_ids_list,
shards_list,
self.tablet_type, self.as_transaction, self.effective_caller_id,
**kwargs)
self.nextset()
def nextset(self):
"""Move the fetch state to the start of the next result set.
self.(results, rowcount, lastrowid, description) will be set to
the next result_set, and the fetch-commands will work on this
result set.
Returns:
True if another result set exists, False if not.
"""
if self.result_set_index is None:
self.result_set_index = 0
else:
self.result_set_index += 1
self._clear_list_state()
if self.result_set_index < len(self.result_sets):
self.results, self.rowcount, self.lastrowid, self.description = (
self.result_sets[self.result_set_index])
return True
else:
self._clear_batch_state()
return None
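# A hedged sketch (not part of the original module) of the batch workflow that
# the executemany()/nextset() docstrings above describe. The cursor argument is
# assumed to be a VTGateCursor backed by a live connection, and params_list is
# assumed to be non-empty.
def _demo_batch_fetch(cursor, sql, params_list):
  """Run a batch and collect the rows of every result set."""
  cursor.executemany(sql, params_list)
  all_rows = []
  while True:
    all_rows.append(cursor.fetchall())
    if not cursor.nextset():
      break
  return all_rows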
class StreamVTGateCursor(base_cursor.BaseStreamCursor, VTGateCursorMixin):
"""A cursor for streaming statements to VTGate.
Results are returned as a generator.
"""
def __init__(
self, connection, tablet_type, keyspace=None,
shards=None, keyspace_ids=None,
keyranges=None, writable=False):
super(StreamVTGateCursor, self).__init__()
self._conn = connection
self._writable = writable
self.keyspace = keyspace
self.shards = shards
self.keyspace_ids = keyspace_ids
self.keyranges = keyranges
self.routing = None
self.tablet_type = tablet_type
def is_writable(self):
return self._writable
# pass kwargs here in case higher level APIs need to push more data through
# for instance, a key value for shard mapping
def execute(self, sql, bind_variables, **kwargs):
"""Start a streaming query."""
if self._writable:
raise dbexceptions.ProgrammingError('Streaming query cannot be writable')
self._clear_stream_state()
self.generator, self.description = self.connection._stream_execute( # pylint: disable=protected-access
sql,
bind_variables,
tablet_type=self.tablet_type,
keyspace_name=self.keyspace,
shards=self.shards,
keyspace_ids=self.keyspace_ids,
keyranges=self.keyranges,
not_in_transaction=not self.is_writable(),
effective_caller_id=self.effective_caller_id,
**kwargs)
return 0
def sort_row_list_by_columns(row_list, sort_columns=(), desc_columns=()):
"""Sort by leading sort columns by stable-sorting in reverse-index order."""
  for column_index, column_name in reversed(list(enumerate(sort_columns))):
og = operator.itemgetter(column_index)
if not isinstance(row_list, list):
row_list = sorted(
row_list, key=og, reverse=bool(column_name in desc_columns))
else:
row_list.sort(key=og, reverse=bool(column_name in desc_columns))
return row_list
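# A hedged usage sketch (not part of the original module) showing how
# sort_row_list_by_columns orders rows: sort on column 0 ascending, breaking
# ties on column 1 descending.
if __name__ == '__main__':  # pragma: no cover - illustrative only
  demo_rows = [(1, 'b'), (2, 'a'), (1, 'c')]
  print(sort_row_list_by_columns(
      demo_rows, sort_columns=('c0', 'c1'), desc_columns=('c1',)))
  # -> [(1, 'c'), (1, 'b'), (2, 'a')]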
|
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import threading
import time
from collections.abc import Generator
from functools import partial
from typing import Any, Tuple, Dict, List
from pyflink.common import Row
from pyflink.fn_execution import pickle
from pyflink.serializers import PickleSerializer
from pyflink.table import functions
from pyflink.table.udf import DelegationTableFunction, DelegatingScalarFunction, \
ImperativeAggregateFunction, PandasAggregateFunctionWrapper
_func_num = 0
_constant_num = 0
def normalize_table_function_result(it):
def normalize_one_row(value):
if isinstance(value, tuple):
# We assume that tuple is a single line output
return [*value]
elif isinstance(value, Row):
            # We assume that the Row is a single line output
return value._values
else:
# single field value
return [value]
if it is None:
return []
if isinstance(it, (list, range, Generator)):
def func():
for item in it:
yield normalize_one_row(item)
return func()
else:
return [normalize_one_row(it)]
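# A hedged sketch (not part of the original module) of the normalization rules
# implemented above: a tuple or Row becomes one output row, a scalar becomes a
# single-field row, and lists/generators are normalized element by element.
def _demo_normalize_table_function_result():
    assert normalize_table_function_result(None) == []
    assert normalize_table_function_result((1, 2)) == [[1, 2]]
    assert normalize_table_function_result(3) == [[3]]
    assert list(normalize_table_function_result([(1, 2), 3])) == [[1, 2], [3]]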
def normalize_pandas_result(it):
import pandas as pd
arrays = []
for result in it:
if isinstance(result, (Row, Tuple)):
arrays.append(pd.concat([pd.Series([item]) for item in result], axis=1))
else:
arrays.append(pd.Series([result]))
return arrays
def wrap_input_series_as_dataframe(*args):
import pandas as pd
return pd.concat(args, axis=1)
def check_pandas_udf_result(f, *input_args):
output = f(*input_args)
import pandas as pd
assert type(output) == pd.Series or type(output) == pd.DataFrame, \
"The result type of Pandas UDF '%s' must be pandas.Series or pandas.DataFrame, got %s" \
% (f.__name__, type(output))
assert len(output) == len(input_args[0]), \
"The result length '%d' of Pandas UDF '%s' is not equal to the input length '%d'" \
% (len(output), f.__name__, len(input_args[0]))
return output
def extract_over_window_user_defined_function(user_defined_function_proto):
window_index = user_defined_function_proto.window_index
return (*extract_user_defined_function(user_defined_function_proto, True), window_index)
def extract_user_defined_function(user_defined_function_proto, pandas_udaf=False,
one_arg_optimization=False)\
-> Tuple[str, Dict, List]:
"""
Extracts user-defined-function from the proto representation of a
:class:`UserDefinedFunction`.
:param user_defined_function_proto: the proto representation of the Python
:param pandas_udaf: whether the user_defined_function_proto is pandas udaf
:param one_arg_optimization: whether the optimization enabled
:class:`UserDefinedFunction`
"""
def _next_func_num():
global _func_num
_func_num = _func_num + 1
return _func_num
def _extract_input(args) -> Tuple[str, Dict, List]:
local_variable_dict = {}
local_funcs = []
args_str = []
for arg in args:
if arg.HasField("udf"):
# for chaining Python UDF input: the input argument is a Python ScalarFunction
udf_arg, udf_variable_dict, udf_funcs = extract_user_defined_function(
arg.udf, one_arg_optimization=one_arg_optimization)
args_str.append(udf_arg)
local_variable_dict.update(udf_variable_dict)
local_funcs.extend(udf_funcs)
elif arg.HasField("inputOffset"):
if one_arg_optimization:
args_str.append("value")
else:
# the input argument is a column of the input row
args_str.append("value[%s]" % arg.inputOffset)
else:
# the input argument is a constant value
constant_value_name, parsed_constant_value = \
_parse_constant_value(arg.inputConstant)
args_str.append(constant_value_name)
local_variable_dict[constant_value_name] = parsed_constant_value
return ",".join(args_str), local_variable_dict, local_funcs
variable_dict = {}
user_defined_funcs = []
user_defined_func = pickle.loads(user_defined_function_proto.payload)
if pandas_udaf:
user_defined_func = PandasAggregateFunctionWrapper(user_defined_func)
func_name = 'f%s' % _next_func_num()
if isinstance(user_defined_func, DelegatingScalarFunction) \
or isinstance(user_defined_func, DelegationTableFunction):
if user_defined_function_proto.is_pandas_udf:
variable_dict[func_name] = partial(check_pandas_udf_result, user_defined_func.func)
else:
variable_dict[func_name] = user_defined_func.func
else:
variable_dict[func_name] = user_defined_func.eval
user_defined_funcs.append(user_defined_func)
func_args, input_variable_dict, input_funcs = _extract_input(user_defined_function_proto.inputs)
variable_dict.update(input_variable_dict)
user_defined_funcs.extend(input_funcs)
if user_defined_function_proto.takes_row_as_input:
if input_variable_dict:
# for constant or other udfs as input arguments.
func_str = "%s(%s)" % (func_name, func_args)
elif user_defined_function_proto.is_pandas_udf or pandas_udaf:
# for pandas udf/udaf, the input data structure is a List of Pandas.Series
# we need to merge these Pandas.Series into a Pandas.DataFrame
variable_dict['wrap_input_series_as_dataframe'] = wrap_input_series_as_dataframe
func_str = "%s(wrap_input_series_as_dataframe(%s))" % (func_name, func_args)
else:
# directly use `value` as input argument
# e.g.
# lambda value: Row(value[0], value[1])
# can be optimized to
# lambda value: value
func_str = "%s(value)" % func_name
else:
func_str = "%s(%s)" % (func_name, func_args)
return func_str, variable_dict, user_defined_funcs
def _parse_constant_value(constant_value) -> Tuple[str, Any]:
j_type = constant_value[0]
serializer = PickleSerializer()
pickled_data = serializer.loads(constant_value[1:])
# the type set contains
# TINYINT,SMALLINT,INTEGER,BIGINT,FLOAT,DOUBLE,DECIMAL,CHAR,VARCHAR,NULL,BOOLEAN
    # the pickled_data doesn't need to be converted to another python object
if j_type == 0:
parsed_constant_value = pickled_data
# the type is DATE
elif j_type == 1:
parsed_constant_value = \
datetime.date(year=1970, month=1, day=1) + datetime.timedelta(days=pickled_data)
# the type is TIME
elif j_type == 2:
seconds, milliseconds = divmod(pickled_data, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
parsed_constant_value = datetime.time(hours, minutes, seconds, milliseconds * 1000)
# the type is TIMESTAMP
elif j_type == 3:
parsed_constant_value = \
datetime.datetime(year=1970, month=1, day=1, hour=0, minute=0, second=0) \
+ datetime.timedelta(milliseconds=pickled_data)
else:
raise Exception("Unknown type %s, should never happen" % str(j_type))
def _next_constant_num():
global _constant_num
_constant_num = _constant_num + 1
return _constant_num
constant_value_name = 'c%s' % _next_constant_num()
return constant_value_name, parsed_constant_value
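# A hedged sketch (not part of the original module) of the TIME branch above:
# the pickled constant is a millisecond-of-day value, e.g. 3723004 ms decodes
# to 01:02:03.004.
def _demo_time_constant_decoding():
    milliseconds_of_day = 3723004
    seconds, milliseconds = divmod(milliseconds_of_day, 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    assert datetime.time(hours, minutes, seconds, milliseconds * 1000) == \
        datetime.time(1, 2, 3, 4000)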
def extract_user_defined_aggregate_function(
current_index,
user_defined_function_proto,
distinct_info_dict: Dict[Tuple[List[str]], Tuple[List[int], List[int]]]):
user_defined_agg = load_aggregate_function(user_defined_function_proto.payload)
assert isinstance(user_defined_agg, ImperativeAggregateFunction)
args_str = []
local_variable_dict = {}
for arg in user_defined_function_proto.inputs:
if arg.HasField("inputOffset"):
# the input argument is a column of the input row
args_str.append("value[%s]" % arg.inputOffset)
else:
# the input argument is a constant value
constant_value_name, parsed_constant_value = \
_parse_constant_value(arg.inputConstant)
for key, value in local_variable_dict.items():
if value == parsed_constant_value:
constant_value_name = key
break
if constant_value_name not in local_variable_dict:
local_variable_dict[constant_value_name] = parsed_constant_value
args_str.append(constant_value_name)
if user_defined_function_proto.distinct:
if tuple(args_str) in distinct_info_dict:
distinct_info_dict[tuple(args_str)][0].append(current_index)
distinct_info_dict[tuple(args_str)][1].append(user_defined_function_proto.filter_arg)
distinct_index = distinct_info_dict[tuple(args_str)][0][0]
else:
distinct_info_dict[tuple(args_str)] = \
([current_index], [user_defined_function_proto.filter_arg])
distinct_index = current_index
else:
distinct_index = -1
if user_defined_function_proto.takes_row_as_input and not local_variable_dict:
# directly use `value` as input argument
# e.g.
# lambda value: Row(value[0], value[1])
# can be optimized to
# lambda value: value
func_str = "lambda value : [value]"
else:
func_str = "lambda value : (%s,)" % ",".join(args_str)
return user_defined_agg, \
eval(func_str, local_variable_dict) \
if args_str else lambda v: tuple(), \
user_defined_function_proto.filter_arg, \
distinct_index
def is_built_in_function(payload):
# The payload may be a pickled bytes or the class name of the built-in functions.
# If it represents a built-in function, it will start with 0x00.
# If it is a pickled bytes, it will start with 0x80.
return payload[0] == 0
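# A hedged sketch (not part of the original module): a payload that starts with
# a 0x00 byte names a built-in function class, while a pickled payload starts
# with the pickle protocol marker 0x80.
def _demo_is_built_in_function():
    assert is_built_in_function(b"\x00SomeBuiltInAggFunction")
    assert not is_built_in_function(b"\x80\x04K*.")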
def load_aggregate_function(payload):
if is_built_in_function(payload):
built_in_function_class_name = payload[1:].decode("utf-8")
cls = getattr(functions, built_in_function_class_name)
return cls()
else:
return pickle.loads(payload)
def parse_function_proto(proto):
from pyflink.fn_execution import flink_fn_execution_pb2
serialized_fn = flink_fn_execution_pb2.UserDefinedFunctions()
serialized_fn.ParseFromString(proto)
return serialized_fn
def deserialized_operation_from_serialized_bytes(b):
import cloudpickle
return cloudpickle.loads(b)
def create_scalar_operation_from_proto(proto, one_arg_optimization=False,
one_result_optimization=False):
from pyflink.fn_execution.table.operations import ScalarFunctionOperation
serialized_fn = parse_function_proto(proto)
scalar_operation = ScalarFunctionOperation(
serialized_fn, one_arg_optimization, one_result_optimization)
return scalar_operation
def create_serialized_scalar_operation_from_proto(proto, one_arg_optimization=False,
one_result_optimization=False):
"""
    The CPython extension included in proto does not support being initialized multiple
    times, so a single interpreter process is chosen to be responsible for initialization
    and proto parsing. That interpreter parses the proto and serializes the resulting
    function operations with cloudpickle.
"""
import cloudpickle
scalar_operation = create_scalar_operation_from_proto(
bytes(b % 256 for b in proto), one_arg_optimization, one_result_optimization)
return cloudpickle.dumps(scalar_operation)
class PeriodicThread(threading.Thread):
"""Call a function periodically with the specified number of seconds"""
def __init__(self,
interval,
function,
args=None,
kwargs=None
) -> None:
threading.Thread.__init__(self)
self._interval = interval
self._function = function
self._args = args if args is not None else []
self._kwargs = kwargs if kwargs is not None else {}
self._finished = threading.Event()
def run(self) -> None:
now = time.time()
        next_call = now + self._interval
        # Keep firing until cancel() sets the finished event: if we have fallen
        # behind schedule, run immediately; otherwise wait until the next
        # scheduled time, waking early (and stopping) if cancelled.
        while (next_call <= now and not self._finished.is_set()) or \
(not self._finished.wait(next_call - now)):
if next_call <= now:
next_call = now + self._interval
else:
next_call = next_call + self._interval
self._function(*self._args, **self._kwargs)
now = time.time()
def cancel(self) -> None:
"""Stop the thread if it hasn't finished yet."""
self._finished.set()
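# A hedged usage sketch (not part of the original module): tick roughly every
# 0.3 seconds for about one second, then cancel the thread.
if __name__ == '__main__':  # pragma: no cover - illustrative only
    ticker = PeriodicThread(0.3, lambda: print(time.time()))
    ticker.start()
    time.sleep(1.0)
    ticker.cancel()
    ticker.join()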
|
|
# Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Natural Neighbor Verification
=============================
Walks through the steps of Natural Neighbor interpolation to validate that the algorithmic
approach taken in MetPy is correct.
"""
###########################################
# Find natural neighbors visual test
#
# A triangle is a natural neighbor for a point if the
# `circumscribed circle <https://en.wikipedia.org/wiki/Circumscribed_circle>`_ of the
# triangle contains that point. It is important that we grab the correct triangles
# for each point before proceeding with the interpolation.
#
# Algorithmically:
#
# 1. We place all of the grid points in a KDTree. These provide worst-case O(n) time
# complexity for spatial searches.
#
# 2. We generate a `Delaunay Triangulation <https://docs.scipy.org/doc/scipy/
# reference/tutorial/spatial.html#delaunay-triangulations>`_
# using the locations of the provided observations.
#
# 3. For each triangle, we calculate its circumcenter and circumradius. Using
#    the KDTree, we then assign to each grid point every triangle whose
#    circumcenter lies within a circumradius of that grid point's location.
#
# 4. The resulting dictionary uses the grid index as a key and stores, as its value, a
#    set of natural neighbor triangles in the form of triangle codes from the Delaunay
#    triangulation.
# This dictionary is then iterated through to calculate interpolation values.
#
# 5. We then traverse the ordered natural neighbor edge vertices for a particular
# grid cell in groups of 3 (n - 1, n, n + 1), and perform calculations to generate
# proportional polygon areas.
#
# Circumcenter of (n - 1), n, grid_location
# Circumcenter of (n + 1), n, grid_location
#
# Determine what existing circumcenters (ie, Delaunay circumcenters) are associated
# with vertex n, and add those as polygon vertices. Calculate the area of this polygon.
#
# 6. Increment the current edges to be checked, i.e.:
# n - 1 = n, n = n + 1, n + 1 = n + 2
#
# 7. Repeat steps 5 & 6 until all of the edge combinations of 3 have been visited.
#
# 8. Repeat steps 4 through 7 for each grid cell.
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import ConvexHull, Delaunay, delaunay_plot_2d, Voronoi, voronoi_plot_2d
from scipy.spatial.distance import euclidean
from metpy.gridding import polygons, triangles
from metpy.gridding.interpolation import nn_point
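###########################################
# As a hedged side check (not part of the original walkthrough), the natural
# neighbor membership test from step 3 above can be written directly: a grid
# point is covered by a triangle's circumcircle when its distance to the
# circumcenter is smaller than the circumradius.
def in_circumcircle(circumcenter, circumradius, point):
    """Return True if ``point`` falls inside the given circumcircle."""
    return euclidean(circumcenter, point) < circumradius
print(in_circumcircle((0., 0.), 5., (3., 3.)))  # True, sqrt(18) < 5
print(in_circumcircle((0., 0.), 5., (4., 4.)))  # False, sqrt(32) > 5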
###########################################
# For a test case, we generate 10 random points and observations, where the
# observation values are just the x coordinate value times the y coordinate
# value divided by 1000.
#
# We then create two test points (grid 0 & grid 1) at which we want to
# estimate a value using natural neighbor interpolation.
#
# The locations of these observations are then used to generate a Delaunay triangulation.
np.random.seed(100)
pts = np.random.randint(0, 100, (10, 2))
xp = pts[:, 0]
yp = pts[:, 1]
zp = (pts[:, 0] * pts[:, 0]) / 1000
tri = Delaunay(pts)
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
delaunay_plot_2d(tri, ax=ax)
for i, zval in enumerate(zp):
ax.annotate('{} F'.format(zval), xy=(pts[i, 0] + 2, pts[i, 1]))
sim_gridx = [30., 60.]
sim_gridy = [30., 60.]
ax.plot(sim_gridx, sim_gridy, '+', markersize=10)
ax.set_aspect('equal', 'datalim')
ax.set_title('Triangulation of observations and test grid cell '
'natural neighbor interpolation values')
members, tri_info = triangles.find_natural_neighbors(tri, list(zip(sim_gridx, sim_gridy)))
val = nn_point(xp, yp, zp, (sim_gridx[0], sim_gridy[0]), tri, members[0], tri_info)
ax.annotate('grid 0: {:.3f}'.format(val), xy=(sim_gridx[0] + 2, sim_gridy[0]))
val = nn_point(xp, yp, zp, (sim_gridx[1], sim_gridy[1]), tri, members[1], tri_info)
ax.annotate('grid 1: {:.3f}'.format(val), xy=(sim_gridx[1] + 2, sim_gridy[1]))
###########################################
# Using the circumcenter and circumcircle radius information from
# :func:`metpy.gridding.triangles.find_natural_neighbors`, we can visually
# examine the results to see if they are correct.
def draw_circle(ax, x, y, r, m, label):
th = np.linspace(0, 2 * np.pi, 100)
nx = x + r * np.cos(th)
ny = y + r * np.sin(th)
ax.plot(nx, ny, m, label=label)
members, tri_info = triangles.find_natural_neighbors(tri, list(zip(sim_gridx, sim_gridy)))
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
delaunay_plot_2d(tri, ax=ax)
ax.plot(sim_gridx, sim_gridy, 'ks', markersize=10)
for i, info in tri_info.items():
x_t = info['cc'][0]
y_t = info['cc'][1]
if i in members[1] and i in members[0]:
draw_circle(ax, x_t, y_t, info['r'], 'm-', str(i) + ': grid 1 & 2')
ax.annotate(str(i), xy=(x_t, y_t), fontsize=15)
elif i in members[0]:
draw_circle(ax, x_t, y_t, info['r'], 'r-', str(i) + ': grid 0')
ax.annotate(str(i), xy=(x_t, y_t), fontsize=15)
elif i in members[1]:
draw_circle(ax, x_t, y_t, info['r'], 'b-', str(i) + ': grid 1')
ax.annotate(str(i), xy=(x_t, y_t), fontsize=15)
else:
draw_circle(ax, x_t, y_t, info['r'], 'k:', str(i) + ': no match')
ax.annotate(str(i), xy=(x_t, y_t), fontsize=9)
ax.set_aspect('equal', 'datalim')
ax.legend()
###########################################
# What?....the circle from triangle 8 looks pretty darn close. Why isn't
# grid 0 included in that circle?
x_t, y_t = tri_info[8]['cc']
r = tri_info[8]['r']
print('Distance between grid0 and Triangle 8 circumcenter:',
euclidean([x_t, y_t], [sim_gridx[0], sim_gridy[0]]))
print('Triangle 8 circumradius:', r)
###########################################
# Lets do a manual check of the above interpolation value for grid 0 (southernmost grid)
# Grab the circumcenters and radii for natural neighbors
cc = np.array([tri_info[m]['cc'] for m in members[0]])
r = np.array([tri_info[m]['r'] for m in members[0]])
print('circumcenters:\n', cc)
print('radii\n', r)
###########################################
# Draw the natural neighbor triangles and their circumcenters. Also plot a `Voronoi diagram
# <https://docs.scipy.org/doc/scipy/reference/tutorial/spatial.html#voronoi-diagrams>`_
# which serves as a complementary (but not necessary)
# spatial data structure that we use here simply to show areal ratios.
# Notice that the two natural neighbor triangle circumcenters are also vertices
# in the Voronoi plot (green dots), and the observations are in the polygons (blue dots).
vor = Voronoi(list(zip(xp, yp)))
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
voronoi_plot_2d(vor, ax=ax)
nn_ind = np.array([0, 5, 7, 8])
z_0 = zp[nn_ind]
x_0 = xp[nn_ind]
y_0 = yp[nn_ind]
for x, y, z in zip(x_0, y_0, z_0):
ax.annotate('{}, {}: {:.3f} F'.format(x, y, z), xy=(x, y))
ax.plot(sim_gridx[0], sim_gridy[0], 'k+', markersize=10)
ax.annotate('{}, {}'.format(sim_gridx[0], sim_gridy[0]), xy=(sim_gridx[0] + 2, sim_gridy[0]))
ax.plot(cc[:, 0], cc[:, 1], 'ks', markersize=15, fillstyle='none',
label='natural neighbor\ncircumcenters')
for center in cc:
ax.annotate('{:.3f}, {:.3f}'.format(center[0], center[1]),
xy=(center[0] + 1, center[1] + 1))
tris = tri.points[tri.simplices[members[0]]]
for triangle in tris:
x = [triangle[0, 0], triangle[1, 0], triangle[2, 0], triangle[0, 0]]
y = [triangle[0, 1], triangle[1, 1], triangle[2, 1], triangle[0, 1]]
ax.plot(x, y, ':', linewidth=2)
ax.legend()
ax.set_aspect('equal', 'datalim')
def draw_polygon_with_info(ax, polygon, off_x=0, off_y=0):
"""Draw one of the natural neighbor polygons with some information."""
pts = np.array(polygon)[ConvexHull(polygon).vertices]
for i, pt in enumerate(pts):
ax.plot([pt[0], pts[(i + 1) % len(pts)][0]],
[pt[1], pts[(i + 1) % len(pts)][1]], 'k-')
avex, avey = np.mean(pts, axis=0)
ax.annotate('area: {:.3f}'.format(polygons.area(pts)), xy=(avex + off_x, avey + off_y),
fontsize=12)
cc1 = triangles.circumcenter((53, 66), (15, 60), (30, 30))
cc2 = triangles.circumcenter((34, 24), (53, 66), (30, 30))
draw_polygon_with_info(ax, [cc[0], cc1, cc2])
cc1 = triangles.circumcenter((53, 66), (15, 60), (30, 30))
cc2 = triangles.circumcenter((15, 60), (8, 24), (30, 30))
draw_polygon_with_info(ax, [cc[0], cc[1], cc1, cc2], off_x=-9, off_y=3)
cc1 = triangles.circumcenter((8, 24), (34, 24), (30, 30))
cc2 = triangles.circumcenter((15, 60), (8, 24), (30, 30))
draw_polygon_with_info(ax, [cc[1], cc1, cc2], off_x=-15)
cc1 = triangles.circumcenter((8, 24), (34, 24), (30, 30))
cc2 = triangles.circumcenter((34, 24), (53, 66), (30, 30))
draw_polygon_with_info(ax, [cc[0], cc[1], cc1, cc2])
###########################################
# Put all of the generated polygon areas and their affiliated values in arrays.
# Calculate the total area of all of the generated polygons.
areas = np.array([60.434, 448.296, 25.916, 70.647])
values = np.array([0.064, 1.156, 2.809, 0.225])
total_area = np.sum(areas)
print(total_area)
###########################################
# For each polygon area, calculate its percent of total area.
proportions = areas / total_area
print(proportions)
###########################################
# Multiply the percent of total area by the respective values.
contributions = proportions * values
print(contributions)
###########################################
# The sum of this array is the interpolation value!
interpolation_value = np.sum(contributions)
function_output = nn_point(xp, yp, zp, (sim_gridx[0], sim_gridy[0]), tri, members[0], tri_info)
print(interpolation_value, function_output)
###########################################
# The values are slightly different due to truncating the area values in
# the above visual example to the 3rd decimal place.
plt.show()
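###########################################
# A consolidated helper (illustrative sketch, not part of the original
# walkthrough): the natural neighbor estimate computed step by step above is
# just an area-weighted average of the observation values.
def natural_neighbor_weighted_average(polygon_areas, point_values):
    """Return sum(area_i / total_area * value_i), i.e. the areal-ratio estimate."""
    polygon_areas = np.asarray(polygon_areas, dtype=float)
    point_values = np.asarray(point_values, dtype=float)
    return np.sum((polygon_areas / polygon_areas.sum()) * point_values)

# Reproduces the value printed above (up to the same truncation of the areas).
print(natural_neighbor_weighted_average(areas, values))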
|
|
#######################################################
#
# ***** NOTE *****
#
# This file hasn't been updated since some changes were made in the way
# Midi() works. Basically, rather than using MidiInput.devices(),
# you need to explicitly pull in the MIDI hardware you're using, e.g.
#
# m = MidiPypmHardware()
#
# and then use methods on that:
#
# m.input_devices() # returns an array of names
# i = m.get_input(name)
#
# The code in this file needs to be updated to reflect that change.
#
#######################################################
from nosuch.midiutil import *
from Queue import Queue
from threading import Thread
import copy  # Metronome._onMidiClock copies notes via copy.copy
CLOCKS_PER_BEAT = 96.0
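# Hedged sketch (not part of the original module) of the newer hardware-based
# API described in the NOTE above; MidiPypmHardware(), input_devices() and
# get_input() are taken verbatim from that note, everything else here is
# illustrative and untested.
def _open_all_inputs_with_new_api():
    m = MidiPypmHardware()
    return dict((name, m.get_input(name)) for name in m.input_devices())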
class MidiDispatcher(Thread):
"""
Route incoming Midi events to one or more processors.
"""
def __init__(self, inputCallbacks, inputProcessedCallback=None):
Thread.__init__(self)
self.setDaemon(True)
self._inputQueue = Queue()
self._callbacks = inputCallbacks
self._inputProcessedCallback = inputProcessedCallback
def execute(self):
"""
Begin processing events from queue on a background thread.
"""
Midi.callback(self.onMidiInput, "dummy")
self.start()
def onMidiInput(self, event, data):
"""
        Receive input from Midi devices (or simulated input from the
        application).
"""
self._inputQueue.put((event, data))
def run(self):
"""
Process events enqueued by the onMidiInput method
"""
get = self._inputQueue.get
while True:
# pass events to all callbacks
midiInput = get()
for c in self._callbacks:
c(*midiInput)
if self._inputProcessedCallback:
self._inputProcessedCallback()
class MergedIO(object):
"""
Automatically route incoming MIDI events to output devices.
This enables, for example, playing a virtual instrument via
a keyboard controller connected to a MIDI input device.
"""
def __init__(self, deviceMap):
"""
Create a MergedIO instance.
@param deviceMap: a L{MidiDeviceMap} object that maps
input devices to output devices
"""
self._deviceMap = deviceMap
def onMidiInput(self, midiEvent, data):
"""
Route events received from Midi input devices to output
devices.
"""
# A device may be mapped to the input but not for automatic
# merging; just send the events to the devices mapped for merging.
if hasattr(midiEvent.midimsg, "device"):
mergedOutputDevices = [device for device, merge in \
self._deviceMap.getOutputDevices(midiEvent.midimsg.device) if merge]
for d in mergedOutputDevices:
Midi.oneThread.schedule(d, midiEvent.midimsg, midiEvent.time)
class MidiListener(object):
"""
Base class for managing Midi event processing, typically throughout
an entire application session.
"""
def __init__(self):
"""
Create a MidiListener instance.
"""
self._openedInputDevices = {}
self._openedOutputDevices = {}
self._dispatcher = None
def _createClients(self):
# Subclasses override by returning a list of functions that
# will handle Midi input events. The function signatures must
# match those expected by the Midi.callback method.
return []
def _startClients(self):
# Subclasses that perform throughout the application session
# can do startup work here.
pass
def _stopClients(self):
# Subclasses that perform throughout the application session
# can do cleanup work here.
pass
def _getOpenedDevice(self, deviceDict, deviceFactory, deviceName):
# Internal helper function that opens a device exactly once
if deviceName not in deviceDict:
device = deviceFactory(deviceName)
device.open()
deviceDict[deviceName] = device
return deviceDict[deviceName]
def _onInputProcessed(self):
# subclasses override
pass
def openInputDevice(self, name):
"""
Open a Midi input device. This method can be called multiple times
without causing a device conflict.
@param name: the name of the device to open
"""
return self._getOpenedDevice(self._openedInputDevices, MidiInput, name)
def openOutputDevice(self, name):
"""
Open a Midi output device. This method can be called multiple times
without causing a device conflict.
@param name: the name of the device to open
"""
return self._getOpenedDevice(self._openedOutputDevices, MidiOutput, name)
def start(self):
"""
Start Midi processing.
"""
Midi.startup()
self._dispatcher = MidiDispatcher(self._createClients(),
self._onInputProcessed)
self._dispatcher.execute()
self._startClients()
def stop(self):
"""
End Midi processing.
"""
self._stopClients()
Midi.shutdown()
class MidiSequencer(MidiListener):
"""
Provide services related to recording and playing Midi sequences.
"""
def __init__(self, deviceMap=None):
"""
Create a MidiSequencer instance.
"""
MidiListener.__init__(self)
self._deviceMap = deviceMap
self._mergedIO = None
self._metronome = None
self._recorder = None
self._playing = False
self._playbackStartTime = None
self._playbackThread = None
self._beatsPerBar = 4
self._feedbackQueue = Queue()
self._feedbackHandler = None
def _createClients(self):
# Open input and output devices, and prepare to route events
# from inputs to outputs
self._mergedIO = MergedIO(self.deviceMap)
# Create a metronome
self._metronome = Metronome(self._getMetronomeDevice(), self.pushEvent)
# Create a recorder (sequencer)
self._recorder = MidiRecorder(self._onTrackInput)
# return the functions that will handle events from Midi input
return [self._onMidiInput, self._mergedIO.onMidiInput,
self._recorder.onMidiInput]
def _emptyFeedbackQueue(self):
while self._feedbackQueue.qsize():
self._feedbackQueue.get_nowait()
def _getDeviceMap(self):
"""
Return the L{MidiDeviceMap} that defines routings between
input and output devices.
"""
if self._deviceMap is None:
self._deviceMap = MidiDeviceMap(self.openInputDevice,
self.openOutputDevice)
self._deviceMap.addDefaultDeviceMapping(merge=True)
return self._deviceMap
def _getDefaultOutputDeviceName(self):
return MidiOutput.devices()[pypm.GetDefaultOutputDeviceID()]
def _getFeedbackQueue(self):
"""
Return a L{Queue} that supplies feedback events to the application.
"""
return self._feedbackQueue
def _getMetronomeDevice(self):
return self.openOutputDevice(self._getDefaultOutputDeviceName())
def _getPlaying(self):
"""
Return whether the sequencer is playing.
"""
return self._playing
def _getPlaybackPhrases(self, includeMutedTracks = False):
# Yield copies, rather than originals, allowing simultaneous
# playback and recording of a track (overdubbing).
return (track.phrase[:] for track in self.recorder.sequence if \
(includeMutedTracks or (not track.mute)))
def _getRecorder(self):
"""
Return the MidiRecorder object that the sequencer uses for recording
sequences.
"""
return self._recorder
def _getSequence(self):
return self._recorder.sequence
def _setSequence(self, sequence):
self._recorder.sequence = sequence
def _getTempo(self):
return int((Midi.oneThread.clocks_per_second / CLOCKS_PER_BEAT) * 60.0)
def _setTempo(self, bpm):
Midi.oneThread.clocks_per_second = (bpm / 60.0) * CLOCKS_PER_BEAT
def _onMidiInput(self, event, data):
# This is the one receiver of the TickMsg (sent by the Metronome) that
# sends it back through the feedback queue.
if self._feedbackHandler:
msg = event.midimsg
if isinstance(msg, TickMsg):
if msg.clocks is not None:
self._feedbackQueue.put(msg)
def _onInputProcessed(self):
# subclasses override
if self._feedbackHandler:
feedbackMessages = []
messageCount = self._feedbackQueue.qsize()
if messageCount:
feedbackMessages = [self._feedbackQueue.get_nowait() for \
i in range(messageCount)]
keepCalling = self._feedbackHandler(feedbackMessages)
if not keepCalling:
self._feedbackHandler = None
self._emptyFeedbackQueue()
def _onTrackInput(self, msg):
if self._feedbackHandler:
self._feedbackQueue.put(msg)
def _playMergedEvents(self, startTime):
def _getNextMergedNote(mergedMessages):
try:
note = mergedMessages.next()
except:
note = None
return note
def _onClock(now, tickTime, mergedMessages, lastClocks,
nextNote):
# this is invoked from a scheduled Midi callback
nextNoteTime = None
if self._playing:
while nextNote and nextNote.clocks == lastClocks:
# play it
[outputDevice.schedule(nextNote.msg) for outputDevice, _ in \
self.deviceMap.getOutputDevices(nextNote.msg.device)]
# get the next note
nextNote = _getNextMergedNote(mergedMessages)
if nextNote:
nextNoteTime = tickTime + Midi.oneThread.clocks2secs(\
nextNote.clocks - lastClocks)
lastClocks = nextNote.clocks
else:
self._playing = False
# return the next time for the callback to invoke this method,
# along with the other required arguments, or None to stop
return nextNoteTime and (nextNoteTime, [mergedMessages,
lastClocks, nextNote]) or None
nextNoteTime = startTime and startTime or self._metronome.nextTickTime
lastClocks = 0.0
mergedMessages = (n for n in Phrase.merged(self._getPlaybackPhrases()))
nextNote = _getNextMergedNote(mergedMessages)
if nextNote:
Midi.oneThread.schedule_callback(_onClock, nextNoteTime,
mergedMessages, lastClocks, nextNote)
else:
self._playing = False
def _startClients(self):
self._metronome.active = True
def _stopClients(self):
self._metronome.active = False
deviceMap = property(fget=_getDeviceMap, doc=_getDeviceMap.__doc__)
feedbackQueue = property(fget=_getFeedbackQueue,
doc=_getFeedbackQueue.__doc__)
def getTotalClocks(self):
"""
Return the length of the sequence, in Midi clocks.
"""
return self._recorder.sequence.getTotalClocks()
playing = property(fget=_getPlaying, doc=_getPlaying.__doc__)
def pushEvent(self, event, data):
"""
Simulate Midi input.
"""
self._dispatcher.onMidiInput(event, data)
recorder = property(fget=_getRecorder, doc=_getRecorder.__doc__)
sequence = property(fget=_getSequence, fset=_setSequence, doc = \
"Return or set the L{MultitrackSequence} for recording Midi input.")
def startMetronome(self):
"""
Start playing the metronome.
"""
self._metronome.audible = True
def stopMetronome(self):
"""
Stop playing the metronome.
"""
self._metronome.audible = False
self._metronome.stopOutputTimer()
def startPlayback(self, startTime=None, feedbackHandler=None):
"""
        Play the recorded sequence. Determine the output device(s) for each
        recorded track from the L{DeviceMap}.
        @param startTime: if specified, the time to begin playback; if omitted,
            playback begins immediately
        @param feedbackHandler: if specified, a callable that receives lists of
            feedback messages; have it return False to stop receiving feedback
"""
self._feedbackHandler = feedbackHandler
self._playing = True
if startTime is None:
self._metronome.startOutputTimer()
self._playMergedEvents(startTime)
def startRecording(self, playMetronome=True, countOffBeats=8,
endAfterBeats=None, feedbackHandler=None):
"""
Start recording into the tracks that are armed for recording.
@param playMetronome: C{True} to play the metronome during recording;
C{False} to record without it.
        @param countOffBeats: the number of beats to count before recording
            (most useful when L{playMetronome} is C{True})
        @param endAfterBeats: if specified, the number of beats to record
            (not including the L{countOffBeats}); if omitted, record
            continuously until L{stopRecording} is invoked
"""
self._feedbackHandler = feedbackHandler
timenow = self._metronome.nextTickTime
countOffClocks = countOffBeats * CLOCKS_PER_BEAT
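        # e.g. with the default countOffBeats=8 and CLOCKS_PER_BEAT=96.0 this
        # is a 768-clock count-off before recording begins at startTime below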
self._metronome.startOutputTimer(clock=-countOffClocks,
bar=-(countOffBeats/self._beatsPerBar))
if playMetronome:
self.startMetronome()
startTime = timenow + \
Midi.oneThread.clocks2secs(countOffClocks)
stopTime = endAfterBeats and startTime + \
Midi.oneThread.clocks2secs(endAfterBeats * CLOCKS_PER_BEAT) or None
self._recorder.start(startTime, stopTime)
# play what's been recorded (and not muted)
self.startPlayback(startTime=startTime, feedbackHandler=feedbackHandler)
def stopPlayback(self):
"""
Stop playing the recorded sequence.
"""
self._playing = False
if self._playbackThread:
self._playbackThread.join()
self._feedbackHandler = None
self._playbackThread = None
self.stopMetronome()
def stopRecording(self):
"""
Stop recording.
"""
self._recorder.stop()
self.stopPlayback()
tempo = property(fget=_getTempo, fset=_setTempo,
doc="Return or set the recording and playback tempo, in beats per minute")
class MidiDeviceMap(object):
"""
Map input devices to output devices.
"""
def __init__(self, inputDeviceFactory, outputDeviceFactory):
"""
Create a MidiDeviceMap instance.
@param inputDeviceFactory: a factory function for creating
a L{MidiInputDevice} object, given a device name
@param outputDeviceFactory: a factory function for creating
a L{MidiOutputDevice} object, given a device name
"""
self._deviceNameMap = {}
self._deviceMap = {}
self._inputDeviceFactory = inputDeviceFactory
self._outputDeviceFactory = outputDeviceFactory
def addDefaultDeviceMapping(self, merge=True):
"""
Map the default input device to the default output device.
@param merge: immediately route events from the input device to
the output device
"""
if len(MidiInput.devices()):
self.addMapping(MidiInput.devices()[pypm.GetDefaultInputDeviceID()],
MidiOutput.devices()[pypm.GetDefaultOutputDeviceID()], merge)
def addMapping(self, inputName, outputName, merge):
"""
Map an input device to an output device.
@param inputName: the name of the input device
@param outputName: the name of the output device
@param merge: immediately route events from the input device to
the output device
"""
if not self.mappingExists(inputName, outputName):
if inputName not in self._deviceNameMap:
self._deviceNameMap[inputName] = []
mappedOutputs = self._deviceNameMap[inputName]
mappedOutputs.append((outputName, merge))
inputDevice = self._inputDeviceFactory(inputName)
outputDevice = self._outputDeviceFactory(outputName)
if inputDevice not in self._deviceMap:
mappedDevices = []
self._deviceMap[inputDevice] = mappedDevices
else:
mappedDevices = self._deviceMap[inputDevice]
mappedDevices.append((outputDevice, merge))
def canMap(self, inputName, outputName):
"""
Return whether an input device can be mapped to an output device.
@param inputName: the name of the input device
@param outputName: the name of the output device
"""
return (inputName != outputName) and \
not self.mappingExists(inputName, outputName)
def getMapping(self, inputName):
"""
Get the mapping for an input device.
@param inputName: the name of the input device
@return: a list of (deviceName, merged) tuples for each output
device mapped to the input device, where deviceName is the
name of an output device, and merged is a bool that
indicates whether to immediately route input from the
input device to the output device
"""
return self._deviceNameMap.get(inputName, [])
def getOutputDevices(self, inputDevice):
"""
Return output devices mapped to an input device.
@param inputDevice: the L{MidiInputDevice} object that represents
the input device
@return: a list of (device, merged) tuples for each output device
mapped to the input device, where device is a L{MidiOutputDevice}
and merged is a bool that indicates whether to immediately
route input from the input device to the output device
"""
return self._deviceMap.get(inputDevice, [])
def mappingExists(self, inputName, outputName):
"""
Return whether an input device is mapped to an output device.
@param inputName: the name of the input device
@param outputName: the name of the output device
"""
return outputName in [name for name, _ in self.getMapping(inputName)]
def removeMapping(self, inputName, outputName):
"""
Remove the mapping between an input device and an output device.
@param inputName: the name of the input device
@param outputName: the name of the output device
"""
if self.mappingExists(inputName, outputName):
mappedParameters = self._deviceNameMap[inputName]
for i in range(len(mappedParameters)):
if mappedParameters[i][0] == outputName:
del mappedParameters[i]
# same index in the actual device map
inputDevice = self._inputDeviceFactory(inputName)
del self._deviceMap[inputDevice][i]
break
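# Illustrative usage sketch (not in the original file) of MidiDeviceMap with a
# MidiListener-style object; the device names are placeholders.
def _example_build_device_map(listener):
    deviceMap = MidiDeviceMap(listener.openInputDevice, listener.openOutputDevice)
    if deviceMap.canMap('USB Keyboard In', 'Synth Out'):
        deviceMap.addMapping('USB Keyboard In', 'Synth Out', merge=True)
    # getMapping returns (outputName, merge) tuples, e.g. [('Synth Out', True)]
    return deviceMap.getMapping('USB Keyboard In')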
class TickMsg(MidiMsg):
"""
Message sent by the Metronome for every Midi clock event.
If the clocks field is a number, it represents the offset
in Midi clocks from the beginning of the recording. Events
sent during the countoff just prior to recording have negative
clock values; the start of recording is clock 0.
"""
def __init__(self, clocks):
MidiMsg.__init__(self, "tick")
self.clocks = clocks
def __str__(self):
return "tick %d" % self.clocks
class NewBarMsg(MidiMsg):
"""
Message sent by the Metronome at the beginning of each bar during
    recording. If the bar field is a number, it represents the
number of bars from the beginning of the recording. Events sent
during the countoff just prior to recording have negative bar
values; the start of recording is bar 0. The clocksPerBar field
contains the length of the bar, in Midi clocks.
"""
def __init__(self, bar, clocksPerBar):
MidiMsg.__init__(self, "newbar")
self.bar = bar
self.clocksPerBar = clocksPerBar
def __str__(self):
return "bar %d (%d clocks)" % (self.bar, self.clocksPerBar)
class TrackMsg(MidiMsg):
"""
Message placed into the sequencer's feedback queue, for track-
oriented rendering in the user interface. The track field contains
the zero-based index of the track. The msg field contains a L{MidiMsg}
of L{SequencedEvent}.
"""
def __init__(self, track, msg):
MidiMsg.__init__(self, "trackmsg")
self.track = track
self.msg = msg
class Metronome(object):
"""
Keep time in Midi clocks. Play "beats" repeatedly over Midi output
(e.g., during recording).
"""
def __init__(self, outputDevice, inputHandler):
"""
Create a Metronome instance.
@param outputDevice: the MidiOutput device to which to send
the metronome's output
@param inputHandler: a callback function into which to send
the metronome's tick events
"""
self._outputDevice = outputDevice
self._inputHandler = inputHandler
self._phraseClocks = 0
self._active = False
self._audible = False
self._outputThread = None
self._restartPhrase = False
self._timeMsgClock = None
self._currentPhraseClock = None
self._currentNote = None
self._noteStack = None
self._currentBar = None
self._outputTimer = False
self.nextTickTime = None
self.phrase = self._defaultPhrase()
def _defaultPhrase(self):
# four quarter notes, with accents on 1 and 3
phrase = Phrase()
lastClock = 0
for pitch, velocity, channel, duration in \
zip([75] * 4, [80, 40, 50, 40], [10] * 4, [96] * 4):
phrase.append(SequencedNote(pitch=pitch, velocity=velocity,
channel=channel, clocks=lastClock,
duration=duration))
lastClock += duration
return phrase
def _getActive(self):
return self._active
    def _setActive(self, value):
        if value and not self._active:
            # register to receive timer callbacks every Midi clock
            self.nextTickTime = Midi.time_now()
            Midi.oneThread.schedule_callback(self._onMidiClock,
                                             self.nextTickTime)
        self._active = value
def _getAudible(self):
return self._audible
def _setAudible(self, value):
if not self._audible:
self._restartPhrase = True
self._audible = value
def _getPhrase(self):
return self._phrase
def _setPhrase(self, phrase):
self._phrase = phrase
self._phraseClocks = sum([n.duration for n in self._phrase])
def _onMidiClock(self, now, tickTime):
# place a tick message into the Midi input queue
self._inputHandler(MidiEvent(TickMsg(self._timeMsgClock), tickTime),
None)
if self._restartPhrase:
# The metronome phrase is either being played for the first
# time since the metronome became audible, or it was fully
# played and it is now time to start it over again.
self._currentPhraseClock = 0
self._noteStack = list(reversed(range(len(self._phrase))))
self._currentNote = self._phrase[self._noteStack.pop()]
self._restartPhrase = False
if self._audible:
if self._currentPhraseClock == self._currentNote.clocks:
# it's time to play the current metronome phrase note
if self._currentPhraseClock == 0:
# beginning of the phrase == new bar
# place a new bar message into the Midi input queue
self._inputHandler(MidiEvent(\
NewBarMsg(self._currentBar, self._phraseClocks),
tickTime), None)
nextNote = copy.copy(self._currentNote)
nextNote.clocks = 0
# play the current metronome phrase note
self._outputDevice.schedule(nextNote, tickTime)
if self._noteStack:
# get the next metronome phrase note
self._currentNote = self._phrase[self._noteStack.pop()]
if self._currentPhraseClock < self._phraseClocks:
self._currentPhraseClock += 1
else:
# reached the end of the metronome phrase; start from
# the beginning next time this method is invoked
self._restartPhrase = True
if self._outputTimer:
self._currentBar += 1
if self._outputTimer:
self._timeMsgClock += 1
self.nextTickTime += Midi.oneThread.clocks2secs(1)
return self.nextTickTime
active = property(fget=_getActive, fset=_setActive,
doc="Turn the metronome on or off.")
audible = property(fget=_getAudible, fset=_setAudible,
doc="Play or stop playing the metronome.")
phrase = property(fget=_getPhrase, fset=_setPhrase,
doc="Return or set the L{Phrase} object to play when the metronome is on.")
def startOutputTimer(self, clock = 0, bar = 0):
"""
Reset the Midi clock and current bar to specified values. The
        metronome will include the reset values in the next TickMsg and
        NewBarMsg that it places into Midi input, and will automatically
increment the values.
@param clock: the new Midi clock value; can be negative (e.g.,
to indicate countoff clocks prior to the beginning of recording)
@param bar: the new bar offset; can be negative (e.g., to indicate
the bar position prior to the beginning of recording)
"""
self._timeMsgClock = clock
self._currentBar = bar
self._outputTimer = True
def stopOutputTimer(self):
"""
        Stop setting explicit clock and bar values for TickMsg and NewBarMsg.
"""
self._timeMsgClock = None
self._currentPhraseClock = None
self._currentNote = None
self._noteStack = None
self._currentBar = None
self._outputTimer = False
class MidiRecorder(object):
"""
Record one or more tracks of Midi input.
"""
def __init__(self, callback):
"""
Create a MidiRecorder instance.
"""
self._on = False
self._tracks = []
self._sequence = MultitrackSequence(callback)
self._timeStart = None
self._timeStop = None
self._lastClock = None
def _getOn(self):
"""
Return whether recording is in progress.
"""
return self._on
def onMidiInput(self, event, data):
"""
Route incoming Midi events to the appropriate tracks.
"""
if not self._on:
return
elif event.time >= self._timeStart:
if self._timeStop and event.time > self._timeStop:
self.stop()
else:
if isinstance(event.midimsg, TickMsg):
self._lastClock = event.midimsg.clocks
else:
if isinstance(self._lastClock, int) and \
self._lastClock >= 0:
eventClocks = self._lastClock
else:
eventClocks = round((event.time - self._timeStart) * \
Midi.oneThread.clocks_per_second)
eventChannel = hasattr(event.midimsg, "channel") and \
event.midimsg.channel or None
for track in self._sequence:
if track.recording and (eventChannel is None or \
track.channel == eventChannel):
track.onMidiInput(\
SequencedMidiMsg(event.midimsg, eventClocks))
def _getSequence(self):
return self._sequence
def _setSequence(self, sequence):
self._sequence = sequence
on = property(fget=_getOn, doc=_getOn.__doc__)
sequence = property(fget=_getSequence, fset=_setSequence, doc = \
"Return or set the L{MultitrackSequence} for recording Midi input.")
def start(self, timeStart, timeStop=None):
"""
Start recording Midi input.
@param timeStart: the time at which to begin recording
        @param timeStop: if specified, the time at which to stop
            recording
"""
self._timeStart = timeStart
self._timeStop = timeStop
self._on = True
def stop(self):
"""
Stop recording Midi input.
"""
self._on = False
self._lastClock = None
class SequencerTrack(object):
"""
Manage settings for one track in a sequence.
"""
def __init__(self, channel, recording, mute, callback, phrase=None):
self.channel = channel
self.recording = recording
self.mute = mute
self.phrase = phrase and phrase or Phrase()
self._callback = callback
self._pendingNoteOffs = {}
self._barCount = 0
def erase(self):
"""
Erase all of the events in the track.
"""
del self.phrase[:]
self._barCount = 0
def getTotalClocks(self):
"""
Return the length of the track in Midi clocks.
"""
return len(self.phrase) and self.phrase[-1].clocks or 0
def onMidiInput(self, sequencedEvent):
"""
Handle Midi input.
"""
callbackMsg = None
msg = sequencedEvent.msg
if not isinstance(msg, (TickMsg, NewBarMsg)):
# add normal Midi messages to the phrase
self.phrase.append(sequencedEvent)
# ensure that the phrase remains sorted by time
if len(self.phrase) > 1 and sequencedEvent.clocks < \
self.phrase[-2].clocks:
self.phrase.sort(key=lambda e:e.clocks)
# pair NoteOn messages to NoteOffs
if isinstance(msg, NoteOn):
self._pendingNoteOffs[msg.pitch] = sequencedEvent
elif isinstance(msg, NoteOff):
# Create a SequencedNote event from a paired NoteOn+NoteOff
# and pass it to the callback function.
# This mechanism can be used to simplify GUI rendering of
# notes as they are recorded.
noteOnEvent = self._pendingNoteOffs.get(msg.pitch, None)
if noteOnEvent:
del self._pendingNoteOffs[msg.pitch]
callbackMsg = SequencedNote(msg.pitch,
velocity=noteOnEvent.msg.velocity,
channel=msg.channel, clocks=noteOnEvent.clocks,
duration=sequencedEvent.clocks - noteOnEvent.clocks,
releasevelocity=msg.velocity)
elif isinstance(msg, NewBarMsg) and msg.bar >= self._barCount:
# When a new bar is recorded, notify the callback.
self._barCount += 1
callbackMsg = msg
if self._callback and callbackMsg:
self._callback(self, callbackMsg)
class MultitrackSequence(list):
"""
A list of one or more L{SequencerTrack} objects.
"""
def __init__(self, callback, tracksOrPhrases=None):
list.__init__(self)
if tracksOrPhrases is not None and isinstance(tracksOrPhrases,
(MultitrackSequence, list)):
[self.append(track) for track in tracksOrPhrases]
self._callback = callback
def _makeTrack(self, trackOrPhrase):
if isinstance(trackOrPhrase, SequencerTrack):
return trackOrPhrase
else:
            # a phrase: wrap it in a default track
            track = SequencerTrack(1, False, False, self._onTrackInput,
                                   trackOrPhrase)
            return track
def _onTrackInput(self, track, msg):
if self._callback:
self._callback(TrackMsg(self.index(track), msg))
def _validateItem(self, track):
if not isinstance(track, (SequencerTrack, Phrase)):
            raise Exception(
                "MultitrackSequences can only take SequencerTrack or Phrase objects!")
def append(self, trackOrPhrase):
"""
Append a track to the end of the list.
@param trackOrPhrase: a L{SequencerTrack} or L{Phrase} object
"""
self._validateItem(trackOrPhrase)
list.append(self, self._makeTrack(trackOrPhrase))
def appendTrack(self, phrase=None, channel=1, record=False, mute=False):
"""
Add a new track to the end of the list.
@param phrase: if specified, the L{Phrase} that stores the Midi
events for the track
@param channel: the Midi channel for recording and playback of
the track
@param record: True if the track is armed for recording; False
otherwise
@param mute: True if the track is muted for playback; False
otherwise
"""
track = SequencerTrack(channel, record, mute, self._onTrackInput)
self.append(track)
return track
def getTotalClocks(self):
"""
Return the length of the sequence, in Midi clocks.
"""
trackClocks = [track.getTotalClocks() for track in self]
return trackClocks and max(trackClocks) or 0
def insert(self, index, trackOrPhrase):
"""
Insert a track into the sequence.
@param index: the position into which to insert the track
@param trackOrPhrase: a L{SequencerTrack} or L{Phrase} object
"""
self._validateItem(trackOrPhrase)
list.insert(self, index, self._makeTrack(trackOrPhrase))
def insertTrack(self, index, phrase=None, channel=1, record=False,
mute=False):
"""
Insert a new track into the list.
@param index: the position into which to insert the track
@param phrase: if specified, the L{Phrase} that stores the Midi
events for the track
@param channel: the Midi channel for recording and playback of
the track
@param record: True if the track is armed for recording; False
otherwise
@param mute: True if the track is muted for playback; False
otherwise
"""
track = SequencerTrack(channel, record, mute, self._onTrackInput,
phrase)
self.insert(index, track)
return track
|
|
from __future__ import print_function, unicode_literals
import sys
import types
import traceback
# Test imports.
import time
droid = None
skip_gui = False
fOutName = True
# tests for python modification for android {{{1
def test_029_isfile(): # issue #29 {{{1
import os
# FIXME: determine path to sdcard. like: path = os.environ[""]
path = os.path.dirname(__file__)
fname = os.path.abspath(os.path.join(path, "test_isfile"))
open(fname, "w").write("this is test")
os.path.isfile(fname)
os.remove(fname)
try:
assert os.path.isfile(fname) is False
except Exception as e:
print(e)
return False
return True
def test_047_ttyname(): # issue #47 {{{1
import os
try:
os.ttyname(0)
os.ttyname(1)
except Exception as e:
print(e)
return False
return True
def test_071_anydbm(): # issue #71 {{{1
import os
if sys.version_info[0] == 2:
import anydbm
else:
import dbm as anydbm
# FIXME: determine path to sdcard. like: path = os.environ[""]
del os.chmod
for fname in (
# failed: this is not SL4A application folder...
# os.path.join("/data/data/com.googlecode.pythonforandroid",
# "files", "test_anydbm.dbm"),
# OK: _chmod work well.
# os.path.join("/data/local/abc", "test_anydbm.dbm"),
# failed: _chmod not worked in FAT (SD card)
os.path.join("/sdcard", "sl4a", "test_anydbm.dbm"),
):
try:
os.remove(fname + ".dat")
except:
pass
anydbm.open(fname, "n")
os.remove(fname + ".dat")
return True
def test_075_httpserver(): # issue #75 {{{1
import time
import threading
if sys.version_info[0] == 2:
from BaseHTTPServer import BaseHTTPRequestHandler as handler
from BaseHTTPServer import HTTPServer
else:
from http.server import BaseHTTPRequestHandler as handler
from http.server import HTTPServer
fname = "/sdcard/sl4a/test_075.html"
port = 9090
class Handler(handler):
def do_GET(s):
open(fname, "w").write("""
<html><head></head><body>fine 075</body></html>""")
html = open(fname, 'rb')
s.send_response(200)
s.send_header("Content-Type", "text/html")
s.end_headers()
s.wfile.write(html.read())
server_class = HTTPServer
httpd = server_class(('', port), Handler)
if not skip_gui:
        # for the manual check, open http://127.0.0.1:9090 in a browser.
th = threading.Thread(target=httpd.serve_forever)
th.start()
droid.startActivity('android.intent.action.VIEW',
'http://127.0.0.1:9090/')
time.sleep(3)
httpd.shutdown()
return True
def test_106_https_certification_failed(): # issue #106 {{{1
if sys.version_info[0] == 2:
import urllib2
else:
from urllib import request as urllib2
import os
import take_cacert_pem
fname = take_cacert_pem.main()
if not fname:
return False
os.environ["SSL_CERT_FILE"] = fname
# input = input.replace("!web ", "")
url = "https://ccc.de/"
# url = "https://www.openssl.org/"
req = urllib2.Request(url)
info = urllib2.urlopen(req).read()
info
# Message.Chat.SendMessage("" + info)
    '''the following did not work:
import httplib
c = httplib.HTTPSConnection("ccc.de")
c.request("GET", "/")
response = c.getresponse()
print("%s,%s" % (response.status, response.reason))
data = response.read()
print(data)
'''
return True
def test_107_large_file_report(): # issue #107 {{{1
import os
errors = []
fname = "sample.bin"
for n in (4294967294, 4294967297):
fp = open(fname, "wb")
fp.seek(n)
fp.write("1".encode("utf-8"))
fp.close()
ans = os.path.getsize(fname)
if ans != (n + 1):
errors.append("%s(answer) vs %s(expected)" % (ans, n + 1))
os.remove(fname)
if not errors:
return True
print("can't get size collectly with %s" % str(errors))
return False
def test_013s_scanBarcode(): # issue sl4a #13 {{{1
if not skip_gui:
code = droid.scanBarcode()
ext = code.result.get('extras', None)
if ext is None:
return False
if 'SCAN_RESULT_BYTES' not in ext or 'SCAN_RESULT' not in ext:
print("no results:" + str(ext))
return False
bts = ext['SCAN_RESULT_BYTES']
msg = ext['SCAN_RESULT']
print(msg, bts, len(bts))
return True
def test_009s_airplanemode(): # issue sl4a #9 {{{1
    # this causes a null pointer exception on Android 4.4 and later
ret = droid.checkAirplaneMode()
if ret.error:
return False
if fOutName:
print("%s" % ret.result, end="")
ret = droid.toggleAirplaneMode(True)
if ret.error:
return False
return True
def test_032s_wificonnect(): # issue sl4a #32 {{{1
method = "WPA2"
if method == "no-security":
cfg = dict(
SSID="invalidwifi",
            # the parameters below are not used in the example on my explanation site.
# BSSID=,
# hiddenSSID=False,
# priority=,
# apBand=,
)
elif method == "WEP":
cfg = dict(
SSID="invalidwifi",
wepKeys=["key0"],
wepTxKeyIndex=0,
)
else: # elif method == "WPA2":
cfg = dict(
SSID="invalidwifi",
preSharedKey="kuaihuawifi123",
# or you can use: password="presharedkey",
            # note that SL4A does not accept a 64-byte key.
)
droid.wifiConnect(cfg)
return True
# tests for some facade {{{1
def event_loop():
for i in range(10):
time.sleep(1)
droid.eventClearBuffer()
time.sleep(1)
e = droid.eventPoll(1)
if e.result is not None:
return True
return False
def test_imports():
try:
import termios
import bs4 as BeautifulSoup
import pyxmpp2 as xmpp
from xml.dom import minidom
except ImportError:
return False
return True
def test_clipboard():
previous = droid.getClipboard().result
msg = 'Hello, world!'
droid.setClipboard(msg)
echo = droid.getClipboard().result
droid.setClipboard(previous)
return echo == msg
def test_gdata():
if True:
try:
import gdata.docs.service
global skip_gui
if skip_gui:
return True
except:
return False
    # Create a client class which will make HTTP requests to the Google Docs server.
client = gdata.docs.service.DocsService()
# Authenticate using your Google Docs email address and password.
username = droid.dialogGetInput('Username').result
password = droid.dialogGetPassword('Password', 'For ' + username).result
try:
client.ClientLogin(username, password)
except:
return False
# Query the server for an Atom feed containing a list of your documents.
documents_feed = client.GetDocumentListFeed()
# Loop through the feed and extract each document entry.
return bool(list(documents_feed.entry))
def test_gps():
droid.startLocating()
try:
return event_loop()
finally:
droid.stopLocating()
def test_battery():
droid.batteryStartMonitoring()
time.sleep(1)
try:
return bool(droid.batteryGetStatus())
finally:
droid.batteryStopMonitoring()
def test_sensors():
ret = droid.startSensingTimed(1, 20)
if ret.error:
return False
try:
return event_loop()
finally:
droid.stopSensing()
def test_speak():
result = droid.ttsSpeak('Hello, world!')
return result.error is None
def test_phone_state():
droid.startTrackingPhoneState()
try:
return event_loop()
finally:
droid.stopTrackingPhoneState()
def test_ringer_silent():
result1 = droid.toggleRingerSilentMode()
result2 = droid.toggleRingerSilentMode()
return result1.error is None and result2.error is None
def test_ringer_volume():
get_result = droid.getRingerVolume()
if get_result.error is not None:
return False
droid.setRingerVolume(0)
set_result = droid.setRingerVolume(get_result.result)
if set_result.error is not None:
return False
return True
def test_get_last_known_location():
result = droid.getLastKnownLocation()
return result.error is None
def test_geocode():
result = droid.geocode(0.0, 0.0, 1)
return result.error is None
def test_wifi():
result1 = droid.toggleWifiState()
result2 = droid.toggleWifiState()
return result1.error is None and result2.error is None
def test_make_toast():
result = droid.makeToast('Hello, world!')
return result.error is None
def test_vibrate():
result = droid.vibrate()
return result.error is None
def test_notify():
result = droid.notify('Test Title', 'Hello, world!')
return result.error is None
def test_get_running_packages():
result = droid.getRunningPackages()
return result.error is None
# tests for USBSerialFacade {{{1
def test_usb(): # {{{2
result = droid.usbserialDeviceList()
if result.error is None:
print(result.data)
return True
return False
# tests for SL4A GUI parts {{{1
def test_alert_dialog(): # {{{2
global skip_gui
if skip_gui:
return None
title = 'User Interface'
message = 'Welcome to the SL4A integration test.'
droid.dialogCreateAlert(title, message)
droid.dialogSetPositiveButtonText('Continue')
droid.dialogShow()
response = droid.dialogGetResponse().result
return True
def test__alert_dialog_with_buttons(): # {{{2
global skip_gui
if skip_gui:
return None
title = 'Alert'
message = ('This alert box has 3 buttons and '
'will wait for you to press one.')
droid.dialogCreateAlert(title, message)
droid.dialogSetPositiveButtonText('Yes')
droid.dialogSetNegativeButtonText('No')
droid.dialogSetNeutralButtonText('Cancel')
droid.dialogShow()
response = droid.dialogGetResponse().result
assert response['which'] in ('positive', 'negative', 'neutral')
# print("debug:", response)
skip_gui = response['which'] == "negative"
return True
def test_spinner_progress(): # {{{2
title = 'Spinner'
message = 'This is simple spinner progress.'
droid.dialogCreateSpinnerProgress(title, message)
droid.dialogShow()
time.sleep(2)
droid.dialogDismiss()
return True
def test_horizontal_progress(): # {{{2
title = 'Horizontal'
message = 'This is simple horizontal progress.'
droid.dialogCreateHorizontalProgress(title, message, 50)
droid.dialogShow()
for x in range(0, 15):
time.sleep(0.1)
droid.dialogSetCurrentProgress(x)
droid.dialogDismiss()
return True
def test__alert_dialog_with_list(): # {{{2
global skip_gui
if skip_gui:
return None
title = 'Alert'
droid.dialogCreateAlert(title)
droid.dialogSetItems(['foo', 'bar', 'baz'])
droid.dialogShow()
response = droid.dialogGetResponse().result
# print("debug:", response)
skip_gui = response.item == 1
return True
def test__alert_dialog_with_single_choice_list(): # {{{2
global skip_gui
if skip_gui:
return None
title = 'GUI Test?'
droid.dialogCreateAlert(title)
droid.dialogSetSingleChoiceItems(['Continue', 'Skip', 'baz'])
droid.dialogSetPositiveButtonText('Yay!')
droid.dialogShow()
response = droid.dialogGetResponse().result
choices = droid.dialogGetSelectedItems().result
skip_gui = 1 in choices
return True
def test__alert_dialog_with_multi_choice_list(): # {{{2
global skip_gui
if skip_gui:
return None
title = 'Alert'
droid.dialogCreateAlert(title)
droid.dialogSetMultiChoiceItems(['foo', 'bar', 'baz'], [])
droid.dialogSetPositiveButtonText('Yay!')
droid.dialogShow()
response = droid.dialogGetResponse().result
choices = droid.dialogGetSelectedItems().result
# print("debug:", choices)
skip_gui = 1 in choices
return True
# tests for native module {{{1
def test_ssl():
try:
import ssl
except:
return False
# TODO: make test method
ssl # missing ssl extension?
return True
def test_ctypes():
try:
import ctypes
except:
return False
# TODO: make test method
    ctypes  # on r17-22, this caused a segfault error.
return True
def test_readline():
try:
import readline
except:
return False
# TODO: make test method
readline
return True
def test0_curses(): # {{{2
import os
if not os.environ.get("TERM", ""):
os.environ["TERM"] = "vt100"
os.environ["TERMINFO"] = ("/data/data/com.googlecode.pythonforandroid"
"/files/python/share/terminfo")
try:
import _curses
except:
return False
_curses.initscr()
_curses.endwin()
return True
def test_termios():
try:
import termios
except:
return False
# TODO: make test method
termios
return True
def test_bz2():
try:
import bz2
except:
return False
# TODO: make test method
bz2
return True
def test_expat():
try:
import pyexpat
except:
return False
# TODO: make test method
pyexpat
return True
def test_sqlite3():
try:
import sqlite3
except:
return False
# TODO: make test method
sqlite3
return True
# tests for pure python module {{{1
def test_bs():
try:
import BeautifulSoup
except:
return False
# TODO: make test method
BeautifulSoup
return True
def test_xmpp():
try:
import xmpp
except:
return False
# TODO: make test method
xmpp
return True
if __name__ == '__main__': # {{{1
try:
import android
except:
import os
sys.path.insert(0, os.path.dirname(__file__))
import android_mock as android # for PC debugging
droid = android.Android()
def boilerplate(f):
try:
ret = f()
except:
print(traceback.format_exc())
return False
return ret
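    # Result convention used below (summary of existing logic, nothing new):
    #   True -> PASS, None -> SKIP (e.g. GUI tests when skip_gui is set),
    #   anything else (False or an exception) -> FAIL.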
seq = globals().items()
seq = [i for i in seq if i[0].startswith("test_")]
seq = [i for i in seq if isinstance(i[1], types.FunctionType)]
seq.sort(key=lambda x: x[0])
for name, value in seq:
if fOutName:
print('Running %s...' % name, end="")
f = boilerplate(value)
sys.stdout.flush()
if f is True:
print(' PASS')
elif f is None:
print(' SKIP')
else:
print(' FAIL')
else:
sys.stdout.flush()
f = boilerplate(value)
if f is True:
print(".", end="")
elif f is None:
print("S", end="")
else:
print("F:%s" % name, end="")
# vi: ft=python:et:ts=4:fdm=marker
|
|
"""Build base image and pod runtime.
The construction of a pod is divided into two phases:
* Base image construction: This sets up the basic environment of a pod.
Notably, /usr/sbin/pod-exit and /var/lib/pod/exit-status.
* Pod runtime: This includes systemd unit files and exit status.
"""
__all__ = [
# Expose to apps.
'cmd_build_base_image',
'cmd_init',
'cmd_prepare_base_rootfs',
'cmd_setup_base_rootfs',
# Expose to pods.
'clear_pod_app_exit_status',
'generate_machine_id',
'generate_unit_file',
'get_pod_app_exit_status',
]
import dataclasses
import enum
import logging
import re
from pathlib import Path
import g1.files
from g1 import scripts
from g1.bases import argparses
from g1.bases import datetimes
from g1.bases import oses
from g1.bases.assertions import ASSERT
from . import bases
from . import images
from . import models
LOG = logging.getLogger(__name__)
def cmd_init():
scripts.check_command_exist('debootstrap')
#
# Base image.
#
@argparses.begin_parser(
'build-base', **argparses.make_help_kwargs('build a base image')
)
@argparses.argument(
'--prune-stash-path',
metavar='PATH',
type=Path,
help='provide path to stash pruned files',
)
@images.image_output_arguments
@argparses.end
def cmd_build_base_image(name, version, base_image_path, prune_stash_path):
oses.assert_root_privilege()
LOG.info('create base image: %s', base_image_path)
images.build_image(
images.ImageMetadata(name=name, version=version),
lambda dst_path: _create_image_rootfs(dst_path, prune_stash_path),
base_image_path,
)
def _create_image_rootfs(image_rootfs_path, prune_stash_path):
cmd_prepare_base_rootfs(image_rootfs_path)
cmd_setup_base_rootfs(image_rootfs_path, prune_stash_path)
@argparses.begin_parser(
'prepare-base-rootfs',
**argparses.make_help_kwargs(
'prepare rootfs of a base image (useful for testing)',
),
)
@argparses.argument('path', type=Path, help='provide rootfs directory path')
@argparses.end
def cmd_prepare_base_rootfs(image_rootfs_path):
ASSERT.not_predicate(image_rootfs_path, Path.exists)
oses.assert_root_privilege()
scripts.run([
'debootstrap',
'--variant=minbase',
'--components=main',
# Install dbus for convenience.
# Install sudo for changing service user/group.
# Install tzdata for /etc/localtime.
'--include=dbus,sudo,systemd,tzdata',
models.BASE_IMAGE_RELEASE_CODE_NAME,
image_rootfs_path,
'http://us.archive.ubuntu.com/ubuntu/',
])
@argparses.begin_parser(
'setup-base-rootfs',
**argparses.make_help_kwargs(
'set up rootfs of a base image (useful for testing)',
),
)
@argparses.argument(
'--prune-stash-path',
metavar='PATH',
type=Path,
help='provide path to stash pruned files',
)
@argparses.argument('path', type=Path, help='provide rootfs directory path')
@argparses.end
def cmd_setup_base_rootfs(image_rootfs_path, prune_stash_path):
"""Set up base rootfs.
Changes from 18.04 to 20.04.
* /lib is now a symlink to /usr/lib.
* system.slice has been removed:
https://github.com/systemd/systemd/commit/d8e5a9338278d6602a0c552f01f298771a384798
"""
ASSERT.predicate(image_rootfs_path, Path.is_dir)
oses.assert_root_privilege()
# Remove unneeded files.
for dir_relpath in (
'usr/share/doc',
'usr/share/info',
'usr/share/man',
'var/cache',
'var/lib/apt',
'var/lib/dpkg',
):
dir_path = image_rootfs_path / dir_relpath
if dir_path.is_dir():
if prune_stash_path:
dst_path = ASSERT.not_predicate(
prune_stash_path / dir_relpath, g1.files.lexists
)
dst_path.mkdir(mode=0o755, parents=True, exist_ok=True)
_move_dir_content(dir_path, dst_path)
else:
_clear_dir_content(dir_path)
# Remove certain config files.
for path in (
# Remove this so that systemd-nspawn may set the hostname.
image_rootfs_path / 'etc/hostname',
# systemd-nspawn uses machine-id to link journal.
image_rootfs_path / 'etc/machine-id',
image_rootfs_path / 'var/lib/dbus/machine-id',
# debootstrap seems to copy this file from the build machine,
# which is not the host machine that runs this image; so let's
# replace this with a generic stub.
image_rootfs_path / 'etc/resolv.conf',
image_rootfs_path / 'run/systemd/resolve/stub-resolv.conf',
):
LOG.info('remove: %s', path)
g1.files.remove(path)
# Replace certain config files.
for path, content in (
(image_rootfs_path / 'etc/default/locale', _LOCALE),
(image_rootfs_path / 'etc/resolv.conf', _RESOLV_CONF),
(image_rootfs_path / 'etc/systemd/journald.conf', _JOURNALD_CONF),
):
LOG.info('replace: %s', path)
path.write_text(content)
# Remove unneeded unit files.
base_units = set(_BASE_UNITS)
for unit_dir_path in (
image_rootfs_path / 'etc/systemd/system',
image_rootfs_path / 'usr/lib/systemd/system',
):
if not unit_dir_path.exists():
continue
LOG.info('clean up unit files in: %s', unit_dir_path)
for unit_path in unit_dir_path.iterdir():
if unit_path.name in base_units:
base_units.remove(unit_path.name)
continue
            # There should be no duplicated units, right?
ASSERT.not_in(unit_path.name, _BASE_UNITS)
LOG.info('remove: %s', unit_path)
g1.files.remove(unit_path)
ASSERT.empty(base_units)
# Create unit files.
for unit_dir_path, unit_files in (
(image_rootfs_path / 'etc/systemd/system', _ETC_UNIT_FILES),
(image_rootfs_path / 'usr/lib/systemd/system', _LIB_UNIT_FILES),
):
for unit_file in unit_files:
ASSERT.predicate(unit_dir_path, Path.is_dir)
path = unit_dir_path / unit_file.relpath
LOG.info('create: %s', path)
if unit_file.kind is _UnitFile.Kinds.DIRECTORY:
path.mkdir(mode=0o755)
elif unit_file.kind is _UnitFile.Kinds.FILE:
path.write_text(unit_file.content)
path.chmod(0o644)
else:
ASSERT.is_(unit_file.kind, _UnitFile.Kinds.SYMLINK)
path.symlink_to(unit_file.content)
bases.chown_root(path)
# Create ``pod-exit`` script and exit status directory.
pod_exit_path = image_rootfs_path / 'usr/sbin/pod-exit'
LOG.info('create: %s', pod_exit_path)
pod_exit_path.write_text(_POD_EXIT)
bases.setup_file(pod_exit_path, 0o755, bases.chown_root)
bases.make_dir(image_rootfs_path / 'var/lib/pod', 0o755, bases.chown_root)
bases.make_dir(
image_rootfs_path / 'var/lib/pod/exit-status', 0o755, bases.chown_root
)
#
# Pod runtime.
#
def _get_pod_etc_path(root_path):
return root_path / 'etc/systemd/system'
def _get_pod_unit_path(pod_etc_path, app):
return pod_etc_path / _get_pod_unit_filename(app)
def _get_pod_unit_filename(app):
return app.name + '.service'
def _get_pod_wants_path(pod_etc_path, app):
return pod_etc_path / 'pod.target.wants' / _get_pod_unit_filename(app)
def _get_pod_app_exit_status_dir_path(root_path):
return root_path / 'var/lib/pod/exit-status'
def _get_pod_app_exit_status_path(root_path, app):
return (
_get_pod_app_exit_status_dir_path(root_path) /
_get_pod_unit_filename(app)
)
def generate_machine_id(root_path, machine_id):
machine_id_str = machine_id + '\n'
for path, mode in (
(root_path / 'etc/machine-id', 0o444),
(root_path / 'var/lib/dbus/machine-id', 0o644),
):
path.write_text(machine_id_str)
bases.setup_file(path, mode, bases.chown_root)
def generate_unit_file(root_path, pod_name, pod_version, app):
LOG.info('create unit file: %s', app.name)
pod_etc_path = ASSERT.predicate(_get_pod_etc_path(root_path), Path.is_dir)
ASSERT.not_predicate(
_get_pod_unit_path(pod_etc_path, app),
g1.files.lexists,
).write_text(_generate_unit_file_content(pod_name, pod_version, app))
ASSERT.not_predicate(
_get_pod_wants_path(pod_etc_path, app),
g1.files.lexists,
).symlink_to(Path('..') / _get_pod_unit_filename(app))
def _generate_unit_file_content(pod_name, pod_version, app):
if app.service_section is None:
if app.user != 'root' or app.group != 'root':
# Use ``sudo`` rather than "User=" and "Group=", or else
# "ExecStart" command will not be able to connect to journal
# socket, and pod-exit at "ExecStopPost" does not have the
# permission to stop the pod.
exec_start = [
'/usr/bin/sudo',
'--user=%s' % app.user,
'--group=%s' % app.group,
]
exec_start.extend(app.exec)
else:
exec_start = app.exec
# TODO: The default limit 1024 is too small for some server
# applications; we increase LimitNOFILE to 65536. But consider
        # making it configurable.
service_section = '''\
{service_type}\
Restart=no
SyslogIdentifier={pod_name}/{app.name}@{pod_version}
ExecStart={exec}
ExecStopPost=/usr/sbin/pod-exit "%n"
LimitNOFILE=65536'''.format(
app=app,
exec=' '.join(map(_quote_arg, exec_start)),
pod_name=pod_name,
pod_version=pod_version,
service_type=(
'Type=%s\n' % app.type if app.type is not None else ''
),
)
else:
service_section = app.service_section
return '''\
[Unit]
Conflicts=shutdown.target
Before=pod.target shutdown.target
[Service]
{service_section}
'''.format(service_section=service_section)
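# Illustrative example (hypothetical names, not from the original source): for
# an app "web" with user/group "app", exec ['/srv/web/bin/serve'], no explicit
# type, in pod "mypod" version "1.0", the generated unit roughly reads:
#
#   [Unit]
#   Conflicts=shutdown.target
#   Before=pod.target shutdown.target
#   [Service]
#   Restart=no
#   SyslogIdentifier=mypod/web@1.0
#   ExecStart="/usr/bin/sudo" "--user=app" "--group=app" "/srv/web/bin/serve"
#   ExecStopPost=/usr/sbin/pod-exit "%n"
#   LimitNOFILE=65536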
_ESCAPE_PATTERN = re.compile(r'[\'"$%]')
_ESCAPE_MAP = {
'\'': '\\\'',
'"': '\\"',
'$': '$$',
'%': '%%',
}
def _quote_arg(arg):
return '"%s"' % _ESCAPE_PATTERN.sub(
lambda match: _ESCAPE_MAP[match.group(0)],
# TODO: Handle '\' escape sequence.
ASSERT.not_contains(arg, '\\'),
)
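# Worked example (illustrative): _quote_arg('50% of $HOME') returns
# '"50%% of $$HOME"'; systemd later turns "%%" and "$$" back into the literal
# percent and dollar characters.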
def clear_pod_app_exit_status(root_path):
_clear_dir_content(_get_pod_app_exit_status_dir_path(root_path))
def get_pod_app_exit_status(root_path, app):
"""Return exit status and the time it was recorded."""
path = _get_pod_app_exit_status_path(root_path, app)
if path.is_file():
return (
int(path.read_text()),
datetimes.utcfromtimestamp(path.stat().st_mtime),
)
else:
return None, None
def _clear_dir_content(dir_path):
LOG.info('clear directory content: %s', dir_path)
for path in dir_path.iterdir():
g1.files.remove(path)
def _move_dir_content(src_path, dst_path):
LOG.info('move directory content: %s -> %s', src_path, dst_path)
for path in src_path.iterdir():
path.rename(dst_path / path.name)
#
# Base rootfs config data.
#
# Keep these unit files of the base image.
_BASE_UNITS = frozenset((
'ctrl-alt-del.target',
# D-Bus. With it we may ``machinectl shell`` into containers, which
# is probably bad for security, but is quite convenient.
'dbus.service',
'dbus.socket',
# Journal.
'systemd-journald-audit.socket',
'systemd-journald-dev-log.socket',
'systemd-journald.service',
'systemd-journald.socket',
'systemd-journal-flush.service',
# Slices.
'machine.slice',
'slices.target',
'user.slice',
# tmpfiles.
'systemd-tmpfiles-setup-dev.service',
'systemd-tmpfiles-setup.service',
))
@dataclasses.dataclass(frozen=True)
class _UnitFile:
"""Descriptor of systemd unit file of base image."""
class Kinds(enum.Enum):
DIRECTORY = enum.auto()
FILE = enum.auto()
SYMLINK = enum.auto()
relpath: str
kind: Kinds
content: str
@classmethod
def make_dir(cls, relpath):
return cls(
relpath=relpath, kind=_UnitFile.Kinds.DIRECTORY, content=None
)
@classmethod
def make_file(cls, relpath, content):
return cls(relpath=relpath, kind=_UnitFile.Kinds.FILE, content=content)
@classmethod
def make_symlink(cls, relpath, content):
return cls(
relpath=relpath, kind=_UnitFile.Kinds.SYMLINK, content=content
)
_LOCALE = 'LANG="en_US.UTF-8"\n'
_RESOLV_CONF = 'nameserver 8.8.8.8\n'
_JOURNALD_CONF = '''\
[Journal]
SystemMaxUse=64M
RuntimeMaxUse=64M
'''
# Add these unit files to the base image.
_ETC_UNIT_FILES = (
# Apps should make pod.target "wants" them.
_UnitFile.make_dir('pod.target.wants'),
)
_LIB_UNIT_FILES = (
# NOTE: Unit files must not be empty, or else systemd will treat
# them as masked.
#
# sysinit.target.
_UnitFile.make_file('sysinit.target', '[Unit]\n'),
_UnitFile.make_dir('sysinit.target.wants'),
*(
_UnitFile.make_symlink(
'sysinit.target.wants/' + unit_name,
'../' + unit_name,
) for unit_name in (
'dbus.service',
'systemd-journald.service',
'systemd-journal-flush.service',
'systemd-tmpfiles-setup-dev.service',
'systemd-tmpfiles-setup.service',
)
),
# sockets.target.
_UnitFile.make_file('sockets.target', '[Unit]\n'),
_UnitFile.make_dir('sockets.target.wants'),
*(
_UnitFile.make_symlink(
'sockets.target.wants/' + unit_name,
'../' + unit_name,
) for unit_name in (
'dbus.socket',
'systemd-journald-audit.socket',
'systemd-journald-dev-log.socket',
'systemd-journald.socket',
)
),
# basic.target.
_UnitFile.make_file(
'basic.target', '''\
[Unit]
Requires=sysinit.target
Wants=sockets.target slices.target
After=sysinit.target sockets.target slices.target
'''
),
# pod.target.
_UnitFile.make_file(
'pod.target', '''\
[Unit]
Requires=basic.target
After=basic.target
'''
),
_UnitFile.make_symlink('default.target', 'pod.target'),
# shutdown.target.
_UnitFile.make_file(
'shutdown.target', '''\
[Unit]
DefaultDependencies=no
RefuseManualStart=yes
'''
),
# exit.target.
_UnitFile.make_file(
'exit.target', '''\
[Unit]
DefaultDependencies=no
Requires=systemd-exit.service
After=systemd-exit.service
AllowIsolate=yes
'''
),
_UnitFile.make_file(
'systemd-exit.service', '''\
[Unit]
DefaultDependencies=no
Requires=shutdown.target
After=shutdown.target
[Service]
Type=oneshot
ExecStart=/bin/systemctl --force exit
'''
),
_UnitFile.make_symlink('halt.target', 'exit.target'),
_UnitFile.make_symlink('poweroff.target', 'exit.target'),
_UnitFile.make_symlink('reboot.target', 'exit.target'),
)
_POD_EXIT = '''#!/usr/bin/env bash
set -o errexit -o nounset -o pipefail
if [[ "${#}" -ne 1 ]]; then
systemctl exit 1
exit 1
fi
# Check whether there is already any status file.
has_status="$(ls -A /var/lib/pod/exit-status)"
status="$(systemctl show --property ExecMainStatus "${1}")"
status="${status#*=}"
status="${status:-1}"
echo "${status}" > "/var/lib/pod/exit-status/${1}"
# Check whether this is the first non-zero status.
if [[ "${status}" != 0 && -z "${has_status}" ]]; then
systemctl exit "${status}"
else
systemctl exit
fi
'''
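# Reading of the script above (descriptive only, no new behavior): each app's
# exit status is written to /var/lib/pod/exit-status/<unit>; a non-zero status
# is propagated with ``systemctl exit <status>`` only when it is the first
# status recorded, otherwise a plain ``systemctl exit`` is issued.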
|
|
'''
Test correctness of matvec for various cases.
'''
import dynamite_test_runner as dtr
import numpy as np
import hamiltonians
from dynamite import config
from dynamite.msc_tools import msc_dtype
from dynamite.operators import identity, sigmax, sigmay, index_sum, index_product
from dynamite.subspaces import Full, Parity, Auto, SpinConserve
from dynamite.states import State
from dynamite.tools import complex_enabled
def generate_hamiltonian_tests(cls):
for H_name, real in hamiltonians.names:
if not complex_enabled() and not real:
continue
setattr(cls, 'test_'+H_name, lambda self, n=H_name: self.check_hamiltonian(n))
return cls
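# Illustrative note (the name is hypothetical): for an entry ('heisenberg', True)
# in hamiltonians.names, the decorator above attaches a test_heisenberg method
# that simply calls self.check_hamiltonian('heisenberg').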
class FullSpace(dtr.DynamiteTestCase):
def check_nonzeros(self, state, nonzeros):
'''
Check that a vector has the correct nonzeros.
Parameters
----------
state : dynamite.states.State
The state containing the vector
nonzeros : dict
A dictionary, where the keys are the indices of the nonzero elements
and the values are the nonzero values
'''
        # first check that the state's norm is right (vec.norm() is the 2-norm,
        # so take the square root of the summed squared magnitudes)
        correct_norm = np.sqrt(sum(np.abs(v)**2 for v in nonzeros.values()))
        self.assertEqual(state.vec.norm(), correct_norm)
istart, iend = state.vec.getOwnershipRange()
for idx, val in nonzeros.items():
if istart <= idx < iend:
self.assertEqual(state.vec[idx], val, msg = 'idx: %d' % idx)
else:
# we have to do this for MPI
self.assertEqual(0, 0)
def test_identity(self):
s = State(state = 3)
r = identity() * s
correct = {3 : 1}
self.check_nonzeros(r, correct)
def test_spinflip(self):
H = index_product(sigmax())
s = State(state='D'*H.get_length())
r = H * s
correct = {0 : 1}
self.check_nonzeros(r, correct)
@generate_hamiltonian_tests
class FullHamiltonians(dtr.DynamiteTestCase):
def check_hamiltonian(self, H_name):
H = getattr(hamiltonians, H_name)()
bra, ket = H.create_states()
#ket.set_product(0)
ket.set_random(seed = 0)
#ket.vec.set(1)
H.dot(ket, bra)
self.assertLess(1E-3, bra.vec.norm(), msg = 'petsc vec norm incorrect')
ket_np = ket.to_numpy()
bra_check = bra.to_numpy()
if ket_np is not None:
self.assertNotEqual(np.linalg.norm(bra_check), 0, msg = 'numpy vec zero')
H_np = H.to_numpy()
bra_np = H_np.dot(ket_np)
inner_prod = bra_check.dot(bra_np.conj())
if inner_prod != 0:
inner_prod /= np.linalg.norm(bra_check) * np.linalg.norm(bra_np)
bad_idxs = np.where(np.abs(bra_check - bra_np) > 1E-12)[0]
msg = '\n'
for idx in bad_idxs:
msg += 'at {}: correct: {} check: {}\n'.format(idx, bra_np[idx], bra_check[idx])
else:
inner_prod = 1
msg = ''
self.assertLess(np.abs(1 - inner_prod), 1E-9, msg=msg)
@generate_hamiltonian_tests
class Subspaces(dtr.DynamiteTestCase):
def compare_to_full(self, H, x_sub, x_full, check_subspace):
'''
Compare multiplication under the full Hamiltonian to multiplication
in the subspace.
Parameters
----------
H : dynamite.operators.Operator
The operator to multiply.
        x_sub : dynamite.states.State
            The state to multiply, in the subspace ``check_subspace``
        x_full : dynamite.states.State
            The same state represented in the Full subspace
check_subspace : dynamite.subspace.Subspace
The subspace to multiply under.
'''
extra_conversion = isinstance(check_subspace, SpinConserve)
extra_conversion = extra_conversion and check_subspace.spinflip
# compare all possible combinations of going to and from the full space
self.assertTrue(isinstance(x_full.subspace, Full))
self.assertIs(x_sub.subspace, check_subspace)
to_space = identity()
if extra_conversion:
to_space.add_subspace(SpinConserve(check_subspace.L, check_subspace.k), Full())
else:
to_space.add_subspace(check_subspace, Full())
correct_full = State(subspace=Full())
H.dot(x_full, correct_full)
if extra_conversion:
tmp = State(subspace=to_space.left_subspace)
to_space.dot(correct_full, tmp)
correct_sub = SpinConserve.convert_spinflip(tmp, sign=check_subspace.spinflip)
else:
correct_sub = State(subspace=check_subspace)
to_space.dot(correct_full, correct_sub)
with self.subTest(which='s2s'):
self.check_s2s(H, x_sub, check_subspace, correct_sub)
if not extra_conversion:
with self.subTest(which='f2s'):
self.check_f2s(H, x_full, check_subspace, correct_sub)
with self.subTest(which='s2f'):
self.check_s2f(H, x_sub, check_subspace, correct_sub)
@classmethod
def generate_random_in_subspace(cls, space):
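        '''
        Return a random state in the given subspace along with the same state
        represented in the full space, obtained by applying the identity
        operator with appropriate left and right subspaces.
        '''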
x_sub = State(subspace=space, state='random', seed=0)
if isinstance(space, SpinConserve) and space.spinflip:
tmp = SpinConserve.convert_spinflip(x_sub)
else:
tmp = x_sub
from_space = identity()
from_space.add_subspace(Full(), tmp.subspace)
x_full = State(subspace=Full())
from_space.dot(tmp, x_full)
return x_sub, x_full
def check_f2s(self, H, x_full, check_subspace, correct):
'''
check multiplication from full to subspace
'''
H.add_subspace(check_subspace, Full())
result = State(subspace=check_subspace)
H.dot(x_full, result)
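        # scale the tolerance with the operator's nnz and the machine epsilon
        # of the coefficient data type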
eps = H.nnz*np.finfo(msc_dtype[2]).eps
self.check_vec_equal(correct, result, eps=eps)
def check_s2f(self, H, x_sub, check_subspace, correct):
'''
check multiplication from subspace to full
'''
H.add_subspace(Full(), check_subspace)
to_space = identity()
to_space.add_subspace(check_subspace, Full())
sub_state = State(subspace=check_subspace)
full_state = State(subspace=Full())
H.dot(x_sub, full_state)
to_space.dot(full_state, sub_state)
eps = H.nnz*np.finfo(msc_dtype[2]).eps
self.check_vec_equal(correct, sub_state, eps=eps)
def check_s2s(self, H, x_sub, check_subspace, correct):
'''
check multiplication from subspace to subspace
'''
H.add_subspace(check_subspace)
result = H.dot(x_sub)
eps = H.nnz*np.finfo(msc_dtype[2]).eps
self.check_vec_equal(correct, result, eps=eps)
def test_parity_XX_even(self):
H = index_sum(sigmax(0)*sigmax(1))
sp = Parity('even')
xs = self.generate_random_in_subspace(sp)
self.compare_to_full(H, *xs, sp)
def test_parity_XX_odd(self):
H = index_sum(sigmax(0)*sigmax(1))
sp = Parity('odd')
xs = self.generate_random_in_subspace(sp)
self.compare_to_full(H, *xs, sp)
def test_parity_YY_even(self):
H = index_sum(sigmay(0)*sigmay(1))
sp = Parity('even')
xs = self.generate_random_in_subspace(sp)
self.compare_to_full(H, *xs, sp)
def test_parity_YY_odd(self):
H = index_sum(sigmay(0)*sigmay(1))
sp = Parity('odd')
xs = self.generate_random_in_subspace(sp)
self.compare_to_full(H, *xs, sp)
def test_spin_conserve_half_filling(self):
H = index_sum(sigmax(0)*sigmax(1) + sigmay(0)*sigmay(1))
for spinflip in ['+', '-', None]:
if spinflip is not None and config.L%2 != 0:
continue
with self.subTest(spinflip=spinflip):
sp = SpinConserve(config.L, config.L//2, spinflip=spinflip)
xs = self.generate_random_in_subspace(sp)
self.compare_to_full(H, *xs, sp)
def test_spin_conserve_third_filling(self):
H = index_sum(sigmax(0)*sigmax(1) + sigmay(0)*sigmay(1))
sp = SpinConserve(config.L, config.L//3)
xs = self.generate_random_in_subspace(sp)
self.compare_to_full(H, *xs, sp)
def check_hamiltonian(self, H_name):
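        '''
        Run the full-vs-subspace comparison using Auto subspaces built from
        the Hamiltonian and a reference product state, with the sort option
        both enabled and disabled.
        '''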
for space in [1, 2]:
for sort in [True, False]:
with self.subTest(space=space):
with self.subTest(sort=sort):
H = getattr(hamiltonians, H_name)()
sp = Auto(H, (1 << (H.L//2))-space, sort=sort)
xs = self.generate_random_in_subspace(sp)
self.compare_to_full(H, *xs, sp)
# TODO: write tests where this is not just the identity
class Projection(dtr.DynamiteTestCase):
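    '''
    Test projecting states between subspaces by applying the identity
    operator with different left and right subspaces (see the TODO above
    about extending this beyond the identity).
    '''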
def check_projection(self, from_subspace, to_subspace):
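        '''
        Project a random state from from_subspace onto to_subspace, then
        check element-wise that basis states present in both subspaces keep
        their values and all other entries are zero.
        '''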
s = State(subspace=from_subspace)
s.set_random(seed=0)
r = State(subspace=to_subspace)
project = identity()
project.add_subspace(to_subspace, from_subspace)
project.dot(s, result=r)
s_np = s.to_numpy()
r_np = r.to_numpy()
from_states = set(from_subspace.idx_to_state(np.arange(from_subspace.get_dimension())))
if s_np is not None:
states = to_subspace.idx_to_state(np.arange(to_subspace.get_dimension()))
for i,state in enumerate(states):
if state not in from_states:
self.assertEqual(r_np[i], 0, msg=i)
else:
self.assertEqual(s_np[from_subspace.state_to_idx(state)], r_np[i])
def test_projections(self):
half_chain = config.L // 2
state = 'U'*half_chain + 'D'*(config.L-half_chain)
full = Full()
even_parity = Parity('even')
odd_parity = Parity('odd')
auto = Auto(hamiltonians.localized(), state)
subspace_list = [full, even_parity, odd_parity, auto]
for from_subspace in subspace_list:
for to_subspace in subspace_list:
with self.subTest(from_s=from_subspace, to_s=to_subspace):
self.check_projection(from_subspace, to_subspace)
if __name__ == '__main__':
dtr.main()
|