# -*- coding: utf-8 -*-
import unittest
from datetime import datetime
import json
import time
import sys
import re
import os
import responses
from tests import settings
from mangopay.utils import timestamp_from_date
today = datetime.utcnow().date()
today_timestamp = timestamp_from_date(today)
def get_fixture(name):
path = os.path.abspath(__file__)
fixtures_path = os.path.join(os.path.dirname(path), 'fixtures')
filepath = os.path.join(fixtures_path, '%s.json' % name)
if sys.version_info < (3, 0):
with open(filepath, 'r') as file:
return file.read()
with open(filepath, newline='', encoding='utf-8') as file:
return file.read()
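# Example (sketch): get_fixture('natural_user') returns the raw contents of
# 'natural_user.json' from the fixtures/ directory next to this file; the
# mock_* helpers below feed these strings to `responses` as canned HTTP bodies.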
class RegisteredMocks(unittest.TestCase):
def setUp(self):
self.mock_oauth()
    def register_mock(self, data):
        if isinstance(data, list):
            for d in data:
                self.register_mock(d)
            return
        match_querystring = bool(data.get('match_querystring', False))
        if isinstance(data['body'], (dict, list)):
            data['body'] = json.dumps(data['body'])
        responses.add(data['method'], data['url'],
                      body=data['body'], status=data['status'],
                      content_type='application/json',
                      match_querystring=match_querystring)
def mock_oauth(self):
self.register_mock({
'method': responses.POST,
'url': settings.MANGOPAY_API_SANDBOX_URL+'oauth/token',
'body': {
"access_token": "67b036bd007c40378d4be5a934f197e6",
"token_type": "Bearer",
"expires_in": 3600
},
'status': 200
})
def mock_natural_user(self):
self.register_mock({
'method': responses.POST,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users/natural',
'body': get_fixture('natural_user') % today_timestamp,
'status': 200
})
def mock_legal_user(self):
self.register_mock({
'method': responses.POST,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users/legal',
'body': get_fixture('legal_user') % today_timestamp,
'status': 200
})
def mock_user_wallet(self):
self.register_mock({
'method': responses.POST,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/wallets',
'body': get_fixture('user_wallet'),
'status': 200
})
def mock_natural_user_wallet(self):
self.register_mock([
{
'method': responses.POST,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/wallets',
'body': get_fixture('natural_user_wallet'),
'status': 200
},
{
'method': responses.GET,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/wallets/1169420',
'body': get_fixture('natural_user_wallet'),
'status': 200
}])
def mock_legal_user_wallet(self):
return self.register_mock([
{
'method': responses.POST,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/wallets',
'body': get_fixture('legal_user_wallet'),
'status': 200
},
{
'method': responses.GET,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/wallets/1169421',
'body': get_fixture('legal_user_wallet'),
'status': 200
}])
def mock_natural_user_wallet_9(self):
self.register_mock([
{
'method': responses.POST,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/wallets',
'body': get_fixture('natural_user_wallet_9'),
'status': 200
},
{
'method': responses.GET,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/wallets/1169420',
'body': get_fixture('natural_user_wallet_9'),
'status': 200
}])
def mock_legal_user_wallet_89(self):
self.register_mock([
{
'method': responses.POST,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/wallets',
'body': get_fixture('legal_user_wallet_89'),
'status': 200
},
{
'method': responses.GET,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/wallets/1169421',
'body': get_fixture('legal_user_wallet_89'),
'status': 200
}])
def mock_legal_user_wallet_99(self):
self.register_mock([
{
'method': responses.POST,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/wallets',
'body': get_fixture('legal_user_wallet_99'),
'status': 200
},
{
'method': responses.GET,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/wallets/1169421',
'body': get_fixture('legal_user_wallet_99'),
'status': 200
}])
def mock_card(self):
self.register_mock([
{
'method': responses.POST,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/cardregistrations',
'body': get_fixture('cardregistrations'),
'status': 200
},
{
'method': responses.PUT,
                'url': re.compile(settings.MANGOPAY_API_SANDBOX_URL + settings.MANGOPAY_CLIENT_ID + r'/cardregistrations/\d+'),
'body': get_fixture('cardregistrations_update'),
'status': 200
},
{
'method': responses.GET,
                'url': re.compile(settings.MANGOPAY_API_SANDBOX_URL + settings.MANGOPAY_CLIENT_ID + r'/cards/\d+'),
'body': get_fixture('card'),
'status': 200
}])
def mock_tokenization_request(self):
self.register_mock({
'method': responses.POST,
'url': 'https://homologation-webpayment.payline.com/webpayment/getToken',
'body': "data=gcpSOxwNHZutpFWmFCAYQu1kk25qPfJFdPaHT9kM3gKumDF3GeqSw8f-k8nh-s5OC3GNnhGoFONuAyg1RZQW6rVXooQ_ysKsz09HxQFEJfb-6H4zbY2Nnp1TliwkEFi4",
'status': 200
})
def mock_user_list_full(self):
self.register_mock({
'method': responses.GET,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users',
'body': get_fixture('user_list_full'),
'status': 200,
'match_querystring': True
})
def mock_user_list_2_per_page_page1(self):
self.register_mock({
'method': responses.GET,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users?page=1&per_page=2',
'body': get_fixture('user_list_2_per_page_page1'),
'status': 200,
'match_querystring': True
})
def mock_user_list_3_per_page_page2(self):
self.register_mock({
'method': responses.GET,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users?page=2&per_page=3',
'body': get_fixture('user_list_3_per_page_page2'),
'status': 200,
'match_querystring': True
})
def mock_user_list_page1(self):
self.register_mock({
'method': responses.GET,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users?page=1',
'body': get_fixture('user_list_page1'),
'status': 200,
'match_querystring': True
})
def mock_user_list_2_per_page(self):
self.register_mock({
'method': responses.GET,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users?per_page=2',
'body': get_fixture('user_list_2_per_page'),
'status': 200,
'match_querystring': True
})
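    # Usage sketch (hypothetical test, not part of this module): subclasses
    # activate the `responses` mocks and then exercise the SDK, e.g.
    #
    #   class NaturalUserTest(RegisteredMocks):
    #       @responses.activate
    #       def test_create(self):
    #           self.mock_natural_user()
    #           ...  # HTTP calls made by the mangopay SDK now hit the mocks above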
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-13 23:50
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('Lookup', '0001_initial'),
('Relationships', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Citations',
fields=[
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('citationid', models.AutoField(db_column='citationid', primary_key=True, serialize=False)),
('referencetext', models.CharField(blank=True, db_column='referencetext', max_length=50, null=True, verbose_name='description')),
('authorprimary', models.CharField(blank=True, db_column='authorprimary', max_length=255, null=True, verbose_name='primary author')),
('authorsecondary', models.CharField(blank=True, db_column='authorsecondary', max_length=255, null=True, verbose_name='secondary author')),
('placeofinterview', models.CharField(blank=True, db_column='placeofinterview', max_length=255, null=True, verbose_name='place of interview')),
('year', models.IntegerField(blank=True, db_column='year', null=True)),
('title', models.TextField(blank=True, db_column='title', null=True)),
('seriestitle', models.CharField(blank=True, db_column='seriestitle', max_length=255, null=True, verbose_name='series title')),
('seriesvolume', models.CharField(blank=True, db_column='seriesvolume', max_length=50, null=True, verbose_name='series volume')),
('serieseditor', models.CharField(blank=True, db_column='serieseditor', max_length=255, null=True, verbose_name='series editor')),
('publisher', models.CharField(blank=True, db_column='publisher', max_length=100, null=True)),
('publishercity', models.CharField(blank=True, db_column='publishercity', max_length=255, null=True, verbose_name='city')),
('preparedfor', models.CharField(blank=True, db_column='preparedfor', max_length=100, null=True, verbose_name='prepared_for')),
('comments', models.TextField(blank=True, db_column='comments', null=True)),
('authortype', models.ForeignKey(blank=True, db_column='authortype', max_length=255, null=True, on_delete=django.db.models.deletion.CASCADE, to='Lookup.LookupAuthorType', verbose_name='author type')),
('intervieweeid', models.ForeignKey(blank=True, db_column='intervieweeid', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='interviewee', to='Lookup.People', verbose_name='interviewee')),
('interviewerid', models.ForeignKey(blank=True, db_column='interviewerid', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='interviewer', to='Lookup.People', verbose_name='interviewer')),
('referencetype', models.ForeignKey(db_column='referencetype', help_text='Select a reference type to continue', max_length=255, on_delete=django.db.models.deletion.CASCADE, to='Lookup.LookupReferenceType', verbose_name='reference type')),
],
options={
'db_table': 'citations',
'verbose_name': 'Bibliographic Source',
'managed': True,
'verbose_name_plural': 'Bibliographic Sources',
},
),
migrations.CreateModel(
name='Locality',
fields=[
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('localityid', models.AutoField(db_column='localityid', primary_key=True, serialize=False)),
('englishname', models.CharField(blank=True, db_column='englishname', max_length=255, null=True, verbose_name='english name')),
('indigenousname', models.CharField(blank=True, db_column='indigenousname', max_length=255, null=True, verbose_name='indigenous name')),
('geometry', django.contrib.gis.db.models.fields.GeometryField(blank=True, default=None, null=True, srid=3857, verbose_name='Place Geometry')),
('Source', models.CharField(blank=True, db_column='source', default=None, max_length=255, null=True, verbose_name='source')),
('DigitizedBy', models.CharField(blank=True, db_column='digitizedby', default=None, max_length=255, null=True, verbose_name='digitized by')),
('DigitizedDate', models.DateTimeField(blank=True, db_column='digitizeddate', default=None, null=True, verbose_name='digitized date')),
('localitytype', models.ForeignKey(blank=True, db_column='localitytype', max_length=255, null=True, on_delete=django.db.models.deletion.CASCADE, to='Lookup.LookupLocalityType', verbose_name='type')),
],
options={
'db_table': 'locality',
'verbose_name': 'Locality',
'managed': True,
'verbose_name_plural': 'Localities',
},
),
migrations.CreateModel(
name='Media',
fields=[
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('mediaid', models.AutoField(db_column='mediaid', primary_key=True, serialize=False)),
('medianame', models.CharField(blank=True, db_column='medianame', max_length=255, null=True, verbose_name='name')),
('mediadescription', models.TextField(blank=True, db_column='mediadescription', null=True, verbose_name='description')),
('medialink', models.CharField(blank=True, db_column='medialink', max_length=255, null=True, verbose_name='historic location')),
('mediafile', models.FileField(blank=True, db_column='mediafile', max_length=255, null=True, upload_to='', verbose_name='file')),
('limitedaccess', models.NullBooleanField(db_column='limitedaccess', default=False, verbose_name='limited access?')),
('mediatype', models.ForeignKey(blank=True, db_column='mediatype', max_length=255, null=True, on_delete=django.db.models.deletion.CASCADE, to='Lookup.LookupMediaType', verbose_name='type')),
],
options={
'db_table': 'media',
'verbose_name': 'Medium',
'managed': True,
'verbose_name_plural': 'Media',
},
),
migrations.CreateModel(
name='Places',
fields=[
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('placeid', models.AutoField(db_column='placeid', primary_key=True, serialize=False)),
('indigenousplacename', models.CharField(blank=True, db_column='indigenousplacename', max_length=255, null=True, verbose_name='indigenous name')),
('indigenousplacenamemeaning', models.CharField(blank=True, db_column='indigenousplacenamemeaning', max_length=255, null=True, verbose_name='english translation')),
('englishplacename', models.CharField(blank=True, db_column='englishplacename', max_length=255, null=True, verbose_name='english name')),
('islocked', models.BooleanField(db_column='islocked', default=False, verbose_name='locked?')),
('geometry', django.contrib.gis.db.models.fields.GeometryField(blank=True, default=None, null=True, srid=3857, verbose_name='Place Geometry')),
('Source', models.CharField(blank=True, db_column='source', default=None, max_length=255, null=True, verbose_name='source')),
('DigitizedBy', models.CharField(blank=True, db_column='digitizedby', default=None, max_length=255, null=True, verbose_name='digitized by')),
('DigitizedDate', models.DateTimeField(blank=True, db_column='digitizeddate', default=None, null=True, verbose_name='digitized date')),
('planningunitid', models.ForeignKey(blank=True, db_column='planningunitid', null=True, on_delete=django.db.models.deletion.CASCADE, to='Lookup.LookupPlanningUnit', verbose_name='planning unit')),
('primaryhabitat', models.ForeignKey(blank=True, db_column='primaryhabitat', max_length=100, null=True, on_delete=django.db.models.deletion.CASCADE, to='Lookup.LookupHabitat', verbose_name='primary habitat')),
('tribeid', models.ForeignKey(blank=True, db_column='tribeid', null=True, on_delete=django.db.models.deletion.CASCADE, to='Lookup.LookupTribe', verbose_name='tribe')),
],
options={
'db_table': 'places',
'verbose_name': 'Place',
'managed': True,
'verbose_name_plural': 'Places',
},
),
migrations.CreateModel(
name='Resources',
fields=[
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('resourceid', models.AutoField(db_column='resourceid', primary_key=True, serialize=False)),
('commonname', models.CharField(blank=True, db_column='commonname', max_length=255, null=True, unique=True, verbose_name='common name')),
('indigenousname', models.CharField(blank=True, db_column='indigenousname', max_length=255, null=True, verbose_name='indigenous name')),
('genus', models.CharField(blank=True, db_column='genus', max_length=255, null=True, verbose_name='genus')),
('species', models.CharField(blank=True, db_column='species', max_length=255, null=True)),
('specific', models.BooleanField(db_column='specific', default=False)),
('islocked', models.BooleanField(db_column='islocked', default=False, verbose_name='locked?')),
('resourceclassificationgroup', models.ForeignKey(blank=True, db_column='resourceclassificationgroup', max_length=255, null=True, on_delete=django.db.models.deletion.CASCADE, to='Lookup.LookupResourceGroup', verbose_name='broad species group')),
],
options={
'db_table': 'resources',
'verbose_name': 'Resource',
'managed': True,
'verbose_name_plural': 'Resources',
},
),
migrations.CreateModel(
name='ResourcesActivityEvents',
fields=[
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('resourceactivityid', models.AutoField(db_column='resourceactivityid', primary_key=True, serialize=False)),
('relationshipdescription', models.TextField(blank=True, db_column='relationshipdescription', null=True, verbose_name='excerpt')),
('activitylongdescription', models.TextField(blank=True, db_column='activitylongdescription', null=True, verbose_name='full activity description')),
('gear', models.CharField(blank=True, db_column='gear', max_length=255, null=True)),
('customaryuse', models.CharField(blank=True, db_column='customaryuse', max_length=255, null=True, verbose_name='customary use')),
('timingdescription', models.CharField(blank=True, db_column='timingdescription', max_length=255, null=True, verbose_name='timing description')),
('islocked', models.BooleanField(db_column='islocked', default=False, verbose_name='locked?')),
('activityshortdescription', models.ForeignKey(blank=True, db_column='activityshortdescription', max_length=255, null=True, on_delete=django.db.models.deletion.CASCADE, to='Lookup.LookupActivity', verbose_name='activity type')),
('participants', models.ForeignKey(blank=True, db_column='participants', max_length=50, null=True, on_delete=django.db.models.deletion.CASCADE, to='Lookup.LookupParticipants')),
('partused', models.ForeignKey(blank=True, db_column='partused', max_length=255, null=True, on_delete=django.db.models.deletion.CASCADE, to='Lookup.LookupPartUsed', verbose_name='part used')),
('placeresourceid', models.ForeignKey(db_column='placeresourceid', on_delete=django.db.models.deletion.CASCADE, to='Relationships.PlacesResourceEvents', verbose_name='place resource')),
('technique', models.ForeignKey(blank=True, db_column='technique', max_length=255, null=True, on_delete=django.db.models.deletion.CASCADE, to='Lookup.LookupTechniques')),
('timing', models.ForeignKey(blank=True, db_column='timing', max_length=255, null=True, on_delete=django.db.models.deletion.CASCADE, to='Lookup.LookupTiming')),
],
options={
'db_table': 'resourcesactivityevents',
'verbose_name': 'Activity',
'managed': True,
'verbose_name_plural': 'Activities',
},
),
migrations.AddField(
model_name='locality',
name='placeid',
field=models.ForeignKey(blank=True, db_column='placeid', null=True, on_delete=django.db.models.deletion.CASCADE, to='TEKDB.Places', verbose_name='place'),
),
]
"""Recurrence Operators"""
from __future__ import print_function, division
from sympy import symbols, Symbol, S
from sympy.printing import sstr
from sympy.core.compatibility import range, string_types
from sympy.core.sympify import sympify
def RecurrenceOperators(base, generator):
"""
Returns an Algebra of Recurrence Operators and the operator for
shifting i.e. the `Sn` operator.
The first argument needs to be the base polynomial ring for the algebra
and the second argument must be a generator which can be either a
noncommutative Symbol or a string.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> from sympy.holonomic.recurrence import RecurrenceOperators
>>> n = symbols('n', integer=True)
>>> R, Sn = RecurrenceOperators(ZZ.old_poly_ring(n), 'Sn')
"""
ring = RecurrenceOperatorAlgebra(base, generator)
return (ring, ring.shift_operator)
class RecurrenceOperatorAlgebra(object):
"""
A Recurrence Operator Algebra is a set of noncommutative polynomials
in intermediate `Sn` and coefficients in a base ring A. It follows the
commutation rule:
Sn * a(n) = a(n + 1) * Sn
This class represents a Recurrence Operator Algebra and serves as the parent ring
for Recurrence Operators.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> from sympy.holonomic.recurrence import RecurrenceOperators
>>> n = symbols('n', integer=True)
>>> R, Sn = RecurrenceOperators(ZZ.old_poly_ring(n), 'Sn')
>>> R
Univariate Recurrence Operator Algebra in intermediate Sn over the base ring
ZZ[n]
See Also
========
RecurrenceOperator
"""
def __init__(self, base, generator):
# the base ring for the algebra
self.base = base
# the operator representing shift i.e. `Sn`
self.shift_operator = RecurrenceOperator(
[base.zero, base.one], self)
if generator is None:
self.gen_symbol = symbols('Sn', commutative=False)
else:
if isinstance(generator, string_types):
self.gen_symbol = symbols(generator, commutative=False)
elif isinstance(generator, Symbol):
self.gen_symbol = generator
def __str__(self):
string = 'Univariate Recurrence Operator Algebra in intermediate '\
+ sstr(self.gen_symbol) + ' over the base ring ' + \
(self.base).__str__()
return string
__repr__ = __str__
def __eq__(self, other):
if self.base == other.base and self.gen_symbol == other.gen_symbol:
return True
else:
return False
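# Helper: add two operator coefficient lists term-by-term, keeping the tail of
# the longer list (i.e. the shorter list is implicitly padded with zeros).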
def _add_lists(list1, list2):
if len(list1) <= len(list2):
sol = [a + b for a, b in zip(list1, list2)] + list2[len(list1):]
else:
sol = [a + b for a, b in zip(list1, list2)] + list1[len(list2):]
return sol
class RecurrenceOperator(object):
"""
The Recurrence Operators are defined by a list of polynomials
in the base ring and the parent ring of the Operator.
Takes a list of polynomials for each power of Sn and the
parent ring which must be an instance of RecurrenceOperatorAlgebra.
A Recurrence Operator can be created easily using
the operator `Sn`. See examples below.
Examples
========
>>> from sympy.holonomic.recurrence import RecurrenceOperator, RecurrenceOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> n = symbols('n', integer=True)
>>> R, Sn = RecurrenceOperators(ZZ.old_poly_ring(n),'Sn')
>>> RecurrenceOperator([0, 1, n**2], R)
(1)Sn + (n**2)Sn**2
>>> Sn*n
(n + 1)Sn
>>> n*Sn*n + 1 - Sn**2*n
(1) + (n**2 + n)Sn + (-n - 2)Sn**2
See Also
========
DifferentialOperatorAlgebra
"""
_op_priority = 20
def __init__(self, list_of_poly, parent):
# the parent ring for this operator
# must be an RecurrenceOperatorAlgebra object
self.parent = parent
# sequence of polynomials in n for each power of Sn
# represents the operator
# convert the expressions into ring elements using from_sympy
if isinstance(list_of_poly, list):
for i, j in enumerate(list_of_poly):
if isinstance(j, int):
list_of_poly[i] = self.parent.base.from_sympy(S(j))
elif not isinstance(j, self.parent.base.dtype):
list_of_poly[i] = self.parent.base.from_sympy(j)
self.listofpoly = list_of_poly
self.order = len(self.listofpoly) - 1
def __mul__(self, other):
"""
Multiplies two Operators and returns another
RecurrenceOperator instance using the commutation rule
Sn * a(n) = a(n + 1) * Sn
"""
listofself = self.listofpoly
base = self.parent.base
if not isinstance(other, RecurrenceOperator):
if not isinstance(other, self.parent.base.dtype):
listofother = [self.parent.base.from_sympy(sympify(other))]
else:
listofother = [other]
else:
listofother = other.listofpoly
# multiply a polynomial `b` with a list of polynomials
def _mul_dmp_diffop(b, listofother):
if isinstance(listofother, list):
sol = []
for i in listofother:
sol.append(i * b)
return sol
else:
return [b * listofother]
sol = _mul_dmp_diffop(listofself[0], listofother)
# compute Sn^i * b
def _mul_Sni_b(b):
sol = [base.zero]
if isinstance(b, list):
for i in b:
j = base.to_sympy(i).subs(base.gens[0], base.gens[0] + S.One)
sol.append(base.from_sympy(j))
else:
j = b.subs(base.gens[0], base.gens[0] + S.One)
sol.append(base.from_sympy(j))
return sol
for i in range(1, len(listofself)):
# find Sn^i * b in ith iteration
listofother = _mul_Sni_b(listofother)
# solution = solution + listofself[i] * (Sn^i * b)
sol = _add_lists(sol, _mul_dmp_diffop(listofself[i], listofother))
return RecurrenceOperator(sol, self.parent)
def __rmul__(self, other):
if not isinstance(other, RecurrenceOperator):
if isinstance(other, int):
other = S(other)
if not isinstance(other, self.parent.base.dtype):
other = (self.parent.base).from_sympy(other)
sol = []
for j in self.listofpoly:
sol.append(other * j)
return RecurrenceOperator(sol, self.parent)
def __add__(self, other):
if isinstance(other, RecurrenceOperator):
sol = _add_lists(self.listofpoly, other.listofpoly)
return RecurrenceOperator(sol, self.parent)
else:
if isinstance(other, int):
other = S(other)
list_self = self.listofpoly
if not isinstance(other, self.parent.base.dtype):
list_other = [((self.parent).base).from_sympy(other)]
else:
list_other = [other]
sol = []
sol.append(list_self[0] + list_other[0])
sol += list_self[1:]
return RecurrenceOperator(sol, self.parent)
__radd__ = __add__
def __sub__(self, other):
return self + (-1) * other
def __rsub__(self, other):
return (-1) * self + other
def __pow__(self, n):
if n == 1:
return self
if n == 0:
return RecurrenceOperator([self.parent.base.one], self.parent)
# if self is `Sn`
if self.listofpoly == self.parent.shift_operator.listofpoly:
sol = []
for i in range(0, n):
sol.append(self.parent.base.zero)
sol.append(self.parent.base.one)
return RecurrenceOperator(sol, self.parent)
else:
if n % 2 == 1:
powreduce = self**(n - 1)
return powreduce * self
elif n % 2 == 0:
                powreduce = self**(n // 2)
return powreduce * powreduce
def __str__(self):
listofpoly = self.listofpoly
print_str = ''
for i, j in enumerate(listofpoly):
if j == self.parent.base.zero:
continue
if i == 0:
print_str += '(' + sstr(j) + ')'
continue
if print_str:
print_str += ' + '
if i == 1:
print_str += '(' + sstr(j) + ')Sn'
continue
print_str += '(' + sstr(j) + ')' + 'Sn**' + sstr(i)
return print_str
__repr__ = __str__
def __eq__(self, other):
if isinstance(other, RecurrenceOperator):
if self.listofpoly == other.listofpoly and self.parent == other.parent:
return True
else:
return False
else:
if self.listofpoly[0] == other:
for i in self.listofpoly[1:]:
                    if i != self.parent.base.zero:
return False
return True
else:
return False
class HolonomicSequence(object):
"""
    A Holonomic Sequence is a sequence satisfying a linear homogeneous
    recurrence relation with polynomial coefficients. Equivalently, a sequence
    is holonomic if and only if its generating function is a holonomic function.
"""
def __init__(self, recurrence, u0=[]):
self.recurrence = recurrence
if not isinstance(u0, list):
self.u0 = [u0]
else:
self.u0 = u0
if len(self.u0) == 0:
self._have_init_cond = False
else:
self._have_init_cond = True
self.n = recurrence.parent.base.gens[0]
def __repr__(self):
str_sol = 'HolonomicSequence(%s, %s)' % ((self.recurrence).__repr__(), sstr(self.n))
if not self._have_init_cond:
return str_sol
else:
cond_str = ''
seq_str = 0
for i in self.u0:
cond_str += ', u(%s) = %s' % (sstr(seq_str), sstr(i))
seq_str += 1
sol = str_sol + cond_str
return sol
__str__ = __repr__
def __eq__(self, other):
if self.recurrence == other.recurrence:
if self.n == other.n:
if self._have_init_cond and other._have_init_cond:
if self.u0 == other.u0:
return True
else:
return False
else:
return True
else:
return False
else:
return False
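# Usage sketch (illustrative, following the doctest setup used earlier in this
# module): the Fibonacci numbers satisfy u(n + 2) - u(n + 1) - u(n) = 0, so the
# operator Sn**2 - Sn - 1 annihilates them and the sequence can be written as
#
#   >>> from sympy.holonomic.recurrence import RecurrenceOperators, HolonomicSequence
#   >>> from sympy.polys.domains import QQ
#   >>> from sympy import symbols
#   >>> n = symbols('n', integer=True)
#   >>> R, Sn = RecurrenceOperators(QQ.old_poly_ring(n), 'Sn')
#   >>> fib = HolonomicSequence(Sn**2 - Sn - 1, [0, 1])  # u(0) = 0, u(1) = 1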
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import re
import os
import json
import shutil
import tempfile
import time
import unittest
import itertools
import urllib
import mock
import flask
from gevent import monkey
monkey.patch_all()
from bs4 import BeautifulSoup
import PIL.Image
from urlparse import urlparse
from cStringIO import StringIO
try:
import caffe_pb2
except ImportError:
# See issue #32
from caffe.proto import caffe_pb2
import digits.webapp
import digits.test_views
import digits.dataset.images.classification.test_views
from digits.config import config_value
# May be too short on a slow system
TIMEOUT_DATASET = 15
TIMEOUT_MODEL = 20
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(digits.test_views.BaseViewsTest):
"""
Provides some functions
"""
CAFFE_NETWORK = \
"""
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
"""
@classmethod
def model_exists(cls, job_id):
return cls.job_exists(job_id, 'models')
@classmethod
def model_status(cls, job_id):
return cls.job_status(job_id, 'models')
@classmethod
def abort_model(cls, job_id):
return cls.abort_job(job_id, job_type='models')
@classmethod
def model_wait_completion(cls, job_id, **kwargs):
kwargs['job_type'] = 'models'
if 'timeout' not in kwargs:
kwargs['timeout'] = TIMEOUT_MODEL
return cls.job_wait_completion(job_id, **kwargs)
@classmethod
def delete_model(cls, job_id):
return cls.delete_job(job_id, job_type='models')
@classmethod
def network(cls):
return cls.CAFFE_NETWORK
class BaseViewsTestWithDataset(BaseViewsTest,
digits.dataset.images.classification.test_views.BaseViewsTestWithDataset):
"""
Provides a dataset
"""
# Inherited classes may want to override these attributes
CROP_SIZE = None
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithDataset, cls).setUpClass()
cls.created_models = []
@classmethod
def tearDownClass(cls):
# delete any created datasets
for job_id in cls.created_models:
cls.delete_model(job_id)
super(BaseViewsTestWithDataset, cls).tearDownClass()
@classmethod
def create_model(cls, **kwargs):
"""
Create a model
Returns the job_id
Raise RuntimeError if job fails to create
Keyword arguments:
**kwargs -- data to be sent with POST request
"""
data = {
'model_name': 'test_model',
'dataset': cls.dataset_id,
'method': 'custom',
'custom_network': cls.network(),
'batch_size': 10,
'train_epochs': 1,
'framework' : cls.FRAMEWORK
}
if cls.CROP_SIZE is not None:
data['crop_size'] = cls.CROP_SIZE
data.update(kwargs)
request_json = data.pop('json', False)
url = '/models/images/classification'
if request_json:
url += '.json'
rv = cls.app.post(url, data=data)
if request_json:
if rv.status_code != 200:
print json.loads(rv.data)
raise RuntimeError('Model creation failed with %s' % rv.status_code)
return json.loads(rv.data)['id']
# expect a redirect
if not 300 <= rv.status_code <= 310:
print 'Status code:', rv.status_code
s = BeautifulSoup(rv.data)
div = s.select('div.alert-danger')
if div:
raise RuntimeError(div[0])
else:
raise RuntimeError('Failed to create model')
job_id = cls.job_id_from_response(rv)
assert cls.model_exists(job_id), 'model not found after successful creation'
cls.created_models.append(job_id)
return job_id
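    # Example (sketch): cls.create_model(json=True, train_epochs=2) POSTs the
    # default form data with train_epochs overridden, hits the .json endpoint,
    # and returns the new job id parsed from the JSON response.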
class BaseViewsTestWithModel(BaseViewsTestWithDataset):
"""
Provides a model
"""
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithModel, cls).setUpClass()
cls.model_id = cls.create_model(json=True)
assert cls.model_wait_completion(cls.model_id) == 'Done', 'create failed'
class BaseTestViews(BaseViewsTest):
"""
Tests which don't require a dataset or a model
"""
def test_page_model_new(self):
rv = self.app.get('/models/images/classification/new')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
assert 'New Image Classification Model' in rv.data, 'unexpected page format'
def test_nonexistent_model(self):
assert not self.model_exists('foo'), "model shouldn't exist"
def test_visualize_network(self):
rv = self.app.post('/models/visualize-network?framework='+self.FRAMEWORK,
data = {'custom_network': self.network()}
)
s = BeautifulSoup(rv.data)
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
image = s.select('img')
        assert image, "didn't return an image"
class BaseTestCreation(BaseViewsTestWithDataset):
"""
Model creation tests
"""
def test_create_json(self):
job_id = self.create_model(json=True)
self.abort_model(job_id)
def test_create_delete(self):
job_id = self.create_model()
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
def test_create_wait_delete(self):
job_id = self.create_model()
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
def test_create_abort_delete(self):
job_id = self.create_model()
assert self.abort_model(job_id) == 200, 'abort failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
def test_snapshot_interval_2(self):
job_id = self.create_model(snapshot_interval=0.5)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
rv = self.app.get('/models/%s.json' % job_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert len(content['snapshots']) > 1, 'should take >1 snapshot'
def test_snapshot_interval_0_5(self):
job_id = self.create_model(train_epochs=4, snapshot_interval=2)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
rv = self.app.get('/models/%s.json' % job_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert len(content['snapshots']) == 2, 'should take 2 snapshots'
@unittest.skipIf(
not config_value('gpu_list'),
'no GPUs selected')
@unittest.skipIf(
not config_value('caffe_root')['cuda_enabled'],
'CUDA disabled')
@unittest.skipIf(
config_value('caffe_root')['multi_gpu'],
'multi-GPU enabled')
def test_select_gpu(self):
for index in config_value('gpu_list').split(','):
yield self.check_select_gpu, index
def check_select_gpu(self, gpu_index):
job_id = self.create_model(select_gpu=gpu_index)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
@unittest.skipIf(
not config_value('gpu_list'),
'no GPUs selected')
@unittest.skipIf(
not config_value('caffe_root')['cuda_enabled'],
'CUDA disabled')
@unittest.skipIf(
not config_value('caffe_root')['multi_gpu'],
'multi-GPU disabled')
def test_select_gpus(self):
# test all possible combinations
gpu_list = config_value('gpu_list').split(',')
for i in xrange(len(gpu_list)):
for combination in itertools.combinations(gpu_list, i+1):
yield self.check_select_gpus, combination
def check_select_gpus(self, gpu_list):
job_id = self.create_model(select_gpus_list=','.join(gpu_list), batch_size=len(gpu_list))
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
def test_retrain(self):
job1_id = self.create_model()
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s.json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least one snapshot'
options = {
'method': 'previous',
'previous_networks': job1_id,
}
options['%s-snapshot' % job1_id] = content['snapshots'][-1]
job2_id = self.create_model(**options)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
def test_retrain_twice(self):
# retrain from a job which already had a pretrained model
job1_id = self.create_model()
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s.json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least one snapshot'
options_2 = {
'method': 'previous',
'previous_networks': job1_id,
}
options_2['%s-snapshot' % job1_id] = content['snapshots'][-1]
job2_id = self.create_model(**options_2)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
options_3 = {
'method': 'previous',
'previous_networks': job2_id,
}
options_3['%s-snapshot' % job2_id] = -1
job3_id = self.create_model(**options_3)
assert self.model_wait_completion(job3_id) == 'Done', 'third job failed'
class BaseTestCreated(BaseViewsTestWithModel):
"""
Tests on a model that has already been created
"""
def test_save(self):
job = digits.webapp.scheduler.get_job(self.model_id)
assert job.save(), 'Job failed to save'
def test_download(self):
for extension in ['tar', 'zip', 'tar.gz', 'tar.bz2']:
yield self.check_download, extension
def check_download(self, extension):
url = '/models/%s/download.%s' % (self.model_id, extension)
rv = self.app.get(url)
assert rv.status_code == 200, 'download "%s" failed with %s' % (url, rv.status_code)
def test_index_json(self):
rv = self.app.get('/index.json')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
found = False
for m in content['models']:
if m['id'] == self.model_id:
found = True
break
assert found, 'model not found in list'
def test_models_page(self):
rv = self.app.get('/models', follow_redirects=True)
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
assert 'Models' in rv.data, 'unexpected page format'
def test_model_json(self):
rv = self.app.get('/models/%s.json' % self.model_id)
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert content['id'] == self.model_id, 'id %s != %s' % (content['id'], self.model_id)
assert content['dataset_id'] == self.dataset_id, 'dataset_id %s != %s' % (content['dataset_id'], self.dataset_id)
assert len(content['snapshots']) > 0, 'no snapshots in list'
def test_classify_one(self):
category = self.imageset_paths.keys()[0]
image_path = self.imageset_paths[category][0]
image_path = os.path.join(self.imageset_folder, image_path)
with open(image_path,'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/classification/classify_one?job_id=%s' % self.model_id,
data = {
'image_file': image_upload,
'show_visualizations': 'y',
}
)
s = BeautifulSoup(rv.data)
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
# gets an array of arrays [[confidence, label],...]
predictions = [p.get_text().split() for p in s.select('ul.list-group li')]
assert predictions[0][1] == category, 'image misclassified'
def test_classify_one_json(self):
category = self.imageset_paths.keys()[0]
image_path = self.imageset_paths[category][0]
image_path = os.path.join(self.imageset_folder, image_path)
with open(image_path,'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/classification/classify_one.json?job_id=%s' % self.model_id,
data = {
'image_file': image_upload,
'show_visualizations': 'y',
}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
assert data['predictions'][0][0] == category, 'image misclassified'
def test_classify_many(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many?job_id=%s' % self.model_id,
data = {'image_list': file_upload}
)
s = BeautifulSoup(rv.data)
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_classify_many_json(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many.json?job_id=%s' % self.model_id,
data = {'image_list': file_upload}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
assert 'classifications' in data, 'invalid response'
def test_top_n(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/top_n?job_id=%s' % self.model_id,
data = {'image_list': file_upload}
)
s = BeautifulSoup(rv.data)
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
keys = self.imageset_paths.keys()
for key in keys:
            assert key in rv.data, '"%s" not found in the response' % key
class BaseTestDatasetModelInteractions(BaseViewsTestWithDataset):
"""
Test the interactions between datasets and models
"""
# If you try to create a model using a deleted dataset, it should fail
def test_create_model_deleted_dataset(self):
dataset_id = self.create_dataset()
assert self.delete_dataset(dataset_id) == 200, 'delete failed'
assert not self.dataset_exists(dataset_id), 'dataset exists after delete'
try:
model_id = self.create_model(dataset=dataset_id)
except RuntimeError:
return
assert False, 'Should have failed'
# If you try to create a model using a running dataset,
# it should wait to start until the dataset is completed
def test_create_model_running_dataset(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
# Model should be in WAIT status while dataset is running
# Copying functionality from job_wait_completion ...
start_time = time.time()
timeout = TIMEOUT_DATASET
dataset_status = self.dataset_status(dataset_id)
while dataset_status != 'Done':
model_status = self.model_status(model_id)
if model_status == 'Initialized':
# give it some time ...
pass
elif model_status == 'Waiting':
# That's what we were waiting for
break
else:
raise Exception('Model not waiting - "%s"' % model_status)
assert (time.time() - start_time) < timeout, 'Job took more than %s seconds' % timeout
time.sleep(0.5)
dataset_status = self.dataset_status(dataset_id)
# Model should switch to RUN status after dataset is DONE
assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
time.sleep(1)
assert self.model_status(model_id) in ['Running', 'Done'], "model didn't start"
self.abort_model(model_id)
# If you try to delete a completed dataset with a dependent model, it should fail
def test_delete_dataset_dependent_model(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
self.abort_model(model_id)
# If you try to delete a running dataset with a dependent model, it should fail
def test_delete_running_dataset_dependent_model(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
self.abort_dataset(dataset_id)
self.abort_model(model_id)
class BaseTestCreatedWide(BaseTestCreated):
IMAGE_WIDTH = 20
class BaseTestCreatedTall(BaseTestCreated):
IMAGE_HEIGHT = 20
class BaseTestCreatedCropInForm(BaseTestCreated):
CROP_SIZE = 8
class BaseTestCreatedCropInNetwork(BaseTestCreated):
CAFFE_NETWORK = \
"""
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
crop_size: 8
}
}
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
crop_size: 8
}
}
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
"""
################################################################################
# Test classes
################################################################################
class TestCaffeViews(BaseTestViews):
FRAMEWORK = 'caffe'
class TestCaffeCreation(BaseTestCreation):
FRAMEWORK = 'caffe'
class TestCaffeCreated(BaseTestCreated):
FRAMEWORK = 'caffe'
class TestCaffeDatasetModelInteractions(BaseTestDatasetModelInteractions):
FRAMEWORK = 'caffe'
class TestCaffeCreatedWide(BaseTestCreatedWide):
FRAMEWORK = 'caffe'
class TestCaffeCreatedTall(BaseTestCreatedTall):
FRAMEWORK = 'caffe'
class TestCaffeCreatedCropInForm(BaseTestCreatedCropInForm):
FRAMEWORK = 'caffe'
class TestCaffeCreatedCropInNetwork(BaseTestCreatedCropInNetwork):
FRAMEWORK = 'caffe'
class TestCaffeLeNet(TestCaffeCreated):
IMAGE_WIDTH = 28
IMAGE_HEIGHT = 28
CAFFE_NETWORK=open(
os.path.join(
os.path.dirname(digits.__file__),
'standard-networks', 'caffe', 'lenet.prototxt')
).read()
"""
Giving models a custom manager
You can use a custom ``Manager`` in a particular model by extending the base
``Manager`` class and instantiating your custom ``Manager`` in your model.
There are two reasons you might want to customize a ``Manager``: to add extra
``Manager`` methods, and/or to modify the initial ``QuerySet`` the ``Manager``
returns.
"""
from __future__ import unicode_literals
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class PersonManager(models.Manager):
def get_fun_people(self):
return self.filter(fun=True)
class PublishedBookManager(models.Manager):
def get_queryset(self):
return super(PublishedBookManager, self).get_queryset().filter(is_published=True)
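# Usage sketch (assuming the Person and Book models defined below):
#
#   Person.objects.get_fun_people()   # extra Manager method, same as Person.objects.filter(fun=True)
#   Book.published_objects.all()      # modified initial QuerySet: only rows with is_published=True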
class CustomQuerySet(models.QuerySet):
def filter(self, *args, **kwargs):
queryset = super(CustomQuerySet, self).filter(fun=True)
queryset._filter_CustomQuerySet = True
return queryset
def public_method(self, *args, **kwargs):
return self.all()
def _private_method(self, *args, **kwargs):
return self.all()
def optout_public_method(self, *args, **kwargs):
return self.all()
optout_public_method.queryset_only = True
def _optin_private_method(self, *args, **kwargs):
return self.all()
_optin_private_method.queryset_only = False
class BaseCustomManager(models.Manager):
def __init__(self, arg):
super(BaseCustomManager, self).__init__()
self.init_arg = arg
def filter(self, *args, **kwargs):
queryset = super(BaseCustomManager, self).filter(fun=True)
queryset._filter_CustomManager = True
return queryset
def manager_only(self):
return self.all()
CustomManager = BaseCustomManager.from_queryset(CustomQuerySet)
class CustomInitQuerySet(models.QuerySet):
# QuerySet with an __init__() method that takes an additional argument.
def __init__(self, custom_optional_arg=None, model=None, query=None, using=None, hints=None):
super(CustomInitQuerySet, self).__init__(model=model, query=query, using=using, hints=hints)
class DeconstructibleCustomManager(BaseCustomManager.from_queryset(CustomQuerySet)):
def __init__(self, a, b, c=1, d=2):
super(DeconstructibleCustomManager, self).__init__(a)
class FunPeopleManager(models.Manager):
def get_queryset(self):
return super(FunPeopleManager, self).get_queryset().filter(fun=True)
class BoringPeopleManager(models.Manager):
def get_queryset(self):
return super(BoringPeopleManager, self).get_queryset().filter(fun=False)
@python_2_unicode_compatible
class Person(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
fun = models.BooleanField(default=False)
favorite_book = models.ForeignKey('Book', models.SET_NULL, null=True, related_name='favorite_books')
favorite_thing_type = models.ForeignKey('contenttypes.ContentType', models.SET_NULL, null=True)
favorite_thing_id = models.IntegerField(null=True)
favorite_thing = GenericForeignKey('favorite_thing_type', 'favorite_thing_id')
objects = PersonManager()
fun_people = FunPeopleManager()
boring_people = BoringPeopleManager()
custom_queryset_default_manager = CustomQuerySet.as_manager()
custom_queryset_custom_manager = CustomManager('hello')
custom_init_queryset_manager = CustomInitQuerySet.as_manager()
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class FunPerson(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
fun = models.BooleanField(default=True)
favorite_book = models.ForeignKey(
'Book',
models.SET_NULL,
null=True,
related_name='fun_people_favorite_books',
)
favorite_thing_type = models.ForeignKey('contenttypes.ContentType', models.SET_NULL, null=True)
favorite_thing_id = models.IntegerField(null=True)
favorite_thing = GenericForeignKey('favorite_thing_type', 'favorite_thing_id')
objects = FunPeopleManager()
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Book(models.Model):
title = models.CharField(max_length=50)
author = models.CharField(max_length=30)
is_published = models.BooleanField(default=False)
published_objects = PublishedBookManager()
authors = models.ManyToManyField(Person, related_name='books')
fun_authors = models.ManyToManyField(FunPerson, related_name='books')
favorite_things = GenericRelation(Person,
content_type_field='favorite_thing_type', object_id_field='favorite_thing_id')
fun_people_favorite_things = GenericRelation(FunPerson,
content_type_field='favorite_thing_type', object_id_field='favorite_thing_id')
def __str__(self):
return self.title
class FastCarManager(models.Manager):
def get_queryset(self):
return super(FastCarManager, self).get_queryset().filter(top_speed__gt=150)
@python_2_unicode_compatible
class Car(models.Model):
name = models.CharField(max_length=10)
mileage = models.IntegerField()
top_speed = models.IntegerField(help_text="In miles per hour.")
cars = models.Manager()
fast_cars = FastCarManager()
def __str__(self):
return self.name
class RestrictedManager(models.Manager):
def get_queryset(self):
return super(RestrictedManager, self).get_queryset().filter(is_public=True)
@python_2_unicode_compatible
class RelatedModel(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class RestrictedModel(models.Model):
name = models.CharField(max_length=50)
is_public = models.BooleanField(default=False)
related = models.ForeignKey(RelatedModel, models.CASCADE)
objects = RestrictedManager()
plain_manager = models.Manager()
def __str__(self):
return self.name
@python_2_unicode_compatible
class OneToOneRestrictedModel(models.Model):
name = models.CharField(max_length=50)
is_public = models.BooleanField(default=False)
related = models.OneToOneField(RelatedModel, models.CASCADE)
objects = RestrictedManager()
plain_manager = models.Manager()
def __str__(self):
return self.name
class AbstractPerson(models.Model):
abstract_persons = models.Manager()
objects = models.CharField(max_length=30)
class Meta:
abstract = True
class PersonFromAbstract(AbstractPerson):
pass
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# ext4slower Trace slow ext4 operations.
# For Linux, uses BCC, eBPF.
#
# USAGE: ext4slower [-h] [-j] [-p PID] [min_ms]
#
# This script traces common ext4 file operations: reads, writes, opens, and
# syncs. It measures the time spent in these operations, and prints details
# for each that exceeded a threshold.
#
# WARNING: This adds low-overhead instrumentation to these ext4 operations,
# including reads and writes from the file system cache. Such reads and writes
# can be very frequent (depending on the workload; eg, 1M/sec), at which
# point the overhead of this tool (even if it prints no "slower" events) can
# begin to become significant.
#
# By default, a minimum millisecond threshold of 10 is used.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 11-Feb-2016 Brendan Gregg Created this.
# 15-Oct-2016 Dina Goldshtein -p to filter by process ID.
# 13-Jun-2018 Joe Yin modify generic_file_read_iter to ext4_file_read_iter.
from __future__ import print_function
from bcc import BPF
import argparse
from time import strftime
# symbols
kallsyms = "/proc/kallsyms"
# arguments
examples = """examples:
./ext4slower # trace operations slower than 10 ms (default)
./ext4slower 1 # trace operations slower than 1 ms
./ext4slower -j 1 # ... 1 ms, parsable output (csv)
./ext4slower 0 # trace all operations (warning: verbose)
./ext4slower -p 185 # trace PID 185 only
"""
parser = argparse.ArgumentParser(
description="Trace common ext4 file operations slower than a threshold",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-j", "--csv", action="store_true",
help="just print fields: comma-separated values")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("min_ms", nargs="?", default='10',
help="minimum I/O duration to trace, in ms (default 10)")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
min_ms = int(args.min_ms)
pid = args.pid
csv = args.csv
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/dcache.h>
// XXX: switch these to char's when supported
#define TRACE_READ 0
#define TRACE_WRITE 1
#define TRACE_OPEN 2
#define TRACE_FSYNC 3
struct val_t {
u64 ts;
u64 offset;
struct file *fp;
};
struct data_t {
// XXX: switch some to u32's when supported
u64 ts_us;
u64 type;
u64 size;
u64 offset;
u64 delta_us;
u64 pid;
char task[TASK_COMM_LEN];
char file[DNAME_INLINE_LEN];
};
BPF_HASH(entryinfo, u64, struct val_t);
BPF_PERF_OUTPUT(events);
//
// Store timestamp and size on entry
//
// The current ext4 (Linux 4.5) uses generic_file_read_iter(), instead of its
// own function, for reads. So we need to trace that and then filter on ext4,
// which I do by checking file->f_op.
// Newer kernels (since 4.10) use ext4_file_read_iter(). If CONFIG_FS_DAX is
// not set, ext4_file_read_iter() calls generic_file_read_iter(); otherwise it
// calls ext4_dax_read_iter(), and tracing generic_file_read_iter() alone would
// miss those reads.
int trace_read_entry(struct pt_regs *ctx, struct kiocb *iocb)
{
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
if (FILTER_PID)
return 0;
// ext4 filter on file->f_op == ext4_file_operations
struct file *fp = iocb->ki_filp;
if ((u64)fp->f_op != EXT4_FILE_OPERATIONS)
return 0;
// store filep and timestamp by id
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = fp;
val.offset = iocb->ki_pos;
if (val.fp)
entryinfo.update(&id, &val);
return 0;
}
// ext4_file_write_iter():
int trace_write_entry(struct pt_regs *ctx, struct kiocb *iocb)
{
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
if (FILTER_PID)
return 0;
// store filep and timestamp by id
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = iocb->ki_filp;
val.offset = iocb->ki_pos;
if (val.fp)
entryinfo.update(&id, &val);
return 0;
}
// ext4_file_open():
int trace_open_entry(struct pt_regs *ctx, struct inode *inode,
struct file *file)
{
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
if (FILTER_PID)
return 0;
// store filep and timestamp by id
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = file;
val.offset = 0;
if (val.fp)
entryinfo.update(&id, &val);
return 0;
}
// ext4_sync_file():
int trace_fsync_entry(struct pt_regs *ctx, struct file *file)
{
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
if (FILTER_PID)
return 0;
// store filep and timestamp by id
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = file;
val.offset = 0;
if (val.fp)
entryinfo.update(&id, &val);
return 0;
}
//
// Output
//
static int trace_return(struct pt_regs *ctx, int type)
{
struct val_t *valp;
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
valp = entryinfo.lookup(&id);
if (valp == 0) {
// missed tracing issue or filtered
return 0;
}
// calculate delta
u64 ts = bpf_ktime_get_ns();
u64 delta_us = (ts - valp->ts) / 1000;
entryinfo.delete(&id);
if (FILTER_US)
return 0;
// populate output struct
u32 size = PT_REGS_RC(ctx);
struct data_t data = {.type = type, .size = size, .delta_us = delta_us,
.pid = pid};
data.ts_us = ts / 1000;
data.offset = valp->offset;
bpf_get_current_comm(&data.task, sizeof(data.task));
// workaround (rewriter should handle file to d_name in one step):
struct dentry *de = NULL;
struct qstr qs = {};
de = valp->fp->f_path.dentry;
qs = de->d_name;
if (qs.len == 0)
return 0;
bpf_probe_read_kernel(&data.file, sizeof(data.file), (void *)qs.name);
// output
events.perf_submit(ctx, &data, sizeof(data));
return 0;
}
int trace_read_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_READ);
}
int trace_write_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_WRITE);
}
int trace_open_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_OPEN);
}
int trace_fsync_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_FSYNC);
}
"""
# code replacements
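# Look up the kernel address of ext4_file_operations in /proc/kallsyms, whose
# lines have the form "<address> <type> <name>" (the second field parsed below
# is the symbol type, not a size). The address is substituted into the BPF
# program so it can filter on file->f_op.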
with open(kallsyms) as syms:
ops = ''
for line in syms:
(addr, size, name) = line.rstrip().split(" ", 2)
name = name.split("\t")[0]
if name == "ext4_file_operations":
ops = "0x" + addr
break
if ops == '':
print("ERROR: no ext4_file_operations in /proc/kallsyms. Exiting.")
print("HINT: the kernel should be built with CONFIG_KALLSYMS_ALL.")
exit()
bpf_text = bpf_text.replace('EXT4_FILE_OPERATIONS', ops)
if min_ms == 0:
bpf_text = bpf_text.replace('FILTER_US', '0')
else:
bpf_text = bpf_text.replace('FILTER_US',
'delta_us <= %s' % str(min_ms * 1000))
if args.pid:
bpf_text = bpf_text.replace('FILTER_PID', 'pid != %s' % pid)
else:
bpf_text = bpf_text.replace('FILTER_PID', '0')
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
# process event
def print_event(cpu, data, size):
event = b["events"].event(data)
type = 'R'
if event.type == 1:
type = 'W'
elif event.type == 2:
type = 'O'
elif event.type == 3:
type = 'S'
if (csv):
print("%d,%s,%d,%s,%d,%d,%d,%s" % (
event.ts_us, event.task.decode('utf-8', 'replace'), event.pid,
type, event.size, event.offset, event.delta_us,
event.file.decode('utf-8', 'replace')))
return
print("%-8s %-14.14s %-6s %1s %-7s %-8d %7.2f %s" % (strftime("%H:%M:%S"),
event.task.decode('utf-8', 'replace'), event.pid, type, event.size,
event.offset / 1024, float(event.delta_us) / 1000,
event.file.decode('utf-8', 'replace')))
# initialize BPF
b = BPF(text=bpf_text)
# Common file functions. See earlier comment about generic_file_read_iter().
if BPF.get_kprobe_functions(b'ext4_file_read_iter'):
b.attach_kprobe(event="ext4_file_read_iter", fn_name="trace_read_entry")
else:
b.attach_kprobe(event="generic_file_read_iter", fn_name="trace_read_entry")
b.attach_kprobe(event="ext4_file_write_iter", fn_name="trace_write_entry")
b.attach_kprobe(event="ext4_file_open", fn_name="trace_open_entry")
b.attach_kprobe(event="ext4_sync_file", fn_name="trace_fsync_entry")
if BPF.get_kprobe_functions(b'ext4_file_read_iter'):
b.attach_kretprobe(event="ext4_file_read_iter", fn_name="trace_read_return")
else:
b.attach_kretprobe(event="generic_file_read_iter", fn_name="trace_read_return")
b.attach_kretprobe(event="ext4_file_write_iter", fn_name="trace_write_return")
b.attach_kretprobe(event="ext4_file_open", fn_name="trace_open_return")
b.attach_kretprobe(event="ext4_sync_file", fn_name="trace_fsync_return")
# header
if (csv):
print("ENDTIME_us,TASK,PID,TYPE,BYTES,OFFSET_b,LATENCY_us,FILE")
else:
if min_ms == 0:
print("Tracing ext4 operations")
else:
print("Tracing ext4 operations slower than %d ms" % min_ms)
print("%-8s %-14s %-6s %1s %-7s %-8s %7s %s" % ("TIME", "COMM", "PID", "T",
"BYTES", "OFF_KB", "LAT(ms)", "FILENAME"))
# read events
b["events"].open_perf_buffer(print_event, page_cnt=64)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
|
#!/usr/bin/env python
# encoding: utf-8
"""Wrapper functionality around the functions we need from Vim."""
import re
import vim # pylint:disable=import-error
from vim import error # pylint:disable=import-error,unused-import
from UltiSnips.compatibility import col2byte, byte2col, \
as_unicode, as_vimencoding
from UltiSnips.position import Position
from contextlib import contextmanager
class VimBuffer(object):
"""Wrapper around the current Vim buffer."""
def __getitem__(self, idx):
if isinstance(idx, slice): # Py3
return self.__getslice__(idx.start, idx.stop)
rv = vim.current.buffer[idx]
return as_unicode(rv)
def __getslice__(self, i, j): # pylint:disable=no-self-use
rv = vim.current.buffer[i:j]
return [as_unicode(l) for l in rv]
def __setitem__(self, idx, text):
if isinstance(idx, slice): # Py3
return self.__setslice__(idx.start, idx.stop, text)
vim.current.buffer[idx] = as_vimencoding(text)
def __setslice__(self, i, j, text): # pylint:disable=no-self-use
vim.current.buffer[i:j] = [as_vimencoding(l) for l in text]
def __len__(self):
return len(vim.current.buffer)
@property
def line_till_cursor(self): # pylint:disable=no-self-use
"""Returns the text before the cursor."""
_, col = self.cursor
return as_unicode(vim.current.line)[:col]
@property
def number(self): # pylint:disable=no-self-use
"""The bufnr() of the current buffer."""
return vim.current.buffer.number
@property
def cursor(self): # pylint:disable=no-self-use
"""The current windows cursor.
Note that this is 0 based in col and 0 based in line which is
different from Vim's cursor.
"""
line, nbyte = vim.current.window.cursor
col = byte2col(line, nbyte)
return Position(line - 1, col)
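# Illustrative mapping (not part of the original module):
# vim.current.window.cursor == (1, 0) corresponds to Position(0, 0) here.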
@cursor.setter
def cursor(self, pos): # pylint:disable=no-self-use
"""See getter."""
nbyte = col2byte(pos.line + 1, pos.col)
vim.current.window.cursor = pos.line + 1, nbyte
buf = VimBuffer() # pylint:disable=invalid-name
@contextmanager
def toggle_opt(name, new_value):
old_value = eval('&' + name)
command('set {0}={1}'.format(name, new_value))
try:
yield
finally:
command('set {0}={1}'.format(name, old_value))
@contextmanager
def save_mark(name):
old_pos = get_mark_pos(name)
try:
yield
finally:
if _is_pos_zero(old_pos):
delete_mark(name)
else:
set_mark_from_pos(name, old_pos)
def escape(inp):
"""Creates a vim-friendly string from a group of
dicts, lists and strings."""
def conv(obj):
"""Convert obj."""
if isinstance(obj, list):
rv = as_unicode('[' + ','.join(conv(o) for o in obj) + ']')
elif isinstance(obj, dict):
rv = as_unicode('{' + ','.join([
'%s:%s' % (conv(key), conv(value))
for key, value in obj.iteritems()]) + '}')
else:
rv = as_unicode('"%s"') % as_unicode(obj).replace('"', '\\"')
return rv
return conv(inp)
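# Illustrative behaviour of escape() with a hypothetical input (not taken from
# the original module):
#   escape({'a': ['x', 'y']})  ->  '{"a":["x","y"]}'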
def command(cmd):
"""Wraps vim.command."""
return as_unicode(vim.command(as_vimencoding(cmd)))
def eval(text):
"""Wraps vim.eval."""
rv = vim.eval(as_vimencoding(text))
if not isinstance(rv, (dict, list)):
return as_unicode(rv)
return rv
def feedkeys(keys, mode='n'):
"""Wrapper around vim's feedkeys function.
Mainly for convenience.
"""
if eval('mode()') == 'n':
if keys == 'a':
cursor_pos = get_cursor_pos()
cursor_pos[2] = int(cursor_pos[2]) + 1
set_cursor_from_pos(cursor_pos)
if keys in 'ai':
keys = 'startinsert'
if keys == 'startinsert':
command('startinsert')
else:
command(as_unicode(r'call feedkeys("%s", "%s")') % (keys, mode))
def new_scratch_buffer(text):
"""Create a new scratch buffer with the text given."""
vim.command('botright new')
vim.command('set ft=')
vim.command('set buftype=nofile')
vim.current.buffer[:] = text.splitlines()
feedkeys(r"\<Esc>")
def virtual_position(line, col):
"""Runs the position through virtcol() and returns the result."""
nbytes = col2byte(line, col)
return line, int(eval('virtcol([%d, %d])' % (line, nbytes)))
def select(start, end):
"""Select the span in Select mode."""
_unmap_select_mode_mapping()
selection = eval('&selection')
col = col2byte(start.line + 1, start.col)
vim.current.window.cursor = start.line + 1, col
mode = eval('mode()')
move_cmd = ''
if mode != 'n':
move_cmd += r"\<Esc>"
if start == end:
# Zero Length Tabstops, use 'i' or 'a'.
if col == 0 or mode not in 'i' and \
col < len(buf[start.line]):
move_cmd += 'i'
else:
move_cmd += 'a'
else:
# Non zero length, use Visual selection.
move_cmd += 'v'
if 'inclusive' in selection:
if end.col == 0:
move_cmd += '%iG$' % end.line
else:
move_cmd += '%iG%i|' % virtual_position(end.line + 1, end.col)
elif 'old' in selection:
move_cmd += '%iG%i|' % virtual_position(end.line + 1, end.col)
else:
move_cmd += '%iG%i|' % virtual_position(end.line + 1, end.col + 1)
move_cmd += 'o%iG%i|o\\<c-g>' % virtual_position(
start.line + 1, start.col + 1)
feedkeys(move_cmd)
def set_mark_from_pos(name, pos):
return _set_pos("'" + name, pos)
def get_mark_pos(name):
return _get_pos("'" + name)
def set_cursor_from_pos(pos):
return _set_pos('.', pos)
def get_cursor_pos():
return _get_pos('.')
def delete_mark(name):
try:
return command('delma ' + name)
except:
return False
def _set_pos(name, pos):
return eval("setpos(\"{0}\", {1})".format(name, pos))
def _get_pos(name):
return eval("getpos(\"{0}\")".format(name))
def _is_pos_zero(pos):
return ['0'] * 4 == pos or [0] == pos
def _unmap_select_mode_mapping():
"""This function unmaps select mode mappings if so wished by the user.
Removes select mode mappings that can actually be typed by the user
(ie, ignores things like <Plug>).
"""
if int(eval('g:UltiSnipsRemoveSelectModeMappings')):
ignores = eval('g:UltiSnipsMappingsToIgnore') + ['UltiSnips']
for option in ('<buffer>', ''):
# Put all smaps into a var, and then read the var
command(r"redir => _tmp_smaps | silent smap %s " % option +
'| redir END')
# Check if any mappings were found
all_maps = list(filter(len, eval(r"_tmp_smaps").splitlines()))
if len(all_maps) == 1 and all_maps[0][0] not in ' sv':
# "No maps found". String could be localized. Hopefully
# it doesn't start with any of these letters in any
# language
continue
# Only keep mappings that should not be ignored
maps = [m for m in all_maps if
not any(i in m for i in ignores) and len(m.strip())]
for map in maps:
# The first three chars are the modes, that might be listed.
# We are not interested in them here.
trig = map[3:].split()[0] if len(
map[3:].split()) != 0 else None
if trig is None:
continue
# The bar separates commands
if trig[-1] == '|':
trig = trig[:-1] + '<Bar>'
# Special ones
if trig[0] == '<':
add = False
# Only allow these
for valid in ['Tab', 'NL', 'CR', 'C-Tab', 'BS']:
if trig == '<%s>' % valid:
add = True
if not add:
continue
# UltiSnips remaps <BS>. Keep this around.
if trig == '<BS>':
continue
# Actually unmap it
try:
command('silent! sunmap %s %s' % (option, trig))
except: # pylint:disable=bare-except
# Bug 908139: ignore unmaps that fail because of
# unprintable characters. This is not ideal because we
# will not be able to unmap lhs with any unprintable
# character. If the lhs starts with a printable
# character, this will leak to the user when they try to
# type that character as the first one in a selected tabstop.
# This case should be rare enough to not bother us
# though.
pass
|
|
#!/usr/bin/env python3
"""K-Means Clustering on data set """
__author__ = "Chuanping Yu"
__date__ = "Aug 6, 2017"
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import style
style.use("ggplot")
import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA as sklearnPCA
from sklearn.metrics import silhouette_samples, silhouette_score
tools.set_credentials_file(username='cyugatech', api_key='k8B6qyhH9B3fujv684CX')
df = pd.read_csv(
filepath_or_buffer='https://raw.githubusercontent.com/datascienceinc/learn-data-science/master/Introduction-to-K-means-Clustering/Data/data_1024.csv',
sep='\s+')
f1 = df['Distance_Feature'].values
f1 = f1.reshape([len(f1),1])
f2 = df['Speeding_Feature'].values
f2 = f2.reshape([len(f2),1])
X = np.hstack((f1,f2))
plt.scatter(f1,f2)
plt.show()
k = 2 #2 clusters
kmeans = KMeans(n_clusters=k).fit(X)
labels = kmeans.predict(X)
centroids = kmeans.cluster_centers_
colors = ["g.","r.","c.","y."]
for i in range(len(X)):
plt.plot(X[i][0], X[i][1], colors[labels[i]], markersize = 10)
plt.scatter(centroids[:, 0],centroids[:, 1], marker = "x", color = 'b', s=150, linewidths = 5, zorder = 10)
plt.show()
k = 3 #3 clusters
kmeans = KMeans(n_clusters=k).fit(X)
labels = kmeans.predict(X)
centroids = kmeans.cluster_centers_
colors = ["g.","r.","c.","y."]
for i in range(len(X)):
plt.plot(X[i][0], X[i][1], colors[labels[i]], markersize = 10)
plt.scatter(centroids[:, 0],centroids[:, 1], marker = "x", color = 'b', s=150, linewidths = 5, zorder = 10)
plt.show()
k = 4 #4 clusters
kmeans = KMeans(n_clusters=k).fit(X)
labels = kmeans.predict(X)
centroids = kmeans.cluster_centers_
colors = ["g.","r.","c.","y."]
for i in range(len(X)):
plt.plot(X[i][0], X[i][1], colors[labels[i]], markersize = 10)
plt.scatter(centroids[:, 0],centroids[:, 1], marker = "x", color = 'b', s=150, linewidths = 5, zorder = 10)
plt.show()
#######################################################################
#Transformed data from last assignment
df = pd.read_csv(
filepath_or_buffer='https://archive.ics.uci.edu/ml/machine-learning-databases/ecoli/ecoli.data',
header=None,
sep='\s+')
df.columns = ['SequenceName', 'mcg', 'gvh', 'lip', 'chg', 'aac', 'alm1', 'alm2', 'class']
df.dropna(how="all", inplace=True) # drops the empty line at file-end
X = df.iloc[:, 1:8].values
y = df.iloc[:, 8].values
X_std = StandardScaler().fit_transform(X)
cov_mat = np.cov(X_std.T)
eig_vals, eig_vecs = np.linalg.eig(cov_mat)
sklearn_pca = sklearnPCA(n_components=5)
Y_sklearn = sklearn_pca.fit_transform(X_std)
range_n_clusters = [2, 3, 4, 5, 6, 7, 8, 9, 10]
figures = []
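# For each candidate cluster count, fit KMeans on the PCA-reduced data and
# build a two-panel figure: the per-sample silhouette plot on the left and the
# clustered points with their centers on the right.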
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig = tools.make_subplots(rows=1, cols=2,
print_grid=False,
subplot_titles=('The silhouette plot for the various clusters.',
'The visualization of the clustered data.'))
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1 to 1, but in this example all
# lie within [-0.1, 1]
fig['layout']['xaxis1'].update(title='The silhouette coefficient values',
range=[-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
fig['layout']['yaxis1'].update(title='Cluster label',
showticklabels=False,
range=[0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(Y_sklearn)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(Y_sklearn, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(Y_sklearn, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
filled_area = go.Scatter(y=np.arange(y_lower, y_upper),
x=ith_cluster_silhouette_values,
mode='lines',
showlegend=False,
line=dict(width=0.5,
color=colors),
fill='tozerox')
fig.append_trace(filled_area, 1, 1)
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
# The vertical line for average silhouette score of all the values
axis_line = go.Scatter(x=[silhouette_avg],
y=[0, len(Y_sklearn) + (n_clusters + 1) * 10],
showlegend=False,
mode='lines',
line=dict(color="red", dash='dash',
width =1) )
fig.append_trace(axis_line, 1, 1)
# 2nd Plot showing the actual clusters formed
colors = matplotlib.colors.colorConverter.to_rgb(cm.nipy_spectral(float(i) / n_clusters))
colors = 'rgb'+str(colors)
clusters = go.Scatter(x=Y_sklearn[:, 0],
y=Y_sklearn[:, 1],
showlegend=False,
mode='markers',
marker=dict(color=colors,
size=4)
)
fig.append_trace(clusters, 1, 2)
# Labeling the clusters
centers_ = clusterer.cluster_centers_
# Draw white circles at cluster centers
centers = go.Scatter(x=centers_[:, 0],
y=centers_[:, 1],
showlegend=False,
mode='markers',
marker=dict(color='green', size=10,
line=dict(color='black',
width=1))
)
fig.append_trace(centers, 1, 2)
fig['layout']['xaxis2'].update(title='Feature space for the 1st feature',
zeroline=False)
fig['layout']['yaxis2'].update(title='Feature space for the 2nd feature',
zeroline=False)
fig['layout'].update(title="Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters)
figures.append(fig)
py.iplot(figures[4])
|
|
import logging
import os
import types
from io import BytesIO, IOBase
import pickle
import string
from collections import defaultdict
import archinfo
from archinfo.arch_soot import SootAddressDescriptor, ArchSoot
import cle
from .misc.ux import deprecated
l = logging.getLogger(name=__name__)
def load_shellcode(shellcode, arch, start_offset=0, load_address=0, thumb=False, **kwargs):
"""
Load a new project based on a snippet of assembly or bytecode.
:param shellcode: The data to load, as either a bytestring of instructions or a string of assembly text
:param arch: The name of the arch to use, or an archinfo class
:param start_offset: The offset into the data to start analysis (default 0)
:param load_address: The address to place the data in memory (default 0)
:param thumb: Whether this is ARM Thumb shellcode
"""
if not isinstance(arch, archinfo.Arch):
arch = archinfo.arch_from_id(arch)
if type(shellcode) is str:
shellcode = arch.asm(shellcode, load_address, thumb=thumb)
if thumb:
start_offset |= 1
return Project(
BytesIO(shellcode),
main_opts={
'backend': 'blob',
'arch': arch,
'entry_point': start_offset,
'base_addr': load_address,
},
**kwargs
)
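# A minimal usage sketch (not part of this module): load two bytes of x86-64
# machine code (NOP; RET) and disassemble the resulting block. The byte string
# and addresses are illustrative only.
#
#   proj = load_shellcode(b"\x90\xc3", arch="AMD64", load_address=0x400000)
#   proj.factory.block(proj.entry).pp()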
class Project:
"""
This is the main class of the angr module. It is meant to contain a set of binaries and the relationships between
them, and perform analyses on them.
:param thing: The path to the main executable object to analyze, or a CLE Loader object.
The following parameters are optional.
:param default_analysis_mode: The mode of analysis to use by default. Defaults to 'symbolic'.
:param ignore_functions: A list of function names that, when imported from shared libraries, should
never be stepped into in analysis (calls will return an unconstrained value).
:param use_sim_procedures: Whether to replace resolved dependencies for which simprocedures are
available with said simprocedures.
:param exclude_sim_procedures_func: A function that, when passed a function name, returns whether or not to wrap
it with a simprocedure.
:param exclude_sim_procedures_list: A list of functions to *not* wrap with simprocedures.
:param arch: The target architecture (auto-detected otherwise).
:param simos: a SimOS class to use for this project.
:param engine: The SimEngine class to use for this project.
:param bool translation_cache: If True, cache translated basic blocks rather than re-translating them.
:param support_selfmodifying_code: Whether we aggressively support self-modifying code. When enabled, emulation
will try to read code from the current state instead of the original memory,
regardless of the current memory protections.
:type support_selfmodifying_code: bool
:param store_function:       A function that defines how the Project should be stored. Defaults to pickling.
:param load_function:        A function that defines how the Project should be loaded. Defaults to unpickling.
:param analyses_preset: The plugin preset for the analyses provider (i.e. Analyses instance).
:type analyses_preset: angr.misc.PluginPreset
Any additional keyword arguments passed will be passed onto ``cle.Loader``.
:ivar analyses: The available analyses.
:type analyses: angr.analysis.Analyses
:ivar entry: The program entrypoint.
:ivar factory: Provides access to important analysis elements such as path groups and symbolic execution results.
:type factory: AngrObjectFactory
:ivar filename: The filename of the executable.
:ivar loader: The program loader.
:type loader: cle.Loader
:ivar storage: Dictionary of things that should be loaded/stored with the Project.
:type storage: defaultdict(list)
"""
def __init__(self, thing,
default_analysis_mode=None,
ignore_functions=None,
use_sim_procedures=True,
exclude_sim_procedures_func=None,
exclude_sim_procedures_list=(),
arch=None, simos=None,
engine=None,
load_options=None,
translation_cache=True,
support_selfmodifying_code=False,
store_function=None,
load_function=None,
analyses_preset=None,
concrete_target=None,
**kwargs):
# Step 1: Load the binary
if load_options is None: load_options = {}
load_options.update(kwargs)
if arch is not None:
load_options.update({'arch': arch})
if isinstance(thing, cle.Loader):
if load_options:
l.warning("You provided CLE options to angr but you also provided a completed cle.Loader object!")
self.loader = thing
self.filename = self.loader.main_object.binary
elif hasattr(thing, 'read') and hasattr(thing, 'seek'):
l.info("Loading binary from stream")
self.filename = None
self.loader = cle.Loader(thing, **load_options)
elif not isinstance(thing, str) or not os.path.exists(thing) or not os.path.isfile(thing):
raise Exception("Not a valid binary file: %s" % repr(thing))
else:
# use angr's loader, provided by cle
l.info("Loading binary %s", thing)
self.filename = thing
self.loader = cle.Loader(self.filename, concrete_target=concrete_target, **load_options)
# Step 2: determine its CPU architecture, ideally falling back to CLE's guess
if isinstance(arch, str):
self.arch = archinfo.arch_from_id(arch) # may raise ArchError, let the user see this
elif isinstance(arch, archinfo.Arch):
self.arch = arch
elif arch is None:
self.arch = self.loader.main_object.arch
else:
raise ValueError("Invalid arch specification.")
# Step 3: Set some defaults and set the public and private properties
if not default_analysis_mode:
default_analysis_mode = 'symbolic'
if not ignore_functions:
ignore_functions = []
if isinstance(exclude_sim_procedures_func, types.LambdaType):
l.warning("Passing a lambda type as the exclude_sim_procedures_func argument to "
"Project causes the resulting object to be un-serializable.")
self._sim_procedures = {}
self.concrete_target = concrete_target
# It doesn't make any sense to have auto_load_libs
# if you have the concrete target, let's warn the user about this.
if self.concrete_target and load_options.get('auto_load_libs', None):
l.critical("Incompatible options selected for this project, please disable auto_load_libs if "
"you want to use a concrete target.")
raise Exception("Incompatible options for the project")
if self.concrete_target and self.arch.name not in ['X86', 'AMD64', 'ARMHF']:
l.critical("Concrete execution does not support yet the selected architecture. Aborting.")
raise Exception("Incompatible options for the project")
self._default_analysis_mode = default_analysis_mode
self._exclude_sim_procedures_func = exclude_sim_procedures_func
self._exclude_sim_procedures_list = exclude_sim_procedures_list
self.use_sim_procedures = use_sim_procedures
self._ignore_functions = ignore_functions
self._support_selfmodifying_code = support_selfmodifying_code
self._translation_cache = translation_cache
self._executing = False # this is a flag for the convenience API, exec() and terminate_execution() below
if self._support_selfmodifying_code:
if self._translation_cache is True:
self._translation_cache = False
l.warning("Disabling IRSB translation cache because support for self-modifying code is enabled.")
self.entry = self.loader.main_object.entry
self.storage = defaultdict(list)
self.store_function = store_function or self._store
self.load_function = load_function or self._load
# Step 4: Set up the project's hubs
# Step 4.1 Factory
self.factory = AngrObjectFactory(self, default_engine=engine)
# Step 4.2: Analyses
self.analyses = AnalysesHub(self)
self.analyses.use_plugin_preset(analyses_preset if analyses_preset is not None else 'default')
# Step 4.3: ...etc
self.kb = KnowledgeBase(self)
# Step 5: determine the guest OS
if isinstance(simos, type) and issubclass(simos, SimOS):
self.simos = simos(self) #pylint:disable=invalid-name
elif isinstance(simos, str):
self.simos = os_mapping[simos](self)
elif simos is None:
self.simos = os_mapping[self.loader.main_object.os](self)
else:
raise ValueError("Invalid OS specification or non-matching architecture.")
self.is_java_project = isinstance(self.arch, ArchSoot)
self.is_java_jni_project = isinstance(self.arch, ArchSoot) and self.simos.is_javavm_with_jni_support
# Step 6: Register simprocedures as appropriate for library functions
if isinstance(self.arch, ArchSoot) and self.simos.is_javavm_with_jni_support:
# If we execute a Java archive that includes native JNI libraries,
# we need to use the arch of the native simos for all (native) sim
# procedures.
sim_proc_arch = self.simos.native_arch
else:
sim_proc_arch = self.arch
for obj in self.loader.initial_load_objects:
self._register_object(obj, sim_proc_arch)
# Step 7: Run OS-specific configuration
self.simos.configure_project()
def _register_object(self, obj, sim_proc_arch):
"""
This scans through an objects imports and hooks them with simprocedures from our library whenever possible
"""
# Step 1: get the set of libraries we are allowed to use to resolve unresolved symbols
missing_libs = []
for lib_name in self.loader.missing_dependencies:
try:
missing_libs.append(SIM_LIBRARIES[lib_name])
except KeyError:
l.info("There are no simprocedures for missing library %s :(", lib_name)
# additionally provide libraries we _have_ loaded as a further fallback
# this helps in the case that e.g. CLE picked up a linux arm libc to satisfy an android arm binary
for lib in self.loader.all_objects:
if lib.provides in SIM_LIBRARIES:
simlib = SIM_LIBRARIES[lib.provides]
if simlib not in missing_libs:
missing_libs.append(simlib)
# Step 2: Categorize every "import" symbol in each object.
# If it's IGNORED, mark it for stubbing
# If it's blacklisted, don't process it
# If it matches a simprocedure we have, replace it
for reloc in obj.imports.values():
# Step 2.1: Quick filter on symbols we really don't care about
func = reloc.symbol
if func is None:
continue
if not func.is_function and func.type != cle.backends.symbol.SymbolType.TYPE_NONE:
continue
if func.resolvedby is None:
# I don't understand the binary which made me add this case. If you are debugging and see this comment,
# good luck.
# ref: https://github.com/angr/angr/issues/1782
# (I also don't know why the TYPE_NONE check in the previous clause is there but I can't find a ref for
# that. they are probably related.)
continue
if not reloc.resolved:
# This is a hack, effectively to support Binary Ninja, which doesn't provide access to dependency
# library names. The backend creates the Relocation objects, but leaves them unresolved so that
# we can try to guess them here. Once the Binary Ninja API starts supplying the dependencies,
# the if/else, along with Project._guess_simprocedure(), can be removed if it has no other utility;
# just leave behind the 'unresolved' debug statement from the else clause.
if reloc.owner.guess_simprocs:
l.debug("Looking for matching SimProcedure for unresolved %s from %s with hint %s",
func.name, reloc.owner, reloc.owner.guess_simprocs_hint)
self._guess_simprocedure(func, reloc.owner.guess_simprocs_hint)
else:
l.debug("Ignoring unresolved import '%s' from %s ...?", func.name, reloc.owner)
continue
export = reloc.resolvedby
if self.is_hooked(export.rebased_addr):
l.debug("Already hooked %s (%s)", export.name, export.owner)
continue
# Step 2.2: If this function has been resolved by a static dependency,
# check if we actually can and want to replace it with a SimProcedure.
# We opt out of this step if it is blacklisted by ignore_functions, which
# will cause it to be replaced by ReturnUnconstrained later.
if export.owner is not self.loader._extern_object and \
export.name not in self._ignore_functions:
if self._check_user_blacklists(export.name):
continue
owner_name = export.owner.provides
if isinstance(self.loader.main_object, cle.backends.pe.PE):
owner_name = owner_name.lower()
if owner_name not in SIM_LIBRARIES:
continue
sim_lib = SIM_LIBRARIES[owner_name]
if not sim_lib.has_implementation(export.name):
continue
l.info("Using builtin SimProcedure for %s from %s", export.name, sim_lib.name)
self.hook_symbol(export.rebased_addr, sim_lib.get(export.name, sim_proc_arch))
# Step 2.3: If 2.2 didn't work, check if the symbol wants to be resolved
# by a library we already know something about. Resolve it appropriately.
# Note that _check_user_blacklists also includes _ignore_functions.
# An important consideration is that even if we're stubbing a function out,
# we still want to try as hard as we can to figure out where it comes from
# so we can get the calling convention as close to right as possible.
elif reloc.resolvewith is not None and reloc.resolvewith in SIM_LIBRARIES:
sim_lib = SIM_LIBRARIES[reloc.resolvewith]
if self._check_user_blacklists(export.name):
if not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s from %s", func.name, sim_lib.name)
self.hook_symbol(export.rebased_addr, sim_lib.get_stub(export.name, sim_proc_arch))
else:
l.info("Using builtin SimProcedure for unresolved %s from %s", export.name, sim_lib.name)
self.hook_symbol(export.rebased_addr, sim_lib.get(export.name, sim_proc_arch))
# Step 2.4: If 2.3 didn't work (the symbol didn't request a provider we know of), try
# looking through each of the SimLibraries we're using to resolve unresolved
# functions. If any of them know anything specifically about this function,
# resolve it with that. As a final fallback, just ask any old SimLibrary
# to resolve it.
elif missing_libs:
for sim_lib in missing_libs:
if sim_lib.has_metadata(export.name):
if self._check_user_blacklists(export.name):
if not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s from %s", export.name, sim_lib.name)
self.hook_symbol(export.rebased_addr, sim_lib.get_stub(export.name, sim_proc_arch))
else:
l.info("Using builtin SimProcedure for unresolved %s from %s", export.name, sim_lib.name)
self.hook_symbol(export.rebased_addr, sim_lib.get(export.name, sim_proc_arch))
break
else:
if not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s", export.name)
self.hook_symbol(export.rebased_addr, missing_libs[0].get(export.name, sim_proc_arch))
# Step 2.5: If 2.4 didn't work (we have NO SimLibraries to work with), just
# use the vanilla ReturnUnconstrained, assuming that this isn't a weak func
elif not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s", export.name)
self.hook_symbol(export.rebased_addr, SIM_PROCEDURES['stubs']['ReturnUnconstrained'](display_name=export.name, is_stub=True))
def _guess_simprocedure(self, f, hint):
"""
Does symbol name `f` exist as a SIM_PROCEDURE? If so, hook it; otherwise do nothing.
Narrows down the set of libraries to search based on hint.
Part of the hack to enable Binary Ninja support. Remove if _register_object() stops using it.
"""
# First, filter the SIM_LIBRARIES to a reasonable subset based on the hint
hinted_libs = []
if hint == "win":
hinted_libs = filter(lambda lib: lib if lib.endswith(".dll") else None, SIM_LIBRARIES)
else:
hinted_libs = filter(lambda lib: lib if ".so" in lib else None, SIM_LIBRARIES)
for lib in hinted_libs:
if SIM_LIBRARIES[lib].has_implementation(f.name):
l.debug("Found implementation for %s in %s", f, lib)
self.hook_symbol(f.relative_addr, (SIM_LIBRARIES[lib].get(f.name, self.arch)))
break
else:
l.debug("Could not find matching SimProcedure for %s, ignoring.", f.name)
def _check_user_blacklists(self, f):
"""
Has symbol name `f` been marked for exclusion by any of the user
parameters?
"""
return not self.use_sim_procedures or \
f in self._exclude_sim_procedures_list or \
f in self._ignore_functions or \
(self._exclude_sim_procedures_func is not None and self._exclude_sim_procedures_func(f))
@staticmethod
def _addr_to_str(addr):
return "%s" % repr(addr) if isinstance(addr, SootAddressDescriptor) else "%#x" % addr
#
# Public methods
# They're all related to hooking!
#
# pylint: disable=inconsistent-return-statements
def hook(self, addr, hook=None, length=0, kwargs=None, replace=False):
"""
Hook a section of code with a custom function. This is used internally to provide symbolic
summaries of library functions, and can be used to instrument execution or to modify
control flow.
When hook is not specified, it returns a function decorator that allows easy hooking.
Usage::
# Assuming proj is an instance of angr.Project, we will add a custom hook at the entry
# point of the project.
@proj.hook(proj.entry)
def my_hook(state):
print("Welcome to execution!")
:param addr: The address to hook.
:param hook: A :class:`angr.project.Hook` describing a procedure to run at the
given address. You may also pass in a SimProcedure class or a function
directly and it will be wrapped in a Hook object for you.
:param length: If you provide a function for the hook, this is the number of bytes
that will be skipped by executing the hook by default.
:param kwargs: If you provide a SimProcedure for the hook, these are the keyword
arguments that will be passed to the procedure's `run` method
eventually.
:param replace: Control the behavior on finding that the address is already hooked. If
true, silently replace the hook. If false (default), warn and do not
replace the hook. If none, warn and replace the hook.
"""
if hook is None:
# if we haven't been passed a thing to hook with, assume we're being used as a decorator
return self._hook_decorator(addr, length=length, kwargs=kwargs)
if kwargs is None: kwargs = {}
l.debug('hooking %s with %s', self._addr_to_str(addr), str(hook))
if self.is_hooked(addr):
if replace is True:
pass
elif replace is False:
l.warning("Address is already hooked, during hook(%s, %s). Not re-hooking.", self._addr_to_str(addr), hook)
return
else:
l.warning("Address is already hooked, during hook(%s, %s). Re-hooking.", self._addr_to_str(addr), hook)
if isinstance(hook, type):
raise TypeError("Please instanciate your SimProcedure before hooking with it")
if callable(hook):
hook = SIM_PROCEDURES['stubs']['UserHook'](user_func=hook, length=length, **kwargs)
self._sim_procedures[addr] = hook
def is_hooked(self, addr):
"""
Returns True if `addr` is hooked.
:param addr: An address.
:returns: True if addr is hooked, False otherwise.
"""
return addr in self._sim_procedures
def hooked_by(self, addr):
"""
Returns the current hook for `addr`.
:param addr: An address.
:returns: None if the address is not hooked.
"""
if not self.is_hooked(addr):
l.warning("Address %s is not hooked", self._addr_to_str(addr))
return None
return self._sim_procedures[addr]
def unhook(self, addr):
"""
Remove a hook.
:param addr: The address of the hook.
"""
if not self.is_hooked(addr):
l.warning("Address %s not hooked", self._addr_to_str(addr))
return
del self._sim_procedures[addr]
def hook_symbol(self, symbol_name, simproc, kwargs=None, replace=None):
"""
Resolve a dependency in a binary. Looks up the address of the given symbol, and then hooks that
address. If the symbol was not available in the loaded libraries, this address may be provided
by the CLE externs object.
Additionally, if instead of a symbol name you provide an address, some secret functionality will
kick in and you will probably just hook that address, UNLESS you're on powerpc64 ABIv1 or some
yet-unknown scary ABI that has its function pointers point to something other than the actual
functions, in which case it'll do the right thing.
:param symbol_name: The name of the dependency to resolve.
:param simproc: The SimProcedure instance (or function) with which to hook the symbol
:param kwargs: If you provide a SimProcedure for the hook, these are the keyword
arguments that will be passed to the procedure's `run` method
eventually.
:param replace: Control the behavior on finding that the address is already hooked. If
true, silently replace the hook. If false, warn and do not replace the
hook. If none (default), warn and replace the hook.
:returns: The address of the new symbol.
:rtype: int
"""
if type(symbol_name) is not int:
sym = self.loader.find_symbol(symbol_name)
if sym is None:
# it could be a previously unresolved weak symbol..?
new_sym = None
for reloc in self.loader.find_relevant_relocations(symbol_name):
if not reloc.symbol.is_weak:
raise Exception("Symbol is strong but we couldn't find its resolution? Report to @rhelmot.")
if new_sym is None:
new_sym = self.loader.extern_object.make_extern(symbol_name)
reloc.resolve(new_sym)
reloc.relocate([])
if new_sym is None:
l.error("Could not find symbol %s", symbol_name)
return None
sym = new_sym
basic_addr = sym.rebased_addr
else:
basic_addr = symbol_name
symbol_name = None
hook_addr, _ = self.simos.prepare_function_symbol(symbol_name, basic_addr=basic_addr)
self.hook(hook_addr, simproc, kwargs=kwargs, replace=replace)
return hook_addr
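# Hypothetical sketch of hook_symbol() in use (names are illustrative, not from
# this module): replace libc's rand() with a SimProcedure that always returns
# 4, assuming `proj` is a Project over a dynamically linked binary.
#
#   class FixedRand(angr.SimProcedure):
#       def run(self):
#           return 4
#   proj.hook_symbol('rand', FixedRand())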
def is_symbol_hooked(self, symbol_name):
"""
Check if a symbol is already hooked.
:param str symbol_name: Name of the symbol.
:return: True if the symbol can be resolved and is hooked, False otherwise.
:rtype: bool
"""
sym = self.loader.find_symbol(symbol_name)
if sym is None:
l.warning("Could not find symbol %s", symbol_name)
return False
hook_addr, _ = self.simos.prepare_function_symbol(symbol_name, basic_addr=sym.rebased_addr)
return self.is_hooked(hook_addr)
def unhook_symbol(self, symbol_name):
"""
Remove the hook on a symbol.
This function will fail if the symbol is provided by the extern object, as that would result in a state where
analysis would be unable to cope with a call to this symbol.
"""
sym = self.loader.find_symbol(symbol_name)
if sym is None:
l.warning("Could not find symbol %s", symbol_name)
return False
if sym.owner is self.loader._extern_object:
l.warning("Refusing to unhook external symbol %s, replace it with another hook if you want to change it",
symbol_name)
return False
hook_addr, _ = self.simos.prepare_function_symbol(symbol_name, basic_addr=sym.rebased_addr)
self.unhook(hook_addr)
return True
def rehook_symbol(self, new_address, symbol_name, stubs_on_sync):
"""
Move the hook for a symbol to a specific address
:param new_address: the new address that will trigger the SimProc execution
:param symbol_name: the name of the symbol (f.i. strcmp )
:return: None
"""
new_sim_procedures = {}
for key_address, simproc_obj in self._sim_procedures.items():
# if we don't want stubs during the sync let's skip those, we will execute the real function.
if not stubs_on_sync and simproc_obj.is_stub:
continue
if simproc_obj.display_name == symbol_name:
new_sim_procedures[new_address] = simproc_obj
else:
new_sim_procedures[key_address] = simproc_obj
self._sim_procedures = new_sim_procedures
#
# A convenience API (in the style of triton and manticore) for symbolic execution.
#
def execute(self, *args, **kwargs):
"""
This function is a symbolic execution helper in the simple style
supported by triton and manticore. It is designed to be run after
setting up hooks (see Project.hook), in which the symbolic state
can be checked.
This function can be run in three different ways:
- When run with no parameters, this function begins symbolic execution
from the entrypoint.
- It can also be run with a "state" parameter specifying a SimState to
begin symbolic execution from.
- Finally, it can accept any arbitrary keyword arguments, which are all
passed to project.factory.full_init_state.
If symbolic execution finishes, this function returns the resulting
simulation manager.
"""
if args:
state = args[0]
else:
state = self.factory.full_init_state(**kwargs)
pg = self.factory.simulation_manager(state)
self._executing = True
return pg.run(until=lambda lpg: not self._executing)
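# Illustrative use of execute(), assuming `proj` is a Project and the target
# program terminates: run symbolic execution from the entry point and inspect
# the stdout of the finished states.
#
#   simgr = proj.execute()
#   for state in simgr.deadended:
#       print(state.posix.dumps(1))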
def terminate_execution(self):
"""
Terminates a symbolic execution that was started with Project.execute().
"""
self._executing = False
#
# Private methods related to hooking
#
def _hook_decorator(self, addr, length=0, kwargs=None):
"""
Return a function decorator that allows easy hooking. Please refer to hook() for its usage.
:return: The function decorator.
"""
def hook_decorator(func):
self.hook(addr, func, length=length, kwargs=kwargs)
return func
return hook_decorator
#
# Pickling
#
def __getstate__(self):
try:
store_func, load_func = self.store_function, self.load_function
self.store_function, self.load_function = None, None
return dict(self.__dict__)
finally:
self.store_function, self.load_function = store_func, load_func
def __setstate__(self, s):
self.__dict__.update(s)
def _store(self, container):
# If container is a filename.
if isinstance(container, str):
with open(container, 'wb') as f:
try:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
except RuntimeError as e: # maximum recursion depth can be reached here
l.error("Unable to store Project: '%s' during pickling", e)
# If container is an open file.
elif isinstance(container, IOBase):
try:
pickle.dump(self, container, pickle.HIGHEST_PROTOCOL)
except RuntimeError as e: # maximum recursion depth can be reached here
l.error("Unable to store Project: '%s' during pickling", e)
# If container is just a variable.
else:
try:
container = pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
except RuntimeError as e: # maximum recursion depth can be reached here
l.error("Unable to store Project: '%s' during pickling", e)
@staticmethod
def _load(container):
if isinstance(container, str):
# If container is a filename.
if all(c in string.printable for c in container) and os.path.exists(container):
with open(container, 'rb') as f:
return pickle.load(f)
# If container is a pickle string.
else:
return pickle.loads(container)
# If container is an open file
elif isinstance(container, IOBase):
return pickle.load(container)
# What else could it be?
else:
l.error("Cannot unpickle container of type %s", type(container))
return None
def __repr__(self):
return '<Project %s>' % (self.filename if self.filename is not None else 'loaded from stream')
#
# Compatibility
#
@property
@deprecated(replacement='simos')
def _simos(self):
return self.simos
from .factory import AngrObjectFactory
from angr.simos import SimOS, os_mapping
from .analyses.analysis import AnalysesHub
from .knowledge_base import KnowledgeBase
from .procedures import SIM_PROCEDURES, SIM_LIBRARIES
|
|
# -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.fiptables
~~~~~~~~~~~~~~~
IP tables management functions.
"""
from collections import defaultdict
import copy
import logging
import random
import time
import itertools
import re
from gevent import subprocess
import gevent
import sys
from calico.felix import frules, futils
from calico.felix.actor import (
Actor, actor_message, ResultOrExc, SplitBatchAndRetry
)
from calico.felix.frules import FELIX_PREFIX
from calico.felix.futils import FailedSystemCall, StatCounter
_log = logging.getLogger(__name__)
_correlators = ("ipt-%s" % ii for ii in itertools.count())
MAX_IPT_RETRIES = 10
MAX_IPT_BACKOFF = 0.2
class IptablesUpdater(Actor):
"""
Actor that owns and applies updates to a particular iptables table.
Supports batching updates for performance and dependency tracking
between chains.
iptables safety
~~~~~~~~~~~~~~~
Concurrent access to the same table is not allowed by the
underlying iptables architecture so there should be one instance of
this class for each table. Each IP version has its own set of
non-conflicting tables.
However, this class tries to be robust against concurrent access
from outside the process by detecting and retrying such errors.
Batching support
~~~~~~~~~~~~~~~~
This actor supports batching of multiple updates. It applies updates that
are on the queue in one atomic batch. This is dramatically faster than
issuing single iptables requests.
If a request fails, it does a binary chop using the SplitBatchAndRetry
mechanism to report the error to the correct request.
Dependency tracking
~~~~~~~~~~~~~~~~~~~
To offload a lot of coordination complexity from the classes that
use this one, this class supports tracking dependencies between chains
and programming stubs for missing chains:
* When calling rewrite_chains() the caller must supply a dict that
maps from chain to a set of chains it requires (i.e. the chains
that appear in its --jump and --goto targets).
* Any chains that are required but not present are created as "stub"
chains, which drop all traffic. They are marked as such in the
iptables rules with an iptables comment.
* When a required chain is later explicitly created, the stub chain is
replaced with the required contents of the chain.
* If a required chain is explicitly deleted, it is rewritten as a stub
chain.
* If a chain exists only as a stub chain to satisfy a dependency, then it
is cleaned up when the dependency is removed.
"""
def __init__(self, table, config, ip_version=4):
super(IptablesUpdater, self).__init__(qualifier="v%d-%s" %
(ip_version, table))
self.table = table
self.refresh_interval = config.REFRESH_INTERVAL
if ip_version == 4:
self._restore_cmd = "iptables-restore"
self._save_cmd = "iptables-save"
self._iptables_cmd = "iptables"
else:
assert ip_version == 6
self._restore_cmd = "ip6tables-restore"
self._save_cmd = "ip6tables-save"
self._iptables_cmd = "ip6tables"
self._chains_in_dataplane = None
"""
Set of chains that we know are actually in the dataplane. Loaded
at start of day and then kept in sync.
"""
self._grace_period_finished = False
"""
Flag that is set after the graceful restart window is over.
"""
self._programmed_chain_contents = {}
"""Map from chain name to chain contents, only contains chains that
have been explicitly programmed."""
self._inserted_rule_fragments = set()
"""Special-case rule fragments that we've explicitly inserted."""
self._removed_rule_fragments = set()
"""Special-case rule fragments that we've explicitly removed.
We need to cache this to defend against other processes accidentally
reverting our removal."""
self._required_chains = defaultdict(set)
"""Map from chain name to the set of names of chains that it
depends on."""
self._requiring_chains = defaultdict(set)
"""Map from chain to the set of chains that depend on it.
Inverse of self.required_chains."""
# Since it's fairly complex to keep track of the changes required
# for a particular batch and still be able to roll-back the changes
# to our data structures, we delegate to a per-batch object that
# does that calculation.
self._txn = None
""":type _Transaction: object used to track index changes
for this batch."""
self._completion_callbacks = None
"""List of callbacks to issue once the current batch completes."""
# Diagnostic counters.
self._stats = StatCounter("IPv%s %s iptables updater" %
(ip_version, table))
# Avoid duplicating init logic.
self._reset_batched_work()
self._load_chain_names_from_iptables(async=True)
# Optionally, start periodic refresh timer.
if self.refresh_interval > 0:
_log.info("Periodic iptables refresh enabled, starting "
"resync greenlet")
refresh_greenlet = gevent.spawn(self._periodic_refresh)
refresh_greenlet.link_exception(self._on_worker_died)
@property
def _explicitly_prog_chains(self):
return set(self._programmed_chain_contents.keys())
def _reset_batched_work(self):
"""Reset the per-batch state in preparation for a new batch."""
self._txn = _Transaction(self._programmed_chain_contents,
self._required_chains,
self._requiring_chains)
self._completion_callbacks = []
@actor_message(needs_own_batch=True)
def _load_chain_names_from_iptables(self):
"""
Loads the set of (our) chains that already exist from iptables.
Populates self._chains_in_dataplane.
"""
self._stats.increment("Refreshed chain list")
raw_ipt_output = subprocess.check_output([self._save_cmd, "--table",
self.table])
self._chains_in_dataplane = _extract_our_chains(self.table,
raw_ipt_output)
def _get_unreferenced_chains(self):
"""
Reads the list of chains in the dataplane which are not referenced.
:returns list[str]: list of chains currently in the dataplane that
are not referenced by other chains.
"""
raw_ipt_output = subprocess.check_output(
[self._iptables_cmd,
"--wait", # Wait for the xtables lock.
"--list", # Action to perform.
"--numeric", # Avoid DNS lookups.
"--table", self.table])
return _extract_our_unreffed_chains(raw_ipt_output)
@actor_message()
def rewrite_chains(self, update_calls_by_chain,
dependent_chains, callback=None):
"""
Atomically apply a set of updates to the table.
:param update_calls_by_chain: map from chain name to list of
iptables-style update calls,
e.g. {"chain_name": ["-A chain_name -j ACCEPT"]}. Chain will
be flushed.
:param dependent_chains: map from chain name to a set of chains
that that chain requires to exist. They will be created
(with a default drop) if they don't exist.
:raises FailedSystemCall if a problem occurred.
"""
# We actually apply the changes in _finish_msg_batch(). Index the
# changes by table and chain.
_log.info("iptables update to chains %s", update_calls_by_chain.keys())
_log.debug("iptables update: %s", update_calls_by_chain)
_log.debug("iptables deps: %s", dependent_chains)
self._stats.increment("Chain rewrites")
for chain, updates in update_calls_by_chain.iteritems():
# TODO: double-check whether this flush is needed.
updates = ["--flush %s" % chain] + updates
deps = dependent_chains.get(chain, set())
self._txn.store_rewrite_chain(chain, updates, deps)
if callback:
self._completion_callbacks.append(callback)
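# Illustrative call (the chain names are hypothetical; the `async` keyword
# comes from the actor framework):
#
#   updater.rewrite_chains(
#       {"felix-FORWARD": ["--append felix-FORWARD --jump felix-from-endpoint"]},
#       {"felix-FORWARD": set(["felix-from-endpoint"])},
#       async=True)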
# Does direct table manipulation, forbid batching with other messages.
@actor_message(needs_own_batch=True)
def ensure_rule_inserted(self, rule_fragment):
"""
Runs the given rule fragment, prefixed with --insert. If the
rule was already present, it is removed and reinserted at the
start of the chain.
This covers the case where we need to insert a rule into the
pre-existing kernel chains (only). For chains that are owned by Felix,
use the more robust approach of rewriting the whole chain using
rewrite_chains().
:param rule_fragment: fragment to be inserted. For example,
"INPUT --jump felix-INPUT"
"""
self._stats.increment("Rule inserts")
_log.info("Inserting rule %r", rule_fragment)
self._inserted_rule_fragments.add(rule_fragment)
self._removed_rule_fragments.discard(rule_fragment)
self._insert_rule(rule_fragment)
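# For example (hypothetical call, mirroring the docstring's fragment; `async`
# comes from the actor framework):
#   updater.ensure_rule_inserted("INPUT --jump felix-INPUT", async=True)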
def _insert_rule(self, rule_fragment, log_level=logging.INFO):
"""
Execute the iptables commands to atomically (re)insert the
given rule fragment into iptables.
Has the side-effect of moving the rule to the top of the
chain.
:param rule_fragment: A rule fragment, starting with the chain
name; will be prefixed with "--insert ", for example, to
create the actual iptables line to execute.
"""
try:
# Do an atomic delete + insert of the rule. If the rule already
# exists then the rule will be moved to the start of the chain.
_log.log(log_level, "Attempting to move any existing instance "
"of rule %r to top of chain.", rule_fragment)
self._execute_iptables(['*%s' % self.table,
'--delete %s' % rule_fragment,
'--insert %s' % rule_fragment,
'COMMIT'],
fail_log_level=logging.DEBUG)
except FailedSystemCall:
# Assume the rule didn't exist. Try inserting it.
_log.log(log_level, "Didn't find any existing instance of rule "
"%r, inserting it instead.", rule_fragment)
self._execute_iptables(['*%s' % self.table,
'--insert %s' % rule_fragment,
'COMMIT'])
@actor_message(needs_own_batch=True)
def ensure_rule_removed(self, rule_fragment):
"""
If it exists, removes the given rule fragment. Caches that the
rule fragment should now not be present.
WARNING: due to the caching, this is only suitable for a small
number of static rules. For example, to add and remove our
"root" rules, which dispatch to our dynamic chains, from the
top-level kernel chains.
The caching is required to defend against other poorly-written
processes, which use an iptables-save and then iptables-restore
call to update their rules. That clobbers our updates (including
deletions).
:param rule_fragment: fragment to be deleted. For example,
"INPUT --jump felix-INPUT"
"""
_log.info("Removing rule %r", rule_fragment)
self._stats.increment("Rule removals")
self._inserted_rule_fragments.discard(rule_fragment)
self._removed_rule_fragments.add(rule_fragment)
self._remove_rule(rule_fragment)
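    # Illustrative sketch (not part of the original module): these two
    # messages are typically used as a pair to manage our "root" rules in the
    # kernel's built-in chains; the fragment below is a hypothetical example.
    #
    #   ipt_updater.ensure_rule_inserted("INPUT --jump felix-INPUT",
    #                                    async=True)
    #   ...
    #   ipt_updater.ensure_rule_removed("INPUT --jump felix-INPUT",
    #                                   async=True)
    #
    # Both calls cache the desired state, so a later refresh_iptables() can
    # re-apply the inserts and deletions if another process clobbers them.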
def _remove_rule(self, rule_fragment, log_level=logging.INFO):
"""
Execute the iptables commands required to (atomically) remove
the given rule_fragment if it is present.
:param rule_fragment: A rule fragment, starting with the chain
name; will be prefixed with "--delete " to create the
actual iptables line to execute.
"""
_log.log(log_level, "Ensuring rule is not present %r", rule_fragment)
num_instances = 0
try:
while True: # Delete all instances of rule.
self._execute_iptables(['*%s' % self.table,
'--delete %s' % rule_fragment,
'COMMIT'],
fail_log_level=logging.DEBUG)
num_instances += 1
assert num_instances < 100, "Too many deletes, infinite loop?"
except FailedSystemCall as e:
if num_instances == 0:
if "line 2 failed" in e.stderr:
                # Rule was parsed OK but failed to apply; this means that
                # it wasn't present.
_log.log(log_level, "Removal of rule %r rejected; not "
"present?", rule_fragment)
elif "at line: 2" in e.stderr and "doesn't exist" in e.stderr:
# Rule was rejected because some pre-requisite (such as an
# ipset) didn't exist.
_log.log(log_level, "Removal of rule %r failed due to "
"missing pre-requisite; rule must "
"not be present.", rule_fragment)
else:
_log.exception("Unexpected failure when trying to "
"delete rule %r" % rule_fragment)
raise
else:
_log.log(log_level, "%s instances of rule %r removed",
num_instances, rule_fragment)
@actor_message()
def delete_chains(self, chain_names, callback=None):
"""
Deletes the named chains.
        :raises FailedSystemCall: if a problem occurred.
"""
# We actually apply the changes in _finish_msg_batch(). Index the
# changes by table and chain.
_log.info("Deleting chains %s", chain_names)
self._stats.increment("Chain deletes")
for chain in chain_names:
self._txn.store_delete(chain)
if callback:
self._completion_callbacks.append(callback)
# It's much simpler to do cleanup in its own batch so that it doesn't have
# to worry about in-flight updates.
@actor_message(needs_own_batch=True)
def cleanup(self):
"""
Tries to clean up any left-over chains from a previous run that
are no longer required.
"""
_log.info("Cleaning up left-over iptables state.")
self._stats.increment("Cleanups performed")
# Start with the current state.
self._load_chain_names_from_iptables()
required_chains = set(self._requiring_chains.keys())
if not self._grace_period_finished:
# Ensure that all chains that are required but not explicitly
# programmed are stubs.
#
# We have to do this at the end of the graceful restart period
# during which we may have re-used old chains.
chains_to_stub = (required_chains -
self._explicitly_prog_chains)
_log.info("Graceful restart window finished, stubbing out "
"chains: %s", chains_to_stub)
try:
self._stub_out_chains(chains_to_stub)
except NothingToDo:
pass
self._grace_period_finished = True
# Now the generic cleanup, look for chains that we're not expecting to
# be there and delete them.
chains_we_tried_to_delete = set()
finished = False
while not finished:
# Try to delete all the unreferenced chains, we use a loop to
# ensure that we then clean up any chains that become unreferenced
# when we delete the previous lot.
unreferenced_chains = self._get_unreferenced_chains()
orphans = (unreferenced_chains -
self._explicitly_prog_chains -
required_chains)
if not chains_we_tried_to_delete.issuperset(orphans):
_log.info("Cleanup found these unreferenced chains to "
"delete: %s", orphans)
self._stats.increment("Orphans found during cleanup",
by=len(orphans))
chains_we_tried_to_delete.update(orphans)
self._delete_best_effort(orphans)
else:
# We've already tried to delete all the chains we found,
# give up.
_log.info("Cleanup finished, deleted %d chains, failed to "
"delete these chains: %s",
len(chains_we_tried_to_delete) - len(orphans),
orphans)
finished = True
# Then some sanity checks:
expected_chains = self._chains_in_dataplane
self._load_chain_names_from_iptables()
loaded_chains = self._chains_in_dataplane
missing_chains = ((self._explicitly_prog_chains | required_chains) -
self._chains_in_dataplane)
if expected_chains != self._chains_in_dataplane or missing_chains:
# This is serious, either there's a bug in our model of iptables
# or someone else has changed iptables under our feet.
_log.error("Chains in data plane inconsistent with calculated "
"index. In dataplane but not in index: %s; In index: "
"but not dataplane: %s; missing from iptables: %s. "
"Another process may have clobbered our updates.",
loaded_chains - expected_chains,
expected_chains - loaded_chains,
missing_chains)
# Try to recover: trigger a full refresh of the dataplane to
# bring it into sync.
self.refresh_iptables()
def _periodic_refresh(self):
while True:
# Jitter our sleep times by 20%.
gevent.sleep(self.refresh_interval * (1 + random.random() * 0.2))
self.refresh_iptables(async=True)
def _on_worker_died(self, watch_greenlet):
"""
Greenlet: spawned by the gevent Hub if the etcd watch loop ever
stops, kills the process.
"""
_log.critical("Worker greenlet died: %s; exiting.", watch_greenlet)
sys.exit(1)
@actor_message()
def refresh_iptables(self):
"""
Re-apply our iptables state to the kernel.
"""
_log.info("Refreshing all our chains")
self._txn.store_refresh()
def _start_msg_batch(self, batch):
self._reset_batched_work()
return batch
def _finish_msg_batch(self, batch, results):
start = time.time()
try:
# We use two passes to update the dataplane. In the first pass,
# we make any updates, create new chains and replace to-be-deleted
# chains with stubs (in case we fail to delete them below).
try:
input_lines = self._calculate_ipt_modify_input()
except NothingToDo:
_log.info("%s no updates in this batch.", self)
else:
self._execute_iptables(input_lines)
_log.info("%s Successfully processed iptables updates.", self)
self._chains_in_dataplane.update(self._txn.affected_chains)
except (IOError, OSError, FailedSystemCall) as e:
if isinstance(e, FailedSystemCall):
rc = e.retcode
else:
rc = "unknown"
if len(batch) == 1:
# We only executed a single message, report the failure.
_log.error("Non-retryable %s failure. RC=%s",
self._restore_cmd, rc)
self._stats.increment("Messages failed due to iptables "
"error")
if self._completion_callbacks:
self._completion_callbacks[0](e)
final_result = ResultOrExc(None, e)
results[0] = final_result
else:
_log.error("Non-retryable error from a combined batch, "
"splitting the batch to narrow down culprit.")
self._stats.increment("Split batch due to error")
raise SplitBatchAndRetry()
else:
# Modify succeeded, update our indexes for next time.
self._update_indexes()
# Make a best effort to delete the chains we no longer want.
# If we fail due to a stray reference from an orphan chain, we
# should catch them on the next cleanup().
self._delete_best_effort(self._txn.chains_to_delete)
for c in self._completion_callbacks:
c(None)
if self._txn.refresh:
# Re-apply our inserts and deletions. We do this after the
# above processing because our inserts typically reference
# our other chains and if the insert has been "rolled back"
# by another process then it's likely that the referenced
# chain was too.
_log.info("Transaction included a refresh, re-applying our "
"inserts and deletions.")
try:
for fragment in self._inserted_rule_fragments:
self._insert_rule(fragment, log_level=logging.DEBUG)
for fragment in self._removed_rule_fragments:
self._remove_rule(fragment, log_level=logging.DEBUG)
except FailedSystemCall:
_log.error("Failed to refresh inserted/removed rules")
finally:
self._reset_batched_work()
self._stats.increment("Batches finished")
end = time.time()
_log.debug("Batch time: %.2f %s", end - start, len(batch))
def _delete_best_effort(self, chains):
"""
        Try to delete all the chains in the input list. Any failures are
        logged but otherwise swallowed.
"""
if not chains:
return
chain_batches = [list(chains)]
while chain_batches:
batch = chain_batches.pop(0)
try:
# Try the next batch of chains...
_log.debug("Attempting to delete chains: %s", batch)
self._attempt_delete(batch)
except (IOError, OSError, FailedSystemCall):
_log.warning("Deleting chains %s failed", batch)
if len(batch) > 1:
# We were trying to delete multiple chains, split the
# batch in half and put the batches back on the queue to
# try again.
_log.info("Batch was of length %s, splitting", len(batch))
split_point = len(batch) // 2
first_half = batch[:split_point]
second_half = batch[split_point:]
assert len(first_half) + len(second_half) == len(batch)
if chain_batches:
chain_batches[0][:0] = second_half
else:
chain_batches[:0] = [second_half]
chain_batches[:0] = [first_half]
else:
# Only trying to delete one chain, give up. It must still
# be referenced.
_log.error("Failed to delete chain %s, giving up. Maybe "
"it is still referenced?", batch[0])
self._stats.increment("Chain delete failures")
else:
_log.debug("Deleted chains %s successfully, remaining "
"batches: %s", batch, len(chain_batches))
def _stub_out_chains(self, chains):
input_lines = self._calculate_ipt_stub_input(chains)
self._execute_iptables(input_lines)
def _attempt_delete(self, chains):
try:
input_lines = self._calculate_ipt_delete_input(chains)
except NothingToDo:
_log.debug("No chains to delete %s", chains)
else:
self._execute_iptables(input_lines, fail_log_level=logging.WARNING)
self._chains_in_dataplane -= set(chains)
def _update_indexes(self):
"""
Called after successfully processing a batch, updates the
indices with the values calculated by the _Transaction.
"""
self._programmed_chain_contents = self._txn.prog_chains
self._required_chains = self._txn.required_chns
self._requiring_chains = self._txn.requiring_chns
def _calculate_ipt_modify_input(self):
"""
Calculate the input for phase 1 of a batch, where we only modify and
create chains.
:raises NothingToDo: if the batch requires no modify operations.
"""
# Valid input looks like this.
#
# *table
# :chain_name
# :chain_name_2
# -F chain_name
# -A chain_name -j ACCEPT
# COMMIT
#
# The chains are created if they don't exist.
input_lines = []
# Track the chains that we decide we need to touch so that we can
# prepend the appropriate iptables header for each chain.
modified_chains = set()
# Generate rules to stub out chains. We stub chains out if they're
# referenced by another chain but they're not present for some reason.
for chain in self._txn.chains_to_stub_out:
if (self._grace_period_finished or
chain in self._txn.explicit_deletes or
chain not in self._chains_in_dataplane):
# During graceful restart, we only stub out chains if
# * the chain is genuinely missing from the dataplane, or
# * we were told to delete the chain explicitly (but decided
# we couldn't because it was still referenced), implying
# that we now know the state of that chain and we should not
# wait for the end of graceful restart to clean it up.
modified_chains.add(chain)
input_lines.extend(_stub_drop_rules(chain))
        # Generate rules to stub out chains that we're about to delete, just
        # in case the delete fails later on.  Stubbing a chain out also stops
        # it from referencing other chains and accidentally keeping them
        # alive.
for chain in self._txn.chains_to_delete:
modified_chains.add(chain)
input_lines.extend(_stub_drop_rules(chain))
# Now add the actual chain updates.
for chain, chain_updates in self._txn.updates.iteritems():
modified_chains.add(chain)
input_lines.extend(chain_updates)
# Finally, prepend the input with instructions that do an idempotent
# create-and-flush operation for the chains that we need to create or
# rewrite.
input_lines[:0] = [":%s -" % chain for chain in modified_chains]
if not input_lines:
raise NothingToDo
return ["*%s" % self.table] + input_lines + ["COMMIT"]
def _calculate_ipt_delete_input(self, chains):
"""
Calculate the input for phase 2 of a batch, where we actually
try to delete chains.
:raises NothingToDo: if the batch requires no delete operations.
"""
input_lines = []
found_delete = False
input_lines.append("*%s" % self.table)
for chain_name in chains:
# Delete the chain
input_lines.append(":%s -" % chain_name)
input_lines.append("--delete-chain %s" % chain_name)
found_delete = True
input_lines.append("COMMIT")
if found_delete:
return input_lines
else:
raise NothingToDo()
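    # Illustrative sketch (not part of the original module): for the "filter"
    # table and chains=["felix-from-1234"] (a hypothetical name), the method
    # above returns input equivalent to:
    #
    #   *filter
    #   :felix-from-1234 -
    #   --delete-chain felix-from-1234
    #   COMMIT
    #
    # The ":chain -" line declares the chain (creating it if it doesn't
    # already exist) so that the --delete-chain can't fail simply because the
    # chain was never programmed.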
def _calculate_ipt_stub_input(self, chains):
"""
Calculate input to replace the given chains with stubs.
"""
input_lines = []
found_chain_to_stub = False
input_lines.append("*%s" % self.table)
for chain_name in chains:
# Stub the chain
input_lines.append(":%s -" % chain_name)
input_lines.extend(_stub_drop_rules(chain_name))
found_chain_to_stub = True
input_lines.append("COMMIT")
if found_chain_to_stub:
return input_lines
else:
raise NothingToDo()
def _execute_iptables(self, input_lines, fail_log_level=logging.ERROR):
"""
Runs ip(6)tables-restore with the given input. Retries iff
the COMMIT fails.
:raises FailedSystemCall: if the command fails on a non-commit
line or if it repeatedly fails and retries are exhausted.
"""
backoff = 0.01
num_tries = 0
success = False
while not success:
input_str = "\n".join(input_lines) + "\n"
_log.debug("%s input:\n%s", self._restore_cmd, input_str)
# Run iptables-restore in noflush mode so that it doesn't
# blow away all the tables we're not touching.
cmd = [self._restore_cmd, "--noflush", "--verbose"]
try:
futils.check_call(cmd, input_str=input_str)
except FailedSystemCall as e:
# Parse the output to determine if error is retryable.
retryable, detail = _parse_ipt_restore_error(input_lines,
e.stderr)
num_tries += 1
if retryable:
if num_tries < MAX_IPT_RETRIES:
_log.info("%s failed with retryable error. Retry in "
"%.2fs", self._iptables_cmd, backoff)
self._stats.increment("iptables commit failure "
"(retryable)")
gevent.sleep(backoff)
if backoff > MAX_IPT_BACKOFF:
backoff = MAX_IPT_BACKOFF
backoff *= (1.5 + random.random())
continue
else:
_log.log(
fail_log_level,
"Failed to run %s. Out of retries: %s.\n"
"Output:\n%s\n"
"Error:\n%s\n"
"Input was:\n%s",
self._restore_cmd, detail, e.stdout, e.stderr,
input_str)
self._stats.increment("iptables commit failure "
"(out of retries)")
else:
_log.log(
fail_log_level,
"%s failed with non-retryable error: %s.\n"
"Output:\n%s\n"
"Error:\n%s\n"
"Input was:\n%s",
self._restore_cmd, detail, e.stdout, e.stderr,
input_str)
self._stats.increment("iptables non-retryable failure")
raise
else:
self._stats.increment("iptables success")
success = True
class _Transaction(object):
"""
This class keeps track of a sequence of updates to an
IptablesUpdater's indexing data structures.
It takes a copy of the data structures at creation and then
gets fed the sequence of updates and deletes; then, on-demand
it calculates the dataplane deltas that are required and
caches the results.
The general idea is that, if the iptables-restore call fails,
the Transaction object can be thrown away, leaving the
IptablesUpdater's state unchanged.
"""
def __init__(self,
old_prog_chain_contents,
old_deps,
old_requiring_chains):
# Figure out what stub chains should already be present.
old_required_chains = set(old_requiring_chains.keys())
old_explicitly_programmed_chains = set(old_prog_chain_contents.keys())
self.already_stubbed = (old_required_chains -
old_explicitly_programmed_chains)
# Deltas.
self.updates = {}
self.explicit_deletes = set()
# New state. These will be copied back to the IptablesUpdater
# if the transaction succeeds.
self.prog_chains = old_prog_chain_contents.copy()
self.required_chns = copy.deepcopy(old_deps)
self.requiring_chns = copy.deepcopy(old_requiring_chains)
# Memoized values of the properties below. See chains_to_stub(),
# affected_chains() and chains_to_delete() below.
self._chains_to_stub = None
self._affected_chains = None
self._chains_to_delete = None
# Whether to do a refresh.
self.refresh = False
def store_delete(self, chain):
"""
Records the delete of the given chain, updating the per-batch
indexes as required.
"""
_log.debug("Storing delete of chain %s", chain)
assert chain is not None
# Clean up dependency index.
self._update_deps(chain, set())
# Mark for deletion.
self.explicit_deletes.add(chain)
# Remove any now-stale rewrite state.
self.updates.pop(chain, None)
self.prog_chains.pop(chain, None)
self._invalidate_cache()
def store_rewrite_chain(self, chain, updates, dependencies):
"""
Records the rewrite of the given chain, updating the per-batch
indexes as required.
"""
_log.debug("Storing updates to chain %s", chain)
assert chain is not None
assert updates is not None
assert dependencies is not None
# Clean up reverse dependency index.
self._update_deps(chain, dependencies)
# Remove any deletion, if present.
self.explicit_deletes.discard(chain)
# Store off the update.
self.updates[chain] = updates
self.prog_chains[chain] = updates
self._invalidate_cache()
def store_refresh(self):
"""
Records that we should refresh all chains as part of this transaction.
"""
# Copy the whole state over to the delta for this transaction so it
# all gets reapplied. The dependency index should already be correct.
self.updates.update(self.prog_chains)
self.refresh = True
self._invalidate_cache()
def _update_deps(self, chain, new_deps):
"""
Updates the forward/backward dependency indexes for the given
chain.
"""
        # Remove all the old deps from the reverse index.
old_deps = self.required_chns.get(chain, set())
for dependency in old_deps:
self.requiring_chns[dependency].discard(chain)
if not self.requiring_chns[dependency]:
del self.requiring_chns[dependency]
# Add in the new deps to the reverse index.
for dependency in new_deps:
self.requiring_chns[dependency].add(chain)
# And store them off in the forward index.
if new_deps:
self.required_chns[chain] = new_deps
else:
self.required_chns.pop(chain, None)
def _invalidate_cache(self):
self._chains_to_stub = None
self._affected_chains = None
self._chains_to_delete = None
@property
def affected_chains(self):
"""
The set of chains that are touched by this update (whether
deleted, modified, or to be stubbed).
"""
if self._affected_chains is None:
updates = set(self.updates.keys())
stubs = self.chains_to_stub_out
deletes = self.chains_to_delete
_log.debug("Affected chains: deletes=%s, updates=%s, stubs=%s",
deletes, updates, stubs)
self._affected_chains = deletes | updates | stubs
return self._affected_chains
@property
def chains_to_stub_out(self):
"""
The set of chains that need to be stubbed as part of this update.
"""
if self._chains_to_stub is None:
# Don't stub out chains that we're now explicitly programming.
impl_required_chains = (self.referenced_chains -
set(self.prog_chains.keys()))
if self.refresh:
# Re-stub all chains that should be stubbed.
_log.debug("Refresh in progress, re-stub all stubbed chains.")
self._chains_to_stub = impl_required_chains
else:
# Don't stub out chains that are already stubbed.
_log.debug("No refresh in progress.")
self._chains_to_stub = (impl_required_chains -
self.already_stubbed)
return self._chains_to_stub
@property
def chains_to_delete(self):
"""
The set of chains to actually delete from the dataplane. Does
not include the chains that we need to stub out.
"""
if self._chains_to_delete is None:
# We'd like to get rid of these chains if we can.
chains_we_dont_want = self.explicit_deletes | self.already_stubbed
_log.debug("Chains we'd like to delete: %s", chains_we_dont_want)
# But we need to keep the chains that are explicitly programmed
# or referenced.
chains_we_need = (set(self.prog_chains.keys()) |
self.referenced_chains)
_log.debug("Chains we still need for some reason: %s",
chains_we_need)
self._chains_to_delete = chains_we_dont_want - chains_we_need
_log.debug("Chains we can delete: %s", self._chains_to_delete)
return self._chains_to_delete
@property
def referenced_chains(self):
"""
Set of chains referred to by other chains.
Does not include chains that are explicitly programmed but not
referenced by anything else.
"""
return set(self.requiring_chns.keys())
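# Illustrative sketch (not part of the original module) of how the updater
# drives a _Transaction for one message batch: deltas accumulate against
# copies of the indexes and are only copied back if iptables-restore
# succeeds.  Chain names here are hypothetical.
#
#   txn = _Transaction(programmed_chain_contents, required_chains,
#                      requiring_chains)
#   txn.store_rewrite_chain("felix-FORWARD",
#                           ["--append felix-FORWARD --jump ACCEPT"],
#                           set())
#   txn.store_delete("felix-old-chain")
#   txn.affected_chains   # all chains touched by this batch
#   txn.chains_to_delete  # chains that can actually be removed
#
# On failure, the object is simply discarded, leaving the IptablesUpdater's
# own indexes untouched.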
def _stub_drop_rules(chain):
"""
:return: List of rule fragments to replace the given chain with a
single drop rule.
"""
return ["--flush %s" % chain,
frules.commented_drop_fragment(chain,
'WARNING Missing chain DROP:')]
def _extract_our_chains(table, raw_ipt_save_output):
"""
Parses the output from iptables-save to extract the set of
felix-programmed chains.
"""
chains = set()
current_table = None
for line in raw_ipt_save_output.splitlines():
line = line.strip()
if line.startswith("*"):
current_table = line[1:]
elif line.startswith(":") and current_table == table:
chain = line[1:line.index(" ")]
if chain.startswith(FELIX_PREFIX):
chains.add(chain)
return chains
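# Illustrative sketch (not part of the original module): given iptables-save
# output such as
#
#   *filter
#   :INPUT ACCEPT [0:0]
#   :felix-INPUT - [0:0]
#   :felix-FORWARD - [0:0]
#   COMMIT
#
# _extract_our_chains("filter", output) returns {"felix-INPUT",
# "felix-FORWARD"}, assuming FELIX_PREFIX is the usual "felix-" prefix.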
def _extract_our_unreffed_chains(raw_ipt_output):
"""
Parses the output from "ip(6)tables --list" to find the set of
felix-programmed chains that are not referenced.
"""
chains = set()
last_line = None
for line in raw_ipt_output.splitlines():
# Look for lines that look like this after a blank line.
# Chain ufw-user-output (1 references)
if ((not last_line or not last_line.strip()) and
line.startswith("Chain")):
if "policy" in line:
_log.debug("Skipping root-level chain")
continue
m = re.match(r'^Chain ([^ ]+) \((\d+).+\)', line)
assert m, "Regex failed to match Chain line %r" % line
chain_name = m.group(1)
ref_count = int(m.group(2))
_log.debug("Found chain %s, ref count %s", chain_name, ref_count)
if chain_name.startswith(FELIX_PREFIX) and ref_count == 0:
chains.add(chain_name)
last_line = line
return chains
def _parse_ipt_restore_error(input_lines, err):
"""
Parses the stderr output from an iptables-restore call.
:param input_lines: list of lines of input that we passed to
iptables-restore. (Used for debugging.)
    :param str err: captured stderr from iptables-restore.
:return tuple[bool,str]: tuple, the first (bool) element indicates
whether the error is retryable; the second is a detail message.
"""
match = re.search(r"line (\d+) failed", err)
if match:
# Have a line number, work out if this was a commit
# failure, which is caused by concurrent access and is
# retryable.
line_number = int(match.group(1))
_log.debug("ip(6)tables-restore failure on line %s", line_number)
line_index = line_number - 1
offending_line = input_lines[line_index]
        if offending_line.strip() == "COMMIT":
return True, "COMMIT failed; likely concurrent access."
else:
return False, "Line %s failed: %s" % (line_number, offending_line)
else:
return False, "ip(6)tables-restore failed with output: %s" % err
class NothingToDo(Exception):
pass
class IptablesInconsistent(Exception):
pass
|
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import math
from op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
from paddle.fluid.framework import Program, program_guard
class TestOneHotOp(OpTest):
def setUp(self):
self.op_type = 'one_hot_v2'
depth = 10
depth_np = np.array(10).astype('int32')
dimension = 12
x_lod = [[4, 1, 3, 3]]
x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0])])
out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32')
for i in range(np.product(x.shape)):
out[i, x[i]] = 1.0
self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
self.attrs = {'dtype': int(core.VarDesc.VarType.FP32)}
self.outputs = {'Out': (out, x_lod)}
def test_check_output(self):
self.check_output(check_dygraph=False)
class TestOneHotOp_attr(OpTest):
def setUp(self):
self.op_type = 'one_hot_v2'
depth = 10
dimension = 12
x_lod = [[4, 1, 3, 3]]
x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])
out = np.zeros(shape=(np.product(x.shape[:-1]), 1,
depth)).astype('float32')
for i in range(np.product(x.shape)):
out[i, 0, x[i]] = 1.0
self.inputs = {'X': (x, x_lod)}
self.attrs = {'dtype': int(core.VarDesc.VarType.FP32), 'depth': depth}
self.outputs = {'Out': (out, x_lod)}
def test_check_output(self):
self.check_output(check_dygraph=False)
class TestOneHotOp_default_dtype(OpTest):
def setUp(self):
self.op_type = 'one_hot_v2'
depth = 10
depth_np = np.array(10).astype('int32')
dimension = 12
x_lod = [[4, 1, 3, 3]]
x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0])])
out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32')
for i in range(np.product(x.shape)):
out[i, x[i]] = 1.0
self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
self.attrs = {}
self.outputs = {'Out': (out, x_lod)}
def test_check_output(self):
self.check_output(check_dygraph=False)
class TestOneHotOp_default_dtype_attr(OpTest):
def setUp(self):
self.op_type = 'one_hot_v2'
depth = 10
dimension = 12
x_lod = [[4, 1, 3, 3]]
x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])
out = np.zeros(shape=(np.product(x.shape[:-1]), 1,
depth)).astype('float32')
for i in range(np.product(x.shape)):
out[i, 0, x[i]] = 1.0
self.inputs = {'X': (x, x_lod)}
self.attrs = {'depth': depth}
self.outputs = {'Out': (out, x_lod)}
def test_check_output(self):
self.check_output(check_dygraph=False)
class TestOneHotOp_out_of_range(OpTest):
def setUp(self):
self.op_type = 'one_hot_v2'
depth = 10
x_lod = [[4, 1, 3, 3]]
x = [np.random.choice([-1, depth]) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0])])
out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32')
self.inputs = {'X': (x, x_lod)}
self.attrs = {'depth': depth, 'allow_out_of_range': True}
self.outputs = {'Out': (out, x_lod)}
def test_check_output(self):
self.check_output(check_dygraph=False)
class TestOneHotOp_exception(unittest.TestCase):
def setUp(self):
self.op_type = 'one_hot_v2'
self.depth = 10
self.place = core.CPUPlace()
self.dimension = 12
self.x = core.LoDTensor()
x_lod = [[4, 1, 3, 3]]
data = [np.random.randint(11, 20) for i in range(sum(x_lod[0]))]
data = np.array(data).astype('int').reshape([sum(x_lod[0]), 1])
self.x.set(data, self.place)
self.x.set_recursive_sequence_lengths(x_lod)
def test_check_output(self):
program = Program()
with program_guard(program):
x = fluid.layers.data(
name='x', shape=[self.dimension], dtype='float32', lod_level=1)
block = program.current_block()
one_hot_out = block.create_var(
name="one_hot_out",
type=core.VarDesc.VarType.LOD_TENSOR,
dtype='float32')
block.append_op(
type='one_hot',
inputs={'X': x},
attrs={'depth': self.depth},
outputs={'Out': one_hot_out})
exe = fluid.Executor(self.place)
def run():
exe.run(feed={'x': self.x},
fetch_list=[one_hot_out],
return_numpy=False)
self.assertRaises(ValueError, run)
class TestOneHotOpApi(unittest.TestCase):
def test_api(self):
depth = 10
self._run(depth)
def test_api_with_depthTensor(self):
depth = fluid.layers.assign(input=np.array([10], dtype=np.int32))
self._run(depth)
def test_api_with_dygraph(self):
depth = 10
label = np.array([np.random.randint(0, depth - 1)
for i in range(6)]).reshape([6, 1])
with fluid.dygraph.guard():
one_hot_label = fluid.one_hot(
input=fluid.dygraph.to_variable(label), depth=depth)
def _run(self, depth):
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
one_hot_label = fluid.one_hot(input=label, depth=depth)
place = fluid.CPUPlace()
label_data = np.array([np.random.randint(0, 10 - 1)
for i in range(6)]).reshape([6, 1])
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
ret = exe.run(feed={'label': label_data, },
fetch_list=[one_hot_label],
return_numpy=False)
class BadInputTestOnehotV2(unittest.TestCase):
def test_error(self):
with fluid.program_guard(fluid.Program()):
def test_bad_x():
label = fluid.layers.data(
name="label",
shape=[4],
append_batch_size=False,
dtype="float32")
one_hot_label = fluid.one_hot(input=label, depth=4)
self.assertRaises(TypeError, test_bad_x)
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
|
|
from django.db import connection, connections
from django.db.migrations.exceptions import (
AmbiguityError, InconsistentMigrationHistory, NodeNotFoundError,
)
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.recorder import MigrationRecorder
from django.test import TestCase, modify_settings, override_settings
class RecorderTests(TestCase):
"""
Tests recording migrations as applied or not.
"""
multi_db = True
def test_apply(self):
"""
Tests marking migrations as applied/unapplied.
"""
recorder = MigrationRecorder(connection)
self.assertEqual(
{(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"},
set(),
)
recorder.record_applied("myapp", "0432_ponies")
self.assertEqual(
{(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"},
{("myapp", "0432_ponies")},
)
# That should not affect records of another database
recorder_other = MigrationRecorder(connections['other'])
self.assertEqual(
{(x, y) for (x, y) in recorder_other.applied_migrations() if x == "myapp"},
set(),
)
recorder.record_unapplied("myapp", "0432_ponies")
self.assertEqual(
{(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"},
set(),
)
class LoaderTests(TestCase):
"""
Tests the disk and database loader, and running through migrations
in memory.
"""
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
@modify_settings(INSTALLED_APPS={'append': 'basic'})
def test_load(self):
"""
Makes sure the loader can load the migrations for the test apps,
and then render them out to a new Apps.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0002_second"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0002_second"))
self.assertEqual(len(project_state.models), 2)
author_state = project_state.models["migrations", "author"]
self.assertEqual(
[x for x, y in author_state.fields],
["id", "name", "slug", "age", "rating"]
)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "author"]
)
# Ensure we've included unmigrated apps in there too
self.assertIn("basic", project_state.real_apps)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_unmigdep"})
def test_load_unmigrated_dependency(self):
"""
Makes sure the loader can load migrations with a dependency on an unmigrated app.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0001_initial")),
[
('contenttypes', '0001_initial'),
('auth', '0001_initial'),
("migrations", "0001_initial"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0001_initial"))
self.assertEqual(len([m for a, m in project_state.models if a == "migrations"]), 1)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "user"]
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"})
def test_run_before(self):
"""
Makes sure the loader uses Migration.run_before.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0003_third"),
("migrations", "0002_second"),
],
)
@override_settings(MIGRATION_MODULES={
"migrations": "migrations.test_migrations_first",
"migrations2": "migrations2.test_migrations_2_first",
})
@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
def test_first(self):
"""
Makes sure the '__first__' migrations build correctly.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "second")),
[
("migrations", "thefirst"),
("migrations2", "0001_initial"),
("migrations2", "0002_second"),
("migrations", "second"),
],
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_name_match(self):
"Tests prefix name matching"
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.get_migration_by_prefix("migrations", "0001").name,
"0001_initial",
)
with self.assertRaises(AmbiguityError):
migration_loader.get_migration_by_prefix("migrations", "0")
with self.assertRaises(KeyError):
migration_loader.get_migration_by_prefix("migrations", "blarg")
def test_load_import_error(self):
with override_settings(MIGRATION_MODULES={"migrations": "import_error_package"}):
with self.assertRaises(ImportError):
MigrationLoader(connection)
def test_load_module_file(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.file"}):
loader = MigrationLoader(connection)
self.assertIn(
"migrations", loader.unmigrated_apps,
"App with migrations module file not in unmigrated apps."
)
def test_load_empty_dir(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.namespace"}):
loader = MigrationLoader(connection)
self.assertIn(
"migrations", loader.unmigrated_apps,
"App missing __init__.py in migrations module not in unmigrated apps."
)
@override_settings(
INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'],
)
def test_marked_as_migrated(self):
"""
Undefined MIGRATION_MODULES implies default migration module.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(migration_loader.migrated_apps, {'migrated_app'})
self.assertEqual(migration_loader.unmigrated_apps, set())
@override_settings(
INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'],
MIGRATION_MODULES={"migrated_app": None},
)
def test_marked_as_unmigrated(self):
"""
MIGRATION_MODULES allows disabling of migrations for a particular app.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(migration_loader.migrated_apps, set())
self.assertEqual(migration_loader.unmigrated_apps, {'migrated_app'})
@override_settings(
INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'],
MIGRATION_MODULES={'migrated_app': 'missing-module'},
)
def test_explicit_missing_module(self):
"""
If a MIGRATION_MODULES override points to a missing module, the error
raised during the importation attempt should be propagated unless
`ignore_no_migrations=True`.
"""
with self.assertRaisesMessage(ImportError, 'missing-module'):
migration_loader = MigrationLoader(connection)
migration_loader = MigrationLoader(connection, ignore_no_migrations=True)
self.assertEqual(migration_loader.migrated_apps, set())
self.assertEqual(migration_loader.unmigrated_apps, {'migrated_app'})
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_loading_squashed(self):
"Tests loading a squashed migration"
migration_loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
# Loading with nothing applied should just give us the one node
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
1,
)
# However, fake-apply one migration and it should now use the old two
recorder.record_applied("migrations", "0001_initial")
migration_loader.build_graph()
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
2,
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"})
def test_loading_squashed_complex(self):
"Tests loading a complex set of squashed migrations"
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
def num_nodes():
plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
return len(plan - loader.applied_migrations)
# Empty database: use squashed migration
loader.build_graph()
self.assertEqual(num_nodes(), 5)
# Starting at 1 or 2 should use the squashed migration too
recorder.record_applied("migrations", "1_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "2_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
        # However, starting at 3 to 5, we cannot use the squashed migration
recorder.record_applied("migrations", "3_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "4_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
        # Starting at 5 to 7 we are past the squashed migrations
recorder.record_applied("migrations", "5_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 2)
recorder.record_applied("migrations", "6_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 1)
recorder.record_applied("migrations", "7_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 0)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_complex_multi_apps.app1",
"app2": "migrations.test_migrations_squashed_complex_multi_apps.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_complex_multi_apps.app1",
"migrations.test_migrations_squashed_complex_multi_apps.app2",
]})
def test_loading_squashed_complex_multi_apps(self):
loader = MigrationLoader(connection)
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
expected_plan = {
('app1', '1_auto'),
('app2', '1_squashed_2'),
('app1', '2_squashed_3'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_complex_multi_apps.app1",
"app2": "migrations.test_migrations_squashed_complex_multi_apps.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_complex_multi_apps.app1",
"migrations.test_migrations_squashed_complex_multi_apps.app2",
]})
def test_loading_squashed_complex_multi_apps_partially_applied(self):
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
recorder.record_applied('app1', '1_auto')
recorder.record_applied('app1', '2_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app2', '1_squashed_2'),
('app1', '3_auto'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_erroneous"})
def test_loading_squashed_erroneous(self):
"Tests loading a complex but erroneous set of squashed migrations"
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
def num_nodes():
plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
return len(plan - loader.applied_migrations)
# Empty database: use squashed migration
loader.build_graph()
self.assertEqual(num_nodes(), 5)
# Starting at 1 or 2 should use the squashed migration too
recorder.record_applied("migrations", "1_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "2_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# However, starting at 3 or 4, nonexistent migrations would be needed.
msg = ("Migration migrations.6_auto depends on nonexistent node ('migrations', '5_auto'). "
"Django tried to replace migration migrations.5_auto with any of "
"[migrations.3_squashed_5] but wasn't able to because some of the replaced "
"migrations are already applied.")
recorder.record_applied("migrations", "3_auto")
with self.assertRaisesMessage(NodeNotFoundError, msg):
loader.build_graph()
recorder.record_applied("migrations", "4_auto")
with self.assertRaisesMessage(NodeNotFoundError, msg):
loader.build_graph()
        # Starting at 5 to 7 we are past the squashed migrations
recorder.record_applied("migrations", "5_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 2)
recorder.record_applied("migrations", "6_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 1)
recorder.record_applied("migrations", "7_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 0)
@override_settings(
MIGRATION_MODULES={'migrations': 'migrations.test_migrations'},
INSTALLED_APPS=['migrations'],
)
def test_check_consistent_history(self):
loader = MigrationLoader(connection=None)
loader.check_consistent_history(connection)
recorder = MigrationRecorder(connection)
recorder.record_applied('migrations', '0002_second')
msg = (
"Migration migrations.0002_second is applied before its dependency "
"migrations.0001_initial on database 'default'."
)
with self.assertRaisesMessage(InconsistentMigrationHistory, msg):
loader.check_consistent_history(connection)
@override_settings(
MIGRATION_MODULES={'migrations': 'migrations.test_migrations_squashed_extra'},
INSTALLED_APPS=['migrations'],
)
def test_check_consistent_history_squashed(self):
"""
MigrationLoader.check_consistent_history() should ignore unapplied
squashed migrations that have all of their `replaces` applied.
"""
loader = MigrationLoader(connection=None)
recorder = MigrationRecorder(connection)
recorder.record_applied('migrations', '0001_initial')
recorder.record_applied('migrations', '0002_second')
loader.check_consistent_history(connection)
recorder.record_applied('migrations', '0003_third')
loader.check_consistent_history(connection)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_ref_squashed.app1",
"app2": "migrations.test_migrations_squashed_ref_squashed.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_ref_squashed.app1",
"migrations.test_migrations_squashed_ref_squashed.app2",
]})
def test_loading_squashed_ref_squashed(self):
"Tests loading a squashed migration with a new migration referencing it"
r"""
The sample migrations are structured like this:
        app_1       1 --> 2 ---------------------*--> 3        *--> 4
                     \                          /             /
                      *-------------------*----/--> 2_sq_3 --*
                       \                 /    /
        =============== \ ============= / == / ======================
        app_2            *--> 1_sq_2 --*    /
                          \                /
                           *--> 1 --> 2 --*
Where 2_sq_3 is a replacing migration for 2 and 3 in app_1,
as 1_sq_2 is a replacing migration for 1 and 2 in app_2.
"""
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
# Load with nothing applied: both migrations squashed.
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app1', '1_auto'),
('app2', '1_squashed_2'),
('app1', '2_squashed_3'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
# Fake-apply a few from app1: unsquashes migration in app1.
recorder.record_applied('app1', '1_auto')
recorder.record_applied('app1', '2_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app2', '1_squashed_2'),
('app1', '3_auto'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
# Fake-apply one from app2: unsquashes migration in app2 too.
recorder.record_applied('app2', '1_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app2', '2_auto'),
('app1', '3_auto'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains TF-Slim code for training models.
This script contains various functions for training models. These include
manipulating gradients, creating a `train_op` (an operation that computes the
loss and applies the gradients) and a training loop function. The training loop
allows the user to pass in the `train_op` and runs the optimization according
to user-specified arguments. Note that the training loop uses the
tf.train.Supervisor and its managed_session in its implementation to ensure the
ability of worker processes to recover from failures.
************************************
* A simple working training script *
************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
slim.losses.log_loss(predictions, labels)
total_loss = slim.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run training.
slim.learning.train(train_op, my_log_dir)
*************************
* Creating the train_op *
*************************
In order to train, TF-Slim's train loop needs a train_op: an `Operation` that
(a) computes the loss, (b) applies the gradients to update the weights and
(c) returns the value of the loss. slim.learning.create_train_op creates
such an `Operation`. This function also provides the ability to manipulate
the gradients using a few arguments:
# Create the train_op and clip the gradient norms:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
clip_gradient_norm=4)
# Create the train_op and scale the gradients by providing a map from variable
# name (or variable) to a scaling coefficient:
gradient_multipliers = {
'conv0/weights': 1.2,
'fc8/weights': 3.4,
}
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
gradient_multipliers=gradient_multipliers)
****************************************************************
* Performing additional (non-gradient) updates during training *
****************************************************************
Many networks utilize modules, like BatchNorm, that require performing a series
of non-gradient updates during training. slim.learning.create_train_op allows
a user to pass in a list of update_ops to call along with the gradient updates.
train_op = slim.learning.create_train_op(total_loss, optimizer, update_ops)
By default, slim.learning.create_train_op includes all update ops that are
part of the `tf.GraphKeys.UPDATE_OPS` collection. Additionally, TF-Slim's
slim.batch_norm function adds the moving mean and moving variance updates to
this collection. Consequently, users who want to use slim.batch_norm will not
need to take any additional steps in order to have the moving mean and moving
variance updates be computed.
However, users with additional, specialized updates can either override the
default update ops or simply add additional update ops to the
`tf.GraphKeys.UPDATE_OPS` collection:
# Force TF-Slim NOT to use ANY update_ops:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
update_ops=[])
# Use an alternative set of update ops:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
update_ops=my_other_update_ops)
# Use an alternative set of update ops in addition to the default updates:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update0)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update1)
train_op = slim.learning.create_train_op(
total_loss,
optimizer)
# Which is the same as:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS))
******************************************
* Initializing a model from a checkpoint *
******************************************
It is common to want to 'warm-start' a model from a pre-trained checkpoint.
TF-Slim provides a convenient mechanism for doing so:
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Create the initial assignment op
checkpoint_path = '/path/to/old_model_checkpoint'
variables_to_restore = slim.get_model_variables()
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
checkpoint_path, variables_to_restore)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
***************************************************************************
* Initializing a model from a checkpoint whose variable names don't match *
***************************************************************************
At times, a user may want to initialize a new model with values from a
checkpoint whose variable names do not match those of the current model. In this
case, one needs to create a mapping from the checkpoint variable names to the
current model variables. This requires only a small modification of the code
above:
...
# Creates a model with two variables, var0 and var1
predictions = MyModel(images)
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Create the mapping:
variables_to_restore = {
'name_var_0_in_checkpoint': slim.get_unique_variable('var0'),
'name_var_1_in_checkpoint': slim.get_unique_variable('var1')
}
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
checkpoint_path, variables_to_restore)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
*************************************************
* Fine-Tuning Part of a model from a checkpoint *
*************************************************
Rather than initializing all of the weights of a given model, we sometimes
only want to restore some of the weights from a checkpoint. To do this, one
need only filter those variables to initialize as follows:
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Specify the variables to restore via a list of inclusion or exclusion
# patterns:
variables_to_restore = slim.get_variables_to_restore(
include=["conv"], exclude=["fc8", "fc9])
# or
variables_to_restore = slim.get_variables_to_restore(exclude=["conv"])
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
checkpoint_path, variables_to_restore)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
******************************************************
* Initializing model variables from values in memory *
******************************************************
One may want to initialize the weights of a model from values from an arbitrary
source (a text document, matlab file, etc). While this is technically feasible
using plain TensorFlow, it also results in the values of your weights being
stored in the graph. For large models, this becomes prohibitively large. TF-Slim
allows you to perform this initial assignment without having to store the values
of the initial model in the graph itself by using placeholders and a feed
dictionary:
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Create the mapping from variable names to values:
var0_initial_value = ReadFromDisk(...)
var1_initial_value = ReadFromDisk(...)
var_names_to_values = {
'var0': var0_initial_value,
'var1': var1_initial_value,
}
init_assign_op, init_feed_dict = slim.assign_from_values(var_names_to_values)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.training.python.training import training
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import timeline
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import optimizer as tf_optimizer
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import supervisor
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
__all__ = [
'add_gradients_summaries', 'clip_gradient_norms', 'multiply_gradients',
'create_train_op', 'train_step', 'train'
]
def clip_gradient_norms(gradients_to_variables, max_norm):
"""Clips the gradients by the given value.
Args:
gradients_to_variables: A list of gradient to variable pairs (tuples).
max_norm: the maximum norm value.
Returns:
A list of clipped gradient to variable pairs.
"""
clipped_grads_and_vars = []
for grad, var in gradients_to_variables:
if grad is not None:
if isinstance(grad, ops.IndexedSlices):
tmp = clip_ops.clip_by_norm(grad.values, max_norm)
grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
else:
grad = clip_ops.clip_by_norm(grad, max_norm)
clipped_grads_and_vars.append((grad, var))
return clipped_grads_and_vars
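# Illustrative sketch (not part of this module's original text): clip every
# gradient to an L2 norm of at most 4.0 before applying it with a TF1-style
# optimizer; `optimizer` and `total_loss` are assumed to be defined as in the
# module docstring above.
#
#   grads_and_vars = optimizer.compute_gradients(total_loss)
#   grads_and_vars = clip_gradient_norms(grads_and_vars, max_norm=4.0)
#   train_op = optimizer.apply_gradients(grads_and_vars)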
def multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
gradient_multipliers: A map from either `Variables` or `Variable` op names
to the coefficient by which the associated gradient should be scaled.
Returns:
The updated list of gradient to variable pairs.
Raises:
ValueError: If `grads_and_vars` is not a list or if `gradient_multipliers`
is empty or None or if `gradient_multipliers` is not a dictionary.
"""
if not isinstance(grads_and_vars, list):
raise ValueError('`grads_and_vars` must be a list.')
if not gradient_multipliers:
raise ValueError('`gradient_multipliers` is empty.')
if not isinstance(gradient_multipliers, dict):
raise ValueError('`gradient_multipliers` must be a dict.')
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if var in gradient_multipliers or var.op.name in gradient_multipliers:
key = var if var in gradient_multipliers else var.op.name
if grad is None:
raise ValueError('Requested multiple of `None` gradient.')
multiplier = gradient_multipliers[key]
if not isinstance(multiplier, ops.Tensor):
multiplier = constant_op.constant(multiplier, dtype=grad.dtype)
if isinstance(grad, ops.IndexedSlices):
tmp = grad.values * multiplier
grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
else:
grad *= multiplier
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
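# Illustrative sketch (not part of this module's original text): scale one
# layer's gradient before applying it.  The variable name 'fc8/weights' is a
# hypothetical example.
#
#   grads_and_vars = optimizer.compute_gradients(total_loss)
#   grads_and_vars = multiply_gradients(grads_and_vars, {'fc8/weights': 0.1})
#   train_op = optimizer.apply_gradients(grads_and_vars)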
def add_gradients_summaries(grads_and_vars):
"""Add summaries to gradients.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
Returns:
The list of created summaries.
"""
summaries = []
for grad, var in grads_and_vars:
if grad is not None:
if isinstance(grad, ops.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
summaries.append(
summary.histogram(var.op.name + '/gradient', grad_values))
summaries.append(
summary.scalar(var.op.name + '/gradient_norm',
clip_ops.global_norm([grad_values])))
else:
logging.info('Var %s has no gradient', var.op.name)
return summaries
_USE_GLOBAL_STEP = 0
def create_train_op(total_loss,
optimizer,
global_step=_USE_GLOBAL_STEP,
update_ops=None,
variables_to_train=None,
clip_gradient_norm=0,
summarize_gradients=False,
gate_gradients=tf_optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
gradient_multipliers=None,
check_numerics=True):
"""Creates an `Operation` that evaluates the gradients and returns the loss.
Args:
total_loss: A `Tensor` representing the total loss.
optimizer: A tf.Optimizer to use for computing the gradients.
global_step: A `Tensor` representing the global step variable. If left as
`_USE_GLOBAL_STEP`, then slim.variables.global_step() is used.
update_ops: An optional list of updates to execute. If `update_ops` is
`None`, then the update ops are set to the contents of the
`tf.GraphKeys.UPDATE_OPS` collection. If `update_ops` is not `None`, but
it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`,
a warning will be displayed.
variables_to_train: an optional list of variables to train. If None, it will
default to all tf.trainable_variables().
clip_gradient_norm: If greater than 0 then the gradients would be clipped
by it.
    summarize_gradients: Whether or not to add summaries for each gradient.
gate_gradients: How to gate the computation of gradients. See tf.Optimizer.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: Whether or not to try colocating the gradients
with the ops that generated them.
gradient_multipliers: A dictionary of either `Variables` or `Variable` op
names to the coefficient by which the associated gradient should be
scaled.
check_numerics: Whether or not we apply check_numerics.
Returns:
A `Tensor` that when evaluated, computes the gradients and returns the total
loss value.
"""
def transform_grads_fn(grads):
if gradient_multipliers:
with ops.name_scope('multiply_grads'):
grads = multiply_gradients(grads, gradient_multipliers)
# Clip gradients.
if clip_gradient_norm > 0:
with ops.name_scope('clip_grads'):
grads = clip_gradient_norms(grads, clip_gradient_norm)
return grads
return training.create_train_op(
total_loss=total_loss,
optimizer=optimizer,
global_step=global_step,
update_ops=update_ops,
variables_to_train=variables_to_train,
transform_grads_fn=transform_grads_fn,
summarize_gradients=summarize_gradients,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
check_numerics=check_numerics)
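# Illustrative usage sketch (not part of the original module), assuming a TF
# 1.x environment and a user-provided `my_loss` tensor; the variable op name
# in `gradient_multipliers` is hypothetical.
def _example_create_train_op(my_loss):
  import tensorflow as tf  # local import so nothing runs at module import time
  optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
  return create_train_op(
      my_loss,
      optimizer,
      clip_gradient_norm=10.0,
      gradient_multipliers={'my_model/fc/weights': 0.1},
      summarize_gradients=True)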
def _wait_for_step(sess, global_step, step):
"""Wait till the global step has reached at least 'step'.
Args:
sess: A session.
global_step: A Tensor.
step: Int. The global step to reach.
"""
while True:
if training_util.global_step(sess, global_step) >= step:
break
time.sleep(1.0)
def train_step(sess, train_op, global_step, train_step_kwargs):
"""Function that takes a gradient step and specifies whether to stop.
Args:
sess: The current session.
train_op: An `Operation` that evaluates the gradients and returns the
total loss.
global_step: A `Tensor` representing the global training step.
train_step_kwargs: A dictionary of keyword arguments.
Returns:
The total loss and a boolean indicating whether or not to stop training.
Raises:
ValueError: if 'should_trace' is in `train_step_kwargs` but `logdir` is not.
"""
start_time = time.time()
trace_run_options = None
run_metadata = None
if 'should_trace' in train_step_kwargs:
if 'logdir' not in train_step_kwargs:
raise ValueError('logdir must be present in train_step_kwargs when '
'should_trace is present')
if sess.run(train_step_kwargs['should_trace']):
trace_run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
total_loss, np_global_step = sess.run([train_op, global_step],
options=trace_run_options,
run_metadata=run_metadata)
time_elapsed = time.time() - start_time
if run_metadata is not None:
tl = timeline.Timeline(run_metadata.step_stats)
trace = tl.generate_chrome_trace_format()
trace_filename = os.path.join(train_step_kwargs['logdir'],
'tf_trace-%d.json' % np_global_step)
logging.info('Writing trace to %s', trace_filename)
file_io.write_string_to_file(trace_filename, trace)
if 'summary_writer' in train_step_kwargs:
train_step_kwargs['summary_writer'].add_run_metadata(run_metadata,
'run_metadata-%d' %
np_global_step)
if 'should_log' in train_step_kwargs:
if sess.run(train_step_kwargs['should_log']):
logging.info('global step %d: loss = %.4f (%.3f sec/step)',
np_global_step, total_loss, time_elapsed)
# TODO(nsilberman): figure out why we can't put this into sess.run. The
# issue right now is that the stop check depends on the global step. The
  # increment of global step often happens via the train op, which is
  # created using optimizer.apply_gradients.
#
# Since running `train_op` causes the global step to be incremented, one
  # would expect that using a control dependency would allow the
# should_stop check to be run in the same session.run call:
#
# with ops.control_dependencies([train_op]):
# should_stop_op = ...
#
# However, this actually seems not to work on certain platforms.
if 'should_stop' in train_step_kwargs:
should_stop = sess.run(train_step_kwargs['should_stop'])
else:
should_stop = False
return total_loss, should_stop
_USE_DEFAULT = 0
def train(train_op,
logdir,
train_step_fn=train_step,
train_step_kwargs=_USE_DEFAULT,
log_every_n_steps=1,
graph=None,
master='',
is_chief=True,
global_step=None,
number_of_steps=None,
init_op=_USE_DEFAULT,
init_feed_dict=None,
local_init_op=_USE_DEFAULT,
init_fn=None,
ready_op=_USE_DEFAULT,
summary_op=_USE_DEFAULT,
save_summaries_secs=600,
summary_writer=_USE_DEFAULT,
startup_delay_steps=0,
saver=None,
save_interval_secs=600,
sync_optimizer=None,
session_config=None,
trace_every_n_steps=None):
"""Runs a training loop using a TensorFlow supervisor.
When the sync_optimizer is supplied, gradient updates are applied
  synchronously. Otherwise, gradient updates are applied asynchronously.
Args:
train_op: A `Tensor` that, when executed, will apply the gradients and
return the loss value.
logdir: The directory where training logs are written to. If None, model
checkpoints and summaries will not be written.
train_step_fn: The function to call in order to execute a single gradient
      step. The function must take exactly four arguments: the current
session, the `train_op` `Tensor`, a global step `Tensor` and a dictionary.
train_step_kwargs: A dictionary which is passed to the `train_step_fn`. By
default, two `Boolean`, scalar ops called "should_stop" and "should_log"
are provided.
log_every_n_steps: The frequency, in terms of global steps, that the loss
      and global step are logged.
graph: The graph to pass to the supervisor. If no graph is supplied the
default graph is used.
master: The address of the tensorflow master.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
global_step: The `Tensor` representing the global step. If left as `None`,
then slim.variables.get_or_create_global_step() is used.
number_of_steps: The max number of gradient steps to take during training,
      as measured by 'global_step': training will stop once global_step is
      greater than or equal to 'number_of_steps'. If the value is left as
      None, training
proceeds indefinitely.
init_op: The initialization operation. If left to its default value, then
the session is initialized by calling `tf.global_variables_initializer()`.
init_feed_dict: A feed dictionary to use when executing the `init_op`.
local_init_op: The local initialization operation. If left to its default
value, then the session is initialized by calling
`tf.local_variables_initializer()` and `tf.tables_initializer()`.
init_fn: An optional callable to be executed after `init_op` is called. The
callable must accept one argument, the session being initialized.
ready_op: Operation to check if the model is ready to use. If left to its
default value, then the session checks for readiness by calling
`tf.report_uninitialized_variables()`.
summary_op: The summary operation.
save_summaries_secs: How often, in seconds, to save summaries.
summary_writer: `SummaryWriter` to use. Can be `None`
to indicate that no summaries should be written. If unset, we
create a SummaryWriter.
startup_delay_steps: The number of steps to wait for before beginning. Note
that this must be 0 if a sync_optimizer is supplied.
saver: Saver to save checkpoints. If None, a default one will be created
and used.
save_interval_secs: How often, in seconds, to save the model to `logdir`.
sync_optimizer: an instance of tf.train.SyncReplicasOptimizer, or a list of
them. If the argument is supplied, gradient updates will be synchronous.
If left as `None`, gradient updates will be asynchronous.
session_config: An instance of `tf.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used.
trace_every_n_steps: produce and save a `Timeline` in Chrome trace format
and add it to the summaries every `trace_every_n_steps`. If None, no trace
information will be produced or saved.
Returns:
the value of the loss function after training.
Raises:
ValueError: if `train_op` is empty or if `startup_delay_steps` is
non-zero when `sync_optimizer` is supplied, if `number_of_steps` is
negative, or if `trace_every_n_steps` is not `None` and no `logdir` is
provided.
"""
if train_op is None:
raise ValueError('train_op cannot be None.')
if logdir is None:
if summary_op != _USE_DEFAULT:
raise ValueError('Cannot provide summary_op because logdir=None')
if saver is not None:
raise ValueError('Cannot provide saver because logdir=None')
if trace_every_n_steps is not None:
raise ValueError('Cannot provide trace_every_n_steps because '
'logdir=None')
if isinstance(sync_optimizer, sync_replicas_optimizer.SyncReplicasOptimizer):
sync_optimizer = [sync_optimizer]
if sync_optimizer is not None and startup_delay_steps > 0:
raise ValueError(
'startup_delay_steps must be zero when sync_optimizer is supplied.')
if number_of_steps is not None and number_of_steps <= 0:
raise ValueError(
'`number_of_steps` must be either None or a positive number.')
graph = graph or ops.get_default_graph()
with graph.as_default():
if global_step is None:
global_step = variables.get_or_create_global_step()
saver = saver or tf_saver.Saver()
if sync_optimizer is not None:
for opt in sync_optimizer:
if not isinstance(opt, sync_replicas_optimizer.SyncReplicasOptimizer):
raise ValueError(
'`sync_optimizer` must be a tf.train.SyncReplicasOptimizer.')
with ops.name_scope('init_ops'):
if init_op == _USE_DEFAULT:
init_op = tf_variables.global_variables_initializer()
if ready_op == _USE_DEFAULT:
ready_op = tf_variables.report_uninitialized_variables()
if local_init_op == _USE_DEFAULT:
local_init_op = control_flow_ops.group(
tf_variables.local_variables_initializer(),
lookup_ops.tables_initializer())
if sync_optimizer is not None and isinstance(sync_optimizer, list):
with ops.control_dependencies([local_init_op] if local_init_op is
not None else []):
if is_chief:
local_init_op = control_flow_ops.group(
*[opt.chief_init_op for opt in sync_optimizer])
else:
local_init_op = control_flow_ops.group(
*[opt.local_step_init_op for opt in sync_optimizer])
ready_for_local_init_op = control_flow_ops.group(
*[opt.ready_for_local_init_op for opt in sync_optimizer])
else:
ready_for_local_init_op = None
if summary_op == _USE_DEFAULT:
summary_op = summary.merge_all()
if summary_writer == _USE_DEFAULT:
summary_writer = supervisor.Supervisor.USE_DEFAULT
if is_chief and sync_optimizer is not None:
# Need to create these BEFORE the supervisor finalizes the graph:
init_tokens_op = [opt.get_init_tokens_op() for opt in sync_optimizer]
chief_queue_runner = [
opt.get_chief_queue_runner() for opt in sync_optimizer]
if train_step_kwargs == _USE_DEFAULT:
with ops.name_scope('train_step'):
train_step_kwargs = {}
if number_of_steps:
should_stop_op = math_ops.greater_equal(global_step, number_of_steps)
else:
should_stop_op = constant_op.constant(False)
train_step_kwargs['should_stop'] = should_stop_op
if log_every_n_steps > 0:
train_step_kwargs['should_log'] = math_ops.equal(
math_ops.mod(global_step, log_every_n_steps), 0)
if is_chief and trace_every_n_steps is not None:
train_step_kwargs['should_trace'] = math_ops.equal(
math_ops.mod(global_step, trace_every_n_steps), 0)
train_step_kwargs['logdir'] = logdir
sv = supervisor.Supervisor(
graph=graph,
is_chief=is_chief,
logdir=logdir,
init_op=init_op,
init_feed_dict=init_feed_dict,
local_init_op=local_init_op,
ready_for_local_init_op=ready_for_local_init_op,
ready_op=ready_op,
summary_op=summary_op,
summary_writer=summary_writer,
global_step=global_step,
saver=saver,
save_summaries_secs=save_summaries_secs,
save_model_secs=save_interval_secs,
init_fn=init_fn)
if summary_writer is not None:
train_step_kwargs['summary_writer'] = sv.summary_writer
should_retry = True
while should_retry:
try:
should_retry = False
with sv.managed_session(
master, start_standard_services=False, config=session_config) as sess:
logging.info('Starting Session.')
if is_chief:
if logdir:
sv.start_standard_services(sess)
elif startup_delay_steps > 0:
_wait_for_step(sess, global_step,
min(startup_delay_steps, number_of_steps or
                             sys.maxsize))
threads = sv.start_queue_runners(sess)
logging.info('Starting Queues.')
if is_chief and sync_optimizer is not None:
sv.start_queue_runners(sess, chief_queue_runner)
sess.run(init_tokens_op)
try:
while not sv.should_stop():
total_loss, should_stop = train_step_fn(
sess, train_op, global_step, train_step_kwargs)
if should_stop:
logging.info('Stopping Training.')
sv.request_stop()
break
except errors.OutOfRangeError:
# OutOfRangeError is thrown when epoch limit per
# tf.train.limit_epochs is reached.
logging.info('Caught OutOfRangeError. Stopping Training.')
if logdir and sv.is_chief:
logging.info('Finished training! Saving model to disk.')
sv.saver.save(sess, sv.save_path, global_step=sv.global_step)
sv.stop(threads, close_summary_writer=True)
except errors.AbortedError:
# Always re-run on AbortedError as it indicates a restart of one of the
# distributed tensorflow servers.
logging.info('Retrying training!')
should_retry = True
return total_loss
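# Illustrative sketch (not part of the original module): a custom
# `train_step_fn` for train() that wraps the default train_step() and logs an
# extra message every 100 steps. It follows the four-argument contract
# documented above; the 100-step interval is an arbitrary example value.
def _example_train_step_fn(sess, train_op, global_step, train_step_kwargs):
  total_loss, should_stop = train_step(sess, train_op, global_step,
                                       train_step_kwargs)
  np_global_step = sess.run(global_step)
  if np_global_step % 100 == 0:
    logging.info('Custom hook at step %d: loss = %.4f',
                 np_global_step, total_loss)
  return total_loss, should_stop
# It would then be passed as:
#   train(train_op, my_log_dir, train_step_fn=_example_train_step_fn)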
|
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import numbers
from functools import partial
from queue import LifoQueue
from pyro import poutine
from pyro.infer.util import is_validation_enabled
from pyro.poutine import Trace
from pyro.poutine.enum_messenger import enumerate_site
from pyro.poutine.util import prune_subsample_sites
from pyro.util import check_model_guide_match, check_site_shape, ignore_jit_warnings
def iter_discrete_escape(trace, msg):
return (
(msg["type"] == "sample")
and (not msg["is_observed"])
and (msg["infer"].get("enumerate") == "sequential")
and (msg["name"] not in trace) # only sequential
)
def iter_discrete_extend(trace, site, **ignored):
values = enumerate_site(site)
enum_total = values.shape[0]
with ignore_jit_warnings(
[
"Converting a tensor to a Python index",
("Iterating over a tensor", RuntimeWarning),
]
):
values = iter(values)
for i, value in enumerate(values):
extended_site = site.copy()
extended_site["infer"] = site["infer"].copy()
extended_site["infer"]["_enum_total"] = enum_total
extended_site["value"] = value
extended_trace = trace.copy()
extended_trace.add_node(site["name"], **extended_site)
yield extended_trace
def get_importance_trace(
graph_type, max_plate_nesting, model, guide, args, kwargs, detach=False
):
"""
Returns a single trace from the guide, which can optionally be detached,
and the model that is run against it.
"""
guide_trace = poutine.trace(guide, graph_type=graph_type).get_trace(*args, **kwargs)
if detach:
guide_trace.detach_()
model_trace = poutine.trace(
poutine.replay(model, trace=guide_trace), graph_type=graph_type
).get_trace(*args, **kwargs)
if is_validation_enabled():
check_model_guide_match(model_trace, guide_trace, max_plate_nesting)
guide_trace = prune_subsample_sites(guide_trace)
model_trace = prune_subsample_sites(model_trace)
model_trace.compute_log_prob()
guide_trace.compute_score_parts()
if is_validation_enabled():
for site in model_trace.nodes.values():
if site["type"] == "sample":
check_site_shape(site, max_plate_nesting)
for site in guide_trace.nodes.values():
if site["type"] == "sample":
check_site_shape(site, max_plate_nesting)
return model_trace, guide_trace
def iter_discrete_traces(graph_type, fn, *args, **kwargs):
"""
Iterate over all discrete choices of a stochastic function.
When sampling continuous random variables, this behaves like `fn`.
When sampling discrete random variables, this iterates over all choices.
This yields traces scaled by the probability of the discrete choices made
in the `trace`.
:param str graph_type: The type of the graph, e.g. "flat" or "dense".
:param callable fn: A stochastic function.
    :returns: An iterator over traces.
"""
queue = LifoQueue()
queue.put(Trace())
traced_fn = poutine.trace(
poutine.queue(
fn, queue, escape_fn=iter_discrete_escape, extend_fn=iter_discrete_extend
),
graph_type=graph_type,
)
while not queue.empty():
yield traced_fn.get_trace(*args, **kwargs)
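# Illustrative sketch (not part of the original module): enumerating a toy
# model with a single Bernoulli site marked for sequential enumeration. The
# model, site name and distribution are assumptions made for the example;
# nothing here runs at import time.
def _example_iter_discrete_traces():
    import pyro
    import pyro.distributions as dist

    def model():
        return pyro.sample(
            "z", dist.Bernoulli(0.5), infer={"enumerate": "sequential"}
        )

    # One trace is yielded per possible value of "z" (here: 0. and 1.).
    return [tr.nodes["z"]["value"] for tr in iter_discrete_traces("flat", model)]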
def _config_fn(default, expand, num_samples, tmc, site):
if site["type"] != "sample" or site["is_observed"]:
return {}
if type(site["fn"]).__name__ == "_Subsample":
return {}
if num_samples is not None:
return {
"enumerate": site["infer"].get("enumerate", default),
"num_samples": site["infer"].get("num_samples", num_samples),
"expand": site["infer"].get("expand", expand),
"tmc": site["infer"].get("tmc", tmc),
}
if getattr(site["fn"], "has_enumerate_support", False):
return {
"enumerate": site["infer"].get("enumerate", default),
"expand": site["infer"].get("expand", expand),
}
return {}
def _config_enumerate(default, expand, num_samples, tmc):
return partial(_config_fn, default, expand, num_samples, tmc)
def config_enumerate(
guide=None, default="parallel", expand=False, num_samples=None, tmc="diagonal"
):
"""
Configures enumeration for all relevant sites in a guide. This is mainly
used in conjunction with :class:`~pyro.infer.traceenum_elbo.TraceEnum_ELBO`.
When configuring for exhaustive enumeration of discrete variables, this
configures all sample sites whose distribution satisfies
``.has_enumerate_support == True``.
When configuring for local parallel Monte Carlo sampling via
``default="parallel", num_samples=n``, this configures all sample sites.
This does not overwrite existing annotations ``infer={"enumerate": ...}``.
This can be used as either a function::
guide = config_enumerate(guide)
or as a decorator::
@config_enumerate
def guide1(*args, **kwargs):
...
@config_enumerate(default="sequential", expand=True)
def guide2(*args, **kwargs):
...
:param callable guide: a pyro model that will be used as a guide in
:class:`~pyro.infer.svi.SVI`.
:param str default: Which enumerate strategy to use, one of
"sequential", "parallel", or None. Defaults to "parallel".
:param bool expand: Whether to expand enumerated sample values. See
:meth:`~pyro.distributions.Distribution.enumerate_support` for details.
This only applies to exhaustive enumeration, where ``num_samples=None``.
        If ``num_samples`` is not ``None``, then samples will always be
expanded.
:param num_samples: if not ``None``, use local Monte Carlo sampling rather
than exhaustive enumeration. This makes sense for both continuous and
discrete distributions.
:type num_samples: int or None
:param tmc: "mixture" or "diagonal" strategies to use in Tensor Monte Carlo
:type tmc: string or None
:return: an annotated guide
:rtype: callable
"""
if default not in ["sequential", "parallel", "flat", None]:
raise ValueError(
"Invalid default value. Expected 'sequential', 'parallel', or None, but got {}".format(
repr(default)
)
)
if expand not in [True, False]:
raise ValueError(
"Invalid expand value. Expected True or False, but got {}".format(
repr(expand)
)
)
if num_samples is not None:
if not (isinstance(num_samples, numbers.Number) and num_samples > 0):
raise ValueError(
"Invalid num_samples, expected None or positive integer, but got {}".format(
repr(num_samples)
)
)
if default == "sequential":
raise ValueError(
'Local sampling does not support "sequential" sampling; '
'use "parallel" sampling instead.'
)
if tmc == "full" and num_samples is not None and num_samples > 1:
# tmc strategies validated elsewhere (within enum handler)
expand = True
# Support usage as a decorator:
if guide is None:
return lambda guide: config_enumerate(
guide, default=default, expand=expand, num_samples=num_samples, tmc=tmc
)
return poutine.infer_config(
guide, config_fn=_config_enumerate(default, expand, num_samples, tmc)
)
|
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Indicators for network status."""
from makani.avionics.common import cvt
from makani.avionics.common import pack_avionics_messages
from makani.gs.monitor2.apps.layout import indicator
from makani.gs.monitor2.apps.layout import stoplights
from makani.gs.monitor2.apps.plugins import common
from makani.lib.python import struct_tree
# TODO: Find a global definition source for it.
_XLR_RSSI_WARNING_THRESHOLD = -112 + 20
# The Microhard pDDL radios support the joystick traffic without
# packet drops down to an RSSI of -92 dBm. The additional free space
# loss from perch to maximal glide range is 11 dB.
_PDDL_RSSI_WARNING_THRESHOLD = -92 + 11
def _IsSwitchCommsLinkUp(switch_stats, port):
return (switch_stats and (switch_stats.link_status_bits & (1 << port)) and
switch_stats.stats[port].rx_multicast_packet_rate > 0)
class BaseCommsStatusIndicator(indicator.BaseAttributeIndicator):
"""Base class for comms status."""
def __init__(self, name, node_a, port_a, node_b, port_b, message_type,
show_label, ignore_error=False):
super(BaseCommsStatusIndicator, self).__init__([
(message_type, node_a, 'switch_stats'),
(message_type, node_b, 'switch_stats'),
], name)
self._ignore_error = ignore_error
self._port_a = port_a
self._port_b = port_b
self._show_label = show_label
def _ShowLinkStatus(self, is_up):
return 'Up' if is_up else 'Down'
def _DictToString(self, results, item_length=10):
text = []
keys = sorted(results.keys())
if self._show_label:
text.append(' '.join(k.rjust(item_length) for k in keys))
text.append(' '.join(results[k].rjust(item_length) for k in keys))
return '\n'.join(text)
def _Filter(self, status_a, status_b):
results = {}
total_links = 0
total_up_links = 0
if self._port_a is None:
results['Link A'] = '--'
else:
is_a_up = _IsSwitchCommsLinkUp(status_a, self._port_a)
results['Link A'] = self._ShowLinkStatus(is_a_up)
total_links += 1
if is_a_up:
total_up_links += 1
if self._port_b is None:
results['Link B'] = '--'
else:
is_b_up = _IsSwitchCommsLinkUp(status_b, self._port_b)
results['Link B'] = self._ShowLinkStatus(is_b_up)
total_links += 1
if is_b_up:
total_up_links += 1
if self._ignore_error:
stoplight = stoplights.STOPLIGHT_ANY
else:
if total_links == 0:
stoplight = stoplights.STOPLIGHT_ANY
elif total_up_links == total_links:
stoplight = stoplights.STOPLIGHT_NORMAL
elif total_up_links == 0:
stoplight = stoplights.STOPLIGHT_ERROR
else:
stoplight = stoplights.STOPLIGHT_WARNING
return self._DictToString(results), stoplight
class CommsStatusPoFIndicator(BaseCommsStatusIndicator):
def __init__(self, name, show_label=True):
super(CommsStatusPoFIndicator, self).__init__(
name, 'CsGsA', 20, 'CsGsB', 20, 'CoreSwitchSlowStatus', show_label,
ignore_error=True)
class CommsStatusEoPIndicator(BaseCommsStatusIndicator):
def __init__(self, name, show_label=True):
super(CommsStatusEoPIndicator, self).__init__(
name, 'CsGsA', None, 'CsGsB', None, 'CoreSwitchSlowStatus', show_label,
ignore_error=True)
class CommsStatusWifiIndicator(BaseCommsStatusIndicator):
def __init__(self, name, show_label=True):
super(CommsStatusWifiIndicator, self).__init__(
name, 'CsGsA', 22, 'CsGsB', 18, 'CoreSwitchSlowStatus', show_label)
class JoystickRadioStatusIndicator(indicator.BaseAttributeIndicator):
"""Joystick radio status."""
def __init__(self, name):
super(JoystickRadioStatusIndicator, self).__init__([
('JoystickMonitorStatus', 'JoystickA', 'microhard_status'),
('TetherDown', 'CsB', 'comms_status'),
], name)
def _HandleStatus(self, connected, rssi):
if connected:
if rssi < _PDDL_RSSI_WARNING_THRESHOLD:
stoplight = stoplights.STOPLIGHT_WARNING
else:
stoplight = stoplights.STOPLIGHT_NORMAL
return '% 4d' % rssi, stoplight
else:
return ' n/a', stoplights.STOPLIGHT_WARNING
def _Filter(self, down_status, up_status):
if struct_tree.IsValidElement(down_status):
text, down_stoplight = self._HandleStatus(
down_status.connected, down_status.rssi)
else:
text, down_stoplight = '--', stoplights.STOPLIGHT_WARNING
result = '%s dBm down, ' % text
if struct_tree.IsValidElement(up_status):
is_connected = (up_status.links_up &
pack_avionics_messages.kTetherCommsLinkJoystick)
text, up_stoplight = self._HandleStatus(
is_connected, up_status.received_signal_strength)
else:
text, up_stoplight = '--', stoplights.STOPLIGHT_WARNING
result += '%s dBm up' % text
stoplight = stoplights.MostSevereStoplight(down_stoplight, up_stoplight)
return result, stoplight
class TetherLongRangeRadioStatusIndicator(indicator.SingleAttributeIndicator):
def __init__(self, name):
super(TetherLongRangeRadioStatusIndicator, self).__init__(
('TetherDown', 'CsGsA'), name)
def _Filter(self, tether_down):
if not struct_tree.IsValidElement(tether_down):
return 'Link down', stoplights.STOPLIGHT_ERROR
down_signal_strength = tether_down.received_signal_strength
up_signal_strength = tether_down.comms_status.received_signal_strength
text = '% 4d dBm down, % 4d dBm up' % (
down_signal_strength, up_signal_strength)
if (down_signal_strength < _XLR_RSSI_WARNING_THRESHOLD or
up_signal_strength < _XLR_RSSI_WARNING_THRESHOLD):
stoplight = stoplights.STOPLIGHT_WARNING
else:
stoplight = stoplights.STOPLIGHT_NORMAL
return text, stoplight
class BaseTetherCommsStatusIndicator(indicator.BaseAttributeIndicator):
"""Base class for tether comms status."""
def __init__(self, name, link_type, sources_per_link, link_names, show_label,
ignore_error=False):
super(BaseTetherCommsStatusIndicator, self).__init__(
self._PackArguments(sources_per_link), name)
assert len(sources_per_link) == len(link_names)
self._sources_per_link = sources_per_link
self._link_names = link_names
self._link_type = link_type
self._show_label = show_label
self._ignore_error = ignore_error
def _PackArguments(self, sources_per_link):
"""Construct the list of arguments telling statuses of various links.
Args:
      sources_per_link: A list of source lists. Each source list contains
TetherDownSources for a particular link.
Returns:
The packed argument list is in the form of
[<status_link_0>, <valid_link_0>, <status_link_1>, <valid_link_1>, ...]
"""
attributes = []
for sources in sources_per_link:
for source in sources:
attributes.append(
('filtered', 'merge_tether_down', 'comms_status[%d]' % source))
attributes.append(
('filtered', 'merge_tether_down',
'comms_status_valid[%d]' % source))
return attributes
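  # For example, with hypothetical source indices sources_per_link=[[0, 1], [2]],
  # the packed attribute list would be:
  #   [('filtered', 'merge_tether_down', 'comms_status[0]'),
  #    ('filtered', 'merge_tether_down', 'comms_status_valid[0]'),
  #    ('filtered', 'merge_tether_down', 'comms_status[1]'),
  #    ('filtered', 'merge_tether_down', 'comms_status_valid[1]'),
  #    ('filtered', 'merge_tether_down', 'comms_status[2]'),
  #    ('filtered', 'merge_tether_down', 'comms_status_valid[2]')]
  # _UnpackArguments below then maps link 0 to indices [0, 1] and link 1 to
  # index [2] of the deinterleaved comms_status/valid tuples.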
def _UnpackArguments(self, *attributes):
"""Unpack attributes to comms status, valid bits, and indices per link."""
comms_status = attributes[0:len(attributes):2]
valid = attributes[1:len(attributes):2]
# A dictionary of source indices in `comms_status` and `valid` arrays
    # per link. E.g., comms_status[source_indices_per_link[0][1]] is the
# comms_status for source 1 of link 0.
source_indices_per_link = {}
source_idx = 0
for link_idx, sources in enumerate(self._sources_per_link):
source_indices_per_link[link_idx] = range(
source_idx, source_idx + len(sources))
source_idx += len(sources)
return comms_status, valid, source_indices_per_link
def _ShowLinkStatus(self, is_up):
return 'Up' if is_up else 'Down'
def _DictToString(self, results, item_length=10):
text = []
keys = sorted(results.keys())
if self._show_label:
text.append(' '.join(k.rjust(item_length) for k in keys))
text.append(' '.join(results[k].rjust(item_length) for k in keys))
return '\n'.join(text)
def _Filter(self, *attributes):
comms_status, valid, source_indices_per_link = self._UnpackArguments(
*attributes)
results = {}
total_links = 0
total_up_links = 0
# Iterate through all links in the order of _sources_per_link.
for link_index in range(len(self._sources_per_link)):
link_name = self._link_names[link_index]
link_up = False
for source_idx in source_indices_per_link[link_index]:
if (valid[source_idx] and comms_status[source_idx].no_update_count <
common.MAX_NO_UPDATE_COUNT_COMMS_STATUS):
link_up = comms_status[source_idx].links_up & self._link_type
break
      # Links are regarded as DOWN if comms_status is obsolete.
results[link_name] = self._ShowLinkStatus(link_up)
total_links += 1
total_up_links += (1 if link_up else 0)
if self._ignore_error:
stoplight = stoplights.STOPLIGHT_ANY
else:
if total_links == 0:
stoplight = stoplights.STOPLIGHT_ANY
elif total_up_links == total_links:
stoplight = stoplights.STOPLIGHT_NORMAL
elif total_up_links == 0:
stoplight = stoplights.STOPLIGHT_ERROR
else:
stoplight = stoplights.STOPLIGHT_WARNING
return self._DictToString(results), stoplight
class TetherCommsStatusPoFIndicator(BaseTetherCommsStatusIndicator):
def __init__(self, name, show_label=True):
super(TetherCommsStatusPoFIndicator, self).__init__(
name, cvt.kTetherCommsLinkPof,
[[cvt.kTetherDownSourceCsGsA, cvt.kTetherDownSourceCsA],
[cvt.kTetherDownSourceCsB]], ['Link A', 'Link B'], show_label,
ignore_error=True)
class TetherCommsStatusEoPIndicator(BaseTetherCommsStatusIndicator):
def __init__(self, name, show_label=True):
super(TetherCommsStatusEoPIndicator, self).__init__(
name, cvt.kTetherCommsLinkEop,
[[cvt.kTetherDownSourceCsGsA, cvt.kTetherDownSourceCsA],
[cvt.kTetherDownSourceCsB]], ['Link A', 'Link B'], show_label,
ignore_error=True)
class TetherCommsStatusWifiIndicator(BaseTetherCommsStatusIndicator):
def __init__(self, name, show_label=True):
super(TetherCommsStatusWifiIndicator, self).__init__(
name, cvt.kTetherCommsLinkWifi,
[[cvt.kTetherDownSourceCsGsA, cvt.kTetherDownSourceCsA],
[cvt.kTetherDownSourceCsB]], ['Link A', 'Link B'], show_label)
|
|
#!/usr/bin/env python
"""
Cloud connected autonomous RC car.
Copyright 2016 Visible Energy Inc. All Rights Reserved.
"""
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Simple steering and throttle prediction from a trained model and compared with a telemetry image.
Usage:
./predict.py <DATA-DIR>
Arguments:
<DATA-DIR> image directory
"""
import cv2
import numpy as np
import sys
import os
import time
import math
from keras.models import model_from_json
from config import DataConfig
import utils
import ntpath
interactive = True
# show an image in a proper scale
def show_img(img):
screen_res = 320. * 2 , 240. * 2
scale_width = screen_res[0] / img.shape[1]
scale_height = screen_res[1] / img.shape[0]
scale = min(scale_width, scale_height)
window_width = int(img.shape[1] * scale)
window_height = int(img.shape[0] * scale)
cv2.namedWindow('dst1_rt', cv2.WINDOW_NORMAL)
cv2.resizeWindow('dst1_rt', window_width, window_height)
cv2.imshow('dst1_rt', img)
return
if __name__ == "__main__":
config = DataConfig()
try:
data_path = os.path.expanduser(sys.argv[1])
except Exception as e:
print(e, "Usage: ./predict.py <DATA-DIR>")
sys.exit(-1)
if not os.path.exists(data_path):
print("Directory %s not found." % data_path)
sys.exit(-1)
log = np.load('log.npy')
# open labels csv file (frame filename, steering, throttle)
#with open("{}/labels.csv".format(data_path)) as f:
#labels = f.readlines()
#nlabels = len(labels)
#print("found %d labels" % nlabels)
#out_file = open("{}/labels_pred.csv".format(data_path), 'w')
# Load model structure
model = model_from_json(open("{}/autonomia_cnn.json".format(data_path)).read())
# Load model weights
model.load_weights("{}/autonomia_cnn.h5".format(data_path))
model.summary()
img_height, img_width, num_channels = config.img_resample_dim[0], config.img_resample_dim[1], config.num_channels
skip = config.skip_ahead
for i in range(len(log)):
if i < skip:
continue
filename, steering, throttle= log[i][0], log[i][1], log[i][2]
print('***************** {} | {} | {}'.format(filename, steering, throttle))
steering = int(steering)
# throttle
throttle = int(throttle)
print(filename, steering, throttle)
# load image
img = cv2.imread(filename)
# convert to YCrCb
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
if num_channels == 1:
# extract and use Y plane only
X_img, _, _ = cv2.split(gray_img)
else:
# use YCrCb
X_img = gray_img
if interactive: show_img(X_img)
# crop image
X_img = X_img[config.ycrop_range[0]:config.ycrop_range[1], :]
# resample image
X_img = cv2.resize(X_img, config.img_resample_dim[::-1] , cv2.INTER_LINEAR)
# X_img is of shape (1,:,:,:)
X_img = X_img.reshape(1, img_height, img_width, num_channels)
# normalize the image values
X_img = X_img / 255.0 - 0.5
now = time.time()
# predict steering and throttle
steering = model.predict(X_img)
t = time.time() - now
print("execution time:", t)
# steering = np.argmax(p[:, :15], 1)
# throttle = np.argmax(p[:, 15:], 1)
# print p[0, :15]
# print p[0, 15:]
steering = steering + 90
print(steering)
#out_file.write("%s,%d\n" % (ntpath.basename(filename), steering))
if interactive:
key = cv2.waitKey(0)
if key == 27:
sys.exit(0)
'''
for i,line in enumerate(labels):
if i < skip:
continue
filename, steering, throttle= line.split(',')
# image filename
filename = data_path + '/' + filename
# steering
steering = int(steering)
# throttle
throttle = int(throttle)
print filename, steering, throttle, utils.steering2bucket(steering), utils.throttle2bucket(throttle)
# load image
img = cv2.imread(filename)
# convert to YCrCb
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
if num_channels == 1:
# extract and use Y plane only
X_img, _, _ = cv2.split(gray_img)
else:
# use YCrCb
X_img = gray_img
if interactive: show_img(X_img)
# crop image
X_img = X_img[config.img_yaxis_start:config.img_yaxis_end + 1, config.img_xaxis_start:config.img_xaxis_end + 1]
# resample image
X_img = cv2.resize(X_img, config.img_resample_dim, cv2.INTER_LINEAR)
# X_img is of shape (1,:,:,:)
X_img = X_img.reshape(1, img_height, img_width, num_channels)
# normalize the image values
X_img = X_img / 127.5 - 1
now = time.time()
# predict steering and throttle
steering, throttle = model.predict(X_img[0:1])
t = time.time() - now
print "execution time:", t
# steering = np.argmax(p[:, :15], 1)
# throttle = np.argmax(p[:, 15:], 1)
# print p[0, :15]
# print p[0, 15:]
steering = np.argmax(steering[0])
throttle = np.argmax(throttle[0])
print steering, throttle
steering = utils.bucket2steering(steering)
throttle = utils.bucket2throttle(throttle)
print steering, throttle
out_file.write("%s,%d,%d\n" % (ntpath.basename(filename), steering, throttle))
if interactive:
key = cv2.waitKey(0)
if key == 27:
sys.exit(0)
'''
|
|
import datetime
import logging
from nowin_core.database import tables
class BookingModel(object):
"""Model for managing server status
"""
def __init__(self, session, logger=None):
self.logger = logger
if self.logger is None:
self.logger = logging.getLogger(__name__)
self.session = session
def get_host_by_ip(self, host_ip):
"""Get host by IP
"""
host = self.session.query(tables.Host).filter_by(ip=host_ip).first()
return host
def get_server_table(self, type):
"""Get server table by type
"""
table_type = None
if type == 'proxy':
table_type = tables.Proxy
elif type == 'broadcast':
table_type = tables.Broadcast
return table_type
def get_server_by_name(self, server_name):
"""Get a server by name
"""
server = self.session \
.query(tables.Server) \
.filter_by(name=server_name) \
.first()
return server
def get_listener_count(self):
"""Get listener count for all radios
"""
from sqlalchemy.sql.expression import func
from sqlalchemy.sql.functions import sum
OnAir = tables.OnAir
User = tables.User
Conn = tables.ProxyConnection
query = self.session \
.query(User.user_name, func.ifnull(sum(Conn.listener), 0)) \
.join((OnAir, OnAir.user_id == User.user_id)) \
.outerjoin((Conn, Conn.user_id == User.user_id)) \
.group_by(User.user_id)
radios = {}
for name, count in query:
radios[str(name)] = int(count)
return radios
def add_host(self, host_ip, name, alive, loading, supervisor_url=None):
"""Add a host and return
"""
host = tables.Host(
ip=host_ip,
name=name,
alive=alive,
loading=loading,
supervisor_url=supervisor_url,
online_time=tables.now_func(),
last_updated=tables.now_func(),
created=tables.now_func()
)
self.session.add(host)
self.session.flush()
host = self.get_host_by_ip(host_ip)
        self.logger.info('Add host %s', host)
        return host
def update_host(self, host_ip, name, state, loading, supervisor_url):
"""Update state of host
"""
host = self.get_host_by_ip(host_ip)
if state == 'online':
host.online_time = tables.now_func()
host.alive = True
self.logger.info('Host %s goes online', host)
elif state == 'offline':
host.alive = False
self.logger.info('Host %s goes offline', host)
elif state == 'normal':
# Listen to Still alive: http://www.youtube.com/watch?v=Y6ljFaKRTrI
# :)
# the server back online
if not host.alive:
self.logger.info('Host %s is back online', host)
else:
self.logger.info('Host %s is still alive', host)
host.alive = True
host.name = name
host.loading = loading
host.supervisor_url = supervisor_url
host.last_updated = tables.now_func()
self.session.add(host)
self.session.flush()
def update_server(
self,
server_name,
type,
host,
state,
ports,
pid,
revision,
user_count,
user_limit,
resource_count,
resource_limit,
inbound_rate,
outbound_rate,
loading,
):
"""Update state of server
server_name
            name of server to update
type
type of server
state
current state of server heart beat
ports
ports of server, format should be
dict(port_name=port_address, ...)
pid
pid of process
revision
revision of server
user_count
count of user
user_limit
limit of user
resource_count
count of resource
resource_limit
limit of resource
inbound_rate
inbound bandwidth
outbound_rate
outbound bandwidth
loading
loading of server
radios
map of listener count on proxy server or
            name of alive radios on broadcast server
"""
now = tables.now_func()
table_type = self.get_server_table(type)
server = self.session \
.query(table_type) \
.filter_by(name=server_name) \
.first()
if server is None:
server = table_type(name=server_name)
server.created = now
server.online_time = now
self.logger.info('Add server %r', server_name)
if state == 'online':
server.online_time = now
server.alive = True
self.logger.info('Server %r goes online',
server.name)
elif state == 'offline':
server.alive = False
self.logger.info('Server %r goes offline',
server.name)
elif state == 'normal':
# Listen to Still alive: http://www.youtube.com/watch?v=Y6ljFaKRTrI
# :)
if not server.alive:
self.logger.info('Server %r is back online',
server.name)
else:
self.logger.info('Server %r is still alive',
server.name)
server.alive = True
# get all old ports
old_ports = {}
for port in server.ports:
old_ports[port.name] = port
old_set = set(old_ports)
# get all new ports
new_ports = ports
new_set = set(new_ports)
# set of port to update
to_update = old_set & new_set
# set of port to delete
to_delete = old_set - to_update
# set of port to add
to_add = new_set - to_update
self.logger.debug('old: %s, new: %s', old_set, new_set)
self.logger.debug(
'to_update: %s, to_delete: %s, to_add: %s',
to_update, to_delete, to_add
)
# update old ports
for name in to_update:
port = old_ports[name]
port.address = new_ports[name]
self.session.add(port)
# delete outdate ports
for name in to_delete:
port = old_ports[name]
self.session.delete(port)
# add new ports
for name in to_add:
address = new_ports[name]
port = tables.Port(name=name, address=address)
server.ports.append(port)
self.session.add(port)
server.host = host
server.pid = pid
server.revision = revision
server.user_limit = user_limit
server.user_count = user_count
server.resource_limit = resource_limit
server.resource_count = resource_count
server.inbound_rate = inbound_rate
server.outbound_rate = outbound_rate
server.loading = loading
server.last_updated = now
self.session.add(server)
self.session.flush()
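    # Example call with hypothetical values, illustrating the documented
    # `ports` format of dict(port_name=port_address, ...):
    #
    #   model.update_server(
    #       server_name='broadcast01', type='broadcast', host=host,
    #       state='normal', ports=dict(stream='10.0.0.5:8000'), pid=1234,
    #       revision='r42', user_count=10, user_limit=100, resource_count=1,
    #       resource_limit=8, inbound_rate=128, outbound_rate=2048,
    #       loading=0.3)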
def update_proxy_connections(self, server_name, radios):
"""Update listener count of proxy connections
radios is a dict mapping radio user_name to listener count
"""
User = tables.User
ProxyConnection = tables.ProxyConnection
proxy = self.get_server_by_name(server_name)
if proxy is None:
msg = 'Update connection to non-exist server %s' % server_name
self.logger.error(msg)
raise Exception(msg)
# old proxy connections
old_conns = {}
for conn in proxy.connections:
old_conns[conn.user_id] = conn
old_set = set(old_conns)
# new proxy connections
new_conns = {}
        for name, listener in radios.items():
new_conns[name] = listener
new_set = set(new_conns)
# set of port to update
to_update = old_set & new_set
# set of port to delete
to_delete = old_set - to_update
# set of port to add
to_add = new_set - to_update
# update old connection
for name in to_update:
conn = old_conns[name]
conn.listener = new_conns[name]
self.session.add(conn)
# delete old connections
for name in to_delete:
conn = old_conns[name]
self.session.delete(conn)
# add new connections
if to_add:
users = self.session \
.query(User) \
.filter(User.user_name.in_(to_add))
user_map = {}
for user in users:
user_map[user.user_name] = user.user_id
for name in to_add:
conn = ProxyConnection()
conn.listener = new_conns[name]
conn.user_id = user_map[name]
proxy.connections.append(conn)
self.session.add(conn)
self.session.flush()
self.logger.info('Update %s connections on proxy %r',
len(radios), proxy)
def add_on_airs(self, server_name, radios):
"""Add on-air radios
"""
from sqlalchemy.orm import joinedload
if not radios:
self.logger.info('No radio gets offline')
return
server = self.session \
.query(tables.Broadcast) \
.filter_by(name=server_name) \
.first()
if not server:
self.logger.error('Radio gets online from a non-exist server %s',
server_name)
return
users = self.session \
.query(tables.User) \
.options(joinedload('on_air')) \
.filter(tables.User.user_name.in_(radios)) \
.all()
for user in users:
onair = user.on_air
if not onair:
onair = tables.OnAir()
else:
self.logger.warn('OnAir %s already exist', onair)
continue
onair.online_time = tables.now_func()
onair.server_id = server.id
user.on_air = onair
self.session.add(server)
self.session.flush()
self.logger.info('Add on-air %s of broadcast %s',
len(radios), server_name)
def update_on_airs(self, server_name, radios):
"""Update on-airs of a broadcast server
radios is a list of current online radio user name
"""
User = tables.User
OnAir = tables.OnAir
broadcast = self.get_server_by_name(server_name)
if broadcast is None:
msg = 'Update on-air to non-exist server %s' % server_name
self.logger.error(msg)
raise Exception(msg)
radios = map(unicode, radios)
# get old on-airs
old_on_airs = set([on_air.user_id for on_air in broadcast.on_airs])
# get new current on-airs
if radios:
users = self.session \
.query(User) \
.filter(User.user_name.in_(radios)) \
.all()
new_on_airs = set([user.user_id for user in users])
else:
new_on_airs = set()
# the set of on-air that we don't have to do anything with them
remain_on_airs = old_on_airs & new_on_airs
# to delete on-airs
to_delete = old_on_airs - remain_on_airs
# to add on_airs
to_add = new_on_airs - remain_on_airs
# delete off-line radios
if to_delete:
self.session \
.query(OnAir) \
.filter(OnAir.user_id.in_(to_delete)) \
.delete('fetch')
# add online radios
for user_id in to_add:
on_air = OnAir(
server_id=broadcast.id,
user_id=user_id,
online_time=tables.now_func()
)
self.session.add(on_air)
self.session.flush()
self.logger.info('Update on-air %s of broadcast %s',
len(radios), server_name)
def remove_on_airs(self, server_name, radios):
"""Remove on-air radios
"""
from sqlalchemy.orm import joinedload
if not radios:
self.logger.info('No radio gets offline')
return
server = self.session \
.query(tables.Broadcast) \
.filter_by(name=server_name) \
.first()
if not server:
self.logger.error('Radio gets offline with non-exist server %s ',
server_name)
return
users = self.session \
.query(tables.User) \
.options(joinedload('on_air')) \
.filter(tables.User.user_name.in_(radios)) \
.all()
for user in users:
onair = user.on_air
if not onair:
self.logger.error('OnAir broadcast=%s, user=%s does not exist',
server, user)
continue
self.session.delete(onair)
self.logger.info('Remove on-air %s', onair)
self.session.flush()
self.logger.info('Remove on-air %s of broadcast %s',
len(radios), server_name)
def get_source_address(self, user_name):
"""Get address of audio source of broadcast server
"""
from sqlalchemy.orm import joinedload
# make sure the user is online
user = self.session.query(tables.User). \
options(joinedload('on_air.broadcast')). \
filter_by(user_name=user_name).first()
if user is None or user.on_air is None:
self.logger.info('Radio %r is not on-air', user_name)
return
# get address of
port = self.session.query(tables.Port) \
.filter_by(server_id=user.on_air.broadcast.id, name='stream') \
.first()
if port is None:
self.logger.info('Cannot find source address for %s', user_name)
return
address = port.address
self.logger.info(
'Radio %r audio resource is on %r', user_name, address)
return address
def expire_hosts(self, timeout):
"""set hosts whose last_updated is before current - timeout as dead
"""
from sqlalchemy.sql.expression import func, text
Host = tables.Host
# delete out-dated hosts
# func.timestampadd()
now = tables.now_func()
if isinstance(now, datetime.datetime):
deadline = now - datetime.timedelta(seconds=timeout)
else:
deadline = func.timestampadd(text('second'), -timeout, now)
query = self.session \
.query(Host) \
.filter(Host.alive) \
.filter(Host.last_updated < deadline)
count = query.count()
query.update(dict(alive=False), False)
self.session.flush()
self.logger.info('Expire %s hosts', count)
def expire_servers(self, timeout):
"""set servers whose last_updated is before current - timeout as dead
"""
from sqlalchemy.sql.expression import func, text
Server = tables.Server
OnAir = tables.OnAir
now = tables.now_func()
if isinstance(now, datetime.datetime):
deadline = now - datetime.timedelta(seconds=timeout)
else:
deadline = func.timestampadd(text('second'), -timeout, now)
query = self.session \
.query(Server.id) \
.filter(Server.alive) \
.filter(Server.last_updated < deadline)
# delete on airs of broadcast server
broadcasts = query \
.filter_by(type='broadcast') \
.subquery()
self.session \
.query(OnAir) \
.filter(OnAir.server_id.in_(broadcasts)) \
.delete('fetch')
# update server state to dead
count = query.count()
query.update(dict(alive=False), False)
self.session.flush()
self.logger.info('Expire %s servers', count)
|
|
# coding=utf-8
from time import sleep
from fabric.api import *
# fix ntp so that it can be queried
@task
@parallel
@roles('c4all')
def fix_ntp():
run("sed -i 's/restrict default nomodify notrap noquery/restrict default nomodify notrap/g' /etc/ntp.conf")
run("service ntpd restart")
@task
@roles('c4all')
def check_ntp():
run("ntpdc -np")
# Ganglia
@task
@parallel
@roles('c4all')
def stop_gmond():
run('service gmond stop')
@task
@parallel
@roles('c4all')
def start_gmond():
run('service gmond start')
@task
@parallel
@roles('c4all')
def restart_gmond():
run('service gmond restart')
@task
@parallel
@roles('c4')
def add_prod_mount():
# run("sed -n '$!p' /etc/auto.mnt > /etc/tmp.txt && mv -f /etc/tmp.txt /etc/auto.mnt")
run("echo 'prod -nolock transit.gbif.org:/mnt/large/prod' >> /etc/auto.mnt")
run('service autofs restart')
@task
@parallel
@roles('c4')
def add_appdev_mount():
run("echo 'appdev -nolock transit.gbif.org:/mnt/large/appdev' >> /etc/auto.mnt")
run('service autofs restart')
@task
@parallel
@roles('c4')
def prod_unmount():
run('service autofs stop')
@task
@parallel
@roles('c4')
def prod_mount():
run('service autofs start')
@task
@parallel
@roles('c4')
def yumupdate():
run('yum -y update')
@task
@parallel
@roles('c4')
def reboot():
run('shutdown -r now')
@task
@parallel
@roles('newprodmasters')
def masteryumupdate():
run('yum -y update')
@task
@parallel
@roles('newprodmasters')
def masterreboot():
run('shutdown -r now')
@task
@parallel
@roles('c4')
def add_uat_mount():
# run("mv /etc/auto.mnt /etc/auto.mnt.old")
# run("echo 'prod -nolock transit.gbif.org:/mnt/large/prod' > /etc/auto.mnt")
# run("echo 'uat -nolock transit.gbif.org:/mnt/large/uat' >> /etc/auto.mnt")
# run("echo 'dev -nolock transit.gbif.org:/mnt/large/dev' >> /etc/auto.mnt")
# run('service autofs restart')
run("rm -f /etc/auto.mnt.old")
run("cd /mnt/auto/uat/occurrence-download && ls /mnt/auto/uat/occurrence-download")
# NOTE: this switch_to_java8 task is shadowed by the identically named task
# defined further down, which points at the explicit OpenJDK 1.8.0.45 path.
@task
@parallel
@roles('c4all')
def switch_to_java8():
run('alternatives --set java /usr/lib/jvm/jre-1.8.0-openjdk.x86_64/bin/java')
@task
@parallel
@roles('c4')
def switch_to_java7():
run('alternatives --set java /usr/lib/jvm/jre-1.7.0-openjdk.x86_64/bin/java')
@task
@parallel
@roles('c4')
def switch_to_java6():
run('alternatives --set java /usr/lib/jvm/jre-1.6.0-sun/bin/java')
@task
@parallel
@roles('c4')
def remove_java6():
run('rpm -e jdk-1.6.0_31-fcs.x86_64')
run('rpm -e java-1.6.0-sun-1.6.0.45-1jpp.el6_gbif.x86_64')
@task
@parallel
@roles('c4all')
def install_java8():
run('yum -y install java-1.8.0-openjdk-src java-1.8.0-openjdk-devel')
@task
@parallel
@roles('c4all')
def switch_to_java8():
run('alternatives --set java /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.45-28.b13.el6_6.x86_64/jre/bin/java')
@task
@parallel
@roles('c4')
def install_jdk_dev_src():
run('yum -y install java-1.7.0-openjdk-src java-1.7.0-openjdk-devel')
@task
@parallel
@roles('c4')
def copy_jts():
# run('mkdir /opt/cloudera/auxjar')
run('cd /opt/cloudera/auxjar && hadoop fs -get /olivertmp/jts-1.13.jar')
@task
@parallel
@roles('c4slaves')
def distribute_jts():
put('/Users/oliver/java/zips/jts-1.13.jar', '/opt/cloudera/parcels/CDH/lib/solr/webapps/solr/WEB-INF/lib/')
@task
@roles('c4')
def ls_jts():
run('ls /opt/cloudera/auxjar')
@task
@parallel
@roles('c4all')
def restart_cloudera_agent():
run('service cloudera-scm-agent restart')
@task
@roles('newprodmasters')
def check_and_set_kernel_params():
# run('cat /etc/rc.local')
run('cat /sys/kernel/mm/redhat_transparent_hugepage/defrag')
run('echo 360448 > /proc/sys/vm/min_free_kbytes')
run('echo 1 > /proc/sys/vm/zone_reclaim_mode')
run('echo never > /sys/kernel/mm/redhat_transparent_hugepage/defrag')
run('cat /sys/kernel/mm/redhat_transparent_hugepage/defrag')
@task
@roles('newprodmasters')
def add_sysctl_params():
run('echo "vm.min_free_kbytes = 360448" >> /etc/sysctl.conf')
run('echo "vm.zone_reclaim_mode = 0" >> /etc/sysctl.conf')
run('echo "vm.swappiness = 0" >> /etc/sysctl.conf')
run("swapoff -a")
run("swapon -a")
@task
@roles('newprodmasters')
def clean_tmp():
run('rm -Rf /tmp/scm_prepare*')
run('rm -Rf /tmp/.scm_prepare*')
run('rm -Rf /tmp/cmflistener*')
@task
@parallel
@roles('c4')
def check_for_jts():
run('ls /opt/cloudera/parcels/SOLR/lib/solr/server/webapps/solr/WEB-INF/lib/jts-1.13.jar')
@task
@parallel
@roles('c4')
def set_swappiness():
run("sed -i 's/vm.swappiness = 1/vm.swappiness = 0/g' /etc/sysctl.conf")
run("sysctl vm.swappiness=0")
run("swapoff -a")
run("swapon -a")
@task
def clean_elasticsearch_logs():
import json
import urllib2
import httplib
from datetime import datetime, timedelta
print('Deleting logstash indices that are more than 1 month old')
today = datetime.now()
lastMonth = monthdelta(today, -1)
# twoMonthsAgo = monthdelta(today, -2)
data = json.load(urllib2.urlopen('http://b6g8.gbif.org:9200/_status'))
for index in data['indices']:
if index.find(formatDateString(today.year, today.month)) < 0 and index.find(formatDateString(lastMonth.year, lastMonth.month)) < 0:
#and index.find(formatDateString(twoMonthsAgo.year, twoMonthsAgo.month)) < 0:
connection = httplib.HTTPConnection('b6g8.gbif.org:9200')
connection.request('DELETE', '/%s/' % index)
response = connection.getresponse()
print('Deleting index %s: %s, %s' % (index, response.status, response.reason))
def formatDateString(year, month):
dateString = str(year) + '.'
if month < 10:
dateString = dateString + str(0)
dateString = dateString + str(month)
return dateString
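# e.g. formatDateString(2015, 3) returns '2015.03', the year/month fragment
# matched against logstash index names in clean_elasticsearch_logs above.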
def monthdelta(date, delta):
m, y = (date.month+delta) % 12, date.year + ((date.month)+delta-1) // 12
if not m: m = 12
d = min(date.day, [31,
        29 if y%4==0 and (y%100!=0 or y%400==0) else 28,31,30,31,30,31,31,30,31,30,31][m-1])
return date.replace(day=d,month=m, year=y)
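# Worked examples (assuming the corrected leap-year rule above):
#   monthdelta(datetime(2015, 3, 31), -1) -> 2015-02-28 (day clamped to month length)
#   monthdelta(datetime(2016, 1, 31), 1)  -> 2016-02-29 (2016 is a leap year)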
|
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2021, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Monads with syntactic sugar.
All monads share the following API::
# for a given monad Monad
# Turns a regular function into a function returning an instance of Monad,
# and able to consume monadic values. Similar to liftM in Haskell.
@Monad.lift
async def foo(x, y):
# Inside a decorated function, await can be used to "extract" the value
# contained in the monad, like ``<-`` in Haskell.
z = await Monad.something()
return x
# Equivalent to
@Monad.lift
async def foo(x, y):
# note: await is used automatically if x is an instance of Monad
x_ = await x
y_ = await y
# Monad.pure() is called if x_ is not an instance of Monad
return Monad.pure(x_)
This allows composing lifted functions easily.
.. note:: There currently is no overridable ``bind`` operation, since nothing
in Python currently allows getting a proper continuation without explicit
manipulations of lambdas. The closest thing that is used is coroutine
functions, where ``await`` somewhat provides a continuation using
``coroutine.send()``. The limitation comes from that it can only be called
at most once (preventing anything like the list monad). Early-return
control flow such as the maybe monad are typically not necessary as Python
has exceptions already.
.. note:: ``async/await`` is used as syntactic sugar instead of ``yield`` since
the grammar works better for ``await``. ``yield`` cannot be used in
comprehensions, which prevents some idiomatic functional patterns based on
generator expressions.
"""
import abc
import functools
import inspect
class StateMonad(abc.ABC):
"""
The state monad.
:param f: Callable that takes the state as parameter and returns an
instance of the monad.
:type f: collections.abc.Callable
"""
    def __init__(self, f, name=None):
        self._f = f
        # Optional human-readable name; lift() passes the qualname of the
        # lifted function here via from_f(..., name=...).
        self.name = name
def __await__(self):
# What happens here is executed as if the code was inlined in the
# coroutine's body ("await x" is actually equivalent to
# "yield from x"):
# 1. "yield self" allows to relinquish control back to the loop,
# providing the monadic value that was awaited on by the user code.
# 2. Returning the result of the yield allows the loop to inject any
# value it sees fit using coro.send().
return (yield self)
def __call__(self, *args, **kwargs):
state = self.make_state(*args, **kwargs)
x, _ = self._f(state)
return x
def __init_subclass__(cls, **kwargs):
# The one inheriting directly from StateMonad is the base of the
# hierarchy
if StateMonad in cls.__bases__:
cls._MONAD_BASE = cls
super().__init_subclass__(**kwargs)
@classmethod
def from_f(cls, *args, **kwargs):
"""
Build an instance of the monad from a state transformation function.
The callback takes the current state as parameter and returns
``tuple(value, new_state)``.
"""
return cls._MONAD_BASE(*args, **kwargs)
@abc.abstractclassmethod
def make_state(cls, *args, **kwargs):
"""
Create the state from user-defined parameters. This is used by
:meth:`lisa.monad.StateMonad.__call__` in order to initialize the
state.
"""
pass
@classmethod
def pure(cls, x):
"""
Lift a value in the state monad, i.e. create a monad instance with a
function that returns the value and the state unchanged.
"""
return cls.from_f(lambda state: (x, state))
@classmethod
def lift(cls, f):
"""
Decorator used to lift a function into the monad, such that it can take
monadic parameters that will be evaluated in the current state, and
returns a monadic value as well.
"""
cls = cls._MONAD_BASE
def run(_f, args, kwargs):
call = lambda: _f(*args, **kwargs)
x = call()
if inspect.iscoroutine(x):
def body(state):
if inspect.getcoroutinestate(x) == inspect.CORO_CLOSED:
_x = call()
else:
_x = x
next_ = lambda: _x.send(None)
while True:
try:
future = next_()
except StopIteration as e:
val = e.value
break
else:
assert isinstance(future, cls)
try:
val, state = future._f(state)
except Exception as e:
                                # We need an intermediate variable here: the
                                # except-clause variable is unbound when the
                                # block ends, so the lambda below cannot close
                                # over "e" directly.
excep = e
next_ = lambda: _x.throw(excep)
else:
next_ = lambda: _x.send(val)
if isinstance(val, cls):
return val._f(state)
else:
return (val, state)
val = cls.from_f(body, name=f.__qualname__)
else:
if isinstance(x, cls):
val = x
else:
val = cls.pure(x)
return val
@functools.wraps(f)
def wrapper(*args, **kwargs):
async def _f(*args, **kwargs):
args = [
(await arg) if isinstance(arg, cls) else arg
for arg in args
]
kwargs = {
k: (await v) if isinstance(v, cls) else v
for k, v in kwargs.items()
}
return run(f, args, kwargs)
return run(_f, args, kwargs)
return wrapper
@classmethod
def get_state(cls):
"""
Returns a monadic value making the current state available.
To be used inside a lifted function using::
state = await StateMonad.get_state()
"""
return cls.from_f(lambda state: (state, state))
@classmethod
def set_state(cls, new_state):
"""
Returns a monadic value to set the current state.
To be used inside a lifted function using::
await StateMonad.set_state(new_state)
"""
return cls.from_f(lambda state: (state, new_state))
@classmethod
def modify_state(cls, f):
"""
Returns a monadic value to modify the current state.
To be used inside a lifted function using::
await StateMonad.modify_state(lambda state: new_state)
"""
def _f(state):
new_state = f(state)
return (new_state, new_state)
return cls.from_f(_f)
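if __name__ == '__main__':
    # Minimal usage sketch (hypothetical example, not part of the module's
    # API): a monad whose state is a plain integer counter.
    class Counter(StateMonad):
        @classmethod
        def make_state(cls):
            return 0

    @Counter.lift
    async def incremented():
        # "await" extracts the value carried by the monad, like <- in Haskell.
        state = await Counter.get_state()
        await Counter.set_state(state + 1)
        return state + 1

    @Counter.lift
    async def twice():
        a = await incremented()
        b = await incremented()
        return (a, b)

    # Calling the lifted function builds a monadic value; calling that value
    # creates the initial state via make_state() and runs the computation.
    assert twice()() == (1, 2)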
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/graceful-restart/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration options for BGP graceful-restart
"""
__slots__ = ("_path_helper", "_extmethods", "__enabled")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"afi-safis",
"afi-safi",
"graceful-restart",
"config",
]
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/graceful_restart/config/enabled (boolean)
YANG Description: This leaf indicates whether graceful-restart is enabled for
this AFI-SAFI
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/graceful_restart/config/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: This leaf indicates whether graceful-restart is enabled for
this AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
enabled = __builtin__.property(_get_enabled, _set_enabled)
_pyangbind_elements = OrderedDict([("enabled", enabled)])
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/graceful-restart/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration options for BGP graceful-restart
"""
__slots__ = ("_path_helper", "_extmethods", "__enabled")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"afi-safis",
"afi-safi",
"graceful-restart",
"config",
]
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/graceful_restart/config/enabled (boolean)
YANG Description: This leaf indicates whether graceful-restart is enabled for
this AFI-SAFI
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/graceful_restart/config/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: This leaf indicates whether graceful-restart is enabled for
this AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
enabled = __builtin__.property(_get_enabled, _set_enabled)
_pyangbind_elements = OrderedDict([("enabled", enabled)])
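if __name__ == "__main__":
    # Minimal usage sketch of the generated binding (illustrative only;
    # assumes pyangbind and its dependencies, e.g. bitarray, are installed).
    # The 'enabled' leaf defaults to YANGBool("false") and is exposed through
    # the generated property.
    cfg = config()
    print("default enabled:", cfg.enabled)
    cfg.enabled = True  # routed through _set_enabled()
    print("updated enabled:", cfg.enabled)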
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import argh
import os.path
import collections
import random
import re
import shutil
import socket
import sys
import tempfile
import time
import cloud_logging
from tqdm import tqdm
import gzip
import numpy as np
import tensorflow as tf
from tensorflow import gfile
import go
import dual_net
from gtp_wrapper import make_gtp_instance, MCTSPlayer
import preprocessing
import selfplay_mcts
from utils import logged_timer as timer
import evaluation
import sgf_wrapper
import utils
import qmeas
import goparams
# How many positions we should aggregate per 'chunk'.
EXAMPLES_PER_RECORD = goparams.EXAMPLES_PER_RECORD
# How many positions to draw from for our training window.
# AGZ used the most recent 500k games, which, assuming 250 moves/game = 125M
# WINDOW_SIZE = 125000000
#WINDOW_SIZE = 500000
WINDOW_SIZE = goparams.WINDOW_SIZE
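# For a concrete sense of scale (hypothetical numbers; the real values come
# from goparams): with EXAMPLES_PER_RECORD = 10000 and WINDOW_SIZE = 500000,
# the train() step below keeps the last WINDOW_SIZE // EXAMPLES_PER_RECORD = 50
# gathered chunks as its training window.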
def _ensure_dir_exists(directory):
if directory.startswith('gs://'):
return
os.makedirs(directory, exist_ok=True)
def gtp(load_file: "The path to the network model files"=None,
readouts: 'How many simulations to run per move'=10000,
#readouts: 'How many simulations to run per move'=2000,
cgos_mode: 'Whether to use CGOS time constraints'=False,
verbose=1):
engine = make_gtp_instance(load_file,
readouts_per_move=readouts,
verbosity=verbose,
cgos_mode=cgos_mode)
sys.stderr.write("GTP engine ready\n")
sys.stderr.flush()
while not engine.disconnect:
inpt = input()
# handle either single lines at a time
# or multiple commands separated by '\n'
try:
cmd_list = inpt.split("\n")
except:
cmd_list = [inpt]
for cmd in cmd_list:
engine_reply = engine.send(cmd)
sys.stdout.write(engine_reply)
sys.stdout.flush()
def bootstrap(
working_dir: 'tf.estimator working directory. If not set, defaults to a random tmp dir'=None,
model_save_path: 'Where to export the first bootstrapped generation'=None):
qmeas.start_time('bootstrap')
if working_dir is None:
with tempfile.TemporaryDirectory() as working_dir:
_ensure_dir_exists(working_dir)
_ensure_dir_exists(os.path.dirname(model_save_path))
dual_net.bootstrap(working_dir)
dual_net.export_model(working_dir, model_save_path)
else:
_ensure_dir_exists(working_dir)
_ensure_dir_exists(os.path.dirname(model_save_path))
dual_net.bootstrap(working_dir)
dual_net.export_model(working_dir, model_save_path)
qmeas.stop_time('bootstrap')
def train(
working_dir: 'tf.estimator working directory.',
chunk_dir: 'Directory where gathered training chunks are.',
model_save_path: 'Where to export the completed generation.',
generation_num: 'Which generation you are training.'=0):
qmeas.start_time('train')
tf_records = sorted(gfile.Glob(os.path.join(chunk_dir, '*.tfrecord.zz')))
tf_records = tf_records[-1 * (WINDOW_SIZE // EXAMPLES_PER_RECORD):]
print("Training from:", tf_records[0], "to", tf_records[-1])
with timer("Training"):
dual_net.train(working_dir, tf_records, generation_num)
dual_net.export_model(working_dir, model_save_path)
qmeas.stop_time('train')
def validate(
working_dir: 'tf.estimator working directory',
*tf_record_dirs: 'Directories where holdout data are',
checkpoint_name: 'Which checkpoint to evaluate (None=latest)'=None,
validate_name: 'Name for validation set (i.e., selfplay or human)'=None):
qmeas.start_time('validate')
tf_records = []
with timer("Building lists of holdout files"):
for record_dir in tf_record_dirs:
tf_records.extend(gfile.Glob(os.path.join(record_dir, '*.zz')))
first_record = os.path.basename(tf_records[0])
last_record = os.path.basename(tf_records[-1])
with timer("Validating from {} to {}".format(first_record, last_record)):
dual_net.validate(
working_dir, tf_records, checkpoint_name=checkpoint_name,
name=validate_name)
qmeas.stop_time('validate')
def evaluate(
black_model: 'The path to the model to play black',
white_model: 'The path to the model to play white',
output_dir: 'Where to write the evaluation results'='sgf/evaluate',
readouts: 'How many readouts to make per move.'=200,
games: 'the number of games to play'=20,
verbose: 'How verbose the players should be (see selfplay)' = 1):
qmeas.start_time('evaluate')
_ensure_dir_exists(output_dir)
with timer("Loading weights"):
black_net = dual_net.DualNetwork(black_model)
white_net = dual_net.DualNetwork(white_model)
winners = []
with timer("%d games" % games):
winners = evaluation.play_match(
black_net, white_net, games, readouts, output_dir, verbose)
qmeas.stop_time('evaluate')
white_count = 0
for win in winners:
if 'W' in win or 'w' in win:
white_count += 1
return white_count * 1.0 / games
# qmeas.report_profiler()
def evaluate_both(
prev_model: 'The path to previous model',
cur_model: 'The path to current model',
output_dir: 'Where to write the evaluation results'='sgf/evaluate',
readouts: 'How many readouts to make per move.'=200,
games: 'the number of games to play'=20,
verbose: 'How verbose the players should be (see selfplay)' = 1):
qmeas.start_time('evaluate')
_ensure_dir_exists(output_dir)
winners = []
with timer("%d games" % games):
winners = evaluation.play_match_many_instance_both(
prev_model, cur_model, games, readouts, output_dir, verbose)
qmeas.stop_time('evaluate')
white_count = 0
for win in winners:
if 'W' in win or 'w' in win:
white_count += 1
return white_count * 1.0 / (games*2)
# qmeas.report_profiler()
def evaluate_evenly(
black_model: 'The path to the model to play black',
white_model: 'The path to the model to play white',
output_dir: 'Where to write the evaluation results'='sgf/evaluate',
readouts: 'How many readouts to make per move.'=200,
games: 'the number of games to play'=20,
verbose: 'How verbose the players should be (see selfplay)' = 1):
    ''' Returns the white win rate; plays 'games' games on each side. '''
try:
result = (evaluate(black_model, white_model, output_dir, readouts, games, verbose) + (1 - evaluate(white_model, black_model, output_dir, readouts, games, verbose)))/ 2.0
except TypeError:
        # It is remotely possible that some weird twist of fate results in a
        # TypeError, e.g. due to corner cases in the evaluation. Our fallback
        # is simply to try again.
result = (evaluate(black_model, white_model, output_dir, readouts, games, verbose) + (1 - evaluate(white_model, black_model, output_dir, readouts, games, verbose)))/ 2.0
        # Should it fail twice, the error is allowed to propagate. If this is
        # being run by the main loop harness, raising here keeps the newest
        # model and goes back to selfplay.
return result
def evaluate_evenly_many(
prev_model: 'The path to previous model',
cur_model: 'The path to current model',
output_dir: 'Where to write the evaluation results'='sgf/evaluate',
readouts: 'How many readouts to make per move.'=200,
games: 'the number of games to play'=20,
verbose: 'How verbose the players should be (see selfplay)' = 1):
    ''' Returns the white win rate; plays 'games' games on each side. '''
try:
result = evaluate_both(prev_model, cur_model, output_dir, readouts, games, verbose)
except TypeError:
        # It is remotely possible that some weird twist of fate results in a
        # TypeError, e.g. due to corner cases in the evaluation. Our fallback
        # is simply to try again.
result = evaluate_both(prev_model, cur_model, output_dir, readouts, games, verbose)
        # Should it fail twice, the error is allowed to propagate. If this is
        # being run by the main loop harness, raising here keeps the newest
        # model and goes back to selfplay.
return result
def selfplay(
load_file: "The path to the network model files",
output_dir: "Where to write the games"="data/selfplay",
holdout_dir: "Where to write the games"="data/holdout",
output_sgf: "Where to write the sgfs"="sgf/",
readouts: 'How many simulations to run per move'=100,
verbose: '>=2 will print debug info, >=3 will print boards' = 1,
resign_threshold: 'absolute value of threshold to resign at' = 0.95,
holdout_pct: 'how many games to hold out for validation' = 0.05):
qmeas.start_time('selfplay')
clean_sgf = os.path.join(output_sgf, 'clean')
full_sgf = os.path.join(output_sgf, 'full')
_ensure_dir_exists(clean_sgf)
_ensure_dir_exists(full_sgf)
_ensure_dir_exists(output_dir)
_ensure_dir_exists(holdout_dir)
with timer("Loading weights from %s ... " % load_file):
network = dual_net.DualNetwork(load_file)
with timer("Playing game"):
player = selfplay_mcts.play(
network, readouts, resign_threshold, verbose)
output_name = '{}-{}'.format(int(time.time() * 1000 * 1000), socket.gethostname())
game_data = player.extract_data()
with gfile.GFile(os.path.join(clean_sgf, '{}.sgf'.format(output_name)), 'w') as f:
f.write(player.to_sgf(use_comments=False))
with gfile.GFile(os.path.join(full_sgf, '{}.sgf'.format(output_name)), 'w') as f:
f.write(player.to_sgf())
tf_examples = preprocessing.make_dataset_from_selfplay(game_data)
# Hold out 5% of games for evaluation.
if random.random() < holdout_pct:
fname = os.path.join(holdout_dir, "{}.tfrecord.zz".format(output_name))
else:
fname = os.path.join(output_dir, "{}.tfrecord.zz".format(output_name))
preprocessing.write_tf_examples(fname, tf_examples)
qmeas.stop_time('selfplay')
def selfplay_cache_model(
network: "The path to the network model files",
output_dir: "Where to write the games"="data/selfplay",
holdout_dir: "Where to write the games"="data/holdout",
output_sgf: "Where to write the sgfs"="sgf/",
readouts: 'How many simulations to run per move'=100,
verbose: '>=2 will print debug info, >=3 will print boards' = 1,
resign_threshold: 'absolute value of threshold to resign at' = 0.95,
holdout_pct: 'how many games to hold out for validation' = 0.05):
qmeas.start_time('selfplay')
clean_sgf = os.path.join(output_sgf, 'clean')
full_sgf = os.path.join(output_sgf, 'full')
_ensure_dir_exists(clean_sgf)
_ensure_dir_exists(full_sgf)
_ensure_dir_exists(output_dir)
_ensure_dir_exists(holdout_dir)
with timer("Playing game"):
player = selfplay_mcts.play(
network, readouts, resign_threshold, verbose)
output_name = '{}-{}'.format(int(time.time() * 1000 * 1000), socket.gethostname())
game_data = player.extract_data()
with gfile.GFile(os.path.join(clean_sgf, '{}.sgf'.format(output_name)), 'w') as f:
f.write(player.to_sgf(use_comments=False))
with gfile.GFile(os.path.join(full_sgf, '{}.sgf'.format(output_name)), 'w') as f:
f.write(player.to_sgf())
tf_examples = preprocessing.make_dataset_from_selfplay(game_data)
# Hold out 5% of games for evaluation.
if random.random() < holdout_pct:
fname = os.path.join(holdout_dir, "{}.tfrecord.zz".format(output_name))
else:
fname = os.path.join(output_dir, "{}.tfrecord.zz".format(output_name))
preprocessing.write_tf_examples(fname, tf_examples)
qmeas.stop_time('selfplay')
def gather(
input_directory: 'where to look for games'='data/selfplay/',
output_directory: 'where to put collected games'='data/training_chunks/',
examples_per_record: 'how many tf.examples to gather in each chunk'=EXAMPLES_PER_RECORD):
qmeas.start_time('gather')
_ensure_dir_exists(output_directory)
models = [model_dir.strip('/')
for model_dir in sorted(gfile.ListDirectory(input_directory))[-50:]]
with timer("Finding existing tfrecords..."):
model_gamedata = {
model: gfile.Glob(
os.path.join(input_directory, model, '*.tfrecord.zz'))
for model in models
}
print("Found %d models" % len(models))
for model_name, record_files in sorted(model_gamedata.items()):
print(" %s: %s files" % (model_name, len(record_files)))
meta_file = os.path.join(output_directory, 'meta.txt')
try:
with gfile.GFile(meta_file, 'r') as f:
already_processed = set(f.read().split())
except tf.errors.NotFoundError:
already_processed = set()
num_already_processed = len(already_processed)
for model_name, record_files in sorted(model_gamedata.items()):
if set(record_files) <= already_processed:
continue
print("Gathering files for %s:" % model_name)
for i, example_batch in enumerate(
tqdm(preprocessing.shuffle_tf_examples(examples_per_record, record_files))):
output_record = os.path.join(output_directory,
'{}-{}.tfrecord.zz'.format(model_name, str(i)))
preprocessing.write_tf_examples(
output_record, example_batch, serialize=False)
already_processed.update(record_files)
print("Processed %s new files" %
(len(already_processed) - num_already_processed))
with gfile.GFile(meta_file, 'w') as f:
f.write('\n'.join(sorted(already_processed)))
qmeas.stop_time('gather')
parser = argparse.ArgumentParser()
argh.add_commands(parser, [gtp, bootstrap, train,
selfplay, gather, evaluate, validate])
if __name__ == '__main__':
cloud_logging.configure()
argh.dispatch(parser)
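# Example invocations (illustrative only; the exact flags are derived from the
# function signatures above by argh, with underscores turned into dashes):
#   python main.py bootstrap --model-save-path=models/000000-bootstrap
#   python main.py selfplay models/000000-bootstrap --readouts=100
#   python main.py gather --input-directory=data/selfplay/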
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""sedmodel.py - classes and methods for storing parameters and predicting
observed spectra and photometry from them, given a Source object.
"""
import numpy as np
import os
from numpy.polynomial.chebyshev import chebval, chebvander
from .parameters import ProspectorParams
from scipy.stats import multivariate_normal as mvn
from sedpy.observate import getSED
from ..sources.constants import to_cgs_at_10pc as to_cgs
from ..sources.constants import cosmo, lightspeed, ckms, jansky_cgs
from ..utils.smoothing import smoothspec
__all__ = ["SpecModel", "PolySpecModel",
"SedModel", "PolySedModel", "PolyFitModel"]
class SedModel(ProspectorParams):
"""A subclass of :py:class:`ProspectorParams` that passes the models
through to an ``sps`` object and returns spectra and photometry, including
optional spectroscopic calibration and sky emission.
"""
def predict(self, theta, obs=None, sps=None, **extras):
"""Given a ``theta`` vector, generate a spectrum, photometry, and any
extras (e.g. stellar mass), including any calibration effects.
:param theta:
ndarray of parameter values, of shape ``(ndim,)``
:param obs:
An observation dictionary, containing the output wavelength array,
the photometric filter lists, and the observed fluxes and
uncertainties thereon. Assumed to be the result of
:py:func:`utils.obsutils.rectify_obs`
:param sps:
An `sps` object to be used in the model generation. It must have
the :py:func:`get_spectrum` method defined.
:param sigma_spec: (optional, unused)
The covariance matrix for the spectral noise. It is only used for
emission line marginalization.
:returns spec:
The model spectrum for these parameters, at the wavelengths
specified by ``obs['wavelength']``, including multiplication by the
calibration vector. Units of maggies
:returns phot:
The model photometry for these parameters, for the filters
specified in ``obs['filters']``. Units of maggies.
:returns extras:
Any extra aspects of the model that are returned. Typically this
will be `mfrac` the ratio of the surviving stellar mass to the
stellar mass formed.
"""
s, p, x = self.sed(theta, obs, sps=sps, **extras)
self._speccal = self.spec_calibration(obs=obs, **extras)
if obs.get('logify_spectrum', False):
s = np.log(s) + np.log(self._speccal)
else:
s *= self._speccal
return s, p, x
def sed(self, theta, obs=None, sps=None, **kwargs):
"""Given a vector of parameters ``theta``, generate a spectrum, photometry,
        and any extras (e.g. surviving mass fraction), **not** including any
        instrument calibration effects. The intrinsic spectrum thus produced is
        cached in the ``_spec`` attribute.
:param theta:
ndarray of parameter values.
:param obs:
An observation dictionary, containing the output wavelength array,
the photometric filter lists, and the observed fluxes and
uncertainties thereon. Assumed to be the result of
:py:func:`utils.obsutils.rectify_obs`
:param sps:
An `sps` object to be used in the model generation. It must have
the :py:func:`get_spectrum` method defined.
:returns spec:
The model spectrum for these parameters, at the wavelengths
specified by ``obs['wavelength']``. Default units are maggies, and
the calibration vector is **not** applied.
:returns phot:
The model photometry for these parameters, for the filters
specified in ``obs['filters']``. Units are maggies.
:returns extras:
Any extra aspects of the model that are returned. Typically this
            will be ``mfrac``, the ratio of the surviving stellar mass to the
            stellar mass formed.
"""
self.set_parameters(theta)
spec, phot, extras = sps.get_spectrum(outwave=obs['wavelength'],
filters=obs['filters'],
component=obs.get('component', -1),
lnwavegrid=obs.get('lnwavegrid', None),
**self.params)
spec *= obs.get('normalization_guess', 1.0)
# Remove negative fluxes.
try:
tiny = 1.0 / len(spec) * spec[spec > 0].min()
spec[spec < tiny] = tiny
except:
pass
spec = (spec + self.sky(obs))
self._spec = spec.copy()
return spec, phot, extras
def sky(self, obs):
"""Model for the *additive* sky emission/absorption"""
return 0.
def spec_calibration(self, theta=None, obs=None, **kwargs):
"""Implements an overall scaling of the spectrum, given by the
parameter ``'spec_norm'``
:returns cal: (float)
A scalar multiplicative factor that gives the ratio between the true
spectrum and the observed spectrum
"""
if theta is not None:
self.set_parameters(theta)
return 1.0 * self.params.get('spec_norm', 1.0)
def wave_to_x(self, wavelength=None, mask=slice(None), **extras):
"""Map unmasked wavelengths to the interval (-1, 1). Masked wavelengths may have x>1, x<-1
:param wavelength:
The input wavelengths. ndarray of shape ``(nwave,)``
:param mask: optional
The mask. slice or boolean array with ``True`` for unmasked elements.
The interval (-1, 1) will be defined only by unmasked wavelength points
:returns x:
The wavelength vector, remapped to the interval (-1, 1).
ndarray of same shape as ``wavelength``
"""
x = wavelength - (wavelength[mask]).min()
x = 2.0 * (x / (x[mask]).max()) - 1.0
return x
def mean_model(self, theta, obs, sps=None, sigma_spec=None, **extras):
"""Legacy wrapper around predict()
"""
return self.predict(theta, obs, sps=sps, sigma=sigma_spec, **extras)
class SpecModel(ProspectorParams):
"""A subclass of :py:class:`ProspectorParams` that passes the models
through to an ``sps`` object and returns spectra and photometry, including
optional spectroscopic calibration, and sky emission.
This class performs most of the conversion from intrinsic model spectrum to
observed quantities, and additionally can compute MAP emission line values
and penalties for marginalization over emission line amplitudes.
"""
def predict(self, theta, obs=None, sps=None, sigma_spec=None, **extras):
"""Given a ``theta`` vector, generate a spectrum, photometry, and any
extras (e.g. stellar mass), including any calibration effects.
:param theta:
ndarray of parameter values, of shape ``(ndim,)``
:param obs:
An observation dictionary, containing the output wavelength array,
the photometric filter lists, and the observed fluxes and
uncertainties thereon. Assumed to be the result of
:py:func:`utils.obsutils.rectify_obs`
:param sps:
An `sps` object to be used in the model generation. It must have
the :py:func:`get_galaxy_spectrum` method defined.
:param sigma_spec: (optional)
The covariance matrix for the spectral noise. It is only used for
emission line marginalization.
:returns spec:
The model spectrum for these parameters, at the wavelengths
specified by ``obs['wavelength']``, including multiplication by the
calibration vector. Units of maggies
:returns phot:
The model photometry for these parameters, for the filters
specified in ``obs['filters']``. Units of maggies.
:returns extras:
Any extra aspects of the model that are returned. Typically this
will be `mfrac` the ratio of the surviving stellar mass to the
stellar mass formed.
"""
# generate and cache model spectrum and info
self.set_parameters(theta)
self._wave, self._spec, self._mfrac = sps.get_galaxy_spectrum(**self.params)
self._zred = self.params.get('zred', 0)
self._eline_wave, self._eline_lum = sps.get_galaxy_elines()
# Flux normalize
self._norm_spec = self._spec * self.flux_norm()
# generate spectrum and photometry for likelihood
# predict_spec should be called before predict_phot
spec = self.predict_spec(obs, sigma_spec)
phot = self.predict_phot(obs['filters'])
return spec, phot, self._mfrac
def predict_spec(self, obs, sigma_spec, **extras):
"""Generate a prediction for the observed spectrum. This method assumes
that the parameters have been set and that the following attributes are
present and correct:
+ ``_wave`` - The SPS restframe wavelength array
+ ``_zred`` - Redshift
+ ``_norm_spec`` - Observed frame spectral fluxes, in units of maggies
+ ``_eline_wave`` and ``_eline_lum`` - emission line parameters from the SPS model
It generates the following attributes
+ ``_outwave``
+ ``_speccal``
+ ``_elinespec``
And if emission line marginalization is being performed, numerous
quantities related to the emission lines are also cached
(see ``get_el()`` for details.)
:param obs:
An observation dictionary, containing the output wavelength array,
the photometric filter lists, and the observed fluxes and
uncertainties thereon. Assumed to be the result of
:py:meth:`utils.obsutils.rectify_obs`
:param sigma_spec: (optional)
The covariance matrix for the spectral noise. It is only used for
emission line marginalization.
:returns spec:
The prediction for the observed frame spectral flux these
parameters, at the wavelengths specified by ``obs['wavelength']``,
including multiplication by the calibration vector.
ndarray of shape ``(nwave,)`` in units of maggies.
"""
# redshift wavelength
obs_wave = self.observed_wave(self._wave, do_wavecal=False)
self._outwave = obs.get('wavelength', obs_wave)
# cache eline parameters
self.cache_eline_parameters(obs)
# smooth and put on output wavelength grid
smooth_spec = self.smoothspec(obs_wave, self._norm_spec)
# calibration
self._speccal = self.spec_calibration(obs=obs, spec=smooth_spec, **extras)
calibrated_spec = smooth_spec * self._speccal
# generate (after fitting) the emission line spectrum
emask = self._eline_wavelength_mask
# If we're marginalizing over emission lines, and at least one pixel
# has an emission line in it
if self.params.get('marginalize_elines', False) & (emask.any()):
self._elinespec = self.get_el(obs, calibrated_spec, sigma_spec)
calibrated_spec[emask] += self._elinespec.sum(axis=1)
# Otherwise, if FSPS is not adding emission lines to the spectrum, we
# add emission lines to valid pixels here.
elif (self.params.get("nebemlineinspec", True) is False) & (emask.any()):
self._elinespec = self.get_eline_spec(wave=self._wave[emask])
if emask.any():
calibrated_spec[emask] += self._elinespec.sum(axis=1)
return calibrated_spec
def predict_phot(self, filters):
"""Generate a prediction for the observed photometry. This method assumes
that the parameters have been set and that the following attributes are
present and correct:
+ ``_wave`` - The SPS restframe wavelength array
+ ``_zred`` - Redshift
+ ``_norm_spec`` - Observed frame spectral fluxes, in units of maggies.
+ ``_eline_wave`` and ``_eline_lum`` - emission line parameters from the SPS model
:param filters:
List of :py:class:`sedpy.observate.Filter` objects.
If there is no photometry, ``None`` should be supplied
:returns phot:
Observed frame photometry of the model SED through the given filters.
ndarray of shape ``(len(filters),)``, in units of maggies.
If ``filters`` is None, this returns 0.0
"""
if filters is None:
return 0.0
# generate photometry w/o emission lines
obs_wave = self.observed_wave(self._wave, do_wavecal=False)
flambda = self._norm_spec * lightspeed / obs_wave**2 * (3631*jansky_cgs)
mags = getSED(obs_wave, flambda, filters)
phot = np.atleast_1d(10**(-0.4 * mags))
# generate emission-line photometry
if self.params.get('nebemlineinspec', False) is False:
phot += self.nebline_photometry(filters)
return phot
def nebline_photometry(self, filters):
"""Compute the emission line contribution to photometry. This requires
several cached attributes:
+ ``_ewave_obs``
+ ``_eline_lum``
:param filters:
List of :py:class:`sedpy.observate.Filter` objects
:returns nebflux:
The flux of the emission line through the filters, in units of
maggies. ndarray of shape ``(len(filters),)``
"""
elams = self._ewave_obs
# We have to remove the extra (1+z) since this is flux, not a flux density
# Also we convert to cgs
elums = self._eline_lum * self.flux_norm() / (1 + self._zred) * (3631*jansky_cgs)
# loop over filters
flux = np.zeros(len(filters))
for i, filt in enumerate(filters):
# calculate transmission at line wavelengths
trans = np.interp(elams, filt.wavelength, filt.transmission,
left=0., right=0.)
# include all lines where transmission is non-zero
idx = (trans > 0)
if True in idx:
flux[i] = (trans[idx]*elams[idx]*elums[idx]).sum() / filt.ab_zero_counts
return flux
def flux_norm(self):
"""Compute the scaling required to go from Lsun/Hz/Msun to maggies.
Note this includes the (1+z) factor required for flux densities.
:returns norm: (float)
The normalization factor, scalar float.
"""
# distance factor
if (self._zred == 0) | ('lumdist' in self.params):
lumdist = self.params.get('lumdist', 1e-5)
else:
lumdist = cosmo.luminosity_distance(self._zred).to('Mpc').value
dfactor = (lumdist * 1e5)**2
# Mass normalization
mass = np.sum(self.params.get('mass', 1.0))
# units
unit_conversion = to_cgs / (3631*jansky_cgs) * (1 + self._zred)
return mass * unit_conversion / dfactor
def cache_eline_parameters(self, obs, nsigma=5):
""" This computes and caches a number of quantities that are relevant
for predicting the emission lines, and computing the MAP values thereof,
including
+ ``_ewave_obs`` - Observed frame wavelengths (AA) of all emission lines.
+ ``_eline_sigma_kms`` - Dispersion (in km/s) of all the emission lines
+ ``_elines_to_fit`` - If fitting and marginalizing over emission lines,
this stores indices of the lines to actually fit, as a boolean
array. Only lines that are within ``nsigma`` of an observed
wavelength points are included.
+ ``_eline_wavelength_mask`` - A mask of the `_outwave` vector that
indicates which pixels to use in the emission line fitting.
Only pixels within ``nsigma`` of an emission line are used.
Can be subclassed to add more sophistication
redshift - first looks for ``eline_delta_zred``, and defaults to ``zred``
sigma - first looks for ``eline_sigma``, defaults to 100 km/s
:param nsigma: (float, optional, default: 5.)
Number of sigma from a line center to use for defining which lines
to fit and useful spectral elements for the fitting. float.
"""
# observed wavelengths
eline_z = self.params.get("eline_delta_zred", 0.0)
self._ewave_obs = (1 + eline_z + self._zred) * self._eline_wave
# observed linewidths
nline = self._ewave_obs.shape[0]
self._eline_sigma_kms = np.atleast_1d(self.params.get('eline_sigma', 100.0))
self._eline_sigma_kms = (self._eline_sigma_kms[None] * np.ones(nline)).squeeze()
#self._eline_sigma_lambda = eline_sigma_kms * self._ewave_obs / ckms
# exit gracefully if not fitting lines
if (obs.get('spectrum', None) is None):
self._elines_to_fit = None
self._eline_wavelength_mask = np.array([], dtype=bool)
return
# --- lines to fit ---
# lines specified by user, but remove any lines which do not
# have an observed pixel within 5sigma of their center
eline_names = self.params.get('lines_to_fit', [])
# FIXME: this should be moved to instantiation and only done once
SPS_HOME = os.getenv('SPS_HOME')
emline_info = np.genfromtxt(os.path.join(SPS_HOME, 'data', 'emlines_info.dat'),
dtype=[('wave', 'f8'), ('name', 'S20')],
delimiter=',')
# restrict to specific emission lines?
if (len(eline_names) == 0):
elines_index = np.ones(emline_info.shape, dtype=bool)
else:
elines_index = np.array([True if name in eline_names else False
for name in emline_info['name']], dtype=bool)
eline_sigma_lambda = self._ewave_obs / ckms * self._eline_sigma_kms
new_mask = np.abs(self._outwave-self._ewave_obs[:, None]) < nsigma*eline_sigma_lambda[:, None]
self._elines_to_fit = elines_index & new_mask.any(axis=1)
# --- wavelengths corresponding to those lines ---
# within N sigma of the central wavelength
self._eline_wavelength_mask = new_mask[self._elines_to_fit, :].any(axis=0)
def get_el(self, obs, calibrated_spec, sigma_spec=None):
"""Compute the maximum likelihood and, optionally, MAP emission line
amplitudes for lines that fall within the observed spectral range. Also
compute and cache the analytic penalty to log-likelihood from
marginalizing over the emission line amplitudes. This is cached as
``_ln_eline_penalty``. The emission line amplitudes (in maggies) at
`_eline_lums` are updated to the ML values for the fitted lines.
:param obs:
A dictionary containing the ``'spectrum'`` and ``'unc'`` keys that
are observed fluxes and uncertainties, both ndarrays of shape
``(n_wave,)``
:param calibrated_spec:
The predicted observer-frame spectrum in the same units as the
observed spectrum, ndarray of shape ``(n_wave,)``
:param sigma_spec:
Spectral covariance matrix, if using a non-trivial noise model.
:returns el:
The maximum likelihood emission line flux densities.
ndarray of shape ``(n_wave_neb, n_fitted_lines)`` where
``n_wave_neb`` is the number of wavelength elements within
``nsigma`` of a line, and ``n_fitted_lines`` is the number of lines
that fall within ``nsigma`` of a wavelength pixel. Units are same
as ``calibrated_spec``
"""
# ensure we have no emission lines in spectrum
# and we definitely want them.
assert self.params['nebemlineinspec'] is False
assert self.params['add_neb_emission'] is True
        # generate Gaussians on the appropriate wavelength grid
idx = self._elines_to_fit
emask = self._eline_wavelength_mask
nebwave = self._outwave[emask]
eline_gaussians = self.get_eline_gaussians(lineidx=idx, wave=nebwave)
# generate residuals
delta = obs['spectrum'][emask] - calibrated_spec[emask]
# generate line amplitudes in observed flux units
units_factor = self.flux_norm() / (1 + self._zred)
calib_factor = np.interp(self._ewave_obs[idx], nebwave, self._speccal[emask])
linecal = units_factor * calib_factor
alpha_breve = self._eline_lum[idx] * linecal
# generate inverse of sigma_spec
if sigma_spec is None:
sigma_spec = obs["unc"]**2
sigma_spec = sigma_spec[emask]
if sigma_spec.ndim == 2:
sigma_inv = np.linalg.pinv(sigma_spec)
else:
sigma_inv = np.diag(1. / sigma_spec)
# calculate ML emission line amplitudes and covariance matrix
sigma_alpha_hat = np.linalg.pinv(np.dot(eline_gaussians.T, np.dot(sigma_inv, eline_gaussians)))
alpha_hat = np.dot(sigma_alpha_hat, np.dot(eline_gaussians.T, np.dot(sigma_inv, delta)))
# generate likelihood penalty term (and MAP amplitudes)
# FIXME: Cache line amplitude covariance matrices?
if self.params.get('use_eline_prior', False):
# Incorporate gaussian priors on the amplitudes
sigma_alpha_breve = np.diag((self.params['eline_prior_width'] * np.abs(alpha_breve)))**2
M = np.linalg.pinv(sigma_alpha_hat + sigma_alpha_breve)
alpha_bar = (np.dot(sigma_alpha_breve, np.dot(M, alpha_hat)) +
np.dot(sigma_alpha_hat, np.dot(M, alpha_breve)))
sigma_alpha_bar = np.dot(sigma_alpha_hat, np.dot(M, sigma_alpha_breve))
K = ln_mvn(alpha_hat, mean=alpha_breve, cov=sigma_alpha_breve+sigma_alpha_hat) - \
ln_mvn(alpha_hat, mean=alpha_hat, cov=sigma_alpha_hat)
else:
            # simply use the ML values and the associated marginalization penalty
alpha_bar = alpha_hat
K = ln_mvn(alpha_hat, mean=alpha_hat, cov=sigma_alpha_hat)
# Cache the ln-penalty
self._ln_eline_penalty = K
# Store fitted emission line luminosities in physical units
self._eline_lum[idx] = alpha_bar / linecal
# return the maximum-likelihood line spectrum in observed units
return alpha_hat * eline_gaussians
def get_eline_spec(self, wave=None):
"""Compute a complete model emission line spectrum. This should only
be run after calling predict(), as it accesses cached information.
Relatively slow, useful for display purposes
:param wave: (optional, default: ``None``)
The wavelength ndarray on which to compute the emission line spectrum.
If not supplied, the ``_outwave`` vector is used.
:returns eline_spec:
An (n_line, n_wave) ndarray
"""
gaussians = self.get_eline_gaussians(wave=wave)
elums = self._eline_lum * self.flux_norm() / (1 + self._zred)
return elums * gaussians
def get_eline_gaussians(self, lineidx=slice(None), wave=None):
"""Generate a set of unit normals with centers and widths given by the
previously cached emission line observed-frame wavelengths and emission
line widths.
:param lineidx: (optional)
A boolean array or integer array used to subscript the cached
lines. Gaussian vectors will only be constructed for the lines
thus subscripted.
:param wave: (optional)
The wavelength array (in Angstroms) used to construct the gaussian
vectors. If not given, the cached `_outwave` array will be used.
:returns gaussians:
The unit gaussians for each line, in units Lsun/Hz.
ndarray of shape (n_wave, n_line)
"""
if wave is None:
warr = self._outwave
else:
warr = wave
# generate gaussians
mu = np.atleast_2d(self._ewave_obs[lineidx])
sigma = np.atleast_2d(self._eline_sigma_kms[lineidx])
dv = ckms * (warr[:, None]/mu - 1)
dv_dnu = ckms * warr[:, None]**2 / (lightspeed * mu)
eline_gaussians = 1. / (sigma * np.sqrt(np.pi * 2)) * np.exp(-dv**2 / (2 * sigma**2))
eline_gaussians *= dv_dnu
# outside of the wavelengths defined by the spectrum? (why this dependence?)
# FIXME what is this?
eline_gaussians /= -np.trapz(eline_gaussians, 3e18/warr[:, None], axis=0)
return eline_gaussians
def smoothspec(self, wave, spec):
"""Smooth the spectrum. See :py:func:`prospect.utils.smoothing.smoothspec`
for details.
"""
sigma = self.params.get("sigma_smooth", 100)
outspec = smoothspec(wave, spec, sigma, outwave=self._outwave, **self.params)
return outspec
def observed_wave(self, wave, do_wavecal=False):
"""Convert the restframe wavelngth grid to the observed frame wavelength
grid, optionally including wavelength calibration adjustments. Requires
that the ``_zred`` attribute is already set.
:param wave:
The wavelength array
"""
# FIXME: missing wavelength calibration code
if do_wavecal:
raise NotImplementedError
a = 1 + self._zred
return wave * a
def wave_to_x(self, wavelength=None, mask=slice(None), **extras):
"""Map unmasked wavelengths to the interval -1, 1
masked wavelengths may have x>1, x<-1
"""
x = wavelength - (wavelength[mask]).min()
x = 2.0 * (x / (x[mask]).max()) - 1.0
return x
def spec_calibration(self, **kwargs):
return np.ones_like(self._outwave)
def mean_model(self, theta, obs, sps=None, sigma=None, **extras):
"""Legacy wrapper around predict()
"""
return self.predict(theta, obs, sps=sps, sigma_spec=sigma, **extras)
class PolySpecModel(SpecModel):
"""This is a subclass of *SpecModel* that generates the multiplicative
calibration vector at each model `predict` call as the maximum likelihood
chebyshev polynomial describing the ratio between the observed and the model
spectrum.
"""
def spec_calibration(self, theta=None, obs=None, spec=None, **kwargs):
"""Implements a Chebyshev polynomial calibration model. This uses
least-squares to find the maximum-likelihood Chebyshev polynomial of a
certain order describing the ratio of the observed spectrum to the model
spectrum, conditional on all other parameters, using least squares. If
emission lines are being marginalized out, they are excluded from the
least-squares fit.
        :returns cal:
            The multiplicative calibration vector, a polynomial given by
            :math:`1 + \sum_{m=0}^{M} a_{m} T_m(x)` (see the illustrative
            sketch after this class).
"""
if theta is not None:
self.set_parameters(theta)
# norm = self.params.get('spec_norm', 1.0)
polyopt = ((self.params.get('polyorder', 0) > 0) &
(obs.get('spectrum', None) is not None))
if polyopt:
order = self.params['polyorder']
# generate mask
# remove region around emission lines if doing analytical marginalization
mask = obs.get('mask', np.ones_like(obs['wavelength'], dtype=bool)).copy()
if self.params.get('marginalize_elines', False):
mask[self._eline_wavelength_mask] = 0
# map unmasked wavelengths to the interval -1, 1
# masked wavelengths may have x>1, x<-1
x = self.wave_to_x(obs["wavelength"], mask)
y = (obs['spectrum'] / spec)[mask] - 1.0
yerr = (obs['unc'] / spec)[mask]
yvar = yerr**2
A = chebvander(x[mask], order)
ATA = np.dot(A.T, A / yvar[:, None])
reg = self.params.get('poly_regularization', 0.)
if np.any(reg > 0):
ATA += reg**2 * np.eye(order)
ATAinv = np.linalg.inv(ATA)
c = np.dot(ATAinv, np.dot(A.T, y / yvar))
Afull = chebvander(x, order)
poly = np.dot(Afull, c)
self._poly_coeffs = c
else:
poly = np.zeros_like(self._outwave)
return (1.0 + poly)
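def _chebyshev_calibration_sketch(wave, obs_spec, obs_unc, model_spec, order=3):
    """Standalone, illustrative version of the least-squares fit performed by
    :py:meth:`PolySpecModel.spec_calibration` (a hypothetical helper, not part
    of the prospector API): find the Chebyshev polynomial of degree ``order``
    that best describes ``obs_spec / model_spec`` and return the corresponding
    multiplicative calibration vector. Masking and regularization are omitted.
    """
    # map wavelengths onto the interval (-1, 1)
    x = wave - wave.min()
    x = 2.0 * (x / x.max()) - 1.0
    # weighted least squares on the fractional residual of the ratio
    y = obs_spec / model_spec - 1.0
    yvar = (obs_unc / model_spec)**2
    A = chebvander(x, order)
    ATA = np.dot(A.T, A / yvar[:, None])
    c = np.dot(np.linalg.inv(ATA), np.dot(A.T, y / yvar))
    return 1.0 + np.dot(A, c)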
class PolySedModel(SedModel):
"""This is a subclass of SedModel that replaces the calibration vector with
the maximum likelihood chebyshev polynomial describing the difference
between the observed and the model spectrum.
"""
def spec_calibration(self, theta=None, obs=None, **kwargs):
"""Implements a Chebyshev polynomial calibration model. This uses
least-squares to find the maximum-likelihood Chebyshev polynomial of a
certain order describing the ratio of the observed spectrum to the
model spectrum, conditional on all other parameters, using least
squares. The first coefficient is always set to 1, as the overall
normalization is controlled by ``spec_norm``.
:returns cal:
            A polynomial given by ``spec_norm`` * :math:`(1 + \sum_{m=1}^{M} a_{m} T_m(x))`.
"""
if theta is not None:
self.set_parameters(theta)
norm = self.params.get('spec_norm', 1.0)
polyopt = ((self.params.get('polyorder', 0) > 0) &
(obs.get('spectrum', None) is not None))
if polyopt:
order = self.params['polyorder']
mask = obs.get('mask', slice(None))
# map unmasked wavelengths to the interval -1, 1
# masked wavelengths may have x>1, x<-1
x = self.wave_to_x(obs["wavelength"], mask)
y = (obs['spectrum'] / self._spec)[mask] / norm - 1.0
yerr = (obs['unc'] / self._spec)[mask] / norm
yvar = yerr**2
A = chebvander(x[mask], order)[:, 1:]
ATA = np.dot(A.T, A / yvar[:, None])
reg = self.params.get('poly_regularization', 0.)
if np.any(reg > 0):
ATA += reg**2 * np.eye(order)
ATAinv = np.linalg.inv(ATA)
c = np.dot(ATAinv, np.dot(A.T, y / yvar))
Afull = chebvander(x, order)[:, 1:]
poly = np.dot(Afull, c)
self._poly_coeffs = c
else:
poly = 0.0
return (1.0 + poly) * norm
class PolyFitModel(SedModel):
"""This is a subclass of *SedModel* that generates the multiplicative
calibration vector as a Chebyshev polynomial described by the
``'poly_coeffs'`` parameter of the model, which may be free (fittable)
"""
def spec_calibration(self, theta=None, obs=None, **kwargs):
"""Implements a Chebyshev polynomial calibration model. This only
occurs if ``"poly_coeffs"`` is present in the :py:attr:`params`
dictionary, otherwise the value of ``params["spec_norm"]`` is returned.
:param theta: (optional)
If given, set :py:attr:`params` using this vector before
calculating the calibration polynomial. ndarray of shape
``(ndim,)``
:param obs:
A dictionary of observational data, must contain the key
``"wavelength"``
:returns cal:
If ``params["cal_type"]`` is ``"poly"``, a polynomial given by
``'spec_norm'`` :math:`\times (1 + \Sum_{m=1}^M```'poly_coeffs'[m-1]``:math:` \times T_n(x))`.
Otherwise, the exponential of a Chebyshev polynomial.
"""
if theta is not None:
self.set_parameters(theta)
if ('poly_coeffs' in self.params):
mask = obs.get('mask', slice(None))
# map unmasked wavelengths to the interval -1, 1
# masked wavelengths may have x>1, x<-1
x = self.wave_to_x(obs["wavelength"], mask)
# get coefficients. Here we are setting the first term to 0 so we
# can deal with it separately for the exponential and regular
# multiplicative cases
c = np.insert(self.params['poly_coeffs'], 0, 0)
poly = chebval(x, c)
# switch to have spec_norm be multiplicative or additive depending
# on whether the calibration model is multiplicative in exp^poly or
# just poly
if self.params.get('cal_type', 'exp_poly') == 'poly':
return (1.0 + poly) * self.params.get('spec_norm', 1.0)
else:
return np.exp(self.params.get('spec_norm', 0) + poly)
else:
return 1.0 * self.params.get('spec_norm', 1.0)
def ln_mvn(x, mean=None, cov=None):
"""Calculates the natural logarithm of the multivariate normal PDF
evaluated at `x`
:param x:
locations where samples are desired.
:param mean:
Center(s) of the gaussians.
:param cov:
Covariances of the gaussians.
"""
ndim = mean.shape[-1]
dev = x - mean
log_2pi = np.log(2 * np.pi)
sign, log_det = np.linalg.slogdet(cov)
exp = np.dot(dev.T, np.dot(np.linalg.pinv(cov, rcond=1e-12), dev))
return -0.5 * (ndim * log_2pi + log_det + exp)
def gauss(x, mu, A, sigma):
"""Sample multiple gaussians at positions x.
:param x:
locations where samples are desired.
:param mu:
Center(s) of the gaussians.
:param A:
Amplitude(s) of the gaussians, defined in terms of total area.
:param sigma:
        Dispersion(s) of the gaussians, in units of x.
:returns val:
The values of the sum of gaussians at x.
"""
mu, A, sigma = np.atleast_2d(mu), np.atleast_2d(A), np.atleast_2d(sigma)
val = A / (sigma * np.sqrt(np.pi * 2)) * np.exp(-(x[:, None] - mu)**2 / (2 * sigma**2))
return val.sum(axis=-1)
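if __name__ == "__main__":
    # Quick, illustrative self-check of the helpers above (run with
    # ``python -m`` from inside the package so the relative imports resolve):
    # ln_mvn() should agree with scipy's multivariate normal log-density, and
    # gauss() is area-normalized, so integrating over x recovers the amplitude.
    _x = np.array([0.3, -1.2, 0.7])
    _mean = np.zeros(3)
    _cov = np.diag([1.0, 2.0, 0.5])
    assert np.allclose(ln_mvn(_x, mean=_mean, cov=_cov),
                       mvn.logpdf(_x, mean=_mean, cov=_cov))
    _grid = np.linspace(-50.0, 50.0, 20001)
    _area = np.trapz(gauss(_grid, mu=0.0, A=2.5, sigma=3.0), _grid)
    assert np.isclose(_area, 2.5)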
|
|
import os
import sys
import inspect
import traceback
saved_path = sys.path[:]
sys.path.append(os.path.dirname(os.path.abspath(inspect.getsourcefile(lambda:0))))
from internal.memcached_connection import MemcachedBinaryConnection
from internal.memcached_connection import STATUS, COMMANDS
mc = MemcachedBinaryConnection("127.0.0.1", iproto.py_con.port)
def iequal(left, right, level = 1):
if (left != right):
tb = traceback.extract_stack()[-(level + 1)]
print("Error on line %s:%d: %s not equal %s" % (tb[0], tb[1],
repr(left), repr(right)))
return False
return True
def inequal(left, right, level = 0):
if (left == right):
tb = traceback.extract_stack()[-(level + 1)]
print("Error on line %s:%d: %s equal %s" % (tb[0], tb[1],
repr(left), repr(right)))
return False
return True
def issert(stmt, level = 0):
    if not bool(stmt):
        tb = traceback.extract_stack()[-(level + 1)]
        print("Error on line %s:%d: statement is not True" % (tb[0], tb[1]))
        return False
    return True
def __check(res, flags, val, level = 0):
return iequal(res.get('flags', -1), flags, level + 1) and \
iequal(res.get('val', val), val, level + 1)
def check(key, flags, val, level = 0):
    res = mc.get(key)
    return __check(res[0], flags, val, level + 1)
def set(key, expire, flags, value):
res = mc.set(key, value, expire, flags)
return check(key, flags, value, 1)
def empty(key, level = 0):
res = mc.get(key)
return iequal(res[0]['status'], STATUS['KEY_ENOENT'], 2 + level)
def delete(key, when):
res = mc.delete(key)
empty(key, 1)
print("""#---------------------------# flush and noop tests #--------------------------#""")
mc.flush()
iequal(mc.noop()[0]['op'], COMMANDS['noop'][0])
set("x", 5, 19, "somevalue")
delete("x", 0)
set("x", 5, 19, "somevaluex")
set("y", 5, 17, "somevaluey")
mc.flush()
empty("x")
empty("y")
print("""#-----------------------------------------------------------------------------#
################################ Diagnostics ADD ##############################
#-----------------------------------------------------------------------------#""")
empty("i")
mc.add("i", "ex", 10, 5)
check("i", 5, "ex")
res = mc.add("i", "ex2", 10, 5)
iequal(res[0]['status'], STATUS['KEY_EEXISTS'])
check("i", 5, "ex")
print("""#-----------------------------------------------------------------------------#
############################# Diagnostics REPLACE #############################
#-----------------------------------------------------------------------------#""")
# Diagnostics for replace
empty("j")
res = mc.replace("j", "ex", 5, 19)
iequal(res[0]['status'], STATUS['KEY_ENOENT'])
empty("j")
mc.add("j", "ex2", 5, 14)
check("j", 14, "ex2")
mc.replace("j", "ex3", 5, 24)
check("j", 24, "ex3")
print("""#-----------------------------------------------------------------------------#
############################# Diagnostics multiGET ############################
#-----------------------------------------------------------------------------#""")
# Diagnostics "MultiGet"
mc.add("xx", "ex", 5, 1)
mc.add("wye", "why", 5, 2)
mc.getq("xx", nosend=True)
mc.getq("wye", nosend=True)
mc.getq("zed", nosend=True)
res = mc.noop()
__check(res[0], 1, "ex")
__check(res[1], 2, "why")
iequal(len(res), 3)
print("""#-----------------------------------------------------------------------------#
############################ Diagnostics INCR/DECR ############################
#-----------------------------------------------------------------------------#""")
# Test Increment
res = mc.flush()
res = mc.incr("x", 0, expire=0)
iequal(res[0]['val'], 0)
res = mc.incr("x", 1, expire=0)
iequal(res[0]['val'], 1)
res = mc.incr("x", 211, expire=0)
iequal(res[0]['val'], 212)
res = mc.incr("x", 2**33, expire=0)
iequal(res[0]['val'], 8589934804)
print("""#------------------------------# increment error #----------------------------#""")
mc.set("issue48", "text", 0, 0)
res = mc.incr("issue48")
iequal(res[0]['status'], STATUS['DELTA_BADVAL'])
check("issue48", 0, "text")
res = mc.decr("issue48")
iequal(res[0]['status'], STATUS['DELTA_BADVAL'])
check("issue48", 0, "text")
print("""#------------------------------# test decrement #-----------------------------#""")
mc.flush()
res = mc.incr("x", 0, 0, 5)
iequal(res[0]['val'], 5)
res = mc.decr("x")
iequal(res[0]['val'], 4)
res = mc.decr("x", 211)
iequal(res[0]['val'], 0)
print("""#---------------------------------# bug 220 #---------------------------------#""")
res = mc.set("bug220", "100", 0, 0)
ires = mc.incr("bug220", 999)
inequal(res[0]['cas'], ires[0]['cas']) and iequal(ires[0]['val'], 1099)
ires2 = mc.get("bug220")
iequal(ires2[0]['cas'], ires[0]['cas'])
ires = mc.incr("bug220", 999)
inequal(res[0]['cas'], ires[0]['cas']) and iequal(ires[0]['val'], 2098)
ires2 = mc.get("bug220")
iequal(ires2[0]['cas'], ires[0]['cas'])
print("""#----------------------------------# bug 21 #---------------------------------#""")
mc.add("bug21", "9223372036854775807", 0, 0)
res = mc.incr("bug21")
iequal(res[0]['val'], 9223372036854775808)
res = mc.incr("bug21")
iequal(res[0]['val'], 9223372036854775809)
res = mc.decr("bug21")
iequal(res[0]['val'], 9223372036854775808)
print("""#-----------------------------------------------------------------------------#
################################ Diagnostics CAS ##############################
#-----------------------------------------------------------------------------#""")
mc.flush()
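# SET with a non-zero CAS on a missing key must fail with KEY_ENOENT,
# and SET with a stale CAS on an existing key must fail with KEY_EEXISTS.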
res = mc.set("x", "bad value", 5, 19, cas=0x7FFFFFF)
iequal(res[0]['status'], STATUS['KEY_ENOENT'])
res = mc.add("x", "original value", 19, 5)
ires2 = mc.get("x")
iequal(res[0]['cas'], ires2[0]['cas']) and iequal(ires2[0]['val'], 'original value')
res = mc.set("x", "broken value", 5, 19, cas=ires2[0]["cas"] + 1)
iequal(res[0]['status'], STATUS['KEY_EEXISTS'])
res = mc.set("x", "new value", 5, 19, cas=ires2[0]["cas"])
ires = mc.get("x")
iequal(res[0]['cas'], ires[0]['cas']) and iequal(ires[0]['val'], 'new value')
res = mc.set("x", "replay value", 5, 19, cas=ires2[0]["cas"])
iequal(res[0]['status'], STATUS['KEY_EEXISTS'])
def check_empty_response(con):
res = con.noop()
return iequal(len(res), 1, 1) and iequal(res[0]['op'], COMMANDS['noop'][0])
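# Quiet commands reply only on error, so after a successful silent operation
# the NOOP reply should be the only response on the connection.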
print("""#--------------------------------# silent get #-------------------------------#""")
key, val, flags = "silentset", "siltensetval", 82
empty(key)
mc.setq(key, val, flags=flags, expire=0, nosend=True)
check_empty_response(mc)
check(key, flags, val)
print("""#--------------------------------# silent put #-------------------------------#""")
key, val, flags = "silentadd", "siltenaddval", 82
empty(key)
mc.addq(key, val, flags=flags, expire=0, nosend=True)
check_empty_response(mc)
check(key, flags, val)
print("""#------------------------------# silent replace #-----------------------------#""")
key, val, flags = "silentreplace", "somevalue", 829
empty(key)
mc.add(key, "xval", 0, 831)
check(key, 831, "xval")
mc.replaceq(key, val, flags=flags, nosend=True)
check_empty_response(mc)
check(key, flags, val)
print("""#------------------------------# silent delete #------------------------------#""")
key, val, flags = "silentdelete", "someval", 19
empty(key)
mc.set(key, val, flags=flags, expire=0)
check(key, flags, val)
mc.deleteq(key, nosend=True)
empty(key)
print("""#-----------------------------# silent increment #----------------------------#""")
key, opaque = "silentincr", 98428747
empty(key)
mc.incrq(key, 0, 0, 0, nosend=True)
res = mc.incr (key, 0)
iequal(res[0]['val'], 0)
mc.incrq(key, 8, 0, 0, nosend=True)
res = mc.incr (key, 0)
iequal(res[0]['val'], 8)
# Silent decrement
print("""#-----------------------------# silent decrement #----------------------------#""")
key, opaque = "silentdecr", 98428747
empty(key)
mc.decrq(key, 0, 0, 185, nosend=True)
res = mc.decr (key, 0)
iequal(res[0]['val'], 185)
mc.decrq(key, 8, 0, 0, nosend=True)
res = mc.decr (key, 0)
iequal(res[0]['val'], 177)
print("""#-------------------------------# silent flush #------------------------------#""")
stat1 = mc.stat()
set("x", 5, 19, "somevaluex")
set("y", 5, 19, "somevaluey")
mc.flushq(nosend=True)
empty("x")
empty("y")
stat2 = mc.stat()
iequal(int(stat1['cmd_flush']) + 1, int(stat2['cmd_flush']))
print("""#----------------------------# diagnostics append #---------------------------#""")
key, value, suffix = "appendkey", "some value", " more"
set(key, 8, 19, value)
mc.append(key, suffix)
check(key, 19, value + suffix)
print("""#---------------------------# diagnostics prepend #---------------------------#""")
key, value, prefix = "prependkey", "some value", "more "
set(key, 8, 19, value)
mc.prepend(key, prefix)
check(key, 19, prefix + value)
print("""#------------------------------# silent append #------------------------------#""")
key, value, suffix = "appendqkey", "some value", " more"
set(key, 8, 19, value)
mc.appendq(key, suffix, nosend=True)
check_empty_response(mc)
check(key, 19, value + suffix)
print("""#------------------------------# silent prepend #-----------------------------#""")
key, value, prefix = "prependqkey", "some value", "more "
set(key, 8, 19, value)
mc.prependq(key, prefix, nosend=True)
check_empty_response(mc)
check(key, 19, prefix + value)
sys.path = saved_path
|
|
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import six
import imath
import weakref
import IECore
import IECoreScene
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
_parameterCategoriesAndDefaults = []
def __populateMetadata():
global _parameterCategoriesAndDefaults
plugMetadata = {
"tweaks" : [
"description",
"""
Add a camera tweak.
Arbitrary numbers of user defined tweaks may be
added as children of this plug via the user
interface, or via the CameraTweaks API in Python.
""",
"plugValueWidget:type", "GafferUI.LayoutPlugValueWidget",
"layout:customWidget:footer:widgetType", "GafferSceneUI.CameraTweaksUI._TweaksFooter",
"layout:customWidget:footer:index", -1,
],
"tweaks.*" : [
"tweakPlugValueWidget:allowRemove", True,
],
}
# Create a temporary camera object just to read the default parameter values off of it,
# and access the metadata
tempCam = GafferScene.Camera()
parameterCategories = [ ("Camera Parameters", i ) for i in ["projection","fieldOfView","apertureAspectRatio",
"aperture","focalLength","apertureOffset","fStop","focalLengthWorldScale","focusDistance",
"clippingPlanes" ] ] + [
("Render Overrides", i ) for i in [ "filmFit", "shutter", "resolution", "pixelAspectRatio",
"resolutionMultiplier", "overscan", "overscanLeft", "overscanRight", "overscanTop",
"overscanBottom", "cropWindow", "depthOfField" ] ]
for category, plugName in parameterCategories:
if category == "Render Overrides":
cameraPlug = tempCam["renderSettingOverrides"][plugName]["value"]
else:
cameraPlug = tempCam[plugName]
data = Gaffer.PlugAlgo.extractDataFromPlug( cameraPlug )
_parameterCategoriesAndDefaults.append( ( category, plugName, data ) )
plugMetadata["tweaks.tweak_%s*.name" % plugName] = [ "readOnly", True ]
valueMetadata = []
for metaName in Gaffer.Metadata.registeredValues( cameraPlug ):
metaValue = Gaffer.Metadata.value( cameraPlug, metaName )
            if metaName != "layout:section" and metaValue is not None:
valueMetadata.append( metaName )
valueMetadata.append( metaValue )
# The Camera node only offers a choice between "perspective" and "orthographic", since
# they are the two that make sense in the UI. But if you're putting down a special
# tweak node, you might want to use a non-standard camera supported by your specific
# renderer backend ( eg. spherical_camera in Arnold )
if plugName == "projection":
valueMetadata.append( "presetsPlugValueWidget:allowCustom" )
valueMetadata.append( True )
plugMetadata["tweaks.tweak_%s*.value" % plugName] = valueMetadata
Gaffer.Metadata.registerNode(
GafferScene.CameraTweaks,
"description",
"""
Applies modifications, also known as "tweaks," to camera
parameters or render options in the scene. Supports any number
of tweaks, and custom camera parameters. Tweaks to camera
parameters apply to every camera specified by the filter.
    Tweaks to render options likewise apply to every camera
    specified by the filter.
Can add new camera parameters or render options.
Any existing parameters/options can be replaced or removed.
Numeric parameters/options can also be added to, subtracted
from, or multiplied.
Tweaks are applied in order, so if there is more than one tweak
to the same parameter/option, the first tweak will be applied
first, then the second, etc.
""",
plugs = plugMetadata
)
__populateMetadata()
class _TweaksFooter( GafferUI.PlugValueWidget ) :
def __init__( self, plug ) :
row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal )
GafferUI.PlugValueWidget.__init__( self, row, plug )
with row :
GafferUI.Spacer( imath.V2i( GafferUI.PlugWidget.labelWidth(), 1 ) )
GafferUI.MenuButton(
image = "plus.png",
hasFrame = False,
menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ) )
)
GafferUI.Spacer( imath.V2i( 1 ), imath.V2i( 999999, 1 ), parenting = { "expand" : True } )
def _updateFromPlug( self ) :
self.setEnabled( self._editable() )
def __menuDefinition( self ) :
result = IECore.MenuDefinition()
# Create a temporary camera object just to read the default parameter values off of it
tempCam = GafferScene.Camera()
for category, name, defaultData in _parameterCategoriesAndDefaults:
result.append(
"/" + category + "/" + name,
{
"command" : functools.partial( Gaffer.WeakMethod( self.__addTweak ), name, defaultData )
}
)
# TODO - would be nice to share these default options with other users of TweakPlug
for item in [
Gaffer.BoolPlug,
Gaffer.FloatPlug,
Gaffer.IntPlug,
"NumericDivider",
Gaffer.StringPlug,
"StringDivider",
Gaffer.V2iPlug,
Gaffer.V3iPlug,
Gaffer.V2fPlug,
Gaffer.V3fPlug,
"VectorDivider",
Gaffer.Color3fPlug,
Gaffer.Color4fPlug
] :
if isinstance( item, six.string_types ) :
result.append( "/Custom Parameter/" + item, { "divider" : True } )
else :
result.append(
"/Custom Parameter/" + item.__name__.replace( "Plug", "" ),
{
"command" : functools.partial( Gaffer.WeakMethod( self.__addTweak ), "", item ),
}
)
return result
def __addTweak( self, name, plugTypeOrValue ) :
if isinstance( plugTypeOrValue, IECore.Data ) :
plug = GafferScene.TweakPlug( name, plugTypeOrValue )
else :
plug = GafferScene.TweakPlug( name, plugTypeOrValue() )
plug.setName( name or "tweak1" )
with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
self.getPlug().addChild( plug )
|
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defination of Role Makers."""
import os
import time
import numpy as np
import warnings
from multiprocessing import Process, Manager
import paddle
import paddle.fluid as fluid
from paddle.distributed.fleet.base.private_helper_function import wait_server_ready
__all__ = []
class Role:
WORKER = 1
SERVER = 2
HETER_WORKER = 3
ALL = 4
class Gloo(object):
"""
Gloo is a universal class for barrier and collective communication
"""
class RENDEZVOUS:
HDFS = 1
FILE = 2
HTTP = 3
def __init__(self):
self._worker_comm = None
self._server_comm = None
self._nodes_comm = None
self._comm_world = ["worker", "server", "all"]
self._err_init = "gloo is not initialized, will not communicator with other nodes"
self._err_type = "gloo initialized error, please check arguments"
self._err_world = "argument error, comm_world must in {}".format(
self._comm_world)
self._is_initialized = False
self._init_timeout_seconds = 3600
self._run_timeout_seconds = 9999999
self._rendezvous = None
self._role = None
self._iface = None
self._role_id = -1
self._worker_num = -1
self._server_num = -1
self._need_init_all = False
def init(self,
rendezvous,
role,
role_id,
worker_num,
server_num,
need_init_all=False,
kwargs=None):
self._rendezvous = rendezvous
self._role = role
self._role_id = role_id
self._worker_num = worker_num
self._server_num = server_num
self._need_init_all = need_init_all
self._iface = ""
        kwargs = kwargs or {}
        self._prefix = kwargs.get("store.prefix", "")
http_server = None
if self._rendezvous == Gloo.RENDEZVOUS.HDFS:
dfs_name = kwargs.get("dfs.name", "")
dfs_ugi = kwargs.get("dfs.ugi", "")
dfs_path = kwargs.get("dfs.path", "")
if not dfs_name or not dfs_ugi or not dfs_path:
raise ValueError(self._err_type)
self._init_dfs(dfs_name, dfs_ugi, dfs_path, self._prefix)
elif self._rendezvous == Gloo.RENDEZVOUS.FILE:
fs_path = kwargs.get("dfs.path", "")
if not fs_path:
raise ValueError(self._err_type)
self._init_fs(fs_path, self._prefix)
elif self._rendezvous == Gloo.RENDEZVOUS.HTTP:
ip = kwargs.get("http.host", "")
port = kwargs.get("http.port", "")
start_http_server = kwargs.get("start_http_server", False)
http_server_d = kwargs.get("http_server_d")
if not ip or not port:
raise ValueError(self._err_type)
http_server = self._init_http(ip, port, self._prefix,
start_http_server, http_server_d)
else:
raise ValueError(self._err_type)
self._is_initialized = True
self._http_server = http_server
def _init_fs(self, fs_path, prefix):
def init(rank, nodes, role):
gloo = fluid.core.Gloo()
gloo.set_rank(rank)
gloo.set_size(nodes)
gloo.set_prefix(prefix)
gloo.set_iface(self._iface)
gloo.set_timeout_seconds(self._init_timeout_seconds,
self._run_timeout_seconds)
gloo.set_hdfs_store(os.path.join(fs_path, role), "", "")
gloo.init()
return gloo
if self._role == Role.WORKER:
rank, nodes = self._get_rank_nodes(Role.WORKER)
gloo = init(rank, nodes, "WORKER")
self._worker_comm = gloo
else:
rank, nodes = self._get_rank_nodes(Role.SERVER)
gloo = init(rank, nodes, "SERVER")
self._server_comm = gloo
if self._need_init_all:
rank, nodes = self._get_rank_nodes(Role.ALL)
gloo = init(rank, nodes, "ALL")
self._nodes_comm = gloo
def _init_dfs(self, dfs_name, dfs_ugi, dfs_path, prefix):
def init(rank, nodes, role):
gloo = fluid.core.Gloo()
gloo.set_rank(rank)
gloo.set_size(nodes)
gloo.set_prefix(prefix)
gloo.set_iface(self._iface)
gloo.set_timeout_seconds(self._init_timeout_seconds,
self._run_timeout_seconds)
gloo.set_hdfs_store(os.path.join(dfs_path, role), dfs_name, dfs_ugi)
gloo.init()
return gloo
if self._role == Role.WORKER:
rank, nodes = self._get_rank_nodes(Role.WORKER)
gloo = init(rank, nodes, "WORKER")
self._worker_comm = gloo
else:
rank, nodes = self._get_rank_nodes(Role.SERVER)
gloo = init(rank, nodes, "SERVER")
self._server_comm = gloo
if self._need_init_all:
rank, nodes = self._get_rank_nodes(Role.ALL)
gloo = init(rank, nodes, "ALL")
self._nodes_comm = gloo
def _init_http(self, ip, port, prefix, start_http_server, http_server_d):
def __start_kv_server(http_server_d, size_d):
print("start http_server: {}, {}".format(port, size_d))
from paddle.distributed.fleet.utils.http_server import KVServer
http_server = KVServer(port, size_d)
http_server.start()
wait_seconds = 5
while http_server_d.get("running",
False) or not http_server.should_stop():
time.sleep(wait_seconds)
http_server.stop()
def init_kv_server(http_server_d):
worker_key = prefix + '_' + 'worker'
size_d = {worker_key: self._worker_num, }
print("worker_key:{}, size: {}".format(worker_key, size_d))
http_server_d["running"] = True
# child process for http server
_http_server = Process(
target=__start_kv_server, args=(http_server_d, size_d))
_http_server.daemon = True
            # start the http server in a child process
_http_server.start()
return _http_server
def init(rank, nodes, role):
gloo = fluid.core.Gloo()
gloo.set_rank(rank)
gloo.set_size(nodes)
gloo.set_prefix(prefix)
gloo.set_iface(self._iface)
gloo.set_timeout_seconds(self._init_timeout_seconds,
self._run_timeout_seconds)
gloo.set_http_store(ip, port, 'worker')
ep = ":".join([ip, str(port)])
wait_server_ready([ep])
gloo.init()
return gloo
port = int(port)
if start_http_server:
print("to start http_server")
http_server = init_kv_server(http_server_d)
if self._role == Role.WORKER:
rank, nodes = self._get_rank_nodes(Role.WORKER)
gloo = init(rank, nodes, "WORKER")
self._worker_comm = gloo
# TODO (sandyhouse): initialize gloo for server and all
# the closing of kv server may cause gloo init failure
# since it depend on the full mesh connection
# e.g. 0 connected with 1,2,3 while 2-3 not connected yet
# TODO(kuizhiqing)
if start_http_server:
http_server_d["running"] = False
http_server.join()
def _get_rank_nodes(self, role):
nodes = 0
rank = -1
if role == Role.WORKER:
nodes = self._worker_num
rank = self._role_id
elif role == Role.SERVER:
nodes = self._server_num
rank = self._role_id
elif role == Role.ALL:
nodes = self._worker_num + self._server_num
if self._role == Role.WORKER:
rank = self._role_id
else:
rank = self._worker_num + self._role_id
else:
            raise ValueError(self._err_type)
return rank, nodes
def __get_default_iface(self):
"""
get default physical interface
"""
default1 = self.__get_default_iface_from_gateway()
default2 = self.__get_default_iface_from_interfaces()
return default2 if default1 == "lo" else default1
def __get_default_iface_from_gateway(self):
"""
get default physical interface
"""
res = os.popen("route -A inet").read().strip().split("\n")
gateway_idx = None
iface_idx = None
for item in res:
item = item.split()
if "Gateway" in item and "Iface" in item:
gateway_idx = item.index("Gateway")
iface_idx = item.index("Iface")
elif gateway_idx != None and iface_idx != None:
gateway = None
if len(item) > gateway_idx:
gateway = item[gateway_idx]
if gateway and gateway != '*' and gateway != "0.0.0.0" and len(
item) > iface_idx:
return item[iface_idx]
return "lo"
def __get_default_iface_from_interfaces(self):
"""
get default physical interface
"""
res = os.popen("ip -f inet addr | awk NR%3==1").read().strip().split(
"\n")
for item in res:
if "BROADCAST" in item:
return item.split(":")[1].strip()
return "lo"
def barrier(self, comm_world):
"""
dummy barrier, do nothing
"""
if not self._is_initialized:
warnings.warn(self._err_init)
return
if comm_world not in self._comm_world:
raise ValueError(self._err_world)
if comm_world == "worker":
self._worker_comm.barrier()
elif comm_world == "server":
self._server_comm.barrier()
else:
self._nodes_comm.barrier()
def all_reduce(self, input, mode="sum", comm_world="worker"):
if not self._is_initialized:
warnings.warn(self._err_init)
return input
if comm_world not in self._comm_world:
raise ValueError(self._err_world)
input = np.array(input)
input_shape = input.shape
input_list = input.reshape(-1).tolist()
self.barrier(comm_world)
if comm_world == "worker":
ans = self._worker_comm.all_reduce(input_list, mode)
elif comm_world == "server":
ans = self._server_comm.all_reduce(input_list, mode)
else:
ans = self._nodes_comm.all_reduce(input_list, mode)
output = np.array(ans).reshape(input_shape)
return output
def all_gather(self, input, comm_world="worker"):
"""
dummy all gather, do nothing
Args:
obj(any): obj to do all gather
"""
if not self._is_initialized:
warnings.warn(self._err_init)
return input
if comm_world not in self._comm_world:
raise ValueError(self._err_world)
if comm_world == "worker":
output = self._worker_comm.all_gather(input)
elif comm_world == "server":
output = self._server_comm.all_gather(input)
else:
output = self._nodes_comm.all_gather(input)
return output
class RoleMakerBase(object):
"""
RoleMakerBase is a base class for assigning a role to current process
in distributed training.
A paddle developer can implement RoleMakerBase to design a role maker
for worker or pserver assignment.
"""
def __init__(self):
self._worker_endpoints = []
self._server_endpoints = []
self._role_is_generated = False
self._role = None
self._current_id = -1
def _is_worker(self):
"""
return is_worker() of current process
"""
raise NotImplementedError("Please implement this method in child class")
def _is_server(self):
"""
return is_server() of current process
"""
raise NotImplementedError("Please implement this method in child class")
def _is_first_worker(self):
"""
Check whether the node is the first instance of worker.
Returns:
bool: True if this is the first node of worker,
False if not.
"""
raise NotImplementedError("Please implement this method in child class")
def _worker_num(self):
"""
Get current total worker number.
Returns:
int: worker number
"""
raise NotImplementedError("Please implement this method in child class")
def _server_num(self):
"""
Get current total server number.
Returns:
int: server number
"""
raise NotImplementedError("Please implement this method in child class")
def _worker_index(self):
"""
Get current worker id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def _server_index(self):
"""
Get current server id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def _role_id(self):
"""
Get current id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def _node_num(self):
"""
Get the training node number
Returns:
int: node num
"""
raise NotImplementedError("Please implement this method in child class")
def _get_trainer_endpoints(self):
"""
return trainer endpoints
"""
return self._worker_endpoints
def _get_pserver_endpoints(self):
"""
return pserver endpoints
"""
return self._server_endpoints
def to_string(self):
return "role: {}, current_id: {}, worker_endpoints: {}, server_endpoints: {}".format(
self._role, self._current_id, self._worker_endpoints,
self._server_endpoints)
def _all_gather(self, input, comm_world="worker"):
print("warning: RoleMakerBase does not have all gather worker.")
return None
def _all_reduce(self, input, mode="sum", comm_world="worker"):
"""
Args:
input(list/numpy.array): array of one dim
output(list/numpy.array): array of one dim
mode(str): "sum" or "min" or "max"
"""
print("warning: RoleMakerBase does not have all reduce worker.")
return None
def _barrier(self, comm_world):
"""
barrier between trainers if current role is TRAINER
"""
print("warning: RoleMakerBase does not have barrier worker.")
#def _is_heter_worker(self):
# """
# Return is_heter_worker() of current process
# """
# raise NotImplementedError("Please implement this method in child class")
#def _heter_worker_num(self):
# """
# Get current total heter-worker number.
#
# Returns:
# int: heter_worker number
# """
# raise NotImplementedError("Please implement this method in child class")
#def _get_heter_worker_endpoints(self):
# """
# Returns:
# string: all heter_trainers'endpoints
# """
# raise NotImplementedError("Please implement this method in child class")
#def _get_heter_worker_endpoint(self):
# """
# Returns:
# int: corresponding heter_trainer's endpoint
# """
# raise NotImplementedError("Please implement this method in child class")
class PaddleCloudRoleMaker(RoleMakerBase):
def __init__(self, is_collective=False, **kwargs):
super(PaddleCloudRoleMaker, self).__init__()
self._is_collective = is_collective
self._non_distributed = False
self._kwargs = kwargs
self._role_is_generated = False
# for heterps
self._stage_id = 1
self._stage_num = 1
self._next_heter_trainer_endpoints = []
self._previous_heter_trainer_endpoints = []
self._heter_trainer_endpoints = []
self._heter_trainer_device = "cpu"
self._heter_trainer_device_type = "cpu"
self._is_heter_parameter_server_mode = False
self._stage_trainers = []
self._server_endpoints = []
self._worker_endpoints = []
self._gloo = Gloo() # gloo instance
def _barrier(self, comm_world):
self._gloo.barrier(comm_world)
def _all_gather(self, input, comm_world="worker"):
return self._gloo.all_gather(input, comm_world)
def _all_reduce(self, input, mode="sum", comm_world="worker"):
return self._gloo.all_reduce(input, mode, comm_world)
def _heter_device(self):
"""
return the heter device that current heter worker is using
"""
if not self._role_is_generated:
self._generate_role()
return self._heter_trainer_device
def _heter_device_type(self):
"""
return the heter device type that current heter worker is using
"""
if not self._role_is_generated:
self._generate_role()
return self._heter_trainer_device_type
def _get_stage_id(self):
"""
return stage id of current heter worker
"""
if not self._role_is_generated:
self._generate_role()
return self._stage_id
def _get_stage_trainers(self):
"""
return trainer num of all stages
"""
if not self._role_is_generated:
self._generate_role()
return self._stage_trainers
def _get_num_stage(self):
"""
return stage num
"""
if not self._role_is_generated:
self._generate_role()
return self._stage_num
def _is_worker(self):
"""
whether current process is worker
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.WORKER
def _is_server(self):
"""
whether current process is server
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.SERVER
def _is_first_worker(self):
"""
whether current process is worker of rank 0
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.WORKER and self._current_id == 0
def _worker_index(self):
"""
get index of current worker
"""
if not self._role_is_generated:
self._generate_role()
return self._current_id
def _server_index(self):
"""
get index of current server
"""
if not self._role_is_generated:
self._generate_role()
return self._current_id
def _role_id(self):
"""
get index of current node
"""
if not self._role_is_generated:
self._generate_role()
return self._current_id
def _worker_num(self):
"""
        return the current number of workers
"""
if not self._role_is_generated:
self._generate_role()
return self._trainers_num
def _server_num(self):
"""
        return the current number of servers
"""
if not self._role_is_generated:
self._generate_role()
return len(self._get_pserver_endpoints(
)) if self._get_pserver_endpoints() is not None else 0
def _node_num(self):
"""
return the training node number
"""
if not self._role_is_generated:
self._generate_role()
return self._nodes_num
def _get_node_num(self):
"""
return the training node number
"""
if not self._role_is_generated:
self._generate_role()
return self._nodes_num
def _get_local_rank(self):
if not self._role_is_generated:
self._generate_role()
return self._local_rank
def _get_local_device_ids(self):
if not self._role_is_generated:
self._generate_role()
return self._local_device_ids
def _get_world_device_ids(self):
if not self._role_is_generated:
self._generate_role()
return self._world_device_ids
def _get_trainer_endpoints(self):
"""
get endpoint of all trainers
"""
if not self._role_is_generated:
self._generate_role()
return self._worker_endpoints
def _get_trainer_endpoint(self):
if not self._role_is_generated:
self._generate_role()
assert self._role == Role.WORKER, "get_trainer_endpoint should be called by trainer"
return self._cur_endpoint
def _get_heter_worker_endpoints(self):
"""
Returns:
string: all heter_trainers'endpoints
"""
if not self._role_is_generated:
self._generate_role()
assert self._heter_trainer_endpoints != [], "Heter Worker Endpoints Not initialized"
return self._heter_trainer_endpoints
def _get_heter_worker_endpoint(self):
"""
Returns:
int: corresponding heter_trainer's endpoint
"""
if not self._role_is_generated:
self._generate_role()
assert self._role == Role.HETER_WORKER, "_get_heter_worker_endpoint should be invoked by heter worker"
return self._cur_endpoint
def _get_pserver_endpoints(self):
"""
get endpoint of all pservers
"""
if not self._role_is_generated:
self._generate_role()
return self._server_endpoints
def _get_previous_trainers(self):
"""
invoked by heter worker
"""
if not self._role_is_generated:
self._generate_role()
assert self._role in (
Role.WORKER, Role.HETER_WORKER
), "_get_previous_trainers should be invoked by trainer or heter worker"
return self._previous_heter_trainer_endpoints
def _get_next_trainers(self):
"""
invoked by heter worker
"""
if not self._role_is_generated:
self._generate_role()
assert self._role in (
Role.WORKER, Role.HETER_WORKER
), "_get_next_trainers should be invoked by trainer or heter worker"
return self._next_heter_trainer_endpoints
def _is_non_distributed(self):
"""
        Return True if the environment variables required by fleetrun are not
        found (i.e. the code was launched directly with python)
"""
if not self._role_is_generated:
self._generate_role()
return self._non_distributed
def _heter_worker_num(self):
"""
get heter worker nums
"""
if not self._role_is_generated:
self._generate_role()
return self._heter_trainers_num
def _is_heter_worker(self):
"""
whether current process is heter worker
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.HETER_WORKER
def _ps_env(self):
# Environment variable PADDLE_PSERVERS_IP_PORT_LIST must be set
# format: string(ip:port,ip:port), eg. 127.0.0.1:6001,127.0.0.1:6002
self._server_endpoints = os.getenv("PADDLE_PSERVERS_IP_PORT_LIST", None)
if self._server_endpoints is None:
# back to non_distributed execution.
self._server_endpoints = ""
self._trainers_num = 1
self._role = Role.WORKER
self._current_id = 0
self._nodes_num = 1
self._heter_trainers_num = 0
self._heter_trainer_endpoints = None
self._non_distributed = True
return
self._server_endpoints = self._server_endpoints.split(",")
self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS", None)
if self._worker_endpoints != None:
self._worker_endpoints = self._worker_endpoints.split(",")
else:
self._worker_endpoints = []
trainers_num = os.getenv("PADDLE_TRAINERS_NUM", None)
if trainers_num == None:
raise ValueError(
"Can not find PADDLE_TRAINERS_NUM, please check your environment."
)
trainers_num = int(trainers_num)
training_role = os.getenv("TRAINING_ROLE", None)
if training_role == None:
raise ValueError(
"Can not find TRAINING_ROLE, please check your environment.")
if training_role not in ["TRAINER", "PSERVER", "HETER_TRAINER"]:
raise ValueError(
"TRAINING_ROLE must be PSERVER or TRAINER or HETER_TRAINER, but get {}, please check your environment.".
format(training_role))
# For Heter Parameter Server env setting
next_heter_trainer_eplist = os.getenv(
"PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST", "")
previous_heter_trainer_eplist = os.getenv(
"PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST", "")
all_heter_trainer_eplist = os.getenv(
"PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST", "")
if all_heter_trainer_eplist != "":
self._heter_trainer_endpoints = all_heter_trainer_eplist.split(",")
self._is_heter_parameter_server_mode = True
self._heter_trainers_num = len(self._heter_trainer_endpoints)
if previous_heter_trainer_eplist == "":
assert training_role in (
"TRAINER", "PSERVER"
), "training_role should be trainer or pserver"
else:
try:
self._previous_heter_trainer_endpoints = previous_heter_trainer_eplist.split(
",")
except:
raise ValueError(
"Can not Find PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST in env or its format doesn't match the requirement: 'IP:PORT,IP:PORT' ."
)
if next_heter_trainer_eplist == "":
assert training_role in (
"HETER_TRAINER", "PSERVER"
), "training_role should be heter trainer or pserver"
else:
try:
self._next_heter_trainer_endpoints = next_heter_trainer_eplist.split(
",")
except:
raise ValueError(
"Can not Find PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST in env or its format doesn't match the requirement: 'IP:PORT,IP:PORT' ."
)
#self._is_heter_parameter_server_mode = True
#heter_trainers_num = len(all_heter_trainer_eplist.split(","))
#self._heter_trainer_endpoints = all_heter_trainer_eplist.split(",")
else:
self._is_heter_parameter_server_mode = False
self._heter_trainers_num = 0
#if previous_heter_trainer_eplist == "":
# self._is_heter_parameter_server_mode = False
# heter_trainers_num = 0
#else: ## for the last heter worker
# try:
# previous_heter_trainer_eplist = os.environ[
# "PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST"].split(",")
# self._previous_heter_trainer_endpoints = previous_heter_trainer_eplist
# except:
# raise ValueError(
# "Can not Find PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST in env or its format doesn't match the requirement: 'IP:PORT,IP:PORT' ."
# )
# self._is_heter_parameter_server_mode = True
# heter_trainers_num = len(all_heter_trainer_eplist.split(","))
# self._heter_trainer_endpoints = all_heter_trainer_eplist.split(",")
if training_role == "TRAINER":
role = Role.WORKER
current_id = os.getenv("PADDLE_TRAINER_ID", None)
if current_id == None:
raise ValueError(
"Can not find PADDLE_TRAINER_ID, please check your environment."
)
current_id = int(current_id)
if self._is_heter_parameter_server_mode:
self._stage_id = os.getenv("STAGE_ID", None)
if self._stage_id == None:
raise ValueError(
"Can not find STAGE_ID, please check your environment.")
self._stage_id = int(self._stage_id)
self._stage_num = os.getenv("STAGE_NUM", None)
if self._stage_num == None:
raise ValueError(
"Can not find STAGE_NUM, please check your environment.")
self._stage_num = int(self._stage_num)
self._stage_trainers = os.getenv("PADDLE_STAGE_TRAINERS_NUM",
None)
if self._stage_trainers == None:
raise ValueError(
"Can not find PADDLE_STAGE_TRAINERS_NUM, please check your environment."
)
self._stage_trainers = eval(self._stage_trainers)
cur_port = os.getenv("PADDLE_PORT", None)
if cur_port == None:
raise ValueError(
"Can not find PADDLE_PORT, please check your environment.")
cur_ip = os.getenv("POD_IP", None)
if cur_ip == None:
raise ValueError(
"Can not find POD_IP, please check your environment.")
curr_endpoint = ":".join([cur_ip, cur_port])
self._cur_endpoint = curr_endpoint
elif training_role == "PSERVER":
role = Role.SERVER
cur_port = os.getenv("PADDLE_PORT", None)
if cur_port == None:
raise ValueError(
"Can not find PADDLE_PORT, please check your environment.")
cur_ip = os.getenv("POD_IP", None)
if cur_ip == None:
raise ValueError(
"Can not find POD_IP, please check your environment.")
curr_endpoint = ":".join([cur_ip, cur_port])
self._cur_endpoint = curr_endpoint
current_id = self._server_endpoints.index(self._cur_endpoint)
elif training_role == "HETER_TRAINER":
role = Role.HETER_WORKER
self._stage_id = os.getenv("STAGE_ID", None)
if self._stage_id == None:
raise ValueError(
"Can not find STAGE_ID, please check your environment.")
self._stage_id = int(self._stage_id)
self._stage_num = os.getenv("STAGE_NUM", None)
if self._stage_num == None:
raise ValueError(
"Can not find STAGE_NUM, please check your environment.")
self._stage_num = int(self._stage_num)
self._stage_trainers = os.getenv("PADDLE_STAGE_TRAINERS_NUM", None)
if self._stage_trainers == None:
raise ValueError(
"Can not find PADDLE_STAGE_TRAINERS_NUM, please check your environment."
)
self._stage_trainers = eval(self._stage_trainers)
self._heter_trainer_device_type = os.getenv("HETER_DEVICE_TYPE",
None)
if self._heter_trainer_device_type == None:
raise ValueError(
"Can not find HETER_DEVICE_TYPE, please check your environment."
)
assert self._heter_trainer_device_type in (
"cpu", "gpu", "xpu"
), "HETER_DEVICE_TYPE should be cpu,gpu or xpu"
if self._heter_trainer_device_type == "gpu":
heter_device_id = os.getenv("FLAGS_selected_gpus", "0")
self._heter_trainer_device = ":".join(
(self._heter_trainer_device_type, heter_device_id))
            if self._heter_trainer_device_type == "xpu":
heter_device_id = os.getenv("FLAGS_selected_xpus", "0")
self._heter_trainer_device = ":".join(
(self._heter_trainer_device_type, heter_device_id))
cur_port = os.getenv("PADDLE_PORT", None)
if cur_port == None:
raise ValueError(
"Can not find PADDLE_PORT, please check your environment.")
cur_ip = os.getenv("POD_IP", None)
if cur_ip == None:
raise ValueError(
"Can not find POD_IP, please check your environment.")
curr_endpoint = ":".join([cur_ip, cur_port])
self._cur_endpoint = curr_endpoint
current_id = all_heter_trainer_eplist.split(",").index(
curr_endpoint) + trainers_num
self._trainers_num = trainers_num
self._role = role
self._current_id = current_id
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
def _collective_env(self):
self._current_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
self._training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
assert (self._training_role == "TRAINER")
self._role = Role.WORKER
self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS")
self._cur_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
if self._worker_endpoints is None:
# back to non_distributed execution.
self._worker_endpoints = "127.0.0.1:6170"
self._cur_endpoint = self._worker_endpoints
self._non_distributed = True
self._worker_endpoints = self._worker_endpoints.split(",")
self._trainers_num = len(self._worker_endpoints)
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
self._local_rank = os.getenv("PADDLE_RANK_IN_NODE")
self._local_device_ids = os.getenv("PADDLE_LOCAL_DEVICE_IDS")
self._world_device_ids = os.getenv("PADDLE_WORLD_DEVICE_IDS")
def _gloo_init(self):
# PADDLE_WITH_GLOO 1: trainer barrier, 2: all barrier
use_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
if use_gloo not in [1, 2]:
return
# PADDLE_GLOO_RENDEZVOUS 1: HDFS 2: FILE 3: HTTP
rendezvous_type = int(os.getenv("PADDLE_GLOO_RENDEZVOUS", "0"))
prefix = os.getenv("SYS_JOB_ID", "")
if rendezvous_type not in [
Gloo.RENDEZVOUS.HDFS, Gloo.RENDEZVOUS.HTTP, Gloo.RENDEZVOUS.FILE
]:
raise ValueError(self._gloo._err_type)
need_init_all = True if use_gloo == 2 else False
if rendezvous_type == Gloo.RENDEZVOUS.HDFS:
dfs_name = os.getenv("PADDLE_GLOO_FS_NAME", "")
dfs_ugi = os.getenv("PADDLE_GLOO_FS_UGI", "")
dfs_path = os.getenv("PADDLE_GLOO_FS_PATH", "")
kwargs = {
"dfs.name": dfs_name,
"dfs.ugi": dfs_ugi,
"dfs.path": dfs_path,
"store.prefix": prefix,
}
elif rendezvous_type == Gloo.RENDEZVOUS.HTTP:
start_http_server = False
manager = Manager()
http_server_d = manager.dict()
http_server_d["running"] = False
if self._is_collective:
ep_rank_0 = self._worker_endpoints[0]
if self._is_first_worker():
start_http_server = True
else:
ep_rank_0 = os.getenv("PADDLE_GLOO_HTTP_ENDPOINT", "")
if self._is_server() and self._server_index() == 0:
start_http_server = True
ip, port = ep_rank_0.split(':')
kwargs = {
"http.host": ip,
"http.port": port,
"store.prefix": prefix,
'start_http_server': start_http_server,
'http_server_d': http_server_d,
}
else:
dfs_path = os.getenv("PADDLE_GLOO_FS_PATH", "")
kwargs = {
"dfs.path": dfs_path,
"store.prefix": prefix,
}
if rendezvous_type == Gloo.RENDEZVOUS.HDFS:
type = "HDFS"
elif rendezvous_type == Gloo.RENDEZVOUS.HTTP:
type = "HTTP"
else:
type = "FILE"
print("Gloo init with {}: need_init_all: {}, args: {}".format(
type, need_init_all, kwargs))
self._gloo.init(
rendezvous=rendezvous_type,
role=self._role,
role_id=self._role_id(),
worker_num=self._worker_num(),
server_num=self._server_num(),
need_init_all=need_init_all,
kwargs=kwargs)
if rendezvous_type == Gloo.RENDEZVOUS.HTTP:
http_server_d['running'] = False
def _generate_role(self):
"""
generate role for role maker
"""
if not self._role_is_generated:
if not self._is_collective:
self._ps_env()
else:
self._collective_env()
self._role_is_generated = True
if not paddle.fluid.framework.in_dygraph_mode():
self._gloo_init()
class UserDefinedRoleMaker(PaddleCloudRoleMaker):
def __init__(self, is_collective=False, init_gloo=False, **kwargs):
super(UserDefinedRoleMaker, self).__init__(
is_collective=is_collective, init_gloo=init_gloo, **kwargs)
self._init_gloo = init_gloo
def _user_defined_ps_env(self):
self._server_endpoints = self._kwargs.get("server_endpoints")
self._worker_endpoints = self._kwargs.get("worker_endpoints", [])
self._trainers_num = self._kwargs.get("worker_num", 0)
if self._trainers_num == 0:
assert (len(self._worker_endpoints) > 0)
self._trainers_num = len(self._worker_endpoints)
self._role = self._kwargs.get("role")
self._current_id = self._kwargs.get("current_id")
if self._role == Role.WORKER and len(
self._worker_endpoints) > self._current_id:
self._cur_endpoint = self._worker_endpoints[self._current_id]
elif self._role == Role.SERVER:
self._cur_endpoint = self._server_endpoints[self._current_id]
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
def _user_defined_collective_env(self):
self._worker_endpoints = self._kwargs.get("worker_endpoints")
self._current_id = self._kwargs.get("current_id")
self._trainers_num = len(self._worker_endpoints)
self._training_role = Role.WORKER
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
def _generate_role(self):
"""
generate role for role maker
"""
if not self._role_is_generated:
if not self._is_collective:
self._user_defined_ps_env()
else:
self._user_defined_collective_env()
self._role_is_generated = True
|
|
"""
Convert sanitized json data to tfrecord data format.
"""
import sys
import collections
import json
import pickle
import numpy as np
import tensorflow as tf
def invert_dict(dictionary):
"""
Invert a dict object.
"""
return {v:k for k, v in dictionary.items()}
def _read_words(filepath):
"""
Return word list in tokens of json file.
"""
words = []
with open(filepath, 'r', encoding='utf-8') as file_p:
for row in file_p:
words.extend(json.loads(row)['tokens'])
counter = collections.Counter(words)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], str(x[0])))
words, counts = list(zip(*count_pairs))
return words, counts
def _read_mention_chars(filepath, remove_below):
"""
    Build a character-to-number mapping from the mentions of a json file.
"""
char_list = []
with open(filepath, 'r', encoding='utf-8') as file_p:
for row in file_p:
json_data = json.loads(row)
tokens = json_data['tokens']
for mention in json_data['mentions']:
for char in ' '.join(tokens[mention['start']:mention['end']]):
char_list.append(char)
counter = collections.Counter(char_list)
chrs, counts = list(zip(*sorted(counter.items(), key=lambda x: (-x[1], x[0]))))
chrs = np.array(chrs)
counts = np.array(counts)
# remove infrequent characters
mask = counts >= remove_below
chrs = chrs[mask]
counts = counts[mask]
# 0th character will be used as padding
num_to_chrs = dict(enumerate(chrs, 1))
    # add unknown character ('unk')
num_to_chrs[len(num_to_chrs) + 1] = 'unk'
# add end of mention character
num_to_chrs[len(num_to_chrs) + 1] = 'eos'
chrs_to_num = invert_dict(num_to_chrs)
return chrs_to_num
def load_filtered_embeddings(filepath, word_list):
"""
Load selected pre-trained word vectors based on word list.
"""
word_dic = {}
word_found = set()
word_set = set(word_list)
with open(filepath, 'r', encoding='utf-8') as file_p:
for line in file_p:
splits = line.split(' ')
word = splits[0]
if word in word_set or word == 'unk':
word_dic[word] = [float(x) for x in splits[1:]]
word_found.add(word)
word_not_found = word_set.difference(word_found)
# enumeration will start from 1
word_to_num = dict(zip(word_dic.keys(), range(1, len(word_dic) + 1)))
    # 0th pre_trained_embedding will remain all zeros
pre_trained_embeddings = np.zeros((len(word_to_num) + 1, len(word_dic['unk'])),
dtype=np.core.numerictypes.float32
)
for word in word_to_num:
pre_trained_embeddings[word_to_num[word]] = word_dic[word]
return word_to_num, pre_trained_embeddings, word_not_found
def generate_labels_to_numbers(dataset, sanitized_directory):
"""
Generate label to number dictionary.
"""
with open(sanitized_directory + dataset + '/sanitized_labels.txt', 'r') as file_p:
label_list = file_p.read().split('\n')
num_to_label = dict(zip(label_list, range(len(label_list))))
return num_to_label
def generate_features_to_numbers(dataset, sanitized_directory):
"""
Generate pos and dep type to number dictionary.
"""
with open(sanitized_directory + dataset + '/sanitized_pos.txt', 'r') as file_p:
pos_list = file_p.read().split('\n')
num_to_pos = dict(zip(pos_list, range(len(pos_list))))
with open(sanitized_directory + dataset + '/sanitized_dep_type.txt', 'r') as file_p:
dep_type_list = file_p.read().split('\n')
num_to_dep_type = dict(zip(dep_type_list, range(len(dep_type_list))))
return num_to_pos, num_to_dep_type
def labels_status(labels):
"""
    Check whether the mention labels are clean or not.
"""
leaf = max(labels, key=lambda x: x.count('/'))
clean = 1
for label in labels:
if label not in leaf:
clean = 0
return clean
#pylint: disable-msg=R0914
def make_tf_record_f1(json_data, mention, mappings):
"""
A tfrecord per mention.
"""
start = mention['start']
end = mention['end']
tokens = json_data['tokens']
poss = json_data['pos']
dep_types = json_data['dep']
uid = bytes('_'.join([json_data['fileid'],
str(json_data['senid']),
str(start),
str(end)
]), 'utf-8')
# lc and rc include mention
left_context = tokens[:end]
entity = tokens[start:end]
right_context = tokens[start:]
left_poss = poss[:end]
right_poss = poss[start:]
left_dts = dep_types[:end]
right_dts = dep_types[start:]
ex = tf.train.SequenceExample()
ex.context.feature["uid"].bytes_list.value.append(uid)
ex.context.feature["lcl"].int64_list.value.append(len(left_context))
ex.context.feature["rcl"].int64_list.value.append(len(right_context))
ex.context.feature["eml"].int64_list.value.append(len(' '.join(entity)) + 1)
ex.context.feature["clean"].int64_list.value.append(labels_status(mention['labels']))
lc_ids = ex.feature_lists.feature_list["lci"]
rc_ids = ex.feature_lists.feature_list["rci"]
em_ids = ex.feature_lists.feature_list["emi"]
l_pos_ids = ex.feature_lists.feature_list["lpi"]
r_pos_ids = ex.feature_lists.feature_list["rpi"]
l_dt_ids = ex.feature_lists.feature_list["ldti"]
r_dt_ids = ex.feature_lists.feature_list["rdti"]
label_list = ex.feature_lists.feature_list["labels"]
for word in left_context:
lc_ids.feature.add().int64_list.value.append(mappings['wtn'].get(word,
mappings['wtn']['unk']))
for word in right_context:
rc_ids.feature.add().int64_list.value.append(mappings['wtn'].get(word,
mappings['wtn']['unk']))
for char in ' '.join(entity):
em_ids.feature.add().int64_list.value.append(mappings['ctn'].get(char,
mappings['ctn']['unk']))
em_ids.feature.add().int64_list.value.append(mappings['ctn']['eos'])
for pos in left_poss:
l_pos_ids.feature.add().int64_list.value.append(mappings['ptn'][pos])
for pos in right_poss:
r_pos_ids.feature.add().int64_list.value.append(mappings['ptn'][pos])
for dep_type in left_dts:
l_dt_ids.feature.add().int64_list.value.append(mappings['dttn'][dep_type['type']])
for dep_type in right_dts:
        # small hack: .get(type, 0) falls back to 0 for unseen dependency types.
        # This needs fixing when doing transfer learning between the Wiki and
        # OntoNotes datasets (e.g. conj:uh is not found in the Wiki dataset);
        # all other experiments are unaffected.
r_dt_ids.feature.add().int64_list.value.append(mappings['dttn'].get(dep_type['type'], 0))
temp_labels = [0] * len(mappings['ltn'])
for label in mention['labels']:
temp_labels[mappings['ltn'][label]] = 1
for label in temp_labels:
label_list.feature.add().int64_list.value.append(label)
return ex
#pylint: disable-msg=R0914
def make_tf_record_f2(json_data, mention, mappings, mention_window, context_window):
"""
A tfrecord per mention.
"""
start = mention['start']
end = mention['end']
tokens = json_data['tokens']
uid = bytes('_'.join([json_data['fileid'],
str(json_data['senid']),
str(start),
str(end)
]), 'utf-8')
    # lc and rc do not include the mention,
# as mentioned in AKBC paper
if context_window:
left_context = tokens[:start][-context_window:]
right_context = tokens[end:][:context_window]
else:
left_context = tokens[:start]
right_context = tokens[end:]
if mention_window:
entity = tokens[start:end][:mention_window]
else:
entity = tokens[start:end]
ex = tf.train.SequenceExample()
ex.context.feature["uid"].bytes_list.value.append(uid)
ex.context.feature["lcl"].int64_list.value.append(len(left_context))
ex.context.feature["rcl"].int64_list.value.append(len(right_context))
ex.context.feature["eml"].int64_list.value.append(len(entity))
# This will only be used in representations experiment.
ex.context.feature["clean"].int64_list.value.append(labels_status(mention['labels']))
lc_ids = ex.feature_lists.feature_list["lci"]
rc_ids = ex.feature_lists.feature_list["rci"]
em_ids = ex.feature_lists.feature_list["emi"]
label_list = ex.feature_lists.feature_list["labels"]
for word in left_context:
lc_ids.feature.add().int64_list.value.append(mappings['wtn'].get(word,
mappings['wtn']['unk']))
for word in right_context:
rc_ids.feature.add().int64_list.value.append(mappings['wtn'].get(word,
mappings['wtn']['unk']))
for word in entity:
em_ids.feature.add().int64_list.value.append(mappings['wtn'].get(word,
mappings['wtn']['unk']))
temp_labels = [0] * len(mappings['ltn'])
for label in mention['labels']:
temp_labels[mappings['ltn'][label]] = 1
for label in temp_labels:
label_list.feature.add().int64_list.value.append(label)
return ex
def data_format_f1(in_filepath, out_filepath, mappings):
"""
Convert json file to tfrecord.
"""
total = 0
with open(in_filepath, 'r') as file_p1, open(out_filepath, 'wb') as file_p2:
writer = tf.python_io.TFRecordWriter(file_p2.name)
for row in file_p1:
json_data = json.loads(row)
for mention in json_data['mentions']:
ex = make_tf_record_f1(json_data, mention, mappings)
writer.write(ex.SerializeToString())
total += 1
writer.close()
return total
def data_format_f2(in_filepath, out_filepath, mappings):
"""
Convert json file to tfrecord.
"""
total = 0
with open(in_filepath, 'r') as file_p1, open(out_filepath, 'wb') as file_p2:
writer = tf.python_io.TFRecordWriter(file_p2.name)
for row in file_p1:
json_data = json.loads(row)
for mention in json_data['mentions']:
# window width as mentioned in AKBC paper
ex = make_tf_record_f2(json_data, mention, mappings, 5, 15)
writer.write(ex.SerializeToString())
total += 1
writer.close()
return total
def data_format_f5(in_filepath, out_filepath, mappings):
"""
Convert json file to tfrecord.
"""
total = 0
with open(in_filepath, 'r') as file_p1, open(out_filepath, 'wb') as file_p2:
writer = tf.python_io.TFRecordWriter(file_p2.name)
for row in file_p1:
json_data = json.loads(row)
for mention in json_data['mentions']:
ex = make_tf_record_f2(json_data, mention, mappings, None, None)
writer.write(ex.SerializeToString())
total += 1
writer.close()
return total
def data_format_abhishek(dataset, sanitized_directory, glove_vector_filepath, output_directory):
"""
Generate data as needed by our model.
"""
print('Reading words.')
words, _ = _read_words(sanitized_directory + dataset + '/sanitized_train.json')
print('Loading word embeddings.')
word_to_num, embedding, _ = load_filtered_embeddings(glove_vector_filepath, words)
print('Embedding shape', embedding.shape)
print('Generating label to number dictionary.')
label_to_num = generate_labels_to_numbers(dataset, sanitized_directory)
print('Generating pos and dep type to number dictionary.')
pos_to_num, dep_type_to_num = generate_features_to_numbers(dataset, sanitized_directory)
print('Generating character to number dictionary.')
chrs_to_num = _read_mention_chars(sanitized_directory + dataset + '/sanitized_train.json', 5)
mappings = {}
mappings['wtn'] = word_to_num
mappings['ctn'] = chrs_to_num
mappings['ltn'] = label_to_num
mappings['ptn'] = pos_to_num
mappings['dttn'] = dep_type_to_num
print('Generating training data.')
train_size = data_format_f1(sanitized_directory + dataset + '/sanitized_train.json',
output_directory + 'f1/' + dataset + '/train.tfrecord',
mappings
)
print('Generating development data.')
dev_size = data_format_f1(sanitized_directory + dataset + '/sanitized_dev.json',
output_directory + 'f1/' + dataset + '/dev.tfrecord',
mappings
)
print('Generating testing data.')
test_size = data_format_f1(sanitized_directory + dataset + '/sanitized_test.json',
output_directory + 'f1/' + dataset + '/test.tfrecord',
mappings
)
pickle.dump({
'num_to_label': invert_dict(label_to_num),
'num_to_word' : invert_dict(word_to_num),
'num_to_chrs' : invert_dict(chrs_to_num),
'num_to_pos' : invert_dict(pos_to_num),
'num_to_dep_type' : invert_dict(dep_type_to_num),
'word_embedding' : embedding,
'train_size' : train_size,
'dev_size' : dev_size,
'test_size' : test_size
}, open(output_directory + 'f1/' + dataset + '/local_variables.pickle', 'wb'))
def data_format_shimaoka(dataset, sanitized_directory, glove_vector_filepath, output_directory):
"""
Generate data as needed by shimaoka model.
"""
print('Reading words.')
words, _ = _read_words(sanitized_directory + dataset + '/sanitized_train.json')
print('Loading word embeddings.')
word_to_num, embedding, _ = load_filtered_embeddings(glove_vector_filepath, words)
print('Embedding shape', embedding.shape)
print('Generating label to number dictionary.')
label_to_num = generate_labels_to_numbers(dataset, sanitized_directory)
mappings = {}
mappings['wtn'] = word_to_num
mappings['ltn'] = label_to_num
print('Generating training data.')
train_size = data_format_f2(sanitized_directory + dataset + '/sanitized_train.json',
output_directory + 'f2/' + dataset + '/train.tfrecord',
mappings
)
print('Generating development data.')
dev_size = data_format_f2(sanitized_directory + dataset + '/sanitized_dev.json',
output_directory + 'f2/' + dataset + '/dev.tfrecord',
mappings
)
print('Generating testing data.')
test_size = data_format_f2(sanitized_directory + dataset + '/sanitized_test.json',
output_directory + 'f2/' + dataset + '/test.tfrecord',
mappings
)
pickle.dump({
'num_to_label': invert_dict(label_to_num),
'num_to_word' : invert_dict(word_to_num),
'word_embedding' : embedding,
'train_size' : train_size,
'dev_size' : dev_size,
'test_size' : test_size
}, open(output_directory + 'f2/' + dataset + '/local_variables.pickle', 'wb'))
#pylint: disable=invalid-name
def data_format_shimaoka_representation(dataset,
sanitized_directory,
glove_vector_filepath,
output_directory):
"""
Generate data as needed by shimaoka model.
"""
print('Reading words.')
words, _ = _read_words(sanitized_directory + dataset + '/sanitized_train.json')
print('Loading word embeddings.')
word_to_num, embedding, _ = load_filtered_embeddings(glove_vector_filepath, words)
print('Embedding shape', embedding.shape)
print('Generating label to number dictionary.')
label_to_num = generate_labels_to_numbers(dataset, sanitized_directory)
mappings = {}
mappings['wtn'] = word_to_num
mappings['ltn'] = label_to_num
print('Generating training data.')
train_size = data_format_f5(sanitized_directory + dataset + '/sanitized_train.json',
output_directory + 'f5/' + dataset + '/train.tfrecord',
mappings
)
print('Generating development data.')
dev_size = data_format_f5(sanitized_directory + dataset + '/sanitized_dev.json',
output_directory + 'f5/' + dataset + '/dev.tfrecord',
mappings
)
print('Generating testing data.')
test_size = data_format_f5(sanitized_directory + dataset + '/sanitized_test.json',
output_directory + 'f5/' + dataset + '/test.tfrecord',
mappings
)
pickle.dump({
'num_to_label': invert_dict(label_to_num),
'num_to_word' : invert_dict(word_to_num),
'word_embedding' : embedding,
'train_size' : train_size,
'dev_size' : dev_size,
'test_size' : test_size
}, open(output_directory + 'f5/' + dataset + '/local_variables.pickle', 'wb'))
def data_format_transfer_learning(dataset, sanitized_directory, output_directory):
"""
Generate data as needed for finetuning.
"""
# Wiki dataset hard coded.
l_vars = pickle.load(open(output_directory + 'f1/Wiki/' + 'local_variables.pickle', 'rb'))
embedding = l_vars['word_embedding']
print('Embedding shape', embedding.shape)
print('Generating label to number dictionary.')
label_to_num = generate_labels_to_numbers(dataset, sanitized_directory)
word_to_num = invert_dict(l_vars['num_to_word'])
chrs_to_num = invert_dict(l_vars['num_to_chrs'])
pos_to_num = invert_dict(l_vars['num_to_pos'])
dep_type_to_num = invert_dict(l_vars['num_to_dep_type'])
mappings = {}
mappings['wtn'] = word_to_num
mappings['ctn'] = chrs_to_num
mappings['ltn'] = label_to_num
mappings['ptn'] = pos_to_num
mappings['dttn'] = dep_type_to_num
print('Generating training data.')
train_size = data_format_f1(sanitized_directory + dataset + '/sanitized_train.json',
output_directory + 'f3/' + dataset + '/train.tfrecord',
mappings
)
print('Generating development data.')
dev_size = data_format_f1(sanitized_directory + dataset + '/sanitized_dev.json',
output_directory + 'f3/' + dataset + '/dev.tfrecord',
mappings
)
print('Generating testing data.')
test_size = data_format_f1(sanitized_directory + dataset + '/sanitized_test.json',
output_directory + 'f3/' + dataset + '/test.tfrecord',
mappings
)
pickle.dump({
'num_to_label': invert_dict(label_to_num),
'num_to_word' : invert_dict(word_to_num),
'num_to_chrs' : invert_dict(chrs_to_num),
'num_to_pos' : invert_dict(pos_to_num),
'num_to_dep_type' : invert_dict(dep_type_to_num),
'word_embedding' : embedding,
'train_size' : train_size,
'dev_size' : dev_size,
'test_size' : test_size
}, open(output_directory + 'f3/' + dataset + '/local_variables.pickle', 'wb'))
if __name__ == '__main__':
if len(sys.argv) != 6:
        print('Usage: dataset sanitized_directory glove_vector_filepath format(f1|f2|f3|f5) output_directory')
        sys.exit(1)
else:
FORMAT = sys.argv[4]
if FORMAT == 'f1':
data_format_abhishek(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[5])
elif FORMAT == 'f2':
data_format_shimaoka(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[5])
elif FORMAT == 'f3':
data_format_transfer_learning(sys.argv[1], sys.argv[2], sys.argv[5])
elif FORMAT == 'f5':
data_format_shimaoka_representation(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[5])
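# Example invocation, as a sketch only: the script and dataset names below are
# hypothetical, and the directory arguments are expected to end with '/'
# because paths are built by plain string concatenation above.
#
#   python prepare_data.py Wiki sanitized/ glove.840B.300d.txt f1 formatted/
#
# The format argument selects f1, f2, f3 or f5; f3 (transfer learning) reuses
# the vocabularies pickled by a previous f1 run on the Wiki dataset, so that
# run must exist first.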
|
|
from __future__ import absolute_import, print_function, division
import numpy as np
from .. import jit, typeof, utils, types, numpy_support, sigutils
from ..typing import npydecl
from ..typing.templates import AbstractTemplate, signature
from . import _internal, ufuncbuilder
from ..dispatcher import Dispatcher
from .. import array_analysis
def make_dufunc_kernel(_dufunc):
from ..targets import npyimpl
class DUFuncKernel(npyimpl._Kernel):
"""
npyimpl._Kernel subclass responsible for lowering a DUFunc kernel
(element-wise function) inside a broadcast loop (which is
generated by npyimpl.numpy_ufunc_kernel()).
"""
dufunc = _dufunc
def __init__(self, context, builder, outer_sig):
super(DUFuncKernel, self).__init__(context, builder, outer_sig)
self.inner_sig, self.cres = self.dufunc.find_ewise_function(
outer_sig.args)
def generate(self, *args):
isig = self.inner_sig
osig = self.outer_sig
cast_args = [self.cast(val, inty, outty)
for val, inty, outty in zip(args, osig.args, isig.args)]
if self.cres.objectmode:
func_type = self.context.call_conv.get_function_type(
types.pyobject, [types.pyobject] * len(isig.args))
else:
func_type = self.context.call_conv.get_function_type(
isig.return_type, isig.args)
module = self.builder.block.function.module
entry_point = module.get_or_insert_function(
func_type, name=self.cres.fndesc.llvm_func_name)
entry_point.attributes.add("alwaysinline")
_, res = self.context.call_conv.call_function(
self.builder, entry_point, isig.return_type, isig.args,
cast_args)
return self.cast(res, isig.return_type, osig.return_type)
DUFuncKernel.__name__ += _dufunc.ufunc.__name__
return DUFuncKernel
class DUFuncLowerer(object):
'''Callable class responsible for lowering calls to a specific DUFunc.
'''
def __init__(self, dufunc):
self.kernel = make_dufunc_kernel(dufunc)
self.libs = []
def __call__(self, context, builder, sig, args):
from ..targets import npyimpl
explicit_output = len(args) > self.kernel.dufunc.ufunc.nin
return npyimpl.numpy_ufunc_kernel(context, builder, sig, args,
self.kernel,
explicit_output=explicit_output)
class DUFunc(_internal._DUFunc):
"""
Dynamic universal function (DUFunc) intended to act like a normal
Numpy ufunc, but capable of call-time (just-in-time) compilation
of fast loops specialized to inputs.
"""
# NOTE: __base_kwargs must be kept in synch with the kwlist in
# _internal.c:dufunc_init()
__base_kwargs = set(('identity', '_keepalive', 'nin', 'nout'))
def __init__(self, py_func, identity=None, cache=False, targetoptions={}):
if isinstance(py_func, Dispatcher):
py_func = py_func.py_func
self.targetoptions = targetoptions.copy()
kws = {}
kws['identity'] = ufuncbuilder.parse_identity(identity)
dispatcher = jit(target='npyufunc', cache=cache)(py_func)
super(DUFunc, self).__init__(dispatcher, **kws)
        # Install the typing and lowering machinery for this DUFunc.
self._install_type()
self._lower_me = DUFuncLowerer(self)
self._install_cg()
self.__name__ = py_func.__name__
self.__doc__ = py_func.__doc__
def build_ufunc(self):
"""
For compatibility with the various *UFuncBuilder classes.
"""
return self
@property
def nin(self):
return self.ufunc.nin
@property
def nout(self):
return self.ufunc.nout
@property
def nargs(self):
return self.ufunc.nargs
@property
def ntypes(self):
return self.ufunc.ntypes
@property
def types(self):
return self.ufunc.types
@property
def identity(self):
return self.ufunc.identity
def disable_compile(self):
"""
Disable the compilation of new signatures at call time.
"""
# If disabling compilation then there must be at least one signature
assert len(self._dispatcher.overloads) > 0
self._frozen = True
def add(self, sig):
"""
Compile the DUFunc for the given signature.
"""
args, return_type = sigutils.normalize_signature(sig)
return self._compile_for_argtys(args, return_type)
def _compile_for_args(self, *args, **kws):
nin = self.ufunc.nin
if kws:
if 'out' in kws:
out = kws.pop('out')
args += (out,)
if kws:
raise TypeError("unexpected keyword arguments to ufunc: %s"
% ", ".join(repr(k) for k in sorted(kws)))
args_len = len(args)
assert (args_len == nin) or (args_len == nin + self.ufunc.nout)
assert not kws
argtys = []
# To avoid a mismatch in how Numba types values as opposed to
# Numpy, we need to first check for scalars. For example, on
# 64-bit systems, numba.typeof(3) => int32, but
# np.array(3).dtype => int64.
for arg in args[:nin]:
if numpy_support.is_arrayscalar(arg):
argtys.append(numpy_support.map_arrayscalar_type(arg))
else:
argty = typeof(arg)
if isinstance(argty, types.Array):
argty = argty.dtype
argtys.append(argty)
return self._compile_for_argtys(tuple(argtys))
def _compile_for_argtys(self, argtys, return_type=None):
"""
Given a tuple of argument types (these should be the array
dtypes, and not the array types themselves), compile the
element-wise function for those inputs, generate a UFunc loop
wrapper, and register the loop with the Numpy ufunc object for
this DUFunc.
"""
if self._frozen:
raise RuntimeError("compilation disabled for %s" % (self,))
assert isinstance(argtys, tuple)
if return_type is None:
sig = argtys
else:
sig = return_type(*argtys)
cres, argtys, return_type = ufuncbuilder._compile_element_wise_function(
self._dispatcher, self.targetoptions, sig)
actual_sig = ufuncbuilder._finalize_ufunc_signature(
cres, argtys, return_type)
dtypenums, ptr, env = ufuncbuilder._build_element_wise_ufunc_wrapper(
cres, actual_sig)
self._add_loop(utils.longint(ptr), dtypenums)
self._keepalive.append((ptr, cres.library, env))
self._lower_me.libs.append(cres.library)
return cres
def _install_type(self, typingctx=None):
"""Constructs and installs a typing class for a DUFunc object in the
input typing context. If no typing context is given, then
_install_type() installs into the typing context of the
dispatcher object (should be same default context used by
jit() and njit()).
"""
if typingctx is None:
typingctx = self._dispatcher.targetdescr.typing_context
_ty_cls = type('DUFuncTyping_' + self.ufunc.__name__,
(AbstractTemplate,),
dict(key=self, generic=self._type_me))
typingctx.insert_user_function(self, _ty_cls)
def find_ewise_function(self, ewise_types):
"""
Given a tuple of element-wise argument types, find a matching
signature in the dispatcher.
Return a 2-tuple containing the matching signature, and
compilation result. Will return two None's if no matching
signature was found.
"""
if self._frozen:
# If we cannot compile, coerce to the best matching loop
loop = numpy_support.ufunc_find_matching_loop(self, ewise_types)
if loop is None:
return None, None
ewise_types = tuple(loop.inputs + loop.outputs)[:len(ewise_types)]
for sig, cres in self._dispatcher.overloads.items():
if sig.args == ewise_types:
return sig, cres
return None, None
def _type_me(self, argtys, kwtys):
"""
Implement AbstractTemplate.generic() for the typing class
built by DUFunc._install_type().
Return the call-site signature after either validating the
element-wise signature or compiling for it.
"""
assert not kwtys
ufunc = self.ufunc
_handle_inputs_result = npydecl.Numpy_rules_ufunc._handle_inputs(
ufunc, argtys, kwtys)
base_types, explicit_outputs, ndims, layout = _handle_inputs_result
explicit_output_count = len(explicit_outputs)
if explicit_output_count > 0:
ewise_types = tuple(base_types[:-len(explicit_outputs)])
else:
ewise_types = tuple(base_types)
sig, cres = self.find_ewise_function(ewise_types)
if sig is None:
# Matching element-wise signature was not found; must
# compile.
if self._frozen:
raise TypeError("cannot call %s with types %s"
% (self, argtys))
self._compile_for_argtys(ewise_types)
sig, cres = self.find_ewise_function(ewise_types)
assert sig is not None
if explicit_output_count > 0:
outtys = list(explicit_outputs)
elif ufunc.nout == 1:
if ndims > 0:
outtys = [types.Array(sig.return_type, ndims, layout)]
else:
outtys = [sig.return_type]
else:
raise NotImplementedError("typing gufuncs (nout > 1)")
outtys.extend(argtys)
return signature(*outtys)
def _install_cg(self, targetctx=None):
"""
Install an implementation function for a DUFunc object in the
given target context. If no target context is given, then
_install_cg() installs into the target context of the
dispatcher object (should be same default context used by
jit() and njit()).
"""
if targetctx is None:
targetctx = self._dispatcher.targetdescr.target_context
_any = types.Any
_arr = types.Array
# Either all outputs are explicit or none of them are
sig0 = (_any,) * self.ufunc.nin + (_arr,) * self.ufunc.nout
sig1 = (_any,) * self.ufunc.nin
targetctx.insert_func_defn(
[(self._lower_me, self, sig) for sig in (sig0, sig1)])
array_analysis.MAP_TYPES.append(DUFunc)
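# Illustrative usage sketch (not part of this module). It assumes the public
# numba.vectorize decorator, which, when called without explicit signatures,
# produces a dynamic ufunc (DUFunc) that compiles a new loop the first time it
# sees an unseen input dtype.
#
#     import numpy as np
#     from numba import vectorize
#
#     @vectorize                     # no signatures -> dynamic ufunc (DUFunc)
#     def add_one(x):
#         return x + 1
#
#     add_one(np.arange(4))          # compiles and runs an integer loop
#     add_one(np.arange(4.0))        # compiles a float loop on demand
#     add_one.disable_compile()      # freeze: no new signatures after this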
|
|
#!/usr/bin/env python
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# (c) 2012-2014, Michael DeHaan <[email protected]> and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import os
import glob
import sys
import yaml
import re
import optparse
import datetime
import cgi
import warnings
from collections import defaultdict
from jinja2 import Environment, FileSystemLoader
from six import iteritems
from ansible.utils import module_docs
from ansible.utils.vars import merge_hash
from ansible.utils.unicode import to_bytes
from ansible.errors import AnsibleError
#####################################################################################
# constants and paths
# if a module is added in a version of Ansible older than this, don't print the version added information
# in the module documentation because everyone is assumed to be running something newer than this already.
TO_OLD_TO_BE_NOTABLE = 1.3
# Get parent directory of the directory this script lives in
MODULEDIR=os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules'
))
# The name of the DOCUMENTATION template
EXAMPLE_YAML=os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml'
))
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
DEPRECATED = " (D)"
NOTCORE = " (E)"
#####################################################################################
def rst_ify(text):
''' convert symbols like I(this is in italics) to valid restructured text '''
try:
t = _ITALIC.sub(r'*' + r"\1" + r"*", text)
t = _BOLD.sub(r'**' + r"\1" + r"**", t)
t = _MODULE.sub(r':ref:`' + r"\1 <\1>" + r"`", t)
t = _URL.sub(r"\1", t)
t = _CONST.sub(r'``' + r"\1" + r"``", t)
except Exception as e:
raise AnsibleError("Could not process (%s) : %s" % (str(text), str(e)))
return t
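# For reference, sample conversions performed by rst_ify, derived from the
# regular expressions above:
#   I(file name)   -> *file name*
#   B(required)    -> **required**
#   M(copy)        -> :ref:`copy <copy>`
#   C(/etc/hosts)  -> ``/etc/hosts``
#   U(http://x.y)  -> http://x.y   (URLs are reduced to plain text)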
#####################################################################################
def html_ify(text):
''' convert symbols like I(this is in italics) to valid HTML '''
t = cgi.escape(text)
t = _ITALIC.sub("<em>" + r"\1" + "</em>", t)
t = _BOLD.sub("<b>" + r"\1" + "</b>", t)
t = _MODULE.sub("<span class='module'>" + r"\1" + "</span>", t)
t = _URL.sub("<a href='" + r"\1" + "'>" + r"\1" + "</a>", t)
t = _CONST.sub("<code>" + r"\1" + "</code>", t)
return t
#####################################################################################
def rst_fmt(text, fmt):
''' helper for Jinja2 to do format strings '''
return fmt % (text)
#####################################################################################
def rst_xline(width, char="="):
''' return a restructured text line of a given length '''
return char * width
#####################################################################################
def write_data(text, options, outputname, module):
''' dumps module output to a file or the screen, as requested '''
if options.output_dir is not None:
fname = os.path.join(options.output_dir, outputname % module)
fname = fname.replace(".py","")
f = open(fname, 'w')
f.write(text.encode('utf-8'))
f.close()
else:
print(text)
#####################################################################################
def list_modules(module_dir, depth=0):
''' returns a hash of categories, each category being a hash of module names to file paths '''
categories = dict()
module_info = dict()
aliases = defaultdict(set)
# * windows powershell modules have documentation stubs in python docstring
# format (they are not executed) so skip the ps1 format files
# * One glob level for every module level that we're going to traverse
files = glob.glob("%s/*.py" % module_dir) + glob.glob("%s/*/*.py" % module_dir) + glob.glob("%s/*/*/*.py" % module_dir) + glob.glob("%s/*/*/*/*.py" % module_dir)
for module_path in files:
if module_path.endswith('__init__.py'):
continue
category = categories
for new_cat in ['network', 'f5']:
if new_cat not in category:
category[new_cat] = dict()
category = category[new_cat]
module = os.path.splitext(os.path.basename(module_path))[0]
if module.startswith("_") and os.path.islink(module_path):
source = os.path.splitext(os.path.basename(os.path.realpath(module_path)))[0]
module = module.replace("_","",1)
aliases[source].add(module)
continue
category[module] = module_path
module_info[module] = module_path
return module_info, categories, aliases
#####################################################################################
def generate_parser():
''' generate an optparse parser '''
p = optparse.OptionParser(
version='%prog 1.0',
usage='usage: %prog [options] arg1 arg2',
description='Generate module documentation from metadata',
)
p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number")
p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path")
p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates")
p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type")
p.add_option("-v", "--verbose", action='store_true', default=False, help="Verbose")
p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files")
p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules")
p.add_option('-V', action='version', help='Show version number and exit')
return p
#####################################################################################
def jinja2_environment(template_dir, typ):
env = Environment(loader=FileSystemLoader(template_dir),
variable_start_string="@{",
variable_end_string="}@",
trim_blocks=True,
)
env.globals['xline'] = rst_xline
if typ == 'rst':
env.filters['convert_symbols_to_format'] = rst_ify
env.filters['html_ify'] = html_ify
env.filters['fmt'] = rst_fmt
env.filters['xline'] = rst_xline
template = env.get_template('rst.j2')
outputname = "%s_module.rst"
else:
raise Exception("unknown module format type: %s" % typ)
return env, template, outputname
#####################################################################################
def too_old(added):
if not added:
return False
try:
added_tokens = str(added).split(".")
readded = added_tokens[0] + "." + added_tokens[1]
added_float = float(readded)
except ValueError as e:
warnings.warn("Could not parse %s: %s" % (added, str(e)))
return False
return (added_float < TO_OLD_TO_BE_NOTABLE)
def process_module(module, options, env, template, outputname, module_map, aliases):
fname = module_map[module]
if isinstance(fname, dict):
return "SKIPPED"
basename = os.path.basename(fname)
deprecated = False
    # ignore anything that is not a python module file
if not basename.endswith(".py"):
return
elif module.startswith("_"):
if os.path.islink(fname):
            return  # ignore, it's an alias
deprecated = True
module = module.replace("_","",1)
print("rendering: %s" % module)
# use ansible core library to parse out doc metadata YAML and plaintext examples
doc, examples, returndocs = module_docs.get_docstring(fname, verbose=options.verbose)
# crash if module is missing documentation and not explicitly hidden from docs index
if doc is None:
sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module))
sys.exit(1)
if deprecated and 'deprecated' not in doc:
sys.stderr.write("*** ERROR: DEPRECATED MODULE MISSING 'deprecated' DOCUMENTATION: %s, %s ***\n" % (fname, module))
sys.exit(1)
if "/core/" in fname:
doc['core'] = True
else:
doc['core'] = False
if module in aliases:
doc['aliases'] = aliases[module]
all_keys = []
if not 'version_added' in doc:
sys.stderr.write("*** ERROR: missing version_added in: %s ***\n" % module)
sys.exit(1)
added = 0
if doc['version_added'] == 'historical':
del doc['version_added']
else:
added = doc['version_added']
# don't show version added information if it's too old to be called out
if too_old(added):
del doc['version_added']
if 'options' in doc and doc['options']:
for (k,v) in iteritems(doc['options']):
# don't show version added information if it's too old to be called out
if 'version_added' in doc['options'][k] and too_old(doc['options'][k]['version_added']):
del doc['options'][k]['version_added']
if not 'description' in doc['options'][k]:
raise AnsibleError("Missing required description for option %s in %s " % (k, module))
required_value = doc['options'][k].get('required', False)
if not isinstance(required_value, bool):
raise AnsibleError("Invalid required value '%s' for option '%s' in '%s' (must be truthy)" % (required_value, k, module))
if not isinstance(doc['options'][k]['description'],list):
doc['options'][k]['description'] = [doc['options'][k]['description']]
all_keys.append(k)
all_keys = sorted(all_keys)
doc['option_keys'] = all_keys
doc['filename'] = fname
doc['docuri'] = doc['module'].replace('_', '-')
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['ansible_version'] = options.ansible_version
doc['plainexamples'] = examples #plain text
if returndocs:
try:
doc['returndocs'] = yaml.safe_load(returndocs)
except:
print("could not load yaml: %s" % returndocs)
raise
else:
doc['returndocs'] = None
# here is where we build the table of contents...
try:
text = template.render(doc)
except Exception as e:
raise AnsibleError("Failed to render doc for %s: %s" % (fname, str(e)))
write_data(text, options, outputname, module)
return doc['short_description']
#####################################################################################
def print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases):
modstring = module
if modstring.startswith('_'):
modstring = module[1:]
modname = modstring
if module in deprecated:
modstring = modstring + DEPRECATED
elif module not in core:
modstring = modstring + NOTCORE
category_file.write(" %s - %s <%s_module>\n" % (to_bytes(modstring), to_bytes(rst_ify(module_map[module][1])), to_bytes(modname)))
def process_category(category, categories, options, env, template, outputname):
### FIXME:
# We no longer conceptually deal with a mapping of category names to
# modules to file paths. Instead we want several different records:
# (1) Mapping of module names to file paths (what's presently used
# as categories['all']
# (2) Mapping of category names to lists of module names (what you'd
# presently get from categories[category_name][subcategory_name].keys()
# (3) aliases (what's presently in categories['_aliases']
#
# list_modules() now returns those. Need to refactor this function and
# main to work with them.
module_map = categories[category]
module_info = categories['all']
aliases = {}
if '_aliases' in categories:
aliases = categories['_aliases']
category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category)
category_file = open(category_file_path, "w")
print("*** recording category %s in %s ***" % (category, category_file_path))
# start a new category file
category = category.replace("_"," ")
category = category.title()
modules = []
deprecated = []
core = []
for module in module_map.keys():
if isinstance(module_map[module], dict):
for mod in (m for m in module_map[module].keys() if m in module_info):
if mod.startswith("_"):
deprecated.append(mod)
elif '/core/' in module_info[mod][0]:
core.append(mod)
else:
if module not in module_info:
continue
if module.startswith("_"):
deprecated.append(module)
elif '/core/' in module_info[module][0]:
core.append(module)
modules.append(module)
modules.sort(key=lambda k: k[1:] if k.startswith('_') else k)
category_header = "%s Modules" % (category.title())
underscores = "`" * len(category_header)
category_file.write("""\
%s
%s
.. toctree:: :maxdepth: 1
""" % (category_header, underscores))
sections = []
for module in modules:
if module in module_map and isinstance(module_map[module], dict):
sections.append(module)
continue
else:
print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_info, aliases)
sections.sort()
for section in sections:
category_file.write("\n%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section)))
category_file.write(".. toctree:: :maxdepth: 1\n\n")
        section_modules = sorted(module_map[section].keys(),
                                 key=lambda k: k[1:] if k.startswith('_') else k)
#for module in module_map[section]:
for module in (m for m in section_modules if m in module_info):
print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_info, aliases)
category_file.write("""\n\n
.. note::
- %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale.
- %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not necessarily) less actively maintained than 'core' modules.
- Tickets filed on modules are filed to different repos than those on the main open source project. Core module tickets should be filed at `ansible/ansible-modules-core on GitHub <http://github.com/ansible/ansible-modules-core>`_, extras tickets to `ansible/ansible-modules-extras on GitHub <http://github.com/ansible/ansible-modules-extras>`_
""" % (DEPRECATED, NOTCORE))
category_file.close()
# TODO: end a new category file
#####################################################################################
def validate_options(options):
''' validate option parser options '''
if not options.module_dir:
print("--module-dir is required", file=sys.stderr)
sys.exit(1)
if not os.path.exists(options.module_dir):
print("--module-dir does not exist: %s" % options.module_dir, file=sys.stderr)
sys.exit(1)
if not options.template_dir:
print("--template-dir must be specified")
sys.exit(1)
#####################################################################################
def main():
p = generate_parser()
(options, args) = p.parse_args()
validate_options(options)
env, template, outputname = jinja2_environment(options.template_dir, options.type)
mod_info, categories, aliases = list_modules(options.module_dir)
categories['all'] = mod_info
categories['_aliases'] = aliases
category_names = [c for c in categories.keys() if not c.startswith('_')]
category_names.sort()
# Write master category list
category_list_path = os.path.join(options.output_dir, "modules_by_category.rst")
with open(category_list_path, "w") as category_list_file:
category_list_file.write("Module Index\n")
category_list_file.write("============\n")
category_list_file.write("\n\n")
category_list_file.write(".. toctree::\n")
category_list_file.write(" :maxdepth: 1\n\n")
for category in category_names:
category_list_file.write(" list_of_%s_modules\n" % category)
#
# Import all the docs into memory
#
module_map = mod_info.copy()
skipped_modules = set()
for modname in module_map:
result = process_module(modname, options, env, template, outputname, module_map, aliases)
if result == 'SKIPPED':
del categories['all'][modname]
else:
categories['all'][modname] = (categories['all'][modname], result)
#
# Render all the docs to rst via category pages
#
for category in category_names:
process_category(category, categories, options, env, template, outputname)
if __name__ == '__main__':
main()
|
|
'''
Inspector
=========
.. versionadded:: 1.0.9
.. warning::
This module is highly experimental, use it with care.
The Inspector is a tool for finding a widget in the widget tree by clicking or
tapping on it.
Some keyboard shortcuts are activated:
* "Ctrl + e": activate / deactivate the inspector view
* "Escape": cancel widget lookup first, then hide the inspector view
Available inspector interactions:
* tap once on a widget to select it without leaving inspect mode
* double tap on a widget to select and leave inspect mode (then you can
manipulate the widget again)
Some properties can be edited live. However, due to the delayed usage of
some properties, it might crash if you don't handle all the cases.
Usage
-----
For normal module usage, please see the :mod:`~kivy.modules` documentation.
The Inspector, however, can also be imported and used just like a normal
python module. This has the added advantage of being able to activate and
deactivate the module programmatically::
from kivy.core.window import Window
from kivy.app import App
from kivy.uix.button import Button
from kivy.modules import inspector
class Demo(App):
def build(self):
button = Button(text="Test")
inspector.create_inspector(Window, button)
return button
Demo().run()
To remove the Inspector, you can do the following::
inspector.stop(Window, button)
'''
__all__ = ('start', 'stop', 'create_inspector')
import kivy
kivy.require('1.0.9')
import weakref
from kivy.animation import Animation
from kivy.logger import Logger
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.textinput import TextInput
from kivy.uix.image import Image
from kivy.uix.treeview import TreeViewNode
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.modalview import ModalView
from kivy.graphics import Color, Rectangle, PushMatrix, PopMatrix, \
Translate, Rotate, Scale
from kivy.properties import ObjectProperty, BooleanProperty, ListProperty, \
NumericProperty, StringProperty, OptionProperty, \
ReferenceListProperty, AliasProperty, VariableListProperty
from kivy.graphics.texture import Texture
from kivy.clock import Clock
from functools import partial
from itertools import chain
from kivy.lang import Builder
from kivy.vector import Vector
Builder.load_string('''
<Inspector>:
layout: layout
treeview: treeview
content: content
BoxLayout:
orientation: 'vertical'
id: layout
size_hint_y: None
height: 250
padding: 5
spacing: 5
top: 0
canvas:
Color:
rgb: .4, .4, .4
Rectangle:
pos: self.x, self.top
size: self.width, 1
Color:
rgba: .185, .18, .18, .95
Rectangle:
pos: self.pos
size: self.size
# Top Bar
BoxLayout:
size_hint_y: None
height: 50
spacing: 5
Button:
text: 'Move to Top'
on_release: root.toggle_position(args[0])
size_hint_x: None
width: 120
ToggleButton:
text: 'Inspect'
on_state: root.inspect_enabled = args[1] == 'down'
size_hint_x: None
state: 'down' if root.inspect_enabled else 'normal'
width: 80
Button:
text: 'Parent'
on_release:
root.highlight_widget(root.widget.parent) if root.widget \
and root.widget.parent is not root.win else None
size_hint_x: None
width: 80
Button:
text: '%r' % root.widget
on_release: root.show_widget_info()
Button:
text: 'X'
size_hint_x: None
width: 50
on_release: root.activated = False
# Bottom Bar
BoxLayout:
ScrollView:
scroll_type: ['bars', 'content']
bar_width: 10
TreeView:
id: treeview
size_hint_y: None
hide_root: True
height: self.minimum_height
ScrollView:
id: content
<TreeViewProperty>:
height: max(lkey.texture_size[1], ltext.texture_size[1])
Label:
id: lkey
text: root.key
text_size: (self.width, None)
width: 150
size_hint_x: None
Label:
id: ltext
text: [repr(getattr(root.widget, root.key, '')), root.refresh][0]\
if root.widget else ''
text_size: (self.width, None)
''')
class TreeViewProperty(BoxLayout, TreeViewNode):
widget_ref = ObjectProperty(None, allownone=True)
def _get_widget(self):
wr = self.widget_ref
if wr is None:
return None
wr = wr()
if wr is None:
self.widget_ref = None
return None
return wr
widget = AliasProperty(_get_widget, None, bind=('widget_ref', ))
key = ObjectProperty(None, allownone=True)
inspector = ObjectProperty(None)
refresh = BooleanProperty(False)
class Inspector(FloatLayout):
widget = ObjectProperty(None, allownone=True)
layout = ObjectProperty(None)
treeview = ObjectProperty(None)
inspect_enabled = BooleanProperty(False)
activated = BooleanProperty(False)
widget_info = BooleanProperty(False)
content = ObjectProperty(None)
at_bottom = BooleanProperty(True)
def __init__(self, **kwargs):
super(Inspector, self).__init__(**kwargs)
self.avoid_bring_to_top = False
self.win = kwargs.get('win')
with self.canvas.before:
self.gcolor = Color(1, 0, 0, .25)
PushMatrix()
self.gtranslate = Translate(0, 0, 0)
self.grotate = Rotate(0, 0, 0, 1)
self.gscale = Scale(1.)
self.grect = Rectangle(size=(0, 0))
PopMatrix()
Clock.schedule_interval(self.update_widget_graphics, 0)
def on_touch_down(self, touch):
ret = super(Inspector, self).on_touch_down(touch)
if touch.button == 'left' and not ret and self.inspect_enabled:
self.highlight_at(*touch.pos)
if touch.is_double_tap:
self.inspect_enabled = False
self.show_widget_info()
ret = True
return ret
def on_touch_move(self, touch):
ret = super(Inspector, self).on_touch_move(touch)
if not ret and self.inspect_enabled:
self.highlight_at(*touch.pos)
ret = True
return ret
def on_touch_up(self, touch):
ret = super(Inspector, self).on_touch_up(touch)
if not ret and self.inspect_enabled:
ret = True
return ret
def on_window_children(self, win, children):
if self.avoid_bring_to_top:
return
self.avoid_bring_to_top = True
win.remove_widget(self)
win.add_widget(self)
self.avoid_bring_to_top = False
def highlight_at(self, x, y):
widget = None
# reverse the loop - look at children on top first and
# modalviews before others
win_children = self.win.children
children = chain(
(c for c in reversed(win_children) if isinstance(c, ModalView)),
(c for c in reversed(win_children) if not isinstance(c, ModalView))
)
for child in children:
if child is self:
continue
widget = self.pick(child, x, y)
if widget:
break
self.highlight_widget(widget)
def highlight_widget(self, widget, info=True, *largs):
# no widget to highlight, reduce rectangle to 0, 0
self.widget = widget
if not widget:
self.grect.size = 0, 0
if self.widget_info and info:
self.show_widget_info()
def update_widget_graphics(self, *l):
if not self.activated:
return
if self.widget is None:
self.grect.size = 0, 0
return
gr = self.grect
widget = self.widget
# determine rotation
a = Vector(1, 0)
b = Vector(widget.to_window(*widget.to_parent(0, 0)))
c = Vector(widget.to_window(*widget.to_parent(1, 0))) - b
angle = -a.angle(c)
# determine scale
scale = c.length()
# apply transform
gr.size = widget.size
self.gtranslate.xy = Vector(widget.to_window(*widget.pos))
self.grotate.angle = angle
# fix warning about scale property deprecation
self.gscale.xyz = (scale,) * 3
def toggle_position(self, button):
to_bottom = button.text == 'Move to Bottom'
if to_bottom:
button.text = 'Move to Top'
if self.widget_info:
Animation(top=250, t='out_quad', d=.3).start(self.layout)
else:
Animation(top=60, t='out_quad', d=.3).start(self.layout)
bottom_bar = self.layout.children[1]
self.layout.remove_widget(bottom_bar)
self.layout.add_widget(bottom_bar)
else:
button.text = 'Move to Bottom'
if self.widget_info:
Animation(top=self.height, t='out_quad', d=.3).start(
self.layout)
else:
Animation(y=self.height - 60, t='out_quad', d=.3).start(
self.layout)
bottom_bar = self.layout.children[1]
self.layout.remove_widget(bottom_bar)
self.layout.add_widget(bottom_bar)
self.at_bottom = to_bottom
def pick(self, widget, x, y):
ret = None
# try to filter widgets that are not visible (invalid inspect target)
if (hasattr(widget, 'visible') and not widget.visible):
return ret
if widget.collide_point(x, y):
ret = widget
x2, y2 = widget.to_local(x, y)
# reverse the loop - look at children on top first
for child in reversed(widget.children):
ret = self.pick(child, x2, y2) or ret
return ret
def on_activated(self, instance, activated):
if not activated:
self.grect.size = 0, 0
if self.at_bottom:
anim = Animation(top=0, t='out_quad', d=.3)
else:
anim = Animation(y=self.height, t='out_quad', d=.3)
anim.bind(on_complete=self.animation_close)
anim.start(self.layout)
self.widget = None
self.widget_info = False
else:
self.win.add_widget(self)
Logger.info('Inspector: inspector activated')
if self.at_bottom:
Animation(top=60, t='out_quad', d=.3).start(self.layout)
else:
Animation(y=self.height - 60, t='out_quad', d=.3).start(
self.layout)
def animation_close(self, instance, value):
if self.activated is False:
self.inspect_enabled = False
self.win.remove_widget(self)
self.content.clear_widgets()
treeview = self.treeview
for node in list(treeview.iterate_all_nodes())[:]:
node.widget_ref = None
treeview.remove_node(node)
Logger.info('Inspector: inspector deactivated')
def show_widget_info(self):
self.content.clear_widgets()
widget = self.widget
treeview = self.treeview
for node in list(treeview.iterate_all_nodes())[:]:
node.widget_ref = None
treeview.remove_node(node)
if not widget:
if self.at_bottom:
Animation(top=60, t='out_quad', d=.3).start(self.layout)
else:
Animation(y=self.height - 60, t='out_quad', d=.3).start(
self.layout)
self.widget_info = False
return
self.widget_info = True
if self.at_bottom:
Animation(top=250, t='out_quad', d=.3).start(self.layout)
else:
Animation(top=self.height, t='out_quad', d=.3).start(self.layout)
for node in list(treeview.iterate_all_nodes())[:]:
treeview.remove_node(node)
keys = list(widget.properties().keys())
keys.sort()
node = None
wk_widget = weakref.ref(widget)
for key in keys:
text = '%s' % key
node = TreeViewProperty(text=text, key=key, widget_ref=wk_widget)
node.bind(is_selected=self.show_property)
try:
widget.bind(**{key: partial(
self.update_node_content, weakref.ref(node))})
            except:
                pass
treeview.add_node(node)
def update_node_content(self, node, *l):
node = node()
if node is None:
return
node.refresh = True
node.refresh = False
def keyboard_shortcut(self, win, scancode, *largs):
modifiers = largs[-1]
if scancode == 101 and modifiers == ['ctrl']:
self.activated = not self.activated
if self.activated:
self.inspect_enabled = True
return True
elif scancode == 27:
if self.inspect_enabled:
self.inspect_enabled = False
return True
if self.activated:
self.activated = False
return True
def show_property(self, instance, value, key=None, index=-1, *l):
# normal call: (tree node, focus, )
# nested call: (widget, prop value, prop key, index in dict/list)
if value is False:
return
content = None
if key is None:
# normal call
nested = False
widget = instance.widget
key = instance.key
prop = widget.property(key)
value = getattr(widget, key)
else:
# nested call, we might edit subvalue
nested = True
widget = instance
prop = None
dtype = None
if isinstance(prop, AliasProperty) or nested:
            # try to resolve the type dynamically
            if type(value) is str:
dtype = 'string'
elif type(value) in (int, float):
dtype = 'numeric'
elif type(value) in (tuple, list):
dtype = 'list'
if isinstance(prop, NumericProperty) or dtype == 'numeric':
content = TextInput(text=str(value) or '', multiline=False)
content.bind(text=partial(
self.save_property_numeric, widget, key, index))
elif isinstance(prop, StringProperty) or dtype == 'string':
content = TextInput(text=value or '', multiline=True)
content.bind(text=partial(
self.save_property_text, widget, key, index))
elif (isinstance(prop, ListProperty) or
isinstance(prop, ReferenceListProperty) or
isinstance(prop, VariableListProperty) or
dtype == 'list'):
content = GridLayout(cols=1, size_hint_y=None)
content.bind(minimum_height=content.setter('height'))
for i, item in enumerate(value):
button = Button(text=repr(item), size_hint_y=None, height=44)
if isinstance(item, Widget):
button.bind(on_release=partial(self.highlight_widget, item,
False))
else:
button.bind(on_release=partial(self.show_property, widget,
item, key, i))
content.add_widget(button)
elif isinstance(prop, OptionProperty):
content = GridLayout(cols=1, size_hint_y=None)
content.bind(minimum_height=content.setter('height'))
for option in prop.options:
button = ToggleButton(
text=option,
state='down' if option == value else 'normal',
group=repr(content.uid), size_hint_y=None,
height=44)
button.bind(on_press=partial(
self.save_property_option, widget, key))
content.add_widget(button)
elif isinstance(prop, ObjectProperty):
if isinstance(value, Widget):
content = Button(text=repr(value))
content.bind(on_release=partial(self.highlight_widget, value))
elif isinstance(value, Texture):
content = Image(texture=value)
else:
content = Label(text=repr(value))
elif isinstance(prop, BooleanProperty):
state = 'down' if value else 'normal'
content = ToggleButton(text=key, state=state)
content.bind(on_release=partial(self.save_property_boolean, widget,
key, index))
self.content.clear_widgets()
if content:
self.content.add_widget(content)
def save_property_numeric(self, widget, key, index, instance, value):
try:
if index >= 0:
getattr(widget, key)[index] = float(instance.text)
else:
setattr(widget, key, float(instance.text))
except:
pass
def save_property_text(self, widget, key, index, instance, value):
try:
if index >= 0:
getattr(widget, key)[index] = instance.text
else:
setattr(widget, key, instance.text)
except:
pass
    def save_property_boolean(self, widget, key, index, instance):
try:
value = instance.state == 'down'
if index >= 0:
getattr(widget, key)[index] = value
else:
setattr(widget, key, value)
except:
pass
def save_property_option(self, widget, key, instance, *l):
try:
setattr(widget, key, instance.text)
except:
pass
def create_inspector(win, ctx, *l):
'''Create an Inspector instance attached to the *ctx* and bound to the
Windows :meth:`~kivy.core.window.WindowBase.on_keyboard` event for capturing
the keyboard shortcut.
:Parameters:
`win`: A :class:`Window <kivy.core.window.WindowBase>`
The application Window to bind to.
`ctx`: A :class:`~kivy.uix.widget.Widget` or subclass
The Widget to be inspected.
'''
# Dunno why, but if we are creating inspector within the start(), no lang
# rules are applied.
ctx.inspector = Inspector(win=win)
win.bind(children=ctx.inspector.on_window_children,
on_keyboard=ctx.inspector.keyboard_shortcut)
def start(win, ctx):
Clock.schedule_once(partial(create_inspector, win, ctx))
def stop(win, ctx):
'''Stop and unload any active Inspectors for the given *ctx*.'''
if hasattr(ctx, 'inspector'):
win.unbind(children=ctx.inspector.on_window_children,
on_keyboard=ctx.inspector.keyboard_shortcut)
win.remove_widget(ctx.inspector)
del ctx.inspector
|
|
#!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is yet another bazel-to-cmake converter. It's independently written from
scratch but relies on the same basic idea as others (including IREE's), namely:
to let the python interpreter do the bulk of the work, exploiting the fact that
both Bazel's BUILD syntax and Starlark (".bzl") languages are more or less
subsets of Python.
The main features that this converter supports and that others don't, justifying
its existence as of early 2021, are:
1. Ad-hoc support for select(), generating CMake if()...elseif()... chains
parsing the condition keys (e.g. anything ending in ":windows" is
interpreted as the condition "the target platform is Windows"). This allows
to just ignore config_setting, as we only care about the config_setting
names, not their actual implementation, as well as all the variants from
the Bazel 'selects' library.
2. Support for load(), loading macros from Starlark files.
"""
import re
import os
import os.path
import pickle
import sys
import datetime
import itertools
# Ruy's dependencies.
external_targets = ['gtest', 'gtest_main', 'cpuinfo']
# Text replacements [oldstring, newstring] pairs, applied on all BUILD and
# Starlark files that we load. Only used by preprocess_input_text.
replacements = [
['$(STACK_FRAME_UNLIMITED)', ''],
['native.cc_', 'cc_'],
['selects.config_setting_group', 'config_setting_group'],
['@com_google_googletest//:gtest', 'gtest'],
['@com_google_googletest//:gtest_main', 'gtest_main'],
['@cpuinfo', 'cpuinfo'],
]
def preprocess_input_text(text):
result = text
for replacement in replacements:
result = result.replace(replacement[0], replacement[1])
return result
def set_cmake_list(list_name, values, indent):
semicolon_separated = ';'.join(values)
print(f'{indent}set({list_name} "{semicolon_separated}")')
def generate_cmake_select(select_name, dict):
new_if_branch_keyword = 'if'
default_value = []
for key in dict:
condition = ''
if key == '//conditions:default':
default_value = dict[key]
continue
elif re.search(r':windows$', key):
condition = 'CMAKE_SYSTEM_NAME STREQUAL Windows'
elif re.search(r':ppc$', key):
condition = ('CMAKE_SYSTEM_PROCESSOR STREQUAL ppc64 OR '
'CMAKE_SYSTEM_PROCESSOR STREQUAL ppc64le')
elif re.search(r':s390x$', key):
condition = ('CMAKE_SYSTEM_PROCESSOR STREQUAL s390 OR '
'CMAKE_SYSTEM_PROCESSOR STREQUAL s390x')
elif re.search(r':fuchsia$', key):
condition = 'CMAKE_SYSTEM_NAME STREQUAL Fuchsia'
elif re.search(r':arm32_assuming_neon$', key):
condition = 'CMAKE_SYSTEM_PROCESSOR STREQUAL arm'
elif re.search(r':do_not_want_O3$', key):
# Ruy is a specialist library: we always want code to be compiled
# with -O3 unless the build type is Debug or the compiler does not
# support that flag syntax.
condition = '(CMAKE_BUILD_TYPE STREQUAL Debug) OR MSVC'
elif re.search(r':x86_64_and_not_msvc$', key):
condition = ('(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64 OR '
'CMAKE_SYSTEM_PROCESSOR STREQUAL amd64) AND NOT MSVC')
elif re.search(r':windows_msvc$', key):
condition = 'MSVC'
elif re.search(r':ruy_profiler$', key):
condition = '${RUY_PROFILER}'
else:
raise ValueError(f'Unhandled key in select: {key}')
print(f'{new_if_branch_keyword}({condition})')
set_cmake_list(select_name, dict[key], ' ')
new_if_branch_keyword = 'elseif'
print('else()')
set_cmake_list(select_name, default_value, ' ')
print('endif()\n')
def trim_multiple_ruy_prefixes(name):
return re.sub(r'(ruy_)+ruy', 'ruy', name)
def get_cmake_local_target_name(name):
global package_prefix
return trim_multiple_ruy_prefixes(f'ruy_{package_prefix}_{name}')
def get_cmake_dep_target_name(name):
if name in external_targets:
return name
if name.startswith('$'):
# Happens for deps that are the result of expanding a select() that we
# have compiled to expanding a variable.
return name
if name.startswith('//'):
after_last_slash = name.split('/')[-1]
if ':' not in after_last_slash:
name = f'{name}:{after_last_slash}'
raw = name[2:].replace('/', '_').replace(':', '_')
return trim_multiple_ruy_prefixes(raw)
if name.startswith(':'):
name = name[1:]
return get_cmake_local_target_name(name)
#
# Functions implementing BUILD functions
#
def package(**kwargs):
pass
def exports_files(*args):
pass
def load(filename, *args):
if filename.startswith('@'):
return
elif filename.startswith(':'):
filename = os.path.join(bazel_package_dir, filename[1:])
elif filename.startswith('//'):
split = filename[2:].split(':')
filename = os.path.join(bazel_workspace_dir, split[0], split[1])
src_file_content = open(filename).read()
processed_file_content = preprocess_input_text(src_file_content)
exec(processed_file_content, globals(), globals())
def config_setting(**kwargs):
# Nothing to do since our implementation of select() is based on parsing
# the names of config_settings, not looking deep into their actual
# implementation.
pass
def filegroup(**kwargs):
pass
def config_setting_group(**kwargs):
# See config_setting.
pass
def bzl_library(**kwargs):
pass
select_index = 0
select_cache = {}
def select(select_dict):
global select_index
global select_cache
global package_prefix
key = pickle.dumps(sorted(select_dict.items()))
if key in select_cache:
select_name = select_cache[key]
else:
unique_values = sorted(
set(itertools.chain.from_iterable(select_dict.values()))
) # sorting ensures determinism, no spurious diffs
description = '_'.join(unique_values)
select_name = f'{package_prefix}_{select_index}_{description}'
select_name = select_name.replace('c++', 'cxx')
select_name = re.sub(r'[^a-zA-Z0-9]+', '_', select_name)
select_index = select_index + 1
select_cache[key] = select_name
generate_cmake_select(select_name, select_dict)
return [f'${{{select_name}}}']
def generic_rule(rule_name, **kwargs):
print(f'{rule_name}(')
for key in kwargs.keys():
values = kwargs[key]
if type(values) is bool:
if values:
print(f' {key.upper()}')
continue
else:
raise ValueError('Cannot specify FALSE boolean args in CMake')
if key == 'visibility':
if values == ['//visibility:public']:
print(f' PUBLIC')
continue
if key == 'tags':
values = list(filter(lambda x: not x.startswith('req_dep'), values))
if not values:
continue
print(f' {key.upper()}')
if type(values) is list:
for value in values:
if key == 'deps':
target_name = get_cmake_dep_target_name(value)
print(f' {target_name}')
else:
print(f' {value}')
else:
if key == 'name':
target_name = get_cmake_local_target_name(values)
print(f' {target_name}')
else:
print(f' {values}')
print(')\n')
def cc_library(**kwargs):
generic_rule('ruy_cc_library', **kwargs)
def cc_test(**kwargs):
generic_rule('ruy_cc_test', **kwargs)
def cc_binary(**kwargs):
generic_rule('ruy_cc_binary', **kwargs)
#
# Program entry point.
#
if __name__ == "__main__":
if len(sys.argv) != 3:
print('Usage: bazel_to_cmake.py bazel_workspace_dir bazel_package_dir')
sys.exit(1)
bazel_workspace_dir = sys.argv[1]
bazel_package_dir = sys.argv[2]
bazel_package_relative_dir = os.path.relpath(bazel_package_dir,
bazel_workspace_dir)
package_prefix = bazel_package_relative_dir.replace(os.path.sep, '_')
print("""# This file is generated (whence no license header). Do not edit!
# To regenerate, run:
# cmake/bazel_to_cmake.sh
""")
src_build_file = os.path.join(bazel_package_dir, 'BUILD')
src_build_content = open(src_build_file).read()
processed_build_content = preprocess_input_text(src_build_content)
exec(processed_build_content)
print('ruy_add_all_subdirs()')
|
|
from flask import Blueprint, render_template, abort, request, redirect, url_for
from flask.ext.login import LoginManager, UserMixin, login_required, current_user, login_user, logout_user
from werkzeug.security import generate_password_hash, check_password_hash
from db import User, get_user, has_superuser, write_to_db, delete_from_db, get_all
from settings import SECRET_KEY
auth = Blueprint('auth', __name__, template_folder='templates')
# Auth Helpers/Flask-Login
auth_sys = LoginManager()
def setup_auth(app):
auth_sys.init_app(app)
# Use the UserMixin from Flask-Login to make this easy.
class FLUser(UserMixin):
def __init__(self, user):
self.user = user
def get_id(self):
return self.user.username
# Redirect missing attributes to the User object
def __getattr__(self, name):
return getattr(self.user, name)
@auth_sys.user_loader
def load_user(uname):
user = get_user(uname)
if user:
return FLUser(user)
return None
@auth_sys.unauthorized_handler
def unauthorized():
return redirect('/login')
class UserExists(ValueError):
pass
class NoPermissionError(Exception):
pass
def create_user(name, password, sudo=False):
if not name or not password or len(name) < 3 or len(password) < 4 or name.isdigit(): # Disallow unames that are numbers to avoid confusing the ID catcher
raise ValueError()
if get_user(name.lower()):
raise UserExists()
u = User(username=name.lower(), password=generate_password_hash(password))
u.is_superuser = sudo
write_to_db(u)
def check_password(user, password):
return check_password_hash(user.password, password)
# Flask Routing
@auth.route('/logout')
@login_required
def logout():
logout_user()
return render_template('auth/logout.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated():
return redirect('/')
if not has_superuser():
return redirect('/firstrun')
errs = None
if request.method == 'POST':
try:
uname = request.form['username'].lower()
user = get_user(uname)
assert user
assert check_password(user, request.form['password'])
            remember = 'remember' in request.form
            login_user(FLUser(user), remember=remember)
return redirect('/')
except Exception as ex:
errs = ["Incorrect username/password."]
return render_template('auth/login.html', errors=errs)
@auth.route('/firstrun', methods=['GET', 'POST'])
def firstrun():
if has_superuser():
return redirect('/login')
errs = None
if request.method == 'POST':
try:
assert request.form['password'] == request.form['password-confirm']
uname = request.form['username'].lower()
create_user(uname, request.form['password'], sudo=True)
return render_template('auth/setup_complete.html')
except Exception as ex:
errs = ["Invalid credentials. Mismatching passwords?"]
return render_template('auth/firstrun.html', errors=errs)
ERR_USER_EXISTS = "User already exists; perhaps you wanted <a href=\"/manage-accounts\">account management</a>?"
@auth.route('/create-user', methods=['GET', 'POST'])
@login_required
def create_user_page():
if not current_user.is_superuser:
return redirect('/')
errs = None
info = None
if request.method == 'POST':
try:
assert request.form['password'] == request.form['password-confirm']
uname = request.form['username']
            admin = 'superuser' in request.form
create_user(uname, request.form['password'], sudo=admin)
info = "User '{}' created.".format(uname)
except UserExists:
errs = [ERR_USER_EXISTS]
except Exception as ex:
errs = ["User creation failed; mismatching passwords?"]
return render_template('auth/create_user.html', errors=errs, info=info)
@auth.route('/manage-accounts')
@login_required
def manage_accounts():
if not current_user.is_superuser:
return redirect('/')
info = request.args.get('info', None)
errs = []
return render_template('auth/manage.html', users=get_all(User), errors=errs, info=info)
@auth.route('/users/destroy', methods=['GET', 'POST'])
@login_required
def destroy_user():
if not current_user.is_superuser:
return redirect('/')
uid = request.args.get('uid', None)
if not uid:
return redirect(url_for('auth.manage_accounts'))
user = get_user(uid)
errs = None
if not user:
return redirect(url_for('auth.manage_accounts'))
if request.method == 'POST':
# Actually destroy
uname = user.username
try:
delete_from_db(user)
return redirect(url_for('auth.manage_accounts', info="User {} deleted.".format(uname)))
except Exception as ex:
errs = [str(ex)]
return render_template('auth/destroy.html', user=user, errors=errs)
@auth.route('/users/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
uid = request.args.get('uid', None)
user = None
if uid and current_user.is_superuser:
user = get_user(uid)
else:
user = current_user
errs = None
info = None
if request.method == 'POST':
try:
uname = request.form['username']
pw1 = request.form['password']
pw2 = request.form['password-confirm']
assert pw1 == pw2
if not uname == current_user.username and not current_user.is_superuser:
raise NoPermissionError()
u = get_user(uname)
if len(pw1) < 4:
raise ValueError()
u.password = generate_password_hash(pw1)
write_to_db(u)
info = "Password changed for '{}'".format(uname)
except NoPermissionError:
errs = ["Permission denied."]
except Exception as ex:
print(ex)
errs = ["Password change failed; mismatching passwords?"]
return render_template('auth/change_password.html', user=user, errors=errs, info=info)
@auth.route('/users/promote', methods=['GET', 'POST'])
@login_required
def promote_user():
if not current_user.is_superuser:
return redirect('/')
uid = request.args.get('uid', None)
if not uid:
return redirect(url_for('auth.manage_accounts'))
user = get_user(uid)
if not user:
return redirect(url_for('auth.manage_accounts'))
if request.method == 'POST':
user.is_superuser = True
uname = user.username
write_to_db(user)
return redirect(url_for('auth.manage_accounts', info="{} promoted.".format(uname)))
return render_template('auth/promote.html', user=user)
@auth.route('/users/demote', methods=['GET', 'POST'])
@login_required
def demote_user():
if not current_user.is_superuser:
return redirect('/')
uid = request.args.get('uid', None)
if not uid:
return redirect(url_for('auth.manage_accounts'))
user = get_user(uid)
if not user:
return redirect(url_for('auth.manage_accounts'))
if request.method == 'POST':
user.is_superuser = False
uname = user.username
write_to_db(user)
return redirect(url_for('auth.manage_accounts', info="{} demoted.".format(uname)))
return render_template('auth/demote.html', user=user)
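# Minimal wiring sketch (the application module and the name 'auth' for this
# file are assumptions, not defined here):
#
#     from flask import Flask
#     from auth import auth, setup_auth
#     from settings import SECRET_KEY
#
#     app = Flask(__name__)
#     app.secret_key = SECRET_KEY
#     app.register_blueprint(auth)
#     setup_auth(app)
#     app.run()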
|
|
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2016 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
# Create binary tarballs on Unix style systems.
#
# Step 1: Clone pylith_installer repository.
# Step 2: Use this utility to create tarballs.
# Source setup.sh after running --configure.
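#
# Example invocation (illustrative only; the script name and branch name are placeholders,
# but the flags correspond to the argparse options defined at the bottom of this file):
#
#   ./build_binary.py --all --base-dir=$HOME/pylith-binary --pylith-branch=maint --make-threads=8
#
# --all runs the --setup, --configure, --build, and --package steps in sequence.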
import os
import shutil
import subprocess
class BinaryApp(object):
def __init__(self, base_dir, pylith_branch, nthreads, force_config):
self.baseDir = base_dir
self.pylithBranch = pylith_branch
self.nthreads = nthreads
self.forceConfig = force_config
self.srcDir = os.path.join(base_dir, "src", "pylith_installer")
self.destDir = os.path.join(base_dir, "dist")
self.buildDir = os.path.join(base_dir, "build")
sysname, hostname, release, version, machine = os.uname()
self.os = sysname
self.arch = machine
self.pythonVersion = "2.7" # :KLUDGE:
return
def setup(self):
print("Cleaning destination directory '%s'..." % self.destDir)
if os.path.isdir(self.destDir):
shutil.rmtree(self.destDir)
print("Cleaning build directory '%s'..." % self.buildDir)
if os.path.isdir(self.buildDir):
shutil.rmtree(self.buildDir)
os.mkdir(self.destDir)
os.mkdir(self.buildDir)
return
def configure(self):
if self.os == "Linux":
configArgs = ("--enable-gcc",
"--enable-openssl",
"--enable-python",
"--enable-mpi=mpich",
"--enable-cppunit",
"--enable-numpy",
"--with-numpy-blaslapack",
"--enable-six",
"--enable-proj4",
"--enable-hdf5",
"--enable-netcdfpy",
"--enable-cmake",
"--enable-nemesis",
"--enable-fiat",
"--enable-pcre",
"--enable-swig",
"--enable-setuptools",
)
petscOptions = ("--download-chaco=1",
"--download-ml=1",
"--download-f2cblaslapack=1",
"--with-hwloc=0",
"--with-ssl=0",
"--with-x=0",
"--with-c2html=0",
"--with-lgrind=0",
)
elif self.os == "Darwin":
configArgs = ("--enable-autotools",
"--enable-mpi=mpich",
"--enable-swig",
"--enable-pcre",
"--enable-numpy",
"--enable-cmake",
"--with-fortran=no",
"--with-fetch=curl",
)
petscOptions = ("--download-chaco=1",
"--download-ml",
"--with-fc=0",
"--with-hwloc=0",
"--with-ssl=0",
"--with-x=0",
"--with-c2html=0",
"--with-lgrind=0",
"--with-blas-lib=/System/Library/Frameworks/Accelerate.framework/Frameworks/vecLib.framework/Versions/Current/libBLAS.dylib",
"--with-lapack-lib=/System/Library/Frameworks/Accelerate.framework/Frameworks/vecLib.framework/Versions/Current/libLAPACK.dylib",
)
else:
raise ValueError("Unknown os '%s'." % self.os)
# autoreconf
os.chdir(self.srcDir)
cmd = ("autoreconf", "--install", "--force", "--verbose")
self._runCmd(cmd)
self._setEnviron()
# configure
os.chdir(self.buildDir)
cmd = ("%s" % os.path.join(self.srcDir, "configure"),
"--with-make-threads=%d" % self.nthreads,
"--prefix=%s" % self.destDir,
)
        if self.pylithBranch is not None:
cmd += ("--with-pylith-git=%s" % self.pylithBranch,)
if self.forceConfig:
cmd += ("--enable-force-install",)
cmd += configArgs
cmd += ("--with-petsc-options=%s" % " ".join(petscOptions),)
self._runCmd(cmd)
return
def build(self):
os.chdir(self.buildDir)
self._setEnviron()
cmd = ("make",)
self._runCmd(cmd)
return
def package(self):
if self.os == "Darwin":
filename = "setup_darwin.sh"
elif self.os == "Linux":
if self.arch == "x86_64":
filename = "setup_linux64.sh"
elif self.arch == "i686":
filename = "setup_linux32.sh"
else:
raise ValueError("Unknown architecture '%s'." % self.arch)
else:
raise ValueError("Unknown os '%s'." % self.os)
shutil.copyfile(os.path.join(self.srcDir, "packager", filename), os.path.join(self.destDir, "setup.sh"))
os.chdir(os.path.join(self.buildDir, "pylith-build"))
cmd = (os.path.join(self.srcDir, "packager", "make_package.py"),)
self._runCmd(cmd)
# Darwin
if self.os == "Darwin":
print("Unpack tarball")
print("Run packager/update_darwinlinking.py in top-level directory of unpacked tarball.")
print("Repack tarball")
return
def _setEnviron(self):
print("Setting environment...")
path = (os.path.join(self.destDir, "bin"),
os.path.join(os.environ["HOME"], "bin"), # utilities for building PyLith (e.g., updated version of git)
"/bin",
"/usr/bin",
"/sbin",
"/usr/sbin",
)
os.environ["PATH"] = ":".join(path)
pythonpath = (os.path.join(self.destDir, "lib", "python%s" % self.pythonVersion, "site-packages"),)
if self.arch == "x86_64":
pythonpath += (os.path.join(self.destDir, "lib64", "python%s" % self.pythonVersion, "site-packages"),)
os.environ["PYTHONPATH"] = ":".join(pythonpath)
if self.os == "Linux":
ldpath = (os.path.join(self.destDir, "lib"),)
if self.arch == "x86_64":
ldpath += (os.path.join(self.destDir, "lib64"),)
os.environ["LD_LIBRARY_PATH"] = ":".join(ldpath)
return
def _runCmd(self, cmd):
print("Running '%s'..." % " ".join(cmd))
subprocess.check_call(cmd)
return
# ======================================================================
if __name__ == "__main__":
import argparse
baseDirDefault = os.path.join(os.environ["HOME"], "pylith-binary")
parser = argparse.ArgumentParser()
parser.add_argument("--setup", action="store_true", dest="setup")
parser.add_argument("--configure", action="store_true", dest="configure")
parser.add_argument("--build", action="store_true", dest="build")
parser.add_argument("--package", action="store_true", dest="package")
parser.add_argument("--all", action="store_true", dest="all")
parser.add_argument("--base-dir", action="store", dest="base_dir", default=baseDirDefault)
parser.add_argument("--pylith-branch", action="store", dest="pylith_branch")
parser.add_argument("--make-threads", action="store", dest="make_threads", type=int, default=4)
parser.add_argument("--force-config", action="store_true", dest="force_config", default=False)
args = parser.parse_args()
app = BinaryApp(args.base_dir, args.pylith_branch, args.make_threads, args.force_config)
if args.setup or args.all:
app.setup()
if args.configure or args.all:
app.configure()
if args.build or args.all:
app.build()
if args.package or args.all:
app.package()
# End of file
|
|
# -*- coding: utf-8 -*-
from docutils import nodes
import traceback
from docutils.parsers.rst import Directive
from past.builtins import basestring
from sphinx.locale import _
from six.moves.urllib import parse as urlparse # Retain Py2 compatibility for urlparse
import requests
from requests_file import FileAdapter
import json
class swaggerv2doc(nodes.Admonition, nodes.Element):
pass
def visit_swaggerv2doc_node(self, node):
self.visit_admonition(node)
def depart_swaggerv2doc_node(self, node):
self.depart_admonition(node)
class SwaggerV2DocDirective(Directive):
DEFAULT_GROUP = ''
# this enables content in the directive
has_content = True
def processSwaggerURL(self, url):
parsed_url = urlparse.urlparse(url)
if not parsed_url.scheme: # Assume file relative to documentation
env = self.state.document.settings.env
relfn, absfn = env.relfn2path(url)
env.note_dependency(relfn)
with open(absfn) as fd:
content = fd.read()
return json.loads(content)
else:
s = requests.Session()
s.mount('file://', FileAdapter())
r = s.get(url)
return r.json()
def create_item(self, key, value):
para = nodes.paragraph()
para += nodes.strong('', key)
para += nodes.Text(value)
item = nodes.list_item()
item += para
return item
def expand_values(self, list):
expanded_values = ''
for value in list:
expanded_values += value + ' '
return expanded_values
def cell(self, contents):
if isinstance(contents, basestring):
contents = nodes.paragraph(text=contents)
return nodes.entry('', contents)
def row(self, cells):
return nodes.row('', *[self.cell(c) for c in cells])
def create_table(self, head, body, colspec=None):
table = nodes.table()
tgroup = nodes.tgroup()
table.append(tgroup)
# Create a colspec for each column
if colspec is None:
colspec = [1 for n in range(len(head))]
for width in colspec:
tgroup.append(nodes.colspec(colwidth=width))
# Create the table headers
thead = nodes.thead()
thead.append(self.row(head))
tgroup.append(thead)
# Create the table body
tbody = nodes.tbody()
tbody.extend([self.row(r) for r in body])
tgroup.append(tbody)
return table
def make_responses(self, responses):
# Create an entry with swagger responses and a table of the response properties
entries = []
paragraph = nodes.paragraph()
paragraph += nodes.strong('', 'Responses')
entries.append(paragraph)
head = ['Name', 'Description', 'Type']
for response_name, response in responses.items():
paragraph = nodes.paragraph()
paragraph += nodes.emphasis('', '%s - %s' % (response_name,
response.get('description', '')))
entries.append(paragraph)
body = []
# if the optional field properties is in the schema, display the properties
if isinstance(response.get('schema'), dict) and 'properties' in response.get('schema'):
for property_name, property in response.get('schema').get('properties', {}).items():
row = []
row.append(property_name)
row.append(property.get('description', ''))
row.append(property.get('type', ''))
body.append(row)
table = self.create_table(head, body)
entries.append(table)
return entries
def make_parameters(self, parameters):
entries = []
head = ['Name', 'Position', 'Description', 'Type']
body = []
for param in parameters:
row = []
row.append(param.get('name', ''))
row.append(param.get('in', ''))
row.append(param.get('description', ''))
row.append(param.get('type', ''))
body.append(row)
table = self.create_table(head, body)
paragraph = nodes.paragraph()
paragraph += nodes.strong('', 'Parameters')
entries.append(paragraph)
entries.append(table)
return entries
def make_method(self, path, method_type, method):
swagger_node = swaggerv2doc(path)
swagger_node += nodes.title(path, method_type.upper() + ' ' + path)
paragraph = nodes.paragraph()
paragraph += nodes.Text(method.get('summary', ''))
bullet_list = nodes.bullet_list()
method_sections = {'Description': 'description', 'Consumes': 'consumes', 'Produces': 'produces'}
for title in method_sections:
value_name = method_sections[title]
value = method.get(value_name)
if value is not None:
bullet_list += self.create_item(title + ': \n', value)
paragraph += bullet_list
swagger_node += paragraph
parameters = method.get('parameters')
if parameters is not None:
swagger_node += self.make_parameters(parameters)
responses = method.get('responses')
if responses is not None:
swagger_node += self.make_responses(responses)
return [swagger_node]
def group_tags(self, api_desc):
groups = {}
if 'tags' in api_desc:
for tag in api_desc['tags']:
groups[tag['name']] = []
if len(groups) == 0:
groups[SwaggerV2DocDirective.DEFAULT_GROUP] = []
for path, methods in api_desc['paths'].items():
for method_type, method in methods.items():
if SwaggerV2DocDirective.DEFAULT_GROUP in groups:
groups[SwaggerV2DocDirective.DEFAULT_GROUP].append((path, method_type, method))
else:
for tag in method['tags']:
groups.setdefault(tag, []).append((path, method_type, method))
return groups
def create_section(self, title):
section = nodes.section(ids=[title])
section += nodes.title(title, title)
return section
def check_tags(self, selected_tags, tags, api_url):
invalid_tags = list(set(selected_tags) - set(tags))
if len(invalid_tags) > 0:
msg = self.reporter.error("Error. Tag '%s' not found in Swagger URL %s." % (invalid_tags[0], api_url))
return [msg]
def run(self):
self.reporter = self.state.document.reporter
api_url = self.content[0]
if len(self.content) > 1:
selected_tags = self.content[1:]
else:
selected_tags = []
try:
api_desc = self.processSwaggerURL(api_url)
groups = self.group_tags(api_desc)
self.check_tags(selected_tags, groups.keys(), api_url)
entries = []
for tag_name, methods in groups.items():
if tag_name in selected_tags or len(selected_tags) == 0:
section = self.create_section(tag_name)
for path, method_type, method in methods:
section += self.make_method(path, method_type, method)
entries.append(section)
return entries
except Exception as e:
error_message = 'Unable to process URL: %s' % api_url
print(error_message)
traceback.print_exc()
error = nodes.error('')
para_error = nodes.paragraph()
            para_error += nodes.Text(error_message + '. Please check that the URL is a valid Swagger api-docs URL and that it is accessible')
            para_error_detailed = nodes.paragraph()
            para_error_detailed += nodes.strong('', 'Processing error. See console output for a more detailed error')
error += para_error
error += para_error_detailed
return [error]
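# The snippet below is an illustrative sketch, not part of the original extension: it shows how a
# Sphinx setup() hook could register the node and directive defined above. The directive name
# 'swaggerv2doc' and the returned metadata are assumptions. In reST the directive would then be
# used roughly as:
#
#   .. swaggerv2doc:: https://example.com/swagger.json
#      tag1
#      tag2
#
def setup(app):
    # Register the admonition-like node with its HTML visitor/departer functions.
    app.add_node(swaggerv2doc,
                 html=(visit_swaggerv2doc_node, depart_swaggerv2doc_node))
    # Register the directive under an assumed name.
    app.add_directive('swaggerv2doc', SwaggerV2DocDirective)
    return {'parallel_read_safe': False}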
|
|
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import os
import tempfile
import string
import sys
from nose.tools import assert_equal, assert_false, assert_true, assert_raises, eq_
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access, add_to_group, add_permission, remove_from_group
from useradmin.models import User
from aws.s3 import join, parse_uri
from aws.s3.s3fs import S3FileSystem, S3FileSystemException
from aws.s3.s3test_utils import S3TestBase, generate_id
from aws.s3.upload import DEFAULT_WRITE_SIZE
if sys.version_info[0] > 2:
from unittest.mock import patch, Mock
else:
from mock import patch, Mock
class TestS3FileSystem():
def test_rmtree_bucket(self):
with patch('aws.s3.s3fs.S3FileSystem._delete_bucket') as _delete_bucket:
fs = S3FileSystem(s3_connection=Mock())
fs.rmtree(path='s3a://gethue')
_delete_bucket.assert_called()
def test_rmtree_key(self):
with patch('aws.s3.s3fs.S3FileSystem._get_key') as _get_key:
with patch('aws.s3.s3fs.S3FileSystem.isdir') as isdir:
key = Mock(
name='data',
exists=Mock(return_value=True),
bucket=Mock(
list=Mock(return_value=[]),
delete_key=Mock()
),
delete=Mock(
return_value=Mock(
exists=Mock(return_value=False)
)
)
)
_get_key.return_value = key
isdir.return_value = False
fs = S3FileSystem(s3_connection=Mock())
fs.rmtree(path='s3a://gethue/data')
key.delete.assert_called()
key.bucket.delete_keys.assert_not_called()
def test_rmtree_empty_dir(self):
with patch('aws.s3.s3fs.S3FileSystem._get_key') as _get_key:
with patch('aws.s3.s3fs.S3FileSystem.isdir') as isdir:
key = Mock(
name='data',
exists=Mock(return_value=True),
bucket=Mock(
list=Mock(return_value=[]),
delete_key=Mock()
),
delete=Mock(
return_value=Mock(
exists=Mock(return_value=False)
)
)
)
_get_key.return_value = key
isdir.return_value = True
fs = S3FileSystem(s3_connection=Mock())
fs.rmtree(path='s3a://gethue/data')
key.delete.assert_called()
key.bucket.list.assert_called_with(prefix='s3a://gethue/data/')
key.bucket.delete_keys.assert_not_called()
def test_rmtree_non_empty_dir(self):
with patch('aws.s3.s3fs.S3FileSystem._get_key') as _get_key:
with patch('aws.s3.s3fs.S3FileSystem.isdir') as isdir:
key = Mock(
name='data',
exists=Mock(return_value=True),
bucket=Mock(
list=Mock(return_value=['s3a://gethue/data/1', 's3a://gethue/data/2']),
delete_keys=Mock(
return_value=Mock(
errors=[]
)
)
),
delete=Mock(
return_value=Mock(
exists=Mock(return_value=False)
)
)
)
_get_key.return_value = key
isdir.return_value = True
fs = S3FileSystem(s3_connection=Mock())
fs.rmtree(path='s3a://gethue/data')
key.delete.assert_not_called()
key.bucket.list.assert_called_with(prefix='s3a://gethue/data/')
key.bucket.delete_keys.assert_called()
class S3FSTest(S3TestBase):
@classmethod
def setUpClass(cls):
S3TestBase.setUpClass()
if not cls.shouldSkip():
cls.fs = S3FileSystem(cls.s3_connection)
cls.c = make_logged_in_client(username='test', is_superuser=False)
grant_access('test', 'test', 'filebrowser')
add_to_group('test')
cls.user = User.objects.get(username="test")
def test_open(self):
path = self.get_test_path('test_open.txt')
with self.cleaning(path):
assert_raises(S3FileSystemException, self.fs.open, path)
key = self.get_key(path)
key.set_contents_from_string('Hello')
fh1 = self.fs.open(path)
eq_('He', fh1.read(length=2))
fh2 = self.fs.open(path, mode='r')
eq_('Hello', fh2.read())
eq_('llo', fh1.read())
assert_raises(Exception, self.fs.open, path, mode='w')
assert_raises(Exception, self.fs.open, path, mode='?r')
def test_read(self):
path = self.get_test_path('test_read.txt')
with self.cleaning(path):
key = self.get_key(path)
key.set_contents_from_string('Hello')
eq_('Hel', self.fs.read(path, 0, 3))
eq_('ell', self.fs.read(path, 1, 3))
def test_isfile(self):
pass
def test_isdir(self):
pass
def test_exists(self):
dir_path = self.get_test_path('test_exists')
file_path = join(dir_path, 'file')
assert_false(self.fs.exists(dir_path))
assert_false(self.fs.exists(file_path))
self.fs.create(file_path)
assert_true(self.fs.exists(dir_path))
assert_true(self.fs.exists(file_path))
assert_true(self.fs.exists('s3a://%s' % self.bucket_name))
assert_true(self.fs.exists('s3a://'))
fake_bucket = 'fake%s' % generate_id(8, string.ascii_lowercase + string.digits)
assert_false(self.fs.exists('s3a://%s' % fake_bucket))
def test_stats(self):
assert_raises(ValueError, self.fs.stats, 'ftp://archive')
not_exists = self.get_test_path('does_not_exist')
assert_raises(S3FileSystemException, self.fs.stats, not_exists)
root_stat = self.fs.stats('s3a://')
eq_(True, root_stat.isDir)
eq_('s3a://', root_stat.path)
bucket_stat = self.fs.stats('s3a://%s' % self.bucket_name)
eq_(True, bucket_stat.isDir)
eq_('s3a://%s' % self.bucket_name, bucket_stat.path)
def test_copyfile(self):
src_path = self.get_test_path('test_copy_file_src')
dst_path = self.get_test_path('test_copy_file_dst')
with self.cleaning(src_path, dst_path):
data = "To boldly go where no one has gone before\n" * 2000
self.fs.create(src_path, data=data)
self.fs.create(dst_path, data="some initial data")
self.fs.copyfile(src_path, dst_path)
actual = self.fs.read(dst_path, 0, len(data) + 100)
eq_(data, actual)
def test_full_copy(self):
src_path = self.get_test_path('test_full_copy_src')
dst_path = self.get_test_path('test_full_copy_dst')
src_file_path = join(src_path, 'file.txt')
dst_file_path = join(dst_path, 'file.txt')
with self.cleaning(src_path, dst_path):
self.fs.mkdir(src_path)
self.fs.mkdir(dst_path)
data = "To boldly go where no one has gone before\n" * 2000
self.fs.create(src_file_path, data=data)
# File to directory copy.
self.fs.copy(src_file_path, dst_path)
assert_true(self.fs.exists(dst_file_path))
# Directory to directory copy.
self.fs.copy(src_path, dst_path, True)
base_name = parse_uri(src_path)[2]
dst_folder_path = join(dst_path, base_name)
assert_true(self.fs.exists(dst_folder_path))
assert_true(self.fs.exists(join(dst_folder_path, 'file.txt')))
# Copy directory to file should fail.
assert_raises(S3FileSystemException, self.fs.copy, src_path, dst_file_path, True)
def test_copy_remote_dir(self):
src_dir = self.get_test_path('test_copy_remote_dir_src')
dst_dir = self.get_test_path('test_copy_remote_dir_dst')
with self.cleaning(src_dir, dst_dir):
self.fs.mkdir(src_dir)
self.fs.create(join(src_dir, 'file_one.txt'), data='foo')
self.fs.create(join(src_dir, 'file_two.txt'), data='bar')
self.fs.mkdir(dst_dir)
self.fs.copy_remote_dir(src_dir, dst_dir)
src_stat = self.fs.listdir_stats(src_dir)
dst_stat = self.fs.listdir_stats(dst_dir)
src_names = set([stat.name for stat in src_stat])
dst_names = set([stat.name for stat in dst_stat])
assert_true(src_names)
eq_(src_names, dst_names)
def test_copy_from_local(self):
src_name = 'test_copy_from_local_src'
src_path = os.path.join(tempfile.gettempdir(), src_name)
dst_path = self.get_test_path('test_copy_from_local_dst')
data = "To boldly go where no one has gone before\n" * 2000
f = open(src_path, 'w')
f.write(data)
f.close()
with self.cleaning(dst_path):
self.fs.copyFromLocal(src_path, dst_path)
actual = self.fs.read(dst_path, 0, len(data) + 100)
eq_(data, actual)
def test_rename_dir(self):
src_dir = self.get_test_path('test_rename_dir_src')
dst_dir = self.get_test_path('test_rename_dir_dst')
with self.cleaning(src_dir, dst_dir):
self.fs.mkdir(src_dir)
self.fs.create(join(src_dir, 'file_one.txt'), data='foo')
self.fs.create(join(src_dir, 'file_two.txt'), data='bar')
src_ls = self.fs.listdir(src_dir)
eq_(2, len(src_ls))
assert_true('file_one.txt' in src_ls)
assert_true('file_two.txt' in src_ls)
# Assert that no directories with dst_dir name exist yet
assert_false(self.fs.exists(dst_dir))
# Rename src to dst
self.fs.rename(src_dir, dst_dir)
assert_true(self.fs.exists(dst_dir))
assert_false(self.fs.exists(src_dir))
dst_ls = self.fs.listdir(dst_dir)
eq_(2, len(dst_ls))
assert_true('file_one.txt' in dst_ls)
assert_true('file_two.txt' in dst_ls)
# Assert that the children files are not duplicated at top-level destination
bucket_ls = self.bucket.list()
assert_false('file_one.txt' in bucket_ls)
assert_false('file_two.txt' in bucket_ls)
# Assert that only the renamed directory, and not an empty file, exists
assert_equal(1, len([key for key in bucket_ls if key.name.strip('/') == self.get_key(dst_dir).name.strip('/')]))
def test_rename_star(self):
src_dir = self.get_test_path('test_rename_star_src')
dst_dir = self.get_test_path('test_rename_star_dst')
with self.cleaning(src_dir, dst_dir):
self.fs.mkdir(src_dir)
self.fs.create(join(src_dir, 'file_one.txt'), data='foo')
self.fs.create(join(src_dir, 'file_two.txt'), data='bar')
src_ls = self.fs.listdir(src_dir)
eq_(2, len(src_ls))
assert_true('file_one.txt' in src_ls)
assert_true('file_two.txt' in src_ls)
src_stat = self.fs.listdir_stats(src_dir)
self.fs.mkdir(dst_dir)
self.fs.rename_star(src_dir, dst_dir)
dst_stat = self.fs.listdir_stats(dst_dir)
src_names = set([stat.name for stat in src_stat])
dst_names = set([stat.name for stat in dst_stat])
assert_true(src_names)
eq_(src_names, dst_names)
def test_rmtree(self):
assert_raises(NotImplementedError, self.fs.rmtree, 'universe', skipTrash=False)
directory = self.get_test_path('test_rmtree')
with self.cleaning(directory):
self.fs.mkdir(directory)
nested_dir = join(directory, 'nested_dir')
self.fs.mkdir(nested_dir)
file_path = join(nested_dir, 'file')
key = self.get_key(file_path)
key.set_contents_from_string('Some content')
self.fs.rmtree(directory, skipTrash=True)
assert_false(self.fs.exists(file_path))
assert_false(self.fs.exists(nested_dir))
assert_false(self.fs.exists(directory))
def test_listing_buckets(self):
buckets = self.fs.listdir('s3a://')
assert_true(len(buckets) > 0)
def test_mkdir(self):
dir_path = self.get_test_path('test_mkdir')
assert_false(self.fs.exists(dir_path))
self.fs.mkdir(dir_path)
assert_true(self.fs.exists(dir_path))
def test_upload_file(self):
with tempfile.NamedTemporaryFile() as local_file:
# Make sure we can upload larger than the UPLOAD chunk size
file_size = DEFAULT_WRITE_SIZE * 2
local_file.write('0' * file_size)
local_file.flush()
dest_dir = self.get_test_path('test_upload')
local_file = local_file.name
dest_path = '%s/%s' % (dest_dir, os.path.basename(local_file))
add_permission(self.user.username, 'has_s3', permname='s3_access', appname='filebrowser')
try:
                # Upload the temporary file through the filebrowser endpoint
                resp = self.c.post('/filebrowser/upload/file?dest=%s' % dest_dir, dict(dest=dest_dir, hdfs_file=open(local_file)))
response = json.loads(resp.content)
finally:
remove_from_group(self.user.username, 'has_s3')
assert_equal(0, response['status'], response)
stats = self.fs.stats(dest_path)
f = self.fs.open(dest_path)
actual = f.read(file_size)
            expected = open(local_file).read()
assert_equal(actual, expected, 'files do not match: %s != %s' % (len(actual), len(expected)))
def test_check_access(self):
dir_path = self.get_test_path('test_check_access')
self.fs.mkdir(dir_path)
assert_true(self.fs.check_access(dir_path, permission='WRITE'))
|
|
#!/usr/bin/env python
# This file implements all actions performed by the agent to start the execution script on the exec host and to
# collect sar data from all exec and stat hosts. Each procedure is mapped to a particular daytona command used by the
# scheduler to communicate with the agent. Upon receiving a command from the daytona scheduler, the agent executes the
# procedure mapped to that particular daytona command.
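# For orientation only (the actual dispatch table lives in the agent's server code, which is not
# part of this file): the daytona commands referenced in the docstrings below are expected to map
# to these procedures roughly as follows:
#   DAYTONA_SETUP_TEST    -> setupTest
#   DAYTONA_START_TEST    -> startTest
#   DAYTONA_STOP_TEST     -> stopTest
#   DAYTONA_ABORT_TEST    -> abortTest
#   DAYTONA_FINISH_TEST   -> setFinish
#   DAYTONA_GET_STATUS    -> getStatus
#   DAYTONA_FILE_DOWNLOAD -> fileDownload
#   DAYTONA_HANDSHAKE     -> scheduler_handshake (agent's reply leg of the 2-way handshake)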
import subprocess
import threading
import common
import os
import time
import shutil
from shutil import copyfile
import sys
import testobj
import client
import config
import signal
import envelope
import system_metrics_gather
from logger import LOG
lctx = None
cfg = config.CFG("DaytonaHost", lctx)
cfg.readCFG("config.ini")
EXEC_SCRIPT_DIR = cfg.execscript_location
# The agent on a particular host maintains a map of the tests it is currently executing and keeps their test data up
# to date. It is a key-value map in which each test object is keyed by its test ID.
running_tests = {}
action_lock = threading.Lock()
exec_script_pid = {}
exec_script_lock = threading.Lock()
class activeTest:
"""
    This class defines a test object which captures all the information about a test. The agent saves these test
    objects in a queue to maintain information about all running tests.
"""
def __init__(self, testid, actionID, exec_thread, testobj):
self.testid = testid
self.actionID = actionID
self.exec_thread = exec_thread
self.tobj = testobj
self.stream = None
self.status = ""
self.serverip = ""
self.stathostip = ""
self.serverport = 0
self.stathostport = 0
self.execdir = ""
self.logdir = ""
self.resultsdir = ""
self.statsdir = ""
self.archivedir = ""
self.execscriptfile = ""
self.hostname = ""
def clear(self):
lctx.info("Clearing object contents")
self.cleanup()
def cleanup(self):
lctx.info("Clearing FS, processes")
class commandThread(threading.Thread):
"""
    This class creates a child thread for starting the execution script or executing any other Linux-based command and
    capturing its output
"""
def __init__(self, cmdstr, dcmdstr, streamfile, cdir, testid):
self.cmd = cmdstr
self.dcmd = dcmdstr
self.sfile = streamfile
self.cwd = cdir
self.paused = False
        self._stop = threading.Event()
        self.state = threading.Condition()  # guards self.paused; used by resume()/pause()/check()
self.stdout = None
self.stderr = None
self.testid = testid
threading.Thread.__init__(self)
def resume(self):
with self.state:
self.paused = False
self.state.notify() # unblock self if waiting
def pause(self):
with self.state:
self.paused = True # make self block and wait
def check(self):
with self.state:
if self.paused:
self.state.wait() # block until notified
if self._stop.isSet():
return False
def stop(self):
self._stop.set()
def __del__(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
def run(self):
lctx.debug(self.cmd)
ca = self.cmd.split(" ")
lctx.debug(ca)
        # os.setsid is used to create a new PID group for this exec script so that any subsequent
        # child process or nested script invocation stays in the same PID group. If the timer expires or if
        # something goes wrong, we just kill this PID group to kill everything.
p = subprocess.Popen(ca, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=self.cwd,
preexec_fn=os.setsid)
# Saving PID information for keeping track of PID group
exec_script_lock.acquire()
exec_script_pid[self.testid] = p
exec_script_lock.release()
while True:
out = p.stdout.read(1)
if out == '' and p.poll() is not None:
break
if out != '':
sys.stdout.write(out)
sys.stdout.flush()
if self.sfile is not None:
self.sfile.flush()
self.sfile.flush()
def get_test(testid):
"""
    This procedure gets the test object from the agent's running queue. It acquires a lock on the queue because
    multiple threads might be executing actions for a particular test.
:param testid: It takes test ID as argument to fetch the test object
:return: test object if found in the queue
"""
found = False
current_test = None
action_lock.acquire()
if testid in running_tests:
current_test = running_tests[testid]
found = True
action_lock.release()
if found:
return current_test
else:
return
def save_test(testid, test):
"""
    This procedure is called to update test information in the agent queue.
    :param testid: Test ID is the key in the running test queue
    :param test: Updated test object which needs to be saved in the running queue
    :return: true if the update is successful
"""
found = False
action_lock.acquire()
if testid in running_tests:
running_tests[testid] = test
found = True
action_lock.release()
return found
def delete_test(testid):
"""
    This procedure deletes the test information from the running queue. This happens when test execution ends or
    something goes wrong with the test
    :param testid: Test ID to identify the test in the running queue
:return: NA
"""
action_lock.acquire()
if testid in running_tests:
del running_tests[testid]
action_lock.release()
def exec_cmd(cmd, daytona_cmd, sync, obj, actionid, current_test):
"""
    This procedure does the setup for starting the execution script. It creates a child thread object which runs the
    execution script
"""
lctx.debug("Execute cmd : " + cmd)
sfile = None
cl = None
########
if daytona_cmd == "DAYTONA_START_TEST":
cl = client.TCPClient(LOG.getLogger("clientlog", "Agent"))
(current_test.stream, sfile) = cl.stream_start(current_test.serverip, current_test.serverport,
str(current_test.tobj.testobj.TestInputData.exec_log_path))
########
if sfile is not None:
sfile.flush()
cthread = commandThread(cmd, daytona_cmd, sfile, current_test.execdir, current_test.testid)
current_test.exec_thread = cthread
cthread.start()
(t, aid, tst, ts) = (None, None, None, None)
if sync == "T":
lctx.debug("Execute cmd in Sync ctx")
cthread.join()
if sfile is not None:
sfile.flush()
else:
# async action entry in the server table (need this to check self alive below)
for tp in obj.async_actions:
if tp[1] == actionid:
(t, aid, tst, ts) = tp
lctx.debug("Execute cmd in asSync ctx : " + str(actionid))
timer_expire = False
while True:
lctx.debug("waiting for async action to complete : " + str(actionid))
if cthread.stdout is not None:
lctx.debug("printting output of stream ")
if sfile is not None:
sfile.flush()
if tst.testobj.TestInputData.timeout > 0:
if time.time() - ts > tst.testobj.TestInputData.timeout:
lctx.error("Timer expired for this test, need to end this async action")
timer_expire = True
# Exit from this while loop if timer expires or execution script ends
if t.check() == False or cthread.is_alive() == False or timer_expire:
if daytona_cmd == "DAYTONA_START_TEST":
if cthread.is_alive():
                        p = None
                        exec_script_lock.acquire()
if current_test.testid in exec_script_pid:
p = exec_script_pid[current_test.testid]
del exec_script_pid[current_test.testid]
exec_script_lock.release()
if p:
os.killpg(p.pid, signal.SIGTERM)
lctx.debug("end stream")
cl.stream_end(current_test.serverip, current_test.serverport,
str(current_test.tobj.testobj.TestInputData.exec_log_path), current_test.stream,
sfile)
# callback
# removeactionid
lctx.debug("Callback here removing item")
obj.removeActionItem(actionid)
break
time.sleep(3)
if daytona_cmd == "DAYTONA_START_TEST":
if timer_expire:
current_test.status = "TIMEOUT"
else:
lctx.debug("Setting current test status to TESTEND")
current_test.status = "TESTEND"
lctx.debug(daytona_cmd + " END [" + str(actionid) + "]")
if save_test(current_test.testid, current_test):
return "SUCCESS"
else:
return "ERROR"
def scheduler_handshake(current_test):
"""
    This procedure is part of the 2-way handshake between the scheduler and the agent. When the agent receives a
    handshake message from the scheduler, it sends a handshake message back to check whether it can communicate with
    the scheduler on the scheduler port. This matters because log files are later transferred to the scheduler over
    this port.
    :param current_test: Test object
    :return: true if the scheduler responds, otherwise false
"""
cl = client.TCPClient(LOG.getLogger("clientlog", "Agent"))
env = envelope.DaytonaEnvelope()
ret = cl.send(current_test.serverip, current_test.serverport, env.construct("DAYTONA_HANDSHAKE", "handshake2"))
if ret == "SUCCESS":
return True
else:
return False
def setupTest(self, *args):
"""
    Test setup is called when the scheduler sends the "DAYTONA_SETUP_TEST" message to the agent. In this procedure the
    agent creates all necessary file system path strings and updates them in the test object. After creating the path
    strings it runs commands to create these directories so that the agent can later save SAR data. On the exec host,
    it copies the execution script from the Execscript folder to a test-specific directory in order to keep execution
    scripts separate in case of multiple test executions
:param self:
:param args: tuple of arguments containing obj, command, parameter sent by scheduler to agent for this command,
actionID and sync flag to denote if we need to execute this procedure in sync or async mode
:return: SUCCESS in case everything goes well otherwise it throws ERROR
"""
(obj, command, params, actionID, sync) = (args[0], args[1], args[2], args[3], args[4])
test_serialized = params.split(",")[0]
host_type = params.split(",")[1]
t2 = testobj.testDefn()
t2.deserialize(test_serialized)
current_test = get_test(t2.testobj.TestInputData.testid)
test_logger = None
try:
if current_test:
test_logger = LOG.gettestlogger(current_test, "STAT")
lctx.debug("TEST SETUP | " + str(current_test.testid) + " | START")
test_logger.info("Test setup started")
current_test.tobj = testobj.testDefn()
current_test.tobj = t2
current_test.testid = current_test.tobj.testobj.TestInputData.testid
cfg = config.CFG("DaytonaHost", lctx)
cfg.readCFG("config.ini")
dir = cfg.daytona_agent_root + "/" + current_test.tobj.testobj.TestInputData.frameworkname + "/" + str(
current_test.tobj.testobj.TestInputData.testid)
shutil.rmtree(dir, ignore_errors=True)
prefix = cfg.daytona_agent_root + "/" + current_test.tobj.testobj.TestInputData.frameworkname + "/" + str(
current_test.tobj.testobj.TestInputData.testid) + "/results/"
if host_type == "EXEC":
current_test.execdir = prefix + current_test.tobj.testobj.TestInputData.exechostname
current_test.logdir = prefix + current_test.tobj.testobj.TestInputData.exechostname + "/application"
current_test.statsdir = prefix + current_test.stathostip + "/sar/"
current_test.resultsdir = cfg.daytona_agent_root + "/" + \
current_test.tobj.testobj.TestInputData.frameworkname + "/" + \
str(current_test.tobj.testobj.TestInputData.testid) + "/results"
current_test.archivedir = cfg.daytona_agent_root + "/" + \
current_test.tobj.testobj.TestInputData.frameworkname + "/" + \
str(current_test.tobj.testobj.TestInputData.testid) + "/"
if host_type == "EXEC":
common.createdir(current_test.execdir, self.lctx)
common.createdir(current_test.logdir, self.lctx)
common.createdir(current_test.resultsdir, self.lctx)
common.createdir(current_test.statsdir, self.lctx)
test_logger.info("Test directory created")
if host_type == "EXEC":
execscript = current_test.tobj.testobj.TestInputData.execution_script_location
lctx.debug("TEST SETUP : " + str(execscript))
current_test.execscriptfile = current_test.execdir + "/" + execscript
lctx.debug(current_test.execscriptfile)
                # check if the execution script is present in EXEC_SCRIPT_DIR - execute the script only if it is
                # present at this location
execscript_location = EXEC_SCRIPT_DIR + execscript
execscript_location = os.path.realpath(execscript_location)
valid_path = os.path.commonprefix([execscript_location, EXEC_SCRIPT_DIR]) == EXEC_SCRIPT_DIR
if valid_path:
if os.path.isfile(execscript_location):
ret = shutil.copytree(os.path.dirname(execscript_location),
os.path.dirname(current_test.execscriptfile))
else:
raise Exception(
"Execution script not found at Daytona Execution Script Location : " + EXEC_SCRIPT_DIR)
else:
raise Exception(
"Access Denied : Use Daytona Execution Script Location '" + EXEC_SCRIPT_DIR + "' for executing "
"exec scripts")
                os.chmod(current_test.execscriptfile, 0o744)
test_logger.info("Execution script copied successfully")
save_test(current_test.testid, current_test)
test_logger.info("Test setup complete")
lctx.debug("TEST SETUP | " + str(current_test.testid) + " | COMPLETE")
return "SUCCESS"
else:
raise Exception("Invalid Test ID")
except Exception as e:
lctx.error(e)
if test_logger:
test_logger.error(e)
return "ERROR"
# create dirs
# get exec script name
# cp the exec script
# set exec perm
# update cur test obj with exec script
# exec any custom setup script
def startTest(self, *args):
"""
    This procedure is invoked for STRACE and PERF profiler setup on the exec/stat host. On the exec host, after setting
    up the profilers it starts execution of the exec script
"""
(obj, command, params, actionID, sync) = (args[0], args[1], args[2], args[3], args[4])
testid = int(params.split(",")[0])
host_type = params.split(",")[1]
current_test = get_test(testid)
strace_config = None
perf_config = dict()
test_logger = None
try:
if current_test:
test_logger = LOG.gettestlogger(current_test, "STAT")
lctx.debug("TESTSTART | " + str(current_test.testid) + " | START")
test_logger.info("Starting test")
current_test.status = "RUNNING"
current_test.actionID = actionID
save_test(current_test.testid, current_test)
if current_test.tobj.testobj.TestInputData.strace:
strace_config = dict()
strace_config["delay"] = str(current_test.tobj.testobj.TestInputData.strace_delay)
strace_config["duration"] = str(current_test.tobj.testobj.TestInputData.strace_duration)
strace_config["process"] = current_test.tobj.testobj.TestInputData.strace_process
perf_config["delay"] = str(current_test.tobj.testobj.TestInputData.perf_delay)
perf_config["duration"] = str(current_test.tobj.testobj.TestInputData.perf_duration)
if current_test.tobj.testobj.TestInputData.perf_process:
perf_config["process"] = current_test.tobj.testobj.TestInputData.perf_process
test_logger.info("Configuring perf profiler - " + str(perf_config))
if strace_config is not None:
test_logger.info("Configuring strace profiler - " + str(strace_config))
# Setting up STRACE and PERF configuration
system_metrics_gather.perf_strace_gather(current_test.testid, perf_config, strace_config)
test_logger.info("Profiler started")
if host_type == "EXEC":
# Copied execscript
execscript = current_test.execscriptfile
args = ""
for a in current_test.tobj.testobj.TestInputData.execScriptArgs:
args = args + " \"" + a[3] + "\""
execline = execscript + args
lctx.debug("Execution line:" + execline)
test_logger.info("Execution script started")
# execute the exec script here
exec_cmd(execline, command, sync, obj, actionID, current_test)
lctx.debug("TESTSTART | " + str(current_test.testid) + " | COMPLETE")
return "SUCCESS"
else:
raise Exception("Invalid Test ID")
except Exception as e:
lctx.error(e)
if test_logger:
test_logger.error(e)
return "ERROR"
def stopTest(self, *args):
"""
    This procedure is invoked by the agent when it receives the DAYTONA_STOP_TEST message from the scheduler. In the
    current test life cycle the stat host agent receives this message from the scheduler. In this procedure, the agent
    changes the state of a test from RUNNING to TESTEND and updates the running queue
"""
(obj, command, params, actionID, sync) = (args[0], args[1], args[2], args[3], args[4])
testid = int(params)
current_test = get_test(testid)
test_logger = None
try:
if current_test:
test_logger = LOG.gettestlogger(current_test, "STAT")
current_test.status = "TESTEND"
save_test(current_test.testid, current_test)
test_logger.info("Test stop")
return "SUCCESS"
else:
raise Exception("Test not running : " + str(current_test.testid))
except Exception as e:
lctx.error(e)
if test_logger:
test_logger.error(e)
return "ERROR"
def cleanup(self, *args):
"""
    This procedure is called on test completion, timer expiry, or if something goes wrong with the test. It performs
    the tasks below:
    * Download agent-side test execution life cycle logs
    * Remove the logger object for this particular test
    * Delete the test logs from the file system
    * Delete the test from the agent's running queue
"""
(obj, command, params, actionID, sync) = (args[0], args[1], args[2], args[3], args[4])
testid = int(params)
current_test = get_test(testid)
test_logger = None
try:
if current_test:
test_logger = LOG.gettestlogger(current_test, "STAT")
lctx.debug("CLEANUP | " + str(current_test.testid) + " | START")
test_logger.info("Test cleanup")
downloadTestLogs(testid)
LOG.removeLogger(current_test.tobj)
shutil.rmtree(current_test.resultsdir, ignore_errors=True)
delete_test(testid)
lctx.debug("CLEANUP | " + str(current_test.testid) + " | COMPLETE")
return "SUCCESS"
else:
raise Exception("Invalid Test ID")
except Exception as e:
lctx.error(e)
if test_logger:
test_logger.error(e)
return "ERROR"
def abortTest(self, *args):
"""
    This procedure is invoked by the agent whenever the scheduler sends the DAYTONA_ABORT_TEST message. This happens
    when something goes wrong on the exec host or the user cancels test execution and the scheduler wants to terminate
    this test on all the hosts on which it started it. It stops the execution script thread to halt execution and
    calls the cleanup procedure for downloading logs and other cleanups
"""
(obj, command, params, actionID, sync) = (args[0], args[1], args[2], args[3], args[4])
testid = int(params)
current_test = get_test(testid)
t2 = current_test.tobj
(t, aid, tst, ts) = (None, None, None, None)
lctx.debug(args)
abort_action = False
for tp in obj.async_actions:
(t, aid, tst, ts) = tp
if tst.testobj.TestInputData.testid == t2.testobj.TestInputData.testid:
lctx.debug("Found ASYNC action pending for this test, Aborting it")
abort_action = True
break
if abort_action:
t.stop()
t.join()
lctx.debug("Stopped ASYNC action pending for this test : " + str(tst.testobj.TestInputData.testid))
else:
lctx.debug("No ASYNC action pending for this test : " + str(t2.testobj.TestInputData.testid))
cleanup(self, self, command, params, actionID, sync)
lctx.debug(command + "[" + str(actionID) + "]")
return "SUCCESS"
def heartbeat(self, *args):
"""
It send "ALIVE" message in response to any hearbeat message received.
"""
return "ALIVE"
def setFinish(self, *args):
"""
    The agent invokes this procedure when the scheduler sends the DAYTONA_FINISH_TEST message to gracefully end the
    test on all hosts. It just calls the cleanup procedure for test cleanup and test life cycle log download
"""
(obj, command, params, actionID, sync) = (args[0], args[1], args[2], args[3], args[4])
testid = int(params)
cleanup(self, self, command, params, actionID, sync)
return "SUCCESS"
def getStatus(self, *args):
"""
    The agent executes this procedure whenever the scheduler wants to check the state of a test by sending the
    DAYTONA_GET_STATUS message to the agent. In this procedure we fetch the test from the running queue and return the
    saved test state information
"""
(obj, command, params, actionID, sync) = (args[0], args[1], args[2], args[3], args[4])
testid = int(params)
current_test = get_test(testid)
if current_test:
test_logger = LOG.gettestlogger(current_test, "STAT")
lctx.debug(str(current_test.testid) + ":" + current_test.status)
test_logger.info("Test Status : " + current_test.status)
return current_test.status
else:
return "TESTNA"
def fileDownload(self, *args):
"""
    On test completion, the agent executes this procedure when it receives the DAYTONA_FILE_DOWNLOAD message from the
    scheduler. We create a TAR file called results.tgz and save it at the test location, then send this file to the
    scheduler to be saved in the scheduler-side file system
"""
cl = client.TCPClient(LOG.getLogger("clientlog", "Agent"))
testid = int(args[2])
current_test = get_test(testid)
test_logger = None
try:
if current_test:
test_logger = LOG.gettestlogger(current_test, "STAT")
lctx.debug("FILE DOWNLOAD | " + str(current_test.testid) + " | START")
lctx.debug("Preparing TAR file of system metric folder")
test_logger.info("Preparing TAR file of system metric folder")
common.make_tarfile(current_test.archivedir + "results.tgz", current_test.resultsdir + "/")
dest = current_test.tobj.testobj.TestInputData.stats_results_path[current_test.stathostip]
download_file = current_test.archivedir + "results.tgz"
test_logger.info("Sending TAR file to daytona host")
cl.sendFile(current_test.serverip, current_test.serverport, download_file, dest.strip())
lctx.debug("FILE DOWNLOAD | " + str(current_test.testid) + " | COMPLETE")
return "SUCCESS"
else:
raise Exception("Invalid Test ID")
except Exception as e:
lctx.error(e)
if test_logger:
test_logger.error(e)
return "ERROR"
def downloadTestLogs(testid):
"""
    This procedure sends the test life cycle log file to the scheduler upon test cleanup. This file gives the user
    information about the test execution sequence on the agent
"""
cl = client.TCPClient(LOG.getLogger("clientlog", "Agent"))
current_test = get_test(testid)
test_logger = None
try:
if current_test:
test_logger = LOG.gettestlogger(current_test, "STAT")
test_logger.info("Sending test log to daytona host")
dest = current_test.tobj.testobj.TestInputData.stats_results_path[current_test.stathostip]
download_file = current_test.agent_log_file
cl.sendFile(current_test.serverip, current_test.serverport, download_file, dest.strip())
test_logger.info("Test log file transfer complete")
return "SUCCESS"
else:
raise Exception("Invalid Test ID")
except Exception as e:
lctx.error(e)
if test_logger:
test_logger.error(e)
return "ERROR"
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""TVM operator input resize compute."""
from __future__ import absolute_import
import tvm
from tvm import te
from tvm.topi.util import nchw_pack_layout, nchw_xc_layout
from .. import tag
def get_2d_indices(indices, layout="NCHW"):
""" Get 2d indices """
(cc, inum, ic) = (0, 0, 0)
if layout == "NHWC":
n, y, x, c = indices
cc = None
elif layout == "NCHW":
n, c, y, x = indices
cc = None
elif nchw_pack_layout(layout):
n, c, y, x, inum, ic = indices
else:
# else must be NCHWxc
assert nchw_xc_layout(layout)
n, c, y, x, cc = indices
return n, c, y, x, cc, inum, ic
def get_2d_pixel(data, layout, boxes, image_height, image_width, n, c, y, x, cc, ib, ic):
""" Get 2d pixel """
if boxes is None:
y = tvm.te.max(tvm.te.min(y, image_height - 1), 0)
x = tvm.te.max(tvm.te.min(x, image_width - 1), 0)
if layout == "NHWC":
return data(n, y, x, c).astype("float")
if layout == "NCHW":
return data(n, c, y, x).astype("float")
if nchw_pack_layout(layout):
return data(n, c, y, x, ib, ic).astype("float")
# else must be NCHWxc
assert nchw_xc_layout(layout)
return data(n, c, y, x, cc).astype("float")
def resize_nearest_neighbor(
indices,
data,
image_height,
image_width,
target_height,
target_width,
boxes=None,
box_indices=None,
extrapolation_value=None,
layout="NCHW",
coordinate_transformation_mode="align_corners",
out_dtype=None,
):
"""Perform resize operation with nearest neighbor method on the data.
For details about Nearest-neighbor interpolation please refer to
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
Parameters
----------
indices : tuple
The indices of input data
data : tvm.te.Tensor
inputs is a 4-D tensor with shape
[batch, channel, in_height, in_width]
or [batch, in_height, in_width, channel]
image_height : integer
Input image height
image_width : integer
Input image width
target_height : integer
The target resized image height
target_width : integer
The target resized image width
boxes : tvm.te.Tensor, optional
A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
the coordinates of a box.
box_indices : tvm.te.Tensor, optional
A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
the i-th box refers to.
extrapolation_value: float, optional
Value used for extrapolation, when applicable.
layout: string, optional
"NCHW", "NHWC", or "NCHWc".
coordinate_transformation_mode: string, optional
Describes how to transform the coordinate in the resized tensor
to the coordinate in the original tensor.
Refer to the ONNX Resize operator specification for details.
Available options are "half_pixel", "align_corners" and "asymmetric".
out_dtype: string, optional
Type to return. If left None will be same as input type.
Returns
-------
output : out_dtype
The computed result with type out_dtype
"""
def _cast_output(value, data_dtype="float32", out_dtype=None):
if out_dtype:
dtype = out_dtype
else:
dtype = data_dtype
return value.astype(dtype)
n, c, y, x, cc, inum, ic = get_2d_indices(indices, layout)
box_idx = box_indices(n) if box_indices is not None else n
if boxes is not None:
y1, x1 = boxes(n, 0), boxes(n, 1)
y2, x2 = boxes(n, 2), boxes(n, 3)
in_h = (image_height - 1) * (y2 - y1)
in_w = (image_width - 1) * (x2 - x1)
h_scale = in_h.astype("float") / (target_height - 1)
w_scale = in_w.astype("float") / (target_width - 1)
in_y = y1 * (image_height - 1) + h_scale * y
in_x = x1 * (image_width - 1) + w_scale * x
else:
if coordinate_transformation_mode == "align_corners":
h_scale = (image_height - 1).astype("float") / (target_height - 1)
w_scale = (image_width - 1).astype("float") / (target_width - 1)
elif coordinate_transformation_mode in ["asymmetric", "half_pixel"]:
h_scale = image_height.astype("float") / target_height
w_scale = image_width.astype("float") / target_width
else:
raise ValueError(
"Unsupported coordinate_transformation_mode: {}".format(
coordinate_transformation_mode
)
)
in_y = h_scale * y
in_x = w_scale * x
if coordinate_transformation_mode == "align_corners" or boxes is not None:
closest_x_index = te.round(in_x).astype("int32")
closest_y_index = te.round(in_y).astype("int32")
else:
# Add epsilon to floor to prevent gpu rounding errors.
epsilon = 1e-5
closest_y_index = te.floor(in_y + epsilon).astype("int32")
closest_x_index = te.floor(in_x + epsilon).astype("int32")
value = get_2d_pixel(
data,
layout,
boxes,
image_height,
image_width,
box_idx,
c,
closest_y_index,
closest_x_index,
cc,
inum,
ic,
)
if extrapolation_value is not None:
out = tvm.tir.if_then_else(
in_y < 0,
extrapolation_value,
tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value, value),
)
# use extrapolation_value if in_x is out of boundary
value = tvm.tir.if_then_else(
in_x < 0,
extrapolation_value,
tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value, out),
)
return _cast_output(value, data.dtype, out_dtype=out_dtype)
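# Worked example for the scale factors above (illustrative numbers): resizing a row of width 4 to
# width 8 gives w_scale = (4 - 1) / (8 - 1) ~= 0.429 under "align_corners" (the first and last
# input pixels map exactly to the first and last output pixels), whereas "asymmetric" and
# "half_pixel" give w_scale = 4 / 8 = 0.5.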
def resize_bilinear(
indices,
data,
image_height,
image_width,
target_height,
target_width,
boxes=None,
box_indices=None,
extrapolation_value=None,
layout="NCHW",
coordinate_transformation_mode="align_corners",
out_dtype=None,
):
"""Perform resize operation with bilinear method on the data.
For details about Bilinear interpolation please refer to
https://en.wikipedia.org/wiki/Bilinear_interpolation.
Parameters
----------
indices : tuple
The indices of input data
data : tvm.te.Tensor
inputs is a 4-D tensor with shape
[batch, channel, in_height, in_width]
or [batch, in_height, in_width, channel]
image_height : integer
Input image height
image_width : integer
Input image width
target_height : integer
The target resized image height
target_width : integer
The target resized image width
boxes : tvm.te.Tensor, optional
A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
the coordinates of a box.
box_indices : tvm.te.Tensor, optional
A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
the i-th box refers to.
extrapolation_value: float, optional
Value used for extrapolation, when applicable.
layout: string, optional
"NCHW", "NHWC", or "NCHWc".
coordinate_transformation_mode: string, optional
Describes how to transform the coordinate in the resized tensor
to the coordinate in the original tensor.
Refer to the ONNX Resize operator specification for details.
Available options are "half_pixel", "align_corners" and "asymmetric".
out_dtype: string, optional
Type to return. If left None will be same as input type.
Returns
-------
output : out_dtype
The computed result with type out_dtype
"""
def _cast_output(value, data_dtype="float32", out_dtype=None):
if out_dtype:
dtype = out_dtype
else:
dtype = data_dtype
return value.astype(dtype)
def _lerp(A, B, t):
return A * (1.0 - t) + B * t
n, c, y, x, cc, inum, ic = get_2d_indices(indices, layout=layout)
box_idx = box_indices(n) if box_indices is not None else n
if boxes is not None:
y1, x1 = boxes(n, 0), boxes(n, 1)
y2, x2 = boxes(n, 2), boxes(n, 3)
in_h = (image_height - 1) * (y2 - y1)
in_w = (image_width - 1) * (x2 - x1)
h_scale = in_h.astype("float") / (target_height - 1)
w_scale = in_w.astype("float") / (target_width - 1)
in_y = y1 * (image_height - 1) + h_scale * y
in_x = x1 * (image_width - 1) + w_scale * x
else:
if coordinate_transformation_mode == "align_corners":
h_scale = (image_height - 1).astype("float") / (target_height - 1)
w_scale = (image_width - 1).astype("float") / (target_width - 1)
elif coordinate_transformation_mode in ["asymmetric", "half_pixel"]:
h_scale = image_height.astype("float") / target_height
w_scale = image_width.astype("float") / target_width
else:
raise ValueError(
"Unsupported coordinate_transformation_mode: {}".format(
coordinate_transformation_mode
)
)
if coordinate_transformation_mode == "half_pixel":
in_y = h_scale * (y + 0.5) - 0.5
in_x = w_scale * (x + 0.5) - 0.5
else:
in_y = h_scale * y
in_x = w_scale * x
top_y_index = te.floor(in_y).astype("int32")
bottom_y_index = te.ceil(in_y).astype("int32")
y_lerp = in_y - top_y_index
left_x_index = te.floor(in_x).astype("int32")
right_x_index = te.ceil(in_x).astype("int32")
x_lerp = in_x - left_x_index
top_left = get_2d_pixel(
data,
layout,
boxes,
image_height,
image_width,
box_idx,
c,
top_y_index,
left_x_index,
cc,
inum,
ic,
)
top_right = get_2d_pixel(
data,
layout,
boxes,
image_height,
image_width,
box_idx,
c,
top_y_index,
right_x_index,
cc,
inum,
ic,
)
bottom_left = get_2d_pixel(
data,
layout,
boxes,
image_height,
image_width,
box_idx,
c,
bottom_y_index,
left_x_index,
cc,
inum,
ic,
)
bottom_right = get_2d_pixel(
data,
layout,
boxes,
image_height,
image_width,
box_idx,
c,
bottom_y_index,
right_x_index,
cc,
inum,
ic,
)
top = _lerp(top_left, top_right, x_lerp)
bottom = _lerp(bottom_left, bottom_right, x_lerp)
value = _lerp(top, bottom, y_lerp)
# use extrapolation_value if in_y/in_x is out of boundary
if extrapolation_value is not None:
out = tvm.tir.if_then_else(
in_y < 0,
extrapolation_value,
tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value, value),
)
value = tvm.tir.if_then_else(
in_x < 0,
extrapolation_value,
tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value, out),
)
return _cast_output(value, data.dtype, out_dtype=out_dtype)
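# Minimal usage sketch (not part of the original module): these helpers compute one output element
# at a time, so they are meant to be wrapped in a te.compute over the output shape. The wrapper
# name and the fixed NCHW layout below are assumptions.
def _example_bilinear_resize_nchw(data, target_height, target_width,
                                  coordinate_transformation_mode="half_pixel"):
    """Sketch: bilinear-resize a 4-D NCHW tensor to (target_height, target_width)."""
    batch, channel, in_height, in_width = data.shape
    return te.compute(
        (batch, channel, target_height, target_width),
        lambda n, c, y, x: resize_bilinear(
            (n, c, y, x),
            data,
            in_height,
            in_width,
            target_height,
            target_width,
            layout="NCHW",
            coordinate_transformation_mode=coordinate_transformation_mode,
            out_dtype=data.dtype,
        ),
        name="example_resize_bilinear",
        tag=tag.INJECTIVE,
    )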
def resize_bicubic(
indices,
data,
image_height,
image_width,
target_height,
target_width,
boxes=None,
box_indices=None,
extrapolation_value=None,
layout="NCHW",
coordinate_transformation_mode="align_corners",
out_dtype=None,
):
"""Perform resize operation with bicubic method on the data.
    For more details about Bicubic interpolation please refer to
https://en.wikipedia.org/wiki/Bicubic_interpolation.
Parameters
----------
indices : tuple
The indices of input data
data : tvm.te.Tensor
inputs is a 4-D tensor with shape
[batch, channel, in_height, in_width]
or [batch, in_height, in_width, channel]
image_height : integer
Input image height
image_width : integer
Input image width
target_height : integer
The target resized image height
target_width : integer
The target resized image width
boxes : tvm.te.Tensor, optional
A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
the coordinates of a box.
box_indices : tvm.te.Tensor, optional
A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
the i-th box refers to.
extrapolation_value: float, optional
Value used for extrapolation, when applicable.
layout: string, optional
"NCHW", "NHWC", or "NCHWc".
coordinate_transformation_mode: string, optional
Describes how to transform the coordinate in the resized tensor
to the coordinate in the original tensor.
Refer to the ONNX Resize operator specification for details.
Available options are "half_pixel", "align_corners" and "asymmetric".
out_dtype: string, optional
Type to return. If left None will be same as input type.
Returns
-------
output : out_dtype
The computed result with type out_dtype
"""
def _cubic_kernel(A, B, C, D, t):
a = -A / 2.0 + (3.0 * B) / 2.0 - (3.0 * C) / 2.0 + D / 2.0
b = A - (5.0 * B) / 2.0 + 2.0 * C - D / 2.0
c = -A / 2.0 + C / 2.0
d = B
return a * t * t * t + b * t * t + c * t + d
def _cast_output(value, data_dtype="float32", out_dtype=None):
if out_dtype:
dtype = out_dtype
else:
dtype = data_dtype
return value.astype(dtype)
n, c, y, x, cc, inum, ic = get_2d_indices(indices, layout)
box_idx = box_indices(n) if box_indices is not None else n
if boxes is not None:
y1, x1 = boxes(n, 0), boxes(n, 1)
y2, x2 = boxes(n, 2), boxes(n, 3)
in_h = (image_height - 1) * (y2 - y1)
in_w = (image_width - 1) * (x2 - x1)
h_scale = in_h.astype("float") / (target_height - 1)
w_scale = in_w.astype("float") / (target_width - 1)
in_y = y1 * (image_height - 1) + h_scale * y
in_x = x1 * (image_width - 1) + w_scale * x
else:
if coordinate_transformation_mode == "align_corners":
h_scale = (image_height - 1).astype("float") / (target_height - 1)
w_scale = (image_width - 1).astype("float") / (target_width - 1)
elif coordinate_transformation_mode in ["asymmetric", "half_pixel"]:
h_scale = image_height.astype("float") / target_height
w_scale = image_width.astype("float") / target_width
else:
raise ValueError(
"Unsupported coordinate_transformation_mode: {}".format(
coordinate_transformation_mode
)
)
if coordinate_transformation_mode == "half_pixel":
in_y = h_scale * (y + 0.5) - 0.5
in_x = w_scale * (x + 0.5) - 0.5
else:
in_y = h_scale * y
in_x = w_scale * x
xint = te.floor(in_x).astype("int32")
xfract = in_x - te.floor(in_x)
yint = te.floor(in_y).astype("int32")
yfract = in_y - te.floor(in_y)
# 1st row
p00 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint - 1, xint - 1, cc, inum, ic
)
p10 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint - 1, xint + 0, cc, inum, ic
)
p20 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint - 1, xint + 1, cc, inum, ic
)
p30 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint - 1, xint + 2, cc, inum, ic
)
# 2nd row
p01 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint + 0, xint - 1, cc, inum, ic
)
p11 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint + 0, xint + 0, cc, inum, ic
)
p21 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint + 0, xint + 1, cc, inum, ic
)
p31 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint + 0, xint + 2, cc, inum, ic
)
# 3rd row
p02 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint + 1, xint - 1, cc, inum, ic
)
p12 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint + 1, xint + 0, cc, inum, ic
)
p22 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint + 1, xint + 1, cc, inum, ic
)
p32 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint + 1, xint + 2, cc, inum, ic
)
# 4th row
p03 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint + 2, xint - 1, cc, inum, ic
)
p13 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint + 2, xint + 0, cc, inum, ic
)
p23 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint + 2, xint + 1, cc, inum, ic
)
p33 = _get_pixel(
data, layout, boxes, image_height, image_width, box_idx, c, yint + 2, xint + 2, cc, inum, ic
)
# Interpolate bicubically
col0 = _cubic_kernel(p00, p10, p20, p30, xfract)
col1 = _cubic_kernel(p01, p11, p21, p31, xfract)
col2 = _cubic_kernel(p02, p12, p22, p32, xfract)
col3 = _cubic_kernel(p03, p13, p23, p33, xfract)
value = _cubic_kernel(col0, col1, col2, col3, yfract)
# use extrapolation_value if in_y/in_x is out of boundary
if extrapolation_value is not None:
out = tvm.tir.if_then_else(
in_y < 0,
extrapolation_value,
tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value, value),
)
value = tvm.tir.if_then_else(
in_x < 0,
extrapolation_value,
tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value, out),
)
return _cast_output(value, data.dtype, out_dtype=out_dtype)
def resize(
data,
size,
layout="NCHW",
method="bilinear",
coordinate_transformation_mode="half_pixel",
out_dtype=None,
output_shape=None,
):
"""Perform resize operation on the data.
Parameters
----------
data : tvm.te.Tensor
input is a 4-D tensor with shape
[batch, channel, in_height, in_width]
or [batch, in_height, in_width, channel]
size: Tuple
Output resolution to scale to, i.e. (out_height, out_width)
layout: string, optional
"NCHW", "NHWC", or "NCHWc".
coordinate_transformation_mode: string, optional
Describes how to transform the coordinate in the resized tensor
to the coordinate in the original tensor.
Refer to the ONNX Resize operator specification for details.
Available options are "half_pixel", "align_corners" and "asymmetric".
method: {"bilinear", "nearest_neighbor", "bicubic"}
Method to be used for resizing.
out_dtype: string, optional
Type to return. If left None will be same as input type.
output_shape: tvm.tir.container.Array, optional
Shape to return. If left None will be inferred
(If shape is determined dynamically, pass out_dtype.shape as output_shape)
Returns
-------
output : tvm.te.Tensor
4-D with shape [batch, channel, in_height*scale, in_width*scale]
or [batch, in_height*scale, in_width*scale, channel]
or 5-D with shape [batch, channel-major, in_height*scale, in_width*scale, channel-minor]
"""
method = method.lower()
if method == "nearest_neighbor" and coordinate_transformation_mode != "asymmetric":
raise ValueError(
"Topi Resize does not support the combination of method %s "
"and coordinate_transformation_mode %s" % (method, coordinate_transformation_mode)
)
if layout == "NHWC":
in_n, in_h, in_w, in_c = data.shape
if output_shape is None:
output_shape = [in_n, size[0], size[1], in_c]
elif layout == "NCHW":
in_n, in_c, in_h, in_w = data.shape
if output_shape is None:
output_shape = [in_n, in_c, size[0], size[1]]
elif nchw_pack_layout(layout): # for NCHWinic
in_n, in_c, in_h, in_w, in_inum, in_ic = data.shape
if output_shape is None:
output_shape = [in_n, in_c, size[0], size[1], in_inum, in_ic]
elif nchw_xc_layout(layout): # for NCHWxc
in_n, in_c, in_h, in_w, in_cc = data.shape
if output_shape is None:
output_shape = [in_n, in_c, size[0], size[1], in_cc]
else:
raise ValueError("%s layout is not supported." % layout)
def _nearest_neighbor(*indices):
return resize_nearest_neighbor(
indices,
data,
in_h,
in_w,
size[0],
size[1],
layout=layout,
coordinate_transformation_mode=coordinate_transformation_mode,
out_dtype=out_dtype,
)
def _bilinear(*indices):
return resize_bilinear(
indices,
data,
in_h,
in_w,
size[0],
size[1],
layout=layout,
coordinate_transformation_mode=coordinate_transformation_mode,
out_dtype=out_dtype,
)
def _bicubic(*indices):
return resize_bicubic(
indices,
data,
in_h,
in_w,
size[0],
size[1],
layout,
coordinate_transformation_mode=coordinate_transformation_mode,
out_dtype=out_dtype,
)
# Determine which interpolation method to use then run it.
if method == "nearest_neighbor":
compute_func = _nearest_neighbor
elif method == "bilinear":
compute_func = _bilinear
elif method == "bicubic":
compute_func = _bicubic
else:
raise ValueError("%s method is not supported." % method)
return te.compute(output_shape, compute_func, name="resize", tag=tag.INJECTIVE)
def crop_and_resize(
data,
boxes,
box_indices,
crop_size,
layout="NCHW",
method="bilinear",
extrapolation_value=0,
out_dtype=None,
):
"""Perform crop and resize operation on the data.
Parameters
----------
data : tvm.te.Tensor
input is a 4-D tensor with shape
[batch, channel, in_height, in_width]
or [batch, in_height, in_width, channel]
boxes : tvm.te.Tensor
A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
the coordinates of a box.
box_indices : tvm.te.Tensor
A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
the i-th box refers to.
crop_size : Tuple
The target size of each box.
layout : string, optional
"NCHW", "NHWC"
method : {"bilinear", "nearest_neighbor"}
Method to be used for resizing.
extrapolation_value: float, optional
Value used for extrapolation, when applicable.
out_dtype : string, optional
Type to return. If left None will be same as input type.
Returns
-------
output : tvm.te.Tensor
4-D with shape [num_boxes, channel, crop_height, crop_width]
or [num_boxes, crop_height, crop_width, channel]
"""
method = method.lower()
target_h = crop_size[0]
target_w = crop_size[1]
if layout == "NHWC":
output_shape = [box_indices.shape[0], crop_size[0], crop_size[1], data.shape[3]]
image_h = data.shape[1].astype("int32")
image_w = data.shape[2].astype("int32")
elif layout == "NCHW":
output_shape = [box_indices.shape[0], data.shape[1], crop_size[0], crop_size[1]]
image_h = data.shape[2].astype("int32")
image_w = data.shape[3].astype("int32")
elif layout.startswith("NCHW"): # for NCHWxc
output_shape = [
box_indices.shape[0],
data.shape[1],
crop_size[0],
crop_size[1],
data.shape[4],
]
image_h = data.shape[2].astype("int32")
image_w = data.shape[3].astype("int32")
else:
raise ValueError("%s layout is not supported." % layout)
def _bilinear(*indices):
return resize_bilinear(
indices,
data,
image_h,
image_w,
target_h,
target_w,
boxes,
box_indices,
extrapolation_value,
layout,
out_dtype=out_dtype,
)
def _nearest_neighbor(*indices):
return resize_nearest_neighbor(
indices,
data,
image_h,
image_w,
target_h,
target_w,
boxes,
box_indices,
extrapolation_value,
layout,
out_dtype=out_dtype,
)
# Determine which interpolation method to use then run it.
if method == "nearest_neighbor":
compute_func = _nearest_neighbor
elif method == "bilinear":
compute_func = _bilinear
else:
raise ValueError("%s method is not supported." % method)
return te.compute(output_shape, compute_func, name="crop_and_resize", tag=tag.INJECTIVE)
def resize3d(
data,
size,
layout="NCDHW",
method="nearest_neighbor",
coordinate_transformation_mode="align_corners",
out_dtype=None,
):
"""Perform resize operation on the data.
Parameters
----------
data : tvm.te.Tensor
input is a 5-D tensor with shape
[batch, channel, in_depth, in_height, in_width]
or [batch, in_depth, in_height, in_width, channel]
size: Tuple
Output resolution to scale to, i.e. (out_depth, out_height, out_width)
layout: string, optional
"NCDHW", "NDHWC", or "NCDHWc".
coordinate_transformation_mode: string, optional
Describes how to transform the coordinate in the resized tensor
to the coordinate in the original tensor.
Refer to the ONNX Resize operator specification for details.
Available options are "half_pixel", "align_corners" and "asymmetric".
method: {"trilinear", "nearest_neighbor"}
Method to be used for resizing.
out_dtype: string, optional
Type to return. If left None will be same as input type.
Returns
-------
output : tvm.te.Tensor
5-D with shape [batch, channel, in_depth*scale, in_height*scale, in_width*scale]
or [batch, in_depth*scale, in_height*scale, in_width*scale, channel]
or 5-D with shape [batch, channel-major, in_depth*scale, in_height*scale, in_width*scale,
channel-minor]
"""
method = method.lower()
if layout == "NDHWC":
in_n, in_d, in_h, in_w, in_c = data.shape
output_shape = [in_n, size[0], size[1], size[2], in_c]
elif layout == "NCDHW":
in_n, in_c, in_d, in_h, in_w = data.shape
output_shape = [in_n, in_c, size[0], size[1], size[2]]
# Otherwise layout must be NCDHWxc
else:
in_n, in_c, in_d, in_h, in_w, in_cc = data.shape
output_shape = [in_n, in_c, size[0], size[1], size[2], in_cc]
if coordinate_transformation_mode == "align_corners":
z_ratio = (in_d - 1).astype("float") / (size[0] - 1)
y_ratio = (in_h - 1).astype("float") / (size[1] - 1)
x_ratio = (in_w - 1).astype("float") / (size[2] - 1)
elif coordinate_transformation_mode in ["asymmetric", "half_pixel"]:
z_ratio = (in_d).astype("float") / (size[0])
y_ratio = (in_h).astype("float") / (size[1])
x_ratio = (in_w).astype("float") / (size[2])
else:
raise ValueError(
"Unsupported coordinate_transformation_mode: {}".format(coordinate_transformation_mode)
)
def _get_pixel(n, c, z, y, x, cc):
z = tvm.te.max(tvm.te.min(z, in_d - 1), 0)
y = tvm.te.max(tvm.te.min(y, in_h - 1), 0)
x = tvm.te.max(tvm.te.min(x, in_w - 1), 0)
if layout == "NDHWC":
return data(n, z, y, x, c).astype("float")
if layout == "NCDHW":
return data(n, c, z, y, x).astype("float")
# else must be NCDHWxc
return data(n, c, z, y, x, cc).astype("float")
def _get_indices(*indices):
if layout == "NDHWC":
n, z, y, x, c = indices
cc = None
elif layout == "NCDHW":
n, c, z, y, x = indices
cc = None
else:
n, c, z, y, x, cc = indices
return n, c, z, y, x, cc
def _cast_output(value):
if out_dtype:
dtype = out_dtype
else:
dtype = data.dtype
return value.astype(dtype)
# Nearest neighbor computation
def _nearest_neighbor(*indices):
n, c, z, y, x, cc = _get_indices(*indices)
in_z = z_ratio * z
in_y = y_ratio * y
in_x = x_ratio * x
if coordinate_transformation_mode == "align_corners":
zint = te.round(in_z).astype("int32")
yint = te.round(in_y).astype("int32")
xint = te.round(in_x).astype("int32")
elif coordinate_transformation_mode in ["asymmetric", "half_pixel"]:
# Add epsilon to floor to prevent gpu rounding errors.
epsilon = 1e-5
zint = te.floor(in_z + epsilon).astype("int32")
yint = te.floor(in_y + epsilon).astype("int32")
xint = te.floor(in_x + epsilon).astype("int32")
else:
raise ValueError(
"Unsupported coordinate_transformation_mode: {}".format(
coordinate_transformation_mode
)
)
return _cast_output(_get_pixel(n, c, zint, yint, xint, cc))
# Trilinear helper functions and computation.
def _lerp(A, B, t):
return A * (1.0 - t) + B * t
def _trilinear(*indices):
n, c, z, y, x, cc = _get_indices(*indices)
if coordinate_transformation_mode == "half_pixel":
in_z = z_ratio * (z + 0.5) - 0.5
in_y = y_ratio * (y + 0.5) - 0.5
in_x = x_ratio * (x + 0.5) - 0.5
else:
in_z = z_ratio * z
in_y = y_ratio * y
in_x = x_ratio * x
zint = te.floor(in_z).astype("int32")
zfract = in_z - te.floor(in_z)
xint = te.floor(in_x).astype("int32")
xfract = in_x - te.floor(in_x)
yint = te.floor(in_y).astype("int32")
yfract = in_y - te.floor(in_y)
p000 = _get_pixel(n, c, zint, yint, xint, cc)
p001 = _get_pixel(n, c, zint, yint, xint + 1, cc)
p010 = _get_pixel(n, c, zint, yint + 1, xint, cc)
p011 = _get_pixel(n, c, zint, yint + 1, xint + 1, cc)
p100 = _get_pixel(n, c, zint + 1, yint, xint, cc)
p101 = _get_pixel(n, c, zint + 1, yint, xint + 1, cc)
p110 = _get_pixel(n, c, zint + 1, yint + 1, xint, cc)
p111 = _get_pixel(n, c, zint + 1, yint + 1, xint + 1, cc)
dep00 = _lerp(p000, p100, zfract)
dep01 = _lerp(p001, p101, zfract)
dep10 = _lerp(p010, p110, zfract)
dep11 = _lerp(p011, p111, zfract)
col0 = _lerp(dep00, dep01, xfract)
col1 = _lerp(dep10, dep11, xfract)
value = _lerp(col0, col1, yfract)
return _cast_output(value)
# Determine which interpolation method to use then run it.
if method == "nearest_neighbor":
compute_func = _nearest_neighbor
elif method == "trilinear":
compute_func = _trilinear
else:
raise ValueError("%s method is not supported." % method)
return te.compute(output_shape, compute_func, name="resize3d", tag=tag.INJECTIVE)
|
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import logging
import os
import select
from subprocess import Popen, PIPE
from time import sleep
from conf import LisaLogging
from android import System, Workload
from env import TestEnv
from devlib.utils.misc import memoized
from devlib.utils.android import fastboot_command
class LisaBenchmark(object):
"""
A base class for LISA custom benchmark execution
This class is intended to be subclassed in order to create a custom
benchmark execution for LISA.
It sets up the TestEnv and provides convenience methods for
test environment setup, execution and post-processing.
Subclasses should provide a bm_conf to set up the TestEnv and
a set of optional callback methods to configure a test environment
and process collected data.
Example users of this class can be found under LISA's tests/benchmarks
directory.
"""
bm_conf = None
"""Override this with a dictionary or JSON path to configure the TestEnv"""
bm_name = None
"""Override this with the name of the LISA's benchmark to run"""
bm_params = None
"""Override this with the set of parameters for the LISA's benchmark to run"""
bm_collect = None
"""Override this with the set of data to collect during test exeution"""
def benchmarkInit(self):
"""
Code executed before running the benchmark
"""
pass
def benchmarkFinalize(self):
"""
Code executed after running the benchmark
"""
pass
################################################################################
# Private Interface
@memoized
def _parseCommandLine(self):
parser = argparse.ArgumentParser(
description='LISA Benchmark Configuration')
# Bootup settings
parser.add_argument('--boot-image', type=str,
default=None,
help='Path of the Android boot.img to be used')
parser.add_argument('--boot-timeout', type=int,
default=20,
help='Timeout in [s] to wait after a reboot (default 20)')
# Android settings
parser.add_argument('--android-device', type=str,
default=None,
help='Identifier of the Android target to use')
parser.add_argument('--android-home', type=str,
default=None,
help='Path used to configure ANDROID_HOME')
# Test customization
parser.add_argument('--results-dir', type=str,
default=None,
help='Results folder, '
'if specified override test defaults')
parser.add_argument('--collect', type=str,
default=None,
help='Set of metrics to collect, '
'e.g. "energy systrace_30" to sample energy and collect a 30s systrace, '
'if specified overrides test defaults')
# Measurements settings
parser.add_argument('--iio-channel-map', type=str,
default=None,
help='List of IIO channels to sample, '
'e.g. "ch0:0,ch3:1" to sample CHs 0 and 3, '
'if specified overrides test defaults')
# Parse command line arguments
return parser.parse_args()
def _getBmConf(self):
if self.bm_conf is None:
msg = 'Benchmark subclasses must override the `bm_conf` attribute'
raise NotImplementedError(msg)
# Override default configuration with command line parameters
if self.args.android_device:
self.bm_conf['device'] = self.args.android_device
if self.args.android_home:
self.bm_conf['ANDROID_HOME'] = self.args.android_home
if self.args.results_dir:
self.bm_conf['results_dir'] = self.args.results_dir
if self.args.collect:
self.bm_collect = self.args.collect
# Override energy meter configuration
if self.args.iio_channel_map:
em = {
'instrument' : 'acme',
'channel_map' : {},
}
for ch in self.args.iio_channel_map.split(','):
ch_name, ch_id = ch.split(':')
em['channel_map'][ch_name] = ch_id
self.bm_conf['emeter'] = em
self._log.info('Using ACME energy meter channels: %s', em)
# Override EM if energy collection not required
if not self.bm_collect or 'energy' not in self.bm_collect:
# 'emeter' may not be set; pop with a default avoids a bare except
self.bm_conf.pop('emeter', None)
return self.bm_conf
def _getWorkload(self):
if self.bm_name is None:
msg = 'Benchmark subclasses must override the `bm_name` attribute'
raise NotImplementedError(msg)
# Get a reference to the workload to run
wl = Workload.getInstance(self.te, self.bm_name)
if wl is None:
raise ValueError('Specified benchmark [{}] is not supported'\
.format(self.bm_name))
return wl
def _getBmParams(self):
if self.bm_params is None:
msg = 'Benchmark subclasses must override the `bm_params` attribute'
raise NotImplementedError(msg)
return self.bm_params
def _getBmCollect(self):
if self.bm_collect is None:
msg = 'Benchmark subclasses must override the `bm_collect` attribute'
self._log.warning(msg)
return ''
return self.bm_collect
def __init__(self):
"""
Set up logging and trigger running experiments
"""
LisaLogging.setup()
self._log = logging.getLogger('Benchmark')
self._log.info('=== CommandLine parsing...')
self.args = self._parseCommandLine()
self._log.info('=== TestEnv setup...')
self.bm_conf = self._getBmConf()
self.te = TestEnv(self.bm_conf)
self.target = self.te.target
self._log.info('=== Initialization...')
self.wl = self._getWorkload()
self.out_dir=self.te.res_dir
try:
self.benchmarkInit()
except:
self._log.warning('Benchmark initialization failed: execution aborted')
raise
self._log.info('=== Execution...')
self.wl.run(out_dir=self.out_dir,
collect=self._getBmCollect(),
**self.bm_params)
self._log.info('=== Finalization...')
self.benchmarkFinalize()
def _wait_for_logcat_idle(self, seconds=1):
lines = 0
# Clear logcat
# os.system('{} logcat -s {} -c'.format(adb, DEVICE));
self.target.clear_logcat()
# Dump logcat output
logcat_cmd = 'adb -s {} logcat'.format(self.target.adb_name)
logcat = Popen(logcat_cmd, shell=True, stdout=PIPE)
logcat_poll = select.poll()
logcat_poll.register(logcat.stdout, select.POLLIN)
# Monitor logcat until it's idle for the specified number of [s]
self._log.info('Waiting for system to be almost idle')
self._log.info(' i.e. at least %d[s] of no logcat messages', seconds)
while True:
poll_result = logcat_poll.poll(seconds * 1000)
if not poll_result:
break
lines = lines + 1
line = logcat.stdout.readline(1024)
if lines % 1000 == 0:
self._log.debug(' still waiting...')
if lines > 1e6:
self._log.warning('device logcat seems quite busy, '
'continuing anyway... ')
break
def reboot_target(self, disable_charge=True):
"""
Reboot the target if a "boot-image" has been specified
If the user specify a boot-image as a command line parameter, this
method will reboot the target with the specified kernel and wait
for the target to be up and running.
"""
# Reboot the device, if a boot_image has been specified
if self.args.boot_image:
self._log.warning('=== Rebooting...')
self._log.warning('Rebooting image to use: %s', self.args.boot_image)
self._log.debug('Waiting 6[s] to enter bootloader...')
self.target.adb_reboot_bootloader()
sleep(6)
# self._fastboot('boot {}'.format(self.args.boot_image))
cmd = 'boot {}'.format(self.args.boot_image)
fastboot_command(cmd, device=self.target.adb_name)
self._log.debug('Waiting {}[s] for boot to start...'\
.format(self.args.boot_timeout))
sleep(self.args.boot_timeout)
else:
self._log.warning('Device NOT rebooted, using current image')
# Restart ADB in root mode
self._log.warning('Restarting ADB in root mode...')
self.target.adb_root(force=True)
# TODO add check for kernel SHA1
self._log.warning('Skipping kernel SHA1 cross-check...')
# Disable charge via USB
if disable_charge:
self._log.debug('Disabling charge over USB...')
self.target.charging_enabled = False
# Log current kernel version
self._log.info('Running with kernel:')
self._log.info(' %s', self.target.kernel_version)
# Wait for the system to complete the boot
self._wait_for_logcat_idle()
# vim :set tabstop=4 shiftwidth=4 expandtab
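# ------------------------------------------------------------------------------
# Hedged sketch (not part of the original module): a minimal subclass showing
# how the LisaBenchmark hooks are meant to be overridden. The workload name,
# parameter keys and configuration values are illustrative assumptions, not a
# known-good setup.
class ExampleBenchmark(LisaBenchmark):

    bm_conf = {
        'device'      : None,           # usually supplied via --android-device
        'results_dir' : 'ExampleBenchmark',
    }
    bm_name = 'MyWorkload'          # must match a Workload known to LISA
    bm_params = {'duration_s': 30}  # forwarded to Workload.run() as kwargs
    bm_collect = 'energy'

    def benchmarkInit(self):
        # e.g. flash the requested kernel and reboot before measuring
        self.reboot_target()

    def benchmarkFinalize(self):
        self._log.info('Results in: %s', self.out_dir)

# Instantiating the subclass parses the command line and runs the workload:
#     ExampleBenchmark()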
|
|
#!python -u
import math
import pickle
import copy
import random
import pyglet
import pyglet.gl as gl
import pyglet.window.key as key
import vector as vec
from vector import Vector as Vec
import timevars as tv
import sprites
import gameelements
import activeobjects as ao
from geoip import GeoIPData
from gameassets import GameAssets
from config import Config
class GamePhase(object):
""" Abstract base class for one phase of a game."""
def __init__(self, gameElements, windowProps, evtSrc):
super(GamePhase, self).__init__()
# You can create all the objects you need for this game phase
def start(self):
# Called when game phase becomes active
pass
def update(self, dt, userInput):
# Called every game-tick when active
# Returns:
# Either 'None' to indicate that this phase is still active,
# or an instance of the next Game Phase.
# This implements our state machine!
pass
def draw(self, window):
# Called every draw-tick when active.
# Make OpenGL calls here, but leave the stack the way you found it.
pass
def delete(self):
# Called after update returns next Game Phase.
# A chance to free up resources (OpenGL stuff?)
pass
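# ------------------------------------------------------------------------------
# Hedged sketch (not part of the original file): how a driver loop is expected
# to use the GamePhase contract above. `window` and `userInput` stand in for
# the real pyglet window and input-state objects.
def _advance_phase(phase, dt, userInput, window):
    nextPhase = phase.update(dt, userInput)   # None means "stay in this phase"
    if nextPhase is not None:
        phase.delete()
        phase = nextPhase
        phase.start()
    phase.draw(window)
    return phase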
class PlayPhaseBase(GamePhase):
def __init__(self, windowProps, evtSrc ):
#super(GamePhase, self).__init__()
self.windowProps = windowProps
self.evtSrc = evtSrc
self.gameElements = gameelements.GameElements(windowProps)
self.gameElements.populateGame( GameAssets.Instance() )
self.shotMargin = 20
self.viewportOrigin = [0.0, 0.0]
self.destinationTracker = tv.TimeAverage2(0.7, *self.gameElements.ship.getPosition())
self.shake = None
self.endGame = None
self.explodedMarker = ao.MarkerAO()
self.drawFrameMarker = ao.MarkerAO()
def start(self):
pass
def update(self, dt, userInput):
ge = self.gameElements
# Elements that evolve pretty much by themselves.
ge.update(dt)
# End of game display
if self.endGame is not None:
self.endGame.update(dt, userInput)
# Regular game play
# Use controls to update the ship.
if not self.explodedMarker.done():
if userInput.joystick is not None:
self.joystickUpdate(dt, userInput.joystick)
else:
self.keyboardUpdate(dt, userInput.keys)
if userInput.keys[key.M]:
print ""
for i, m in enumerate(ge.swarm.meteors):
print "Meteor % 2d: %s" % (i, m.dump())
if userInput.keys[key.K]:
# Debug key: force the ship-death sequence
self.spaceShipDeath()
# Interactions
# We handle lazer blasts when they are created
# What else should go here?
# Let the viewport continue to drift even after ship destroyed
# instead of just freezing
#if not self.explodedMarker.done():
self.updateViewport(dt)
posn = ge.ship.getPosition()
ge.swarm.spawnNew(posn, self.viewportOrigin)
hitObj, severity = self.findHit()
if hitObj is not None:
if severity > 0.8:
#print "graze", severity
pass
elif severity > 0.28:
# Shake
if self.shake is None:
self.shake = tv.Shaker2(0.75, 10.0, 3.0)
self.spaceShipShake()
else:
# Explode
self.spaceShipDeath()
#self.radar.setNumItems( g.swarm.nItems())
def findHit(self):
# Check for hits on space ship
ship = self.gameElements.ship
p1 = Vec(*ship.getPosition())
r1 = ship.getRadius()
prevMinD = 1000000 # infinity
hitObj = None
severity = 1000.0
for o in self.gameElements.swarm.objects():
p2 = Vec(*o.getCenter())
r2 = o.getRadius()
d = vec.Distance(p1, p2)
if d < r1 + r2 and d < prevMinD:
#print d, r1, r2
prevMinD = d
hitObj = o
severity = d/(r1+r2)
if hitObj is None:
return None, None
#print "Hit", severity
else:
return hitObj, severity
def joystickUpdate(self, dt, joystick):
js = joystick
g = self.gameElements
#g.dbgSquare1.shift(3*js.rx, -3*js.ry)
#g.dbgSquare2.shift(3*js.rx, -3*js.ry)
#g.dbgSquare2.xPos.target = g.dbgSquare1.xPos
#g.dbgSquare2.yPos.target = g.dbgSquare1.yPos
# Right joystick rotates and uses rear engine
r, th = getJoystickPolarRight(js)
if r > 0.1:
g.ship.angleVar.setTarget(th + 90)
g.ship.thrust(dt, r)
else:
g.ship.thrust(dt, 0) # needed for drawing flames
# Left joystick just rotates
r, th = getJoystickPolarLeft(js)
if r > 0.45:
g.ship.angleVar.setTarget(th + 90)
#g.ship.angle = th + 90.0
# Front thrust, useful for braking
# Nope, converted to increasing drag
if js.buttons[4] :
g.ship.drag( 1.0 )
#g.ship.sprite = pyglet.sprite.Sprite(self.assets.dbgImage2)
else:
g.ship.drag(0.0)
if js.buttons[7]:
# dump a few more meteors in
#g.addMeteors(10)
pass
#if js.buttons[5]:
if js.z < -0.15:
self.shoot()
def keyboardUpdate(self, dt, keys):
# Use keyboard to control ship
g = self.gameElements
drot = 800
rot = g.ship.angleVar.value
thrust = 0
rotNew = rot
if keys[key.LEFT]:
rotNew += -drot * dt
#g.ship.angleVar.setTarget(th - drot * dt)
#g.ship.rot( -drot * dt)
if keys[key.RIGHT]:
rotNew += +drot * dt
#g.ship.angleVar.setTarget(th + drot * dt)
#g.ship.rot( drot * dt)
if keys[key.UP]:
thrust += 1.0
#g.ship.thrust( dt, 1.0)
# Nope, converted to increasing drag
if keys[key.DOWN]:
g.ship.drag( 1.0 )
#g.ship.sprite = pyglet.sprite.Sprite(self.assets.dbgImage2)
else:
g.ship.drag(0.0)
g.ship.thrust( dt, thrust)
g.ship.angleVar.setTarget(rotNew)
if keys[key.SPACE] :
self.shoot()
def shoot(self):
ga = GameAssets.Instance()
g = self.gameElements
shot = g.ship.shoot(g.shotBatch)
if shot is not None:
g.shots.append(shot)
if Config.Instance().sound():
ga.getSound('lazer-shot-1').play()
m = g.swarm.findShotHit(shot, self.shotMargin)
if m is not None:
self.processHit(m)
def processHit(self, meteor):
ga = GameAssets.Instance()
g = self.gameElements
points = meteor.getValue()
self.score.addScore(points)
g.swarm.explode(meteor)
if Config.Instance().sound():
ga.getSound('bomb-explosion-1').play()
def updateViewport(self, dt):
# Viewport tracks ship, roughly, i.e. it shift when ship gets near an edge.
#
# Well, we either need to explicitly trim the projected position so it doesn't
# shove the ship off the screen, or achieve the same effect by making the
# amount of time we project forward depend on the angle of travel. Why?
# Because drag limits speed: horizontally, a point 1.2s ahead and the ship
# always fit on the same screen, but travelling vertically they drift too far apart.
#
ship = self.gameElements.ship
w = self.windowProps.windowWidth
h = self.windowProps.windowHeight
#x,y = ship.getPosition()
#x,y = ship.getProjectedPosition(0.4)
vx,vy = ship.getDirection()
t = 1.3 * abs(vx) + 0.4 * abs(vy)
x,y = self.destinationTracker.update(*ship.getProjectedPosition(t))
border = 200
factor = 0.8
# Shift the viewport
xRel = x - self.viewportOrigin[0]
if xRel > w - border:
self.viewportOrigin[0] += factor * (xRel - w + border)
elif xRel < border:
self.viewportOrigin[0] -= factor * (border - xRel)
yRel = y - self.viewportOrigin[1]
if yRel > h - border:
self.viewportOrigin[1] += factor * (yRel - h + border)
elif yRel < border:
self.viewportOrigin[1] -= factor * (border - yRel)
if self.shake is not None:
self.shake.update(dt)
(sx,sy) = self.shake.getValue()
self.viewportOrigin[0] += sx
self.viewportOrigin[1] += sy
if not self.shake.alive:
self.shake = None
# XXX You're in the base class - remove this
def draw(self, window):
self.drawSpace(window)
if self.endGame and self.drawFrameMarker.done():
# draw "Game Done" in absolute window position
self.endGame.draw(window)
def drawSpace(self, window):
gl.glPushMatrix()
# GL matrices are applied last-added-first, so this *is* the right
# order for pushing them.
gl.glTranslatef(-self.viewportOrigin[0], -self.viewportOrigin[1], 0.0)
if self.shake is not None:
# We want to rotate around the center of the current viewport
# vpc = view port center
vpc_x = self.viewportOrigin[0] + self.windowProps.windowWidth//2
vpc_y = self.viewportOrigin[1] + self.windowProps.windowHeight//2
gl.glTranslatef(vpc_x, vpc_y, 0.0)
gl.glRotatef(self.shake.getAngle(), 0, 0, 1)
gl.glTranslatef(-vpc_x, -vpc_y, 0.0)
ge = self.gameElements
ge.starField.draw()
ge.swarm.draw()
if not self.explodedMarker.done():
ge.ship.draw()
if self.endGame and not self.drawFrameMarker.done():
self.endGame.draw(window)
for shot in ge.shots:
if shot.alive:
shot.draw()
gl.glPopMatrix()
class PlayCountPhase(PlayPhaseBase):
def __init__(self, windowProps, evtSrc ):
super(PlayCountPhase, self).__init__( windowProps, evtSrc )
self.score = sprites.ScoreBoard(windowProps)
self.radar = sprites.MeteorRadar(windowProps)
self.timer = tv.CountUpTimer(running=True)
self.timeDisplay = sprites.TimeDisplay(windowProps)
def update(self, dt, userInput):
gp = super(PlayCountPhase, self).update(dt, userInput)
if gp is not None:
return gp
self.score.update(dt)
#if self.explodedMarker is None or not self.explodedMarker.done():
if not self.explodedMarker.done():
self.timer.update(dt)
self.timeDisplay.setTime( self.timer.time())
self.radar.setNumItems( self.gameElements.swarm.nItems())
if self.endGame and self.endGame.done():
# Go to the leader board
score = self.score.value
state = GeoIPData.Instance().state
d = (None, state, score)
lb = LeaderBoardPhase(self.windowProps, self.evtSrc, d)
return lb
def draw(self, window):
gl.glClearColor(0.0, 0.0, 0.0, 0.0)
gl.glEnable( gl.GL_BLEND)
window.clear()
# Replace with call to drawSpace()
super(PlayCountPhase, self).draw(window)
self.score.draw()
self.radar.draw()
self.timeDisplay.draw()
def spaceShipShake(self):
self.score.addScore([-1]*4)
def spaceShipDeath(self):
if self.endGame:
return
shakeTime = 0.75
self.shake = tv.Shaker2(shakeTime, 22.0, 8.0)
self.score.addScore([-2]*8)
ship = self.gameElements.ship
# Set up end-of-game ActiveObjects
# Position doesn't matter - it'll be updated later
explosionSprite = sprites.MultiExplosion(0.,0., [0.0, 0.3, 0.5, 0.6, 1.0, 1.8])
def positionExplosion(ship=self.gameElements.ship, explosion=explosionSprite):
p = ship.getPosition()
explosion.x = p[0]
explosion.y = p[1]
self.endGame = ao.SerialObjects(
ao.DelayAO(shakeTime/2.),
ao.FunctionWrapperAO(positionExplosion),
self.explodedMarker,
ao.FunctionWrapperAO(lambda: ship.drag(0.66)),
ao.SoundWrapperAO(GameAssets.Instance().getSound('wilhelm')),
ao.SpriteWrapperAO(explosionSprite),
ao.FunctionWrapperAO(lambda: ship.drag(0.9)),
self.drawFrameMarker,
ao.DelayAO(0.2),
ao.GameOverAO(self.windowProps)
)
self.endGame.start()
class PlayTimePhase(PlayPhaseBase):
def __init__(self, windowProps, evtSrc ):
super(PlayTimePhase, self).__init__( windowProps, evtSrc )
#self.score = sprites.ScoreBoard(windowProps)
#self.radar = sprites.MeteorRadar(windowProps)
#self.timer = sprites.Timer(windowProps)
self.timeElapsed = tv.CountUpTimer(running=True)
self.timeElapsedDisplay = sprites.TimeDisplay(windowProps)
self.timeRemaining = 20.0
self.timeRemainingDisplay = sprites.TimeDisplay(windowProps, displayTenths = True)
self.playing = True
def update(self, dt, userInput):
if self.playing:
gp = super(PlayTimePhase, self).update(dt, userInput)
if gp is not None:
return gp
if self.playing:
self.timeElapsed.update(dt)
self.timeElapsedDisplay.setTime( self.timeElapsed.time())
self.timeRemaining -= dt
if self.timeRemaining > 0.:
self.timeRemainingDisplay.setTime(self.timeRemaining)
return
# Time ran out
self.playing = False
self.timeRemaining = 0.
self.timeRemainingDisplay.setTime(self.timeRemaining)
#self.score.update(dt)
#self.timer.update(dt)
#self.radar.setNumItems( self.gameElements.swarm.nItems())
def draw(self, window):
gl.glClearColor(0.0, 0.0, 0.0, 0.0)
gl.glEnable( gl.GL_BLEND)
window.clear()
super(PlayTimePhase, self).draw(window)
self.timeElapsedDisplay.draw()
gl.glPushMatrix()
gl.glTranslatef(-250., 0., 0.)
self.timeRemainingDisplay.draw()
gl.glPopMatrix()
def processHit(self, meteor):
ga = GameAssets.Instance()
g = self.gameElements
points = meteor.getValue()
self.timeRemaining += 1.5
#self.score.addScore(points)
g.swarm.explode(meteor)
if Config.Instance().sound():
ga.getSound('bomb-explosion-1').play()
def spaceShipShake(self):
self.timeRemaining -= 2.
class TextEntryWidget(object):
""" Does not allow user to input an empty string.
Could control this with an option later, if needed.
"""
def __init__(self, x, y, width, evtSrc):
#super(TextEntryWidget, self).__init__()
self.x = x
self.y = y
self.width = width
self.evtSrc = evtSrc
self.enteredText = None # gets a value when the user hits Enter
self.cursorBlink = tv.Blinker(1.2)
batch = pyglet.graphics.Batch()
self.documentBatch = batch
self.document = pyglet.text.document.UnformattedDocument("")
self.document.set_style(0, 0, dict(
color=(0, 255, 0, 200),
background_color=None,
font_name='Orbitron',
bold=True,
font_size=50,
#kerning=7,
#underline=(0,200,35,180)
))
font = self.document.get_font()
height = font.ascent - font.descent
self.layout = pyglet.text.layout.IncrementalTextLayout(
self.document, width, height, multiline=False, batch=batch)
self.layout.x = x
self.layout.y = y
self.caret = pyglet.text.caret.Caret(self.layout)
self.caret.visible = False
# Stuff for my cursor
self.cursor = pyglet.text.Label(text="_",
font_name='Orbitron', bold=True,
font_size=50,
anchor_x = "left", anchor_y="top",
color=(0, 255, 0, 200))
evtSrc.push_handlers(self.caret)
evtSrc.push_handlers(on_text=self.on_text)
def on_text(self, text):
if self.enteredText is not None:
return True
if ord(text) == 13:
if self.document.text != "":
self.enteredText = self.document.text
#print self.enteredText
return True
else:
return False
def update(self, dt):
self.cursorBlink.update(dt)
def draw(self, window):
self.documentBatch.draw()
p = self.layout.get_point_from_position(self.caret.position)
self.cursor.x = p[0]+11
self.cursor.y = p[1]+62
if self.enteredText is None and self.cursorBlink.isOn():
self.cursor.draw()
def delete(self):
self.evtSrc.pop_handlers()
self.evtSrc.pop_handlers()
class LeaderBoardData(object):
"""docstring for LeaderBoardData"""
def __init__(self):
self.msg = ("!!!!! DO NOT LOOK IN THIS FILE !!!!!" +
" If you do your nose will fall off." +
" You have been warned.")
# Triplets of (Name, State, Score)
self.leaders = []
@classmethod
def fromFile(cls, fileName):
try:
with open(fileName, 'rb') as f:
t = pickle.load(f)
except Exception as e:
print "Can't load leaderboard. Starting fresh one."
t = LeaderBoardData() # Just return an empty one
return t
def write(self, fileName):
try:
with open(fileName, 'wb') as f:
pickle.dump(self, f)
except Exception as e:
print "Error saving", e
def __repr__(self):
return "Msg: %s\nLeaders: %s" % (self.msg, self.leaders)
class LeaderBoardPhase(GamePhase):
lbdFileName = 'leaderboard.dat'
def __init__(self, windowProps, evtSrc, newData):
self.windowProps = windowProps
self.evtSrc = evtSrc
self.maxNScores = 6
self.done = False
self.newData = newData
# newData is (None, state, score). We have to get the name.
self.newData = newData
_, state, newScore = newData
self.foo = True
self.leaderData = LeaderBoardData.fromFile( LeaderBoardPhase.lbdFileName)
# Insert new score in the appropriate place
ldrs = copy.deepcopy(self.leaderData.leaders)
if self.foo:
topScore = ldrs[0][2] if len(ldrs) > 0 else 0
topScore = max(topScore, newScore) + random.randint(50,150)
d = {'CA': "Superstar", 'WA': "Coby", 'MA': "Sergio"}
taunt = d.get(state, "Mr. Jones")
ldrs.insert(0, (taunt, 'XX', topScore))
nLdrs = len(ldrs)
i = 0
while i < nLdrs and ldrs[i][2] >= newScore:
i += 1
iNewItem = i
# Did we make the board?
self.madeLeaderBoard = iNewItem < self.maxNScores
#print newScore, self.madeLeaderBoard
self.iNewItem = iNewItem
self.newLdrs = newLdrs = copy.deepcopy(ldrs)
newLdrs.insert(iNewItem, newData)
newLdrs = newLdrs[0:self.maxNScores]
# Create the screen objects
staticLabels = []
w, h = windowProps.windowWidth, windowProps.windowHeight
over1 = 190
over2 = w - 180
down1 = 80
yVal = h - 200
color = (0, 255, 0, 200)
fontSize = 50
staticBatch = pyglet.graphics.Batch()
self.staticBatch = staticBatch
def makeLabel( x, y, text):
l = pyglet.text.Label(text=text,
font_name='Orbitron', bold=True,
font_size=fontSize,
x=x, y=y,
color=color, batch=staticBatch)
staticLabels.append(l)
return l
l = makeLabel(w//2, h-20, "Leader Board")
l.anchor_x, l.anchor_y = "center", "top"
l.font_size = fontSize + 12
for i, item in enumerate(newLdrs):
name, _, score = item
l = makeLabel(over1, yVal, str(i+1) + ". ")
l.anchor_x, l.anchor_y = "right", "bottom"
if i == iNewItem:
inputLocation = (over1+15, yVal)
else:
l = makeLabel(over1+15, yVal, name)
l.anchor_x, l.anchor_y = "left", "bottom"
l = makeLabel(over2, yVal, str(score))
l.anchor_x, l.anchor_y = "right", "bottom"
yVal -= down1
self.labels = staticLabels
if self.madeLeaderBoard:
# Create input entry
width = over2 - over1 - 320 # 320 estimates width of the scores column
#print width
self.textEntry = TextEntryWidget(inputLocation[0], inputLocation[1],width, evtSrc)
else:
self.textEntry = None
self.bottomBatch = pyglet.graphics.Batch()
instText = "[ Space Bar to start new game - Ctrl-Q to quit ]"
self.instructions = pyglet.text.Label(text=instText,
font_name='Orbitron', bold=True,
font_size=24,
x=w//2, y=10,
anchor_x='center', anchor_y='bottom',
color=color, batch=self.bottomBatch)
if not self.madeLeaderBoard:
scoreText = "Your Score: %d" % newScore
self.scoreLabel = pyglet.text.Label(text=scoreText,
font_name='Orbitron', bold=True,
font_size=32,
x=w//2, y=59,
anchor_x='center', anchor_y='bottom',
color=color, batch=self.bottomBatch)
ga = GameAssets.Instance()
if self.madeLeaderBoard:
self.done = False
self.fanfare = ga.getSound('tada')
else:
self.done = True
self.fanfare = ga.getSound('ohno')
def start(self):
if Config.Instance().sound():
self.fanfare.play()
def update(self, dt, userInput):
if self.done:
k = userInput.keys
if k[key.SPACE]:
# Start a new game
#newGe = gameelements.GameElements(self.windowProps)
#newGe.populateGame( GameAssets.Instance() )
gp = PlayCountPhase(self.windowProps, self.evtSrc)
return gp
return
if self.textEntry:
self.textEntry.update(dt)
if (self.textEntry and
self.textEntry.enteredText is not None and
self.textEntry.enteredText != ""):
#print "data entered -%s-" % self.textEntry.enteredText
t = self.newLdrs[self.iNewItem]
newT = (self.textEntry.enteredText, t[1], t[2])
self.newLdrs[self.iNewItem] = newT
if self.foo:
self.newLdrs.pop(0)
self.leaderData.leaders = self.newLdrs
#self.leaders.leaders.append(self.textEntry.enteredText)
self.leaderData.write(LeaderBoardPhase.lbdFileName)
self.done = True
#self.x.update(dt, userInput)
def draw(self, window):
#print "draw dbgphase"
window.clear()
self.staticBatch.draw()
if self.done:
self.bottomBatch.draw()
if self.textEntry:
self.textEntry.draw(window)
#for l in self.labels:
# l.draw()
def delete(self):
if self.textEntry:
self.textEntry.delete()
def getJoystickPolarLeft(js):
# Note 1: I assume th will just be jittery around the origin.
# Note 2: It's possible r will go above 1.0. We can normalize r based
# on angle here if we want.
x,y = js.x, js.y
r2 = x*x + y*y
th = math.atan2(y,x) * (180.0/math.pi)
return math.sqrt(r2), th
def getJoystickPolarRight(js):
x,y = js.rx, js.ry
r2 = x*x + y*y
th = math.atan2(y,x) * (180.0/math.pi)
return math.sqrt(r2), th
|
|
# Copyright (c) 2012-2013, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
import json
import re
import types
from . import validators
__version__ = "1.2.0"
# constants for DeletionPolicy
Delete = 'Delete'
Retain = 'Retain'
Snapshot = 'Snapshot'
# Pseudo Parameters
AWS_ACCOUNT_ID = 'AWS::AccountId'
AWS_NOTIFICATION_ARNS = 'AWS::NotificationARNs'
AWS_NO_VALUE = 'AWS::NoValue'
AWS_REGION = 'AWS::Region'
AWS_STACK_ID = 'AWS::StackId'
AWS_STACK_NAME = 'AWS::StackName'
valid_names = re.compile(r'^[a-zA-Z0-9]+$')
class BaseAWSObject(object):
def __init__(self, title, template=None, **kwargs):
self.title = title
self.template = template
# Cache the keys for validity checks
self.propnames = self.props.keys()
self.attributes = ['DependsOn', 'DeletionPolicy',
'Metadata', 'UpdatePolicy',
'Condition', 'CreationPolicy']
# unset/None is also legal
if title and not valid_names.match(title):
raise ValueError('Name "%s" not alphanumeric' % title)
# Create the list of properties set on this object by the user
self.properties = {}
dictname = getattr(self, 'dictname', None)
if dictname:
self.resource = {
dictname: self.properties,
}
else:
self.resource = self.properties
if hasattr(self, 'resource_type') and self.resource_type is not None:
self.resource['Type'] = self.resource_type
self.__initialized = True
# Check for properties defined in the class
for k, (_, required) in self.props.items():
v = getattr(type(self), k, None)
if v is not None and k not in kwargs:
self.__setattr__(k, v)
# Now that it is initialized, populate it with the kwargs
for k, v in kwargs.items():
self.__setattr__(k, v)
# Bound it to template if we know it
if self.template is not None:
self.template.add_resource(self)
def __getattr__(self, name):
try:
return self.properties.__getitem__(name)
except KeyError:
# Fall back to the name attribute in the object rather than
# in the properties dict. This is for non-OpenStack backwards
# compatibility since OpenStack objects use a "name" property.
if name == 'name':
return self.__getattribute__('title')
raise AttributeError(name)
def __setattr__(self, name, value):
if name in self.__dict__.keys() \
or '_BaseAWSObject__initialized' not in self.__dict__:
return dict.__setattr__(self, name, value)
elif name in self.attributes:
self.resource[name] = value
return None
elif name in self.propnames:
# Check the type of the object and compare against what we were
# expecting.
expected_type = self.props[name][0]
# If the value is a AWSHelperFn we can't do much validation
# we'll have to leave that to Amazon. Maybe there's another way
# to deal with this that we'll come up with eventually
if isinstance(value, AWSHelperFn):
return self.properties.__setitem__(name, value)
# If it's a function, call it...
elif isinstance(expected_type, types.FunctionType):
value = expected_type(value)
return self.properties.__setitem__(name, value)
# If it's a list of types, check against those types...
elif isinstance(expected_type, list):
# If we're expecting a list, then make sure it is a list
if not isinstance(value, list):
self._raise_type(name, value, expected_type)
# Iterate over the list and make sure it matches our
# type checks (as above except AWSHelperFn because
# we can't do the validation ourselves)
for v in value:
if not isinstance(v, tuple(expected_type)) \
and not isinstance(v, AWSHelperFn):
self._raise_type(name, v, expected_type)
# Validated so assign it
return self.properties.__setitem__(name, value)
# Single type so check the type of the object and compare against
# what we were expecting. Special case AWS helper functions.
elif isinstance(value, expected_type):
return self.properties.__setitem__(name, value)
else:
self._raise_type(name, value, expected_type)
type_name = getattr(self, 'resource_type', self.__class__.__name__)
if type_name == 'AWS::CloudFormation::CustomResource' or \
type_name.startswith('Custom::'):
# Add custom resource arguments to the dict without any further
# validation. The properties of a CustomResource is not known.
return self.properties.__setitem__(name, value)
raise AttributeError("%s object does not support attribute %s" %
(type_name, name))
def _raise_type(self, name, value, expected_type):
raise TypeError('%s is %s, expected %s' %
(name, type(value), expected_type))
def validate(self):
pass
@classmethod
def from_dict(cls, title, dict):
obj = cls(title)
obj.properties.update(dict)
return obj
def JSONrepr(self):
for k, (_, required) in self.props.items():
if required and k not in self.properties:
rtype = getattr(self, 'resource_type', "<unknown type>")
raise ValueError(
"Resource %s required in type %s" % (k, rtype))
self.validate()
# If no other properties are set, only return the Type.
# Mainly used to not have an empty "Properties".
if self.properties:
return self.resource
elif hasattr(self, 'resource_type'):
return {'Type': self.resource_type}
else:
return {}
class AWSObject(BaseAWSObject):
dictname = 'Properties'
class AWSDeclaration(BaseAWSObject):
"""
Used for CloudFormation Resource Property objects
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/
aws-product-property-reference.html
"""
def __init__(self, title, **kwargs):
super(AWSDeclaration, self).__init__(title, **kwargs)
class AWSProperty(BaseAWSObject):
"""
Used for CloudFormation Resource Property objects
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/
aws-product-property-reference.html
"""
dictname = None
def __init__(self, title=None, **kwargs):
super(AWSProperty, self).__init__(title, **kwargs)
class AWSAttribute(BaseAWSObject):
"""
Used for CloudFormation Resource Attribute objects
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/
aws-product-attribute-reference.html
"""
dictname = None
def __init__(self, title=None, **kwargs):
super(AWSAttribute, self).__init__(title, **kwargs)
def validate_pausetime(pausetime):
if not pausetime.startswith('PT'):
raise ValueError('PauseTime should look like PT#H#M#S')
return pausetime
class UpdatePolicy(BaseAWSObject):
def __init__(self, title, **kwargs):
raise DeprecationWarning(
"This UpdatePolicy class is deprecated, please switch to using "
"the more general UpdatePolicy in troposphere.policies.\n"
)
class AWSHelperFn(object):
def getdata(self, data):
if isinstance(data, BaseAWSObject):
return data.title
else:
return data
class Base64(AWSHelperFn):
def __init__(self, data):
self.data = {'Fn::Base64': data}
def JSONrepr(self):
return self.data
class FindInMap(AWSHelperFn):
def __init__(self, mapname, key, value):
self.data = {'Fn::FindInMap': [self.getdata(mapname), key, value]}
def JSONrepr(self):
return self.data
class GetAtt(AWSHelperFn):
def __init__(self, logicalName, attrName):
self.data = {'Fn::GetAtt': [self.getdata(logicalName), attrName]}
def JSONrepr(self):
return self.data
class GetAZs(AWSHelperFn):
def __init__(self, region=""):
self.data = {'Fn::GetAZs': region}
def JSONrepr(self):
return self.data
class If(AWSHelperFn):
def __init__(self, cond, true, false):
self.data = {'Fn::If': [self.getdata(cond), true, false]}
def JSONrepr(self):
return self.data
class Equals(AWSHelperFn):
def __init__(self, value_one, value_two):
self.data = {'Fn::Equals': [value_one, value_two]}
def JSONrepr(self):
return self.data
class And(AWSHelperFn):
def __init__(self, cond_one, cond_two, *conds):
self.data = {'Fn::And': [cond_one, cond_two] + list(conds)}
def JSONrepr(self):
return self.data
class Or(AWSHelperFn):
def __init__(self, cond_one, cond_two, *conds):
self.data = {'Fn::Or': [cond_one, cond_two] + list(conds)}
def JSONrepr(self):
return self.data
class Not(AWSHelperFn):
def __init__(self, cond):
self.data = {'Fn::Not': [self.getdata(cond)]}
def JSONrepr(self):
return self.data
class Join(AWSHelperFn):
def __init__(self, delimiter, values):
self.data = {'Fn::Join': [delimiter, values]}
def JSONrepr(self):
return self.data
class Name(AWSHelperFn):
def __init__(self, data):
self.data = self.getdata(data)
def JSONrepr(self):
return self.data
class Select(AWSHelperFn):
def __init__(self, indx, objects):
self.data = {'Fn::Select': [indx, objects]}
def JSONrepr(self):
return self.data
class Ref(AWSHelperFn):
def __init__(self, data):
self.data = {'Ref': self.getdata(data)}
def JSONrepr(self):
return self.data
class Condition(AWSHelperFn):
def __init__(self, data):
self.data = {'Condition': self.getdata(data)}
def JSONrepr(self):
return self.data
class awsencode(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'JSONrepr'):
return obj.JSONrepr()
return json.JSONEncoder.default(self, obj)
class Tags(AWSHelperFn):
def __init__(self, **kwargs):
self.tags = []
for k, v in sorted(kwargs.iteritems()):
self.tags.append({
'Key': k,
'Value': v,
})
def JSONrepr(self):
return self.tags
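# -----------------------------------------------------------------------------
# Hedged sketch (not part of the original module): a toy resource class showing
# how `props` drives the type checking in BaseAWSObject.__setattr__. The
# resource type and property names are illustrative only.
class _ExampleBucket(AWSObject):
    resource_type = "AWS::S3::Bucket"
    props = {
        'BucketName': (basestring, False),
        'Tags': (Tags, False),
    }
# _ExampleBucket('Assets', BucketName='my-assets') validates BucketName as a
# string; passing an int instead would raise the TypeError from _raise_type().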
class Template(object):
props = {
'AWSTemplateFormatVersion': (basestring, False),
'Description': (basestring, False),
'Parameters': (dict, False),
'Mappings': (dict, False),
'Resources': (dict, False),
'Outputs': (dict, False),
}
def __init__(self):
self.description = None
self.metadata = {}
self.conditions = {}
self.mappings = {}
self.outputs = {}
self.parameters = {}
self.resources = {}
self.version = None
def add_description(self, description):
self.description = description
def add_metadata(self, metadata):
self.metadata = metadata
def add_condition(self, name, condition):
self.conditions[name] = condition
def handle_duplicate_key(self, key):
raise ValueError('duplicate key "%s" detected' % key)
def _update(self, d, values):
if isinstance(values, list):
for v in values:
if v.title in d:
self.handle_duplicate_key(v.title)
d[v.title] = v
else:
if values.title in d:
self.handle_duplicate_key(values.title)
d[values.title] = values
return values
def add_output(self, output):
return self._update(self.outputs, output)
def add_mapping(self, name, mapping):
self.mappings[name] = mapping
def add_parameter(self, parameter):
return self._update(self.parameters, parameter)
def add_resource(self, resource):
return self._update(self.resources, resource)
def add_version(self, version=None):
if version:
self.version = version
else:
self.version = "2010-09-09"
def to_json(self, indent=4, sort_keys=True, separators=(',', ': ')):
t = {}
if self.description:
t['Description'] = self.description
if self.metadata:
t['Metadata'] = self.metadata
if self.conditions:
t['Conditions'] = self.conditions
if self.mappings:
t['Mappings'] = self.mappings
if self.outputs:
t['Outputs'] = self.outputs
if self.parameters:
t['Parameters'] = self.parameters
if self.version:
t['AWSTemplateFormatVersion'] = self.version
t['Resources'] = self.resources
return json.dumps(t, cls=awsencode, indent=indent,
sort_keys=sort_keys, separators=separators)
def JSONrepr(self):
return [self.parameters, self.mappings, self.resources]
class Output(AWSDeclaration):
props = {
'Description': (basestring, False),
'Value': (basestring, True),
}
class Parameter(AWSDeclaration):
STRING_PROPERTIES = ['AllowedPattern', 'MaxLength', 'MinLength']
NUMBER_PROPERTIES = ['MaxValue', 'MinValue']
props = {
'Type': (basestring, True),
'Default': (basestring, False),
'NoEcho': (bool, False),
'AllowedValues': (list, False),
'AllowedPattern': (basestring, False),
'MaxLength': (validators.positive_integer, False),
'MinLength': (validators.positive_integer, False),
'MaxValue': (validators.integer, False),
'MinValue': (validators.integer, False),
'Description': (basestring, False),
'ConstraintDescription': (basestring, False),
}
def validate(self):
if self.properties['Type'] != 'String':
for p in self.STRING_PROPERTIES:
if p in self.properties:
raise ValueError("%s can only be used with parameters of "
"the String type." % p)
if self.properties['Type'] != 'Number':
for p in self.NUMBER_PROPERTIES:
if p in self.properties:
raise ValueError("%s can only be used with parameters of "
"the Number type." % p)
|
|
'''
Widget class
============
The :class:`Widget` class is the base class required for creating Widgets.
This widget class was designed with a couple of principles in mind:
* *Event Driven*
Widget interaction is built on top of events that occur. If a property
changes, the widget can respond to the change in the 'on_<propname>'
callback. If nothing changes, nothing will be done. That's the main
goal of the :class:`~kivy.properties.Property` class.
* *Separation Of Concerns (the widget and its graphical representation)*
Widgets don't have a `draw()` method. This is done on purpose: The idea
is to allow you to create your own graphical representation outside the
widget class.
Obviously you can still use all the available properties to do that, so
that your representation properly reflects the widget's current state.
Every widget has its own :class:`~kivy.graphics.Canvas` that you
can use to draw. This separation allows Kivy to run your
application in a very efficient manner.
* *Bounding Box / Collision*
Often you want to know if a certain point is within the bounds of your
widget. An example would be a button widget where you only want to
trigger an action when the button itself is actually touched.
For this, you can use the :meth:`~Widget.collide_point` method, which
will return True if the point you pass to it is inside the axis-aligned
bounding box defined by the widget's position and size.
If a simple AABB is not sufficient, you can override the method to
perform the collision checks with more complex shapes, e.g. a polygon.
You can also check if a widget collides with another widget with
:meth:`Widget.collide_widget`.
We also have some default values and behaviors that you should be aware of:
* A :class:`Widget` is not a :class:`~kivy.uix.layout.Layout`: it will not
change the position or the size of its children. If you want control over
positioning or sizing, use a :class:`~kivy.uix.layout.Layout`.
* The default size of a widget is (100, 100). This is only changed if the
parent is a :class:`~kivy.uix.layout.Layout`.
For example, if you add a :class:`Label` inside a
:class:`Button`, the label will not inherit the button's size or position
because the button is not a *Layout*: it's just another *Widget*.
* The default size_hint is (1, 1). If the parent is a :class:`Layout`, then the
widget size will be the parent layout's size.
* :meth:`Widget.on_touch_down`, :meth:`Widget.on_touch_move`,
:meth:`Widget.on_touch_up` don't do any sort of collisions. If you want to
know if the touch is inside your widget, use :meth:`Widget.collide_point`.
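For example, a minimal handler along these lines (the class name is purely
illustrative) only reacts to touches that actually land inside the widget:
.. code-block:: python
    class ClickableWidget(Widget):
        def on_touch_down(self, touch):
            if self.collide_point(*touch.pos):
                print('touched inside', self)
                return True
            return super(ClickableWidget, self).on_touch_down(touch)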
Using Properties
----------------
When you read the documentation, all properties are described in the format::
<name> is a <property class> and defaults to <default value>.
e.g.
:attr:`~kivy.uix.label.Label.text` is a
:class:`~kivy.properties.StringProperty` and defaults to ''.
If you want to be notified when the pos attribute changes, i.e. when the
widget moves, you can bind your own callback function like this::
def callback_pos(instance, value):
print('The widget', instance, 'moved to', value)
wid = Widget()
wid.bind(pos=callback_pos)
Read more about :doc:`/api-kivy.properties`.
Basic drawing
-------------
Widgets support a range of drawing instructions that you can use to customize
the look of your widgets and layouts. For example, to draw a background image
for your widget, you can do the following:
.. code-block:: python
    def redraw(instance, value):
        instance.bg_rect.size = instance.size
        instance.bg_rect.pos = instance.pos

    widget = Widget()
    with widget.canvas:
        widget.bg_rect = Rectangle(source="cover.jpg", pos=widget.pos,
                                   size=widget.size)
    widget.bind(pos=redraw, size=redraw)
To draw a background in kv:
.. code-block:: kv
Widget:
canvas:
Rectangle:
source: "cover.jpg"
size: self.size
pos: self.pos
These examples only scratch the surface. Please see the :mod:`kivy.graphics`
documentation for more information.
.. _widget-event-bubbling:
Widget touch event bubbling
---------------------------
When you catch touch events between multiple widgets, you often
need to be aware of the order in which these events are propagated. In Kivy,
events bubble up from the most recently added widget and then backwards through
its children (from the most recently added back to the first child). This order
is the same for the `on_touch_move` and `on_touch_up` events.
If you want to reverse this order, you can raise events in the children before
the parent by using the `super` command. For example:
.. code-block:: python
class MyWidget(Widget):
def on_touch_down(self, touch):
super(MyWidget, self).on_touch_down(touch)
# Do stuff here
In general, this would seldom be the best approach as every event bubbles all
the way through the widget tree and there is no way of determining if it has
been handled. In order to stop this event bubbling, one of these methods must
return `True`. At that point, Kivy assumes the event has been handled and the
propagation stops.
This means that the recommended approach is to let the event bubble naturally
but swallow the event if it has been handled. For example:
.. code-block:: python
class MyWidget(Widget):
def on_touch_down(self, touch):
            if <some_condition>:
# Do stuff here and kill the event
return True
else:
# Continue normal event bubbling
return super(MyWidget, self).on_touch_down(touch)
This approach gives you good control over exactly how events are dispatched
and managed. Sometimes, however, you may wish to let the event be completely
propagated before taking action. You can use the
:class:`~kivy.clock.Clock` to help you here:
.. code-block:: python
class MyLabel(Label):
def on_touch_down(self, touch, after=False):
if after:
print "Fired after the event has been dispatched!"
else:
Clock.schedule_once(lambda dt: self.on_touch_down(touch, True))
return super(MyLabel, self).on_touch_down(touch)
Usage of :attr:`Widget.center`, :attr:`Widget.right`, and :attr:`Widget.top`
----------------------------------------------------------------------------
A common mistake when using one of the computed properties such as
:attr:`Widget.right` is to use it to make a widget follow its parent with a
KV rule such as `right: self.parent.right`. Consider, for example:
.. code-block:: kv
FloatLayout:
id: layout
width: 100
Widget:
id: wid
right: layout.right
The (mistaken) expectation is that this rule ensures that wid's right will
always be whatever layout's right is - that is wid.right and layout.right will
always be identical. In actual fact, this rule only says that "whenever
layout's `right` changes, wid's right will be set to that value". The
difference is that as long as `layout.right` doesn't change, `wid.right`
could be anything, even a value that will make them different.
Specifically, for the KV code above, consider the following example::
>>> print(layout.right, wid.right)
(100, 100)
>>> wid.x = 200
>>> print(layout.right, wid.right)
(100, 300)
As can be seen, initially they are in sync, however, when we change `wid.x`
they go out of sync because `layout.right` is not changed and the rule is not
triggered.
The proper way to make the widget follow its parent's right is to use
:attr:`Widget.pos_hint`. If instead of `right: layout.right` we did
`pos_hint: {'right': 1}`, then the widget's right will always be set to be
at the parent's right at each layout update.
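In kv, the corrected rule from the example above reads (a minimal sketch):

.. code-block:: kv

    FloatLayout:
        id: layout
        width: 100
        Widget:
            id: wid
            pos_hint: {'right': 1}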
'''
__all__ = ('Widget', 'WidgetException')
from kivy.event import EventDispatcher
from kivy.factory import Factory
from kivy.properties import (
NumericProperty, StringProperty, AliasProperty, ReferenceListProperty,
ObjectProperty, ListProperty, DictProperty, BooleanProperty)
from kivy.graphics import (
Canvas, Translate, Fbo, ClearColor, ClearBuffers, Scale)
from kivy.graphics.transformation import Matrix
from kivy.base import EventLoop
from kivy.lang import Builder
from kivy.context import get_current_context
from kivy.weakproxy import WeakProxy
from functools import partial
from itertools import islice
# References to all the widget destructors (partial method with widget uid as
# key).
_widget_destructors = {}
def _widget_destructor(uid, r):
    # Internal method called when a widget is deleted from memory. The only
# thing we remember about it is its uid. Clear all the associated callbacks
# created in kv language.
del _widget_destructors[uid]
Builder.unbind_widget(uid)
class WidgetException(Exception):
'''Fired when the widget gets an exception.
'''
pass
class WidgetMetaclass(type):
'''Metaclass to automatically register new widgets for the
:class:`~kivy.factory.Factory`.
.. warning::
This metaclass is used by the Widget. Do not use it directly!
'''
def __init__(mcs, name, bases, attrs):
super(WidgetMetaclass, mcs).__init__(name, bases, attrs)
Factory.register(name, cls=mcs)
#: Base class used for Widget, that inherits from :class:`EventDispatcher`
WidgetBase = WidgetMetaclass('WidgetBase', (EventDispatcher, ), {})
class Widget(WidgetBase):
'''Widget class. See module documentation for more information.
:Events:
`on_touch_down`:
Fired when a new touch event occurs
`on_touch_move`:
Fired when an existing touch moves
`on_touch_up`:
Fired when an existing touch disappears
.. warning::
Adding a `__del__` method to a class derived from Widget with Python
prior to 3.4 will disable automatic garbage collection for instances
of that class. This is because the Widget class creates reference
cycles, thereby `preventing garbage collection
<https://docs.python.org/2/library/gc.html#gc.garbage>`_.
.. versionchanged:: 1.0.9
Everything related to event properties has been moved to the
:class:`~kivy.event.EventDispatcher`. Event properties can now be used
        when constructing a simple class without subclassing :class:`Widget`.
.. versionchanged:: 1.5.0
The constructor now accepts on_* arguments to automatically bind
callbacks to properties or events, as in the Kv language.
'''
__metaclass__ = WidgetMetaclass
__events__ = ('on_touch_down', 'on_touch_move', 'on_touch_up')
_proxy_ref = None
def __init__(self, **kwargs):
        # Before doing anything, ensure the window exists.
EventLoop.ensure_window()
# Assign the default context of the widget creation.
if not hasattr(self, '_context'):
self._context = get_current_context()
no_builder = '__no_builder' in kwargs
if no_builder:
del kwargs['__no_builder']
on_args = {k: v for k, v in kwargs.items() if k[:3] == 'on_'}
for key in on_args:
del kwargs[key]
super(Widget, self).__init__(**kwargs)
# Create the default canvas if it does not exist.
if self.canvas is None:
self.canvas = Canvas(opacity=self.opacity)
# Apply all the styles.
if not no_builder:
#current_root = Builder.idmap.get('root')
#Builder.idmap['root'] = self
Builder.apply(self)
#if current_root is not None:
# Builder.idmap['root'] = current_root
#else:
# Builder.idmap.pop('root')
# Bind all the events.
if on_args:
self.bind(**on_args)
@property
def proxy_ref(self):
'''Return a proxy reference to the widget, i.e. without creating a
reference to the widget. See `weakref.proxy
<http://docs.python.org/2/library/weakref.html?highlight\
=proxy#weakref.proxy>`_ for more information.
.. versionadded:: 1.7.2
'''
_proxy_ref = self._proxy_ref
if _proxy_ref is not None:
return _proxy_ref
f = partial(_widget_destructor, self.uid)
self._proxy_ref = _proxy_ref = WeakProxy(self, f)
# Only f should be enough here, but it appears that is a very
# specific case, the proxy destructor is not called if both f and
# _proxy_ref are not together in a tuple.
_widget_destructors[self.uid] = (f, _proxy_ref)
return _proxy_ref
def __hash__(self):
return id(self)
@property
def __self__(self):
return self
#
# Collision
#
def collide_point(self, x, y):
'''
Check if a point (x, y) is inside the widget's axis aligned bounding
box.
:Parameters:
`x`: numeric
x position of the point (in window coordinates)
`y`: numeric
y position of the point (in window coordinates)
:Returns:
A bool. True if the point is inside the bounding box, False
otherwise.
.. code-block:: python
>>> Widget(pos=(10, 10), size=(50, 50)).collide_point(40, 40)
True
'''
return self.x <= x <= self.right and self.y <= y <= self.top
def collide_widget(self, wid):
'''
Check if another widget collides with this widget. This function
performs an axis-aligned bounding box intersection test by default.
:Parameters:
`wid`: :class:`Widget` class
Widget to collide with.
:Returns:
bool. True if the other widget collides with this widget, False
otherwise.
.. code-block:: python
>>> wid = Widget(size=(50, 50))
>>> wid2 = Widget(size=(50, 50), pos=(25, 25))
>>> wid.collide_widget(wid2)
True
>>> wid2.pos = (55, 55)
>>> wid.collide_widget(wid2)
False
'''
if self.right < wid.x:
return False
if self.x > wid.right:
return False
if self.top < wid.y:
return False
if self.y > wid.top:
return False
return True
#
# Default event handlers
#
def on_touch_down(self, touch):
'''Receive a touch down event.
:Parameters:
`touch`: :class:`~kivy.input.motionevent.MotionEvent` class
Touch received. The touch is in parent coordinates. See
:mod:`~kivy.uix.relativelayout` for a discussion on
coordinate systems.
:Returns:
bool. If True, the dispatching of the touch event will stop.
If False, the event will continue to be dispatched to the rest
of the widget tree.
'''
if self.disabled and self.collide_point(*touch.pos):
return True
for child in self.children[:]:
if child.dispatch('on_touch_down', touch):
return True
def on_touch_move(self, touch):
'''Receive a touch move event. The touch is in parent coordinates.
See :meth:`on_touch_down` for more information.
'''
if self.disabled:
return
for child in self.children[:]:
if child.dispatch('on_touch_move', touch):
return True
def on_touch_up(self, touch):
'''Receive a touch up event. The touch is in parent coordinates.
See :meth:`on_touch_down` for more information.
'''
if self.disabled:
return
for child in self.children[:]:
if child.dispatch('on_touch_up', touch):
return True
def on_disabled(self, instance, value):
for child in self.children:
child.disabled = value
#
# Tree management
#
def add_widget(self, widget, index=0, canvas=None):
'''Add a new widget as a child of this widget.
:Parameters:
`widget`: :class:`Widget`
Widget to add to our list of children.
`index`: int, defaults to 0
Index to insert the widget in the list.
.. versionadded:: 1.0.5
`canvas`: str, defaults to None
Canvas to add widget's canvas to. Can be 'before', 'after' or
None for the default canvas.
.. versionadded:: 1.9.0
.. code-block:: python
>>> from kivy.uix.button import Button
>>> from kivy.uix.slider import Slider
>>> root = Widget()
>>> root.add_widget(Button())
>>> slider = Slider()
>>> root.add_widget(slider)
'''
if not isinstance(widget, Widget):
raise WidgetException(
'add_widget() can be used only with instances'
' of the Widget class.')
widget = widget.__self__
if widget is self:
raise WidgetException(
'Widget instances cannot be added to themselves.')
parent = widget.parent
# Check if the widget is already a child of another widget.
if parent:
raise WidgetException('Cannot add %r, it already has a parent %r'
% (widget, parent))
widget.parent = parent = self
# Child will be disabled if added to a disabled parent.
if parent.disabled:
widget.disabled = True
canvas = self.canvas.before if canvas == 'before' else \
self.canvas.after if canvas == 'after' else self.canvas
if index == 0 or len(self.children) == 0:
self.children.insert(0, widget)
canvas.add(widget.canvas)
else:
canvas = self.canvas
children = self.children
if index >= len(children):
index = len(children)
next_index = 0
else:
next_child = children[index]
next_index = canvas.indexof(next_child.canvas)
if next_index == -1:
next_index = canvas.length()
else:
next_index += 1
children.insert(index, widget)
# We never want to insert widget _before_ canvas.before.
if next_index == 0 and canvas.has_before:
next_index = 1
canvas.insert(next_index, widget.canvas)
def remove_widget(self, widget):
'''Remove a widget from the children of this widget.
:Parameters:
`widget`: :class:`Widget`
Widget to remove from our children list.
.. code-block:: python
>>> from kivy.uix.button import Button
>>> root = Widget()
>>> button = Button()
>>> root.add_widget(button)
>>> root.remove_widget(button)
'''
if widget not in self.children:
return
self.children.remove(widget)
if widget.canvas in self.canvas.children:
self.canvas.remove(widget.canvas)
elif widget.canvas in self.canvas.after.children:
self.canvas.after.remove(widget.canvas)
elif widget.canvas in self.canvas.before.children:
self.canvas.before.remove(widget.canvas)
widget.parent = None
def clear_widgets(self, children=None):
'''
Remove all (or the specified) :attr:`~Widget.children` of this widget.
If the 'children' argument is specified, it should be a list (or
filtered list) of children of the current widget.
.. versionchanged:: 1.8.0
The `children` argument can be used to specify the children you
want to remove.
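        A minimal usage sketch (assuming ``Button`` has been imported; only
        the Button children are removed):

        .. code-block:: python

            root.clear_widgets([c for c in root.children
                                if isinstance(c, Button)])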
'''
if not children:
children = self.children
remove_widget = self.remove_widget
for child in children[:]:
remove_widget(child)
def export_to_png(self, filename, *args):
'''Saves an image of the widget and its children in png format at the
specified filename. Works by removing the widget canvas from its
parent, rendering to an :class:`~kivy.graphics.fbo.Fbo`, and calling
:meth:`~kivy.graphics.texture.Texture.save`.
.. note::
The image includes only this widget and its children. If you want
to include widgets elsewhere in the tree, you must call
:meth:`~Widget.export_to_png` from their common parent, or use
:meth:`~kivy.core.window.WindowBase.screenshot` to capture the whole
window.
.. note::
The image will be saved in png format, you should include the
extension in your filename.
.. versionadded:: 1.9.0
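        A minimal usage sketch (the file path is illustrative):

        .. code-block:: python

            widget.export_to_png('/tmp/widget_snapshot.png')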
'''
if self.parent is not None:
canvas_parent_index = self.parent.canvas.indexof(self.canvas)
self.parent.canvas.remove(self.canvas)
fbo = Fbo(size=self.size, with_stencilbuffer=True)
with fbo:
ClearColor(0, 0, 0, 1)
ClearBuffers()
Scale(1, -1, 1)
Translate(-self.x, -self.y - self.height, 0)
fbo.add(self.canvas)
fbo.draw()
fbo.texture.save(filename, flipped=False)
fbo.remove(self.canvas)
if self.parent is not None:
self.parent.canvas.insert(canvas_parent_index, self.canvas)
return True
def get_root_window(self):
'''Return the root window.
:Returns:
Instance of the root window. Can be a
:class:`~kivy.core.window.WindowBase` or
:class:`Widget`.
'''
if self.parent:
return self.parent.get_root_window()
def get_parent_window(self):
'''Return the parent window.
:Returns:
Instance of the parent window. Can be a
:class:`~kivy.core.window.WindowBase` or
:class:`Widget`.
'''
if self.parent:
return self.parent.get_parent_window()
def _walk(self, restrict=False, loopback=False, index=None):
# We pass index only when we are going on the parent
# so don't yield the parent as well.
if index is None:
index = len(self.children)
yield self
for child in reversed(self.children[:index]):
for walk_child in child._walk(restrict=True):
yield walk_child
# If we want to continue with our parent, just do it.
if not restrict:
parent = self.parent
try:
if parent is None or not isinstance(parent, Widget):
raise ValueError
index = parent.children.index(self)
except ValueError:
# Self is root, if we want to loopback from the first element:
if not loopback:
return
# If we started with root (i.e. index==None), then we have to
# start from root again, so we return self again. Otherwise, we
# never returned it, so return it now starting with it.
parent = self
index = None
for walk_child in parent._walk(loopback=loopback, index=index):
yield walk_child
def walk(self, restrict=False, loopback=False):
''' Iterator that walks the widget tree starting with this widget and
goes forward returning widgets in the order in which layouts display
them.
:Parameters:
`restrict`: bool, defaults to False
If True, it will only iterate through the widget and its
children (or children of its children etc.). Defaults to False.
`loopback`: bool, defaults to False
If True, when the last widget in the tree is reached,
it'll loop back to the uppermost root and start walking until
we hit this widget again. Naturally, it can only loop back when
`restrict` is False. Defaults to False.
:return:
A generator that walks the tree, returning widgets in the
forward layout order.
For example, given a tree with the following structure:
.. code-block:: kv
GridLayout:
Button
BoxLayout:
id: box
Widget
Button
Widget
walking this tree:
.. code-block:: python
>>> # Call walk on box with loopback True, and restrict False
>>> [type(widget) for widget in box.walk(loopback=True)]
[<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,
<class 'Widget'>, <class 'GridLayout'>, <class 'Button'>]
>>> # Now with loopback False, and restrict False
>>> [type(widget) for widget in box.walk()]
[<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,
<class 'Widget'>]
>>> # Now with restrict True
>>> [type(widget) for widget in box.walk(restrict=True)]
[<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>]
.. versionadded:: 1.9.0
'''
gen = self._walk(restrict, loopback)
yield next(gen)
for node in gen:
if node is self:
return
yield node
def _walk_reverse(self, loopback=False, go_up=False):
# process is walk up level, walk down its children tree, then walk up
# next level etc.
# default just walk down the children tree
root = self
index = 0
# we need to go up a level before walking tree
if go_up:
root = self.parent
try:
if root is None or not isinstance(root, Widget):
raise ValueError
index = root.children.index(self) + 1
except ValueError:
if not loopback:
return
index = 0
go_up = False
root = self
# now walk children tree starting with last-most child
for child in islice(root.children, index, None):
for walk_child in child._walk_reverse(loopback=loopback):
yield walk_child
# we need to return ourself last, in all cases
yield root
# if going up, continue walking up the parent tree
if go_up:
for walk_child in root._walk_reverse(loopback=loopback,
go_up=go_up):
yield walk_child
def walk_reverse(self, loopback=False):
''' Iterator that walks the widget tree backwards starting with the
widget before this, and going backwards returning widgets in the
reverse order in which layouts display them.
This walks in the opposite direction of :meth:`walk`, so a list of the
tree generated with :meth:`walk` will be in reverse order compared
to the list generated with this, provided `loopback` is True.
:Parameters:
`loopback`: bool, defaults to False
If True, when the uppermost root in the tree is
reached, it'll loop back to the last widget and start walking
                back until we hit this widget again. Defaults to False.
:return:
A generator that walks the tree, returning widgets in the
reverse layout order.
For example, given a tree with the following structure:
.. code-block:: kv
GridLayout:
Button
BoxLayout:
id: box
Widget
Button
Widget
walking this tree:
.. code-block:: python
>>> # Call walk on box with loopback True
>>> [type(widget) for widget in box.walk_reverse(loopback=True)]
[<class 'Button'>, <class 'GridLayout'>, <class 'Widget'>,
<class 'Button'>, <class 'Widget'>, <class 'BoxLayout'>]
>>> # Now with loopback False
>>> [type(widget) for widget in box.walk_reverse()]
[<class 'Button'>, <class 'GridLayout'>]
>>> forward = [w for w in box.walk(loopback=True)]
>>> backward = [w for w in box.walk_reverse(loopback=True)]
>>> forward == backward[::-1]
True
.. versionadded:: 1.9.0
'''
for node in self._walk_reverse(loopback=loopback, go_up=True):
yield node
if node is self:
return
def to_widget(self, x, y, relative=False):
'''Convert the given coordinate from window to local widget
coordinates. See :mod:`~kivy.uix.relativelayout` for details on the
coordinate systems.
'''
if self.parent:
x, y = self.parent.to_widget(x, y)
return self.to_local(x, y, relative=relative)
def to_window(self, x, y, initial=True, relative=False):
'''Transform local coordinates to window coordinates. See
:mod:`~kivy.uix.relativelayout` for details on the coordinate systems.
'''
if not initial:
x, y = self.to_parent(x, y, relative=relative)
if self.parent:
return self.parent.to_window(x, y, initial=False,
relative=relative)
return (x, y)
def to_parent(self, x, y, relative=False):
'''Transform local coordinates to parent coordinates. See
:mod:`~kivy.uix.relativelayout` for details on the coordinate systems.
:Parameters:
`relative`: bool, defaults to False
Change to True if you want to translate relative positions from
a widget to its parent coordinates.
'''
if relative:
return (x + self.x, y + self.y)
return (x, y)
def to_local(self, x, y, relative=False):
'''Transform parent coordinates to local coordinates. See
:mod:`~kivy.uix.relativelayout` for details on the coordinate systems.
:Parameters:
`relative`: bool, defaults to False
Change to True if you want to translate coordinates to
relative widget coordinates.
'''
if relative:
return (x - self.x, y - self.y)
return (x, y)
def _apply_transform(self, m, pos=None):
if self.parent:
x, y = self.parent.to_widget(relative=True,
*self.to_window(*(pos or self.pos)))
m.translate(x, y, 0)
m = self.parent._apply_transform(m) if self.parent else m
return m
def get_window_matrix(self, x=0, y=0):
'''Calculate the transformation matrix to convert between window and
widget coordinates.
:Parameters:
`x`: float, defaults to 0
Translates the matrix on the x axis.
`y`: float, defaults to 0
Translates the matrix on the y axis.
'''
m = Matrix()
m.translate(x, y, 0)
m = self._apply_transform(m)
return m
x = NumericProperty(0)
'''X position of the widget.
:attr:`x` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.
'''
y = NumericProperty(0)
'''Y position of the widget.
:attr:`y` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.
'''
width = NumericProperty(100)
'''Width of the widget.
:attr:`width` is a :class:`~kivy.properties.NumericProperty` and defaults
to 100.
.. warning::
Keep in mind that the `width` property is subject to layout logic and
that this has not yet happened at the time of the widget's `__init__`
method.
'''
height = NumericProperty(100)
'''Height of the widget.
:attr:`height` is a :class:`~kivy.properties.NumericProperty` and defaults
to 100.
.. warning::
Keep in mind that the `height` property is subject to layout logic and
that this has not yet happened at the time of the widget's `__init__`
method.
'''
pos = ReferenceListProperty(x, y)
'''Position of the widget.
:attr:`pos` is a :class:`~kivy.properties.ReferenceListProperty` of
(:attr:`x`, :attr:`y`) properties.
'''
size = ReferenceListProperty(width, height)
'''Size of the widget.
:attr:`size` is a :class:`~kivy.properties.ReferenceListProperty` of
(:attr:`width`, :attr:`height`) properties.
'''
def get_right(self):
return self.x + self.width
def set_right(self, value):
self.x = value - self.width
right = AliasProperty(get_right, set_right, bind=('x', 'width'))
'''Right position of the widget.
:attr:`right` is an :class:`~kivy.properties.AliasProperty` of
(:attr:`x` + :attr:`width`).
'''
def get_top(self):
return self.y + self.height
def set_top(self, value):
self.y = value - self.height
top = AliasProperty(get_top, set_top, bind=('y', 'height'))
'''Top position of the widget.
:attr:`top` is an :class:`~kivy.properties.AliasProperty` of
(:attr:`y` + :attr:`height`).
'''
def get_center_x(self):
return self.x + self.width / 2.
def set_center_x(self, value):
self.x = value - self.width / 2.
center_x = AliasProperty(get_center_x, set_center_x, bind=('x', 'width'))
'''X center position of the widget.
:attr:`center_x` is an :class:`~kivy.properties.AliasProperty` of
(:attr:`x` + :attr:`width` / 2.).
'''
def get_center_y(self):
return self.y + self.height / 2.
def set_center_y(self, value):
self.y = value - self.height / 2.
center_y = AliasProperty(get_center_y, set_center_y, bind=('y', 'height'))
'''Y center position of the widget.
:attr:`center_y` is an :class:`~kivy.properties.AliasProperty` of
(:attr:`y` + :attr:`height` / 2.).
'''
center = ReferenceListProperty(center_x, center_y)
'''Center position of the widget.
:attr:`center` is a :class:`~kivy.properties.ReferenceListProperty` of
(:attr:`center_x`, :attr:`center_y`) properties.
'''
cls = ListProperty([])
'''Class of the widget, used for styling.
'''
id = StringProperty(None, allownone=True)
'''Unique identifier of the widget in the tree.
:attr:`id` is a :class:`~kivy.properties.StringProperty` and defaults to
None.
.. warning::
If the :attr:`id` is already used in the tree, an exception will
be raised.
'''
children = ListProperty([])
'''List of children of this widget.
:attr:`children` is a :class:`~kivy.properties.ListProperty` and
defaults to an empty list.
Use :meth:`add_widget` and :meth:`remove_widget` for manipulating the
children list. Don't manipulate the children list directly unless you know
what you are doing.
'''
parent = ObjectProperty(None, allownone=True)
'''Parent of this widget. The parent of a widget is set when the widget
is added to another widget and unset when the widget is removed from its
parent.
:attr:`parent` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
size_hint_x = NumericProperty(1, allownone=True)
'''X size hint. Represents how much space the widget should use in the
direction of the X axis relative to its parent's width.
Only the :class:`~kivy.uix.layout.Layout` and
:class:`~kivy.core.window.Window` classes make use of the hint.
The size_hint is used by layouts for two purposes:
- When the layout considers widgets on their own rather than in
relation to its other children, the size_hint_x is a direct proportion
of the parent width, normally between 0.0 and 1.0. For instance, a
widget with ``size_hint_x=0.5`` in
a vertical BoxLayout will take up half the BoxLayout's width, or
a widget in a FloatLayout with ``size_hint_x=0.2`` will take up 20%
of the FloatLayout width. If the size_hint is greater than 1, the
widget will be wider than the parent.
- When multiple widgets can share a row of a layout, such as in a
horizontal BoxLayout, their widths will be their size_hint_x as a
fraction of the sum of widget size_hints. For instance, if the
size_hint_xs are (0.5, 1.0, 0.5), the first widget will have a
width of 25% of the parent width.
:attr:`size_hint_x` is a :class:`~kivy.properties.NumericProperty` and
defaults to 1.
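    For illustration (a minimal sketch; the numbers are arbitrary), three
    buttons sharing a horizontal BoxLayout:

    .. code-block:: kv

        BoxLayout:
            Button:
                size_hint_x: 0.5
            Button:
                size_hint_x: 1.0
            Button:
                size_hint_x: 0.5

    Here the first and last buttons each get 25% of the layout width and the
    middle one gets 50%, matching the proportions described above.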
'''
size_hint_y = NumericProperty(1, allownone=True)
'''Y size hint.
:attr:`size_hint_y` is a :class:`~kivy.properties.NumericProperty` and
defaults to 1.
See :attr:`size_hint_x` for more information, but with widths and heights
swapped.
'''
size_hint = ReferenceListProperty(size_hint_x, size_hint_y)
'''Size hint.
:attr:`size_hint` is a :class:`~kivy.properties.ReferenceListProperty` of
(:attr:`size_hint_x`, :attr:`size_hint_y`) properties.
See :attr:`size_hint_x` for more information.
'''
pos_hint = ObjectProperty({})
'''Position hint. This property allows you to set the position of
the widget inside its parent layout, in percent (similar to
size_hint).
For example, if you want to set the top of the widget to be at 90%
height of its parent layout, you can write::
widget = Widget(pos_hint={'top': 0.9})
The keys 'x', 'right' and 'center_x' will use the parent width.
The keys 'y', 'top' and 'center_y' will use the parent height.
See :doc:`api-kivy.uix.floatlayout` for further reference.
.. note::
:attr:`pos_hint` is not used by all layouts. Check the documentation
of the layout in question to see if it supports pos_hint.
:attr:`pos_hint` is an :class:`~kivy.properties.ObjectProperty`
containing a dict.
'''
ids = DictProperty({})
'''This is a dictionary of ids defined in your kv language. This will only
be populated if you use ids in your kv language code.
.. versionadded:: 1.7.0
:attr:`ids` is a :class:`~kivy.properties.DictProperty` and defaults to an
empty dict {}.
The :attr:`ids` are populated for each root level widget definition. For
example:
.. code-block:: kv
# in kv
<MyWidget@Widget>:
id: my_widget
Label:
id: label_widget
Widget:
id: inner_widget
Label:
id: inner_label
TextInput:
id: text_input
OtherWidget:
id: other_widget
<OtherWidget@Widget>
id: other_widget
Label:
id: other_label
TextInput:
id: other_textinput
Then, in python:
.. code-block:: python
>>> widget = MyWidget()
>>> print(widget.ids)
{'other_widget': <weakproxy at 041CFED0 to OtherWidget at 041BEC38>,
'inner_widget': <weakproxy at 04137EA0 to Widget at 04138228>,
'inner_label': <weakproxy at 04143540 to Label at 04138260>,
'label_widget': <weakproxy at 04137B70 to Label at 040F97A0>,
'text_input': <weakproxy at 041BB5D0 to TextInput at 041BEC00>}
>>> print(widget.ids['other_widget'].ids)
{'other_textinput': <weakproxy at 041DBB40 to TextInput at 041BEF48>,
'other_label': <weakproxy at 041DB570 to Label at 041BEEA0>}
>>> print(widget.ids['label_widget'].ids)
{}
'''
opacity = NumericProperty(1.0)
'''Opacity of the widget and all its children.
.. versionadded:: 1.4.1
The opacity attribute controls the opacity of the widget and its children.
Be careful, it's a cumulative attribute: the value is multiplied by the
current global opacity and the result is applied to the current context
color.
For example, if the parent has an opacity of 0.5 and a child has an
opacity of 0.2, the real opacity of the child will be 0.5 * 0.2 = 0.1.
Then, the opacity is applied by the shader as:
.. code-block:: python
frag_color = color * vec4(1.0, 1.0, 1.0, opacity);
:attr:`opacity` is a :class:`~kivy.properties.NumericProperty` and defaults
to 1.0.
'''
def on_opacity(self, instance, value):
canvas = self.canvas
if canvas is not None:
canvas.opacity = value
canvas = None
'''Canvas of the widget.
The canvas is a graphics object that contains all the drawing instructions
for the graphical representation of the widget.
There are no general properties for the Widget class, such as background
color, to keep the design simple and lean. Some derived classes, such as
Button, do add such convenience properties but generally the developer is
responsible for implementing the graphics representation for a custom
widget from the ground up. See the derived widget classes for patterns to
follow and extend.
See :class:`~kivy.graphics.Canvas` for more information about the usage.
'''
disabled = BooleanProperty(False)
'''Indicates whether this widget can interact with input or not.
.. note::
1. Child Widgets, when added to a disabled widget, will be disabled
automatically.
2. Disabling/enabling a parent disables/enables all
of its children.
.. versionadded:: 1.8.0
:attr:`disabled` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
|
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import (RegexURLResolver, Resolver404, reverse,
RegexURLPattern)
from django.utils import six
from django.utils.importlib import import_module
from django.utils.translation import get_language
from cms.apphook_pool import apphook_pool
from cms.models.pagemodel import Page
from cms.utils.i18n import force_language, get_language_list
APP_RESOLVERS = []
def clear_app_resolvers():
global APP_RESOLVERS
APP_RESOLVERS = []
def applications_page_check(request, current_page=None, path=None):
"""Tries to find if given path was resolved over application.
Applications have higher priority than other cms pages.
"""
if current_page:
return current_page
if path is None:
# We should get in this branch only if an apphook is active on /
# This removes the non-CMS part of the URL.
path = request.path_info.replace(reverse('pages-root'), '', 1)
# check if application resolver can resolve this
for lang in get_language_list():
if path.startswith(lang + "/"):
path = path[len(lang + "/"):]
for resolver in APP_RESOLVERS:
try:
page_id = resolver.resolve_page_id(path)
# yes, it is application page
page = Page.objects.public().get(id=page_id)
            # If a current page was matched, it already overrides the CMS
            # content, so keep it. Otherwise, return the page the application
            # was assigned to.
return page
except Resolver404:
# Raised if the page is not managed by an apphook
pass
return None
class AppRegexURLResolver(RegexURLResolver):
def __init__(self, *args, **kwargs):
self.page_id = None
self.url_patterns_dict = {}
super(AppRegexURLResolver, self).__init__(*args, **kwargs)
@property
def url_patterns(self):
language = get_language()
if language in self.url_patterns_dict:
return self.url_patterns_dict[language]
else:
return []
def resolve_page_id(self, path):
"""Resolves requested path similar way how resolve does, but instead
of return callback,.. returns page_id to which was application
assigned.
"""
tried = []
match = self.regex.search(path)
if match:
new_path = path[match.end():]
for pattern in self.url_patterns:
if isinstance(pattern, AppRegexURLResolver):
try:
return pattern.resolve_page_id(new_path)
except Resolver404:
pass
else:
try:
sub_match = pattern.resolve(new_path)
except Resolver404 as e:
tried_match = e.args[0].get('tried')
if tried_match is not None:
tried.extend([[pattern] + t for t in tried_match])
else:
tried.extend([pattern])
else:
if sub_match:
return pattern.page_id
tried.append(pattern.regex.pattern)
raise Resolver404({'tried': tried, 'path': new_path})
def recurse_patterns(path, pattern_list, page_id, default_args=None, nested=False):
"""
Recurse over a list of to-be-hooked patterns for a given path prefix
"""
newpatterns = []
for pattern in pattern_list:
app_pat = pattern.regex.pattern
# make sure we don't get patterns that start with more than one '^'!
app_pat = app_pat.lstrip('^')
path = path.lstrip('^')
regex = r'^%s%s' % (path, app_pat) if not nested else r'^%s' % (app_pat)
if isinstance(pattern, RegexURLResolver):
# this is an 'include', recurse!
resolver = RegexURLResolver(regex, 'cms_appresolver',
pattern.default_kwargs, pattern.app_name, pattern.namespace)
resolver.page_id = page_id
# include default_args
args = pattern.default_kwargs
if default_args:
args.update(default_args)
# see lines 243 and 236 of urlresolvers.py to understand the next line
resolver._urlconf_module = recurse_patterns(regex, pattern.url_patterns, page_id, args, nested=True)
else:
# Re-do the RegexURLPattern with the new regular expression
args = pattern.default_args
if default_args:
args.update(default_args)
resolver = RegexURLPattern(regex, pattern.callback,
args, pattern.name)
resolver.page_id = page_id
newpatterns.append(resolver)
return newpatterns
def _set_permissions(patterns, exclude_permissions):
for pattern in patterns:
if isinstance(pattern, RegexURLResolver):
if pattern.namespace in exclude_permissions:
continue
_set_permissions(pattern.url_patterns, exclude_permissions)
else:
from cms.utils.decorators import cms_perms
pattern._callback = cms_perms(pattern.callback)
def get_app_urls(urls):
for urlconf in urls:
if isinstance(urlconf, six.string_types):
mod = import_module(urlconf)
if not hasattr(mod, 'urlpatterns'):
raise ImproperlyConfigured(
"URLConf `%s` has no urlpatterns attribute" % urlconf)
yield getattr(mod, 'urlpatterns')
else:
yield urlconf
def get_patterns_for_title(path, title):
"""
Resolve the urlconf module for a path+title combination
Returns a list of url objects.
"""
app = apphook_pool.get_apphook(title.page.application_urls)
url_patterns = []
for pattern_list in get_app_urls(app.urls):
if path and not path.endswith('/'):
path += '/'
page_id = title.page.id
url_patterns += recurse_patterns(path, pattern_list, page_id)
return url_patterns
def get_app_patterns():
"""
Get a list of patterns for all hooked apps.
How this works:
By looking through all titles with an app hook (application_urls) we find all
urlconf modules we have to hook into titles.
If we use the ML URL Middleware, we namespace those patterns with the title
language.
All 'normal' patterns from the urlconf get re-written by prefixing them with
the title path and then included into the cms url patterns.
"""
from cms.models import Title
try:
current_site = Site.objects.get_current()
except Site.DoesNotExist:
current_site = None
included = []
# we don't have a request here so get_page_queryset() can't be used,
# so use public() queryset.
# This can be done because url patterns are used just in frontend
title_qs = Title.objects.public().filter(page__site=current_site)
hooked_applications = {}
# Loop over all titles with an application hooked to them
for title in title_qs.exclude(page__application_urls=None).exclude(page__application_urls='').select_related():
path = title.path
mix_id = "%s:%s:%s" % (path + "/", title.page.application_urls, title.language)
if mix_id in included:
# don't add the same thing twice
continue
if not settings.APPEND_SLASH:
path += '/'
if title.page_id not in hooked_applications:
hooked_applications[title.page_id] = {}
app = apphook_pool.get_apphook(title.page.application_urls)
app_ns = app.app_name, title.page.application_namespace
with force_language(title.language):
hooked_applications[title.page_id][title.language] = (app_ns, get_patterns_for_title(path, title), app)
included.append(mix_id)
# Build the app patterns to be included in the cms urlconfs
app_patterns = []
for page_id in hooked_applications.keys():
resolver = None
for lang in hooked_applications[page_id].keys():
(app_ns, inst_ns), current_patterns, app = hooked_applications[page_id][lang]
if not resolver:
resolver = AppRegexURLResolver(r'', 'app_resolver', app_name=app_ns, namespace=inst_ns)
resolver.page_id = page_id
if app.permissions:
_set_permissions(current_patterns, app.exclude_permissions)
resolver.url_patterns_dict[lang] = current_patterns
app_patterns.append(resolver)
APP_RESOLVERS.append(resolver)
return app_patterns
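# Hypothetical usage sketch (the real wiring lives in cms.urls; the layout
# below is illustrative only):
#
#     from cms.appresolver import get_app_patterns
#
#     urlpatterns = get_app_patterns() + [
#         # ... the regular cms / project url patterns ...
#     ]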
|
|
"""API harvester for DataOne - for the SHARE project
Example query: https://cn.dataone.org/cn/v1/query/solr/?q=dateModified:[NOW-5DAY%20TO%20*]&rows=10
"""
from __future__ import unicode_literals
import re
import logging
from datetime import timedelta, date
from lxml import etree
from dateutil.parser import parse
from xml.etree import ElementTree
from nameparser import HumanName
from scrapi import requests
from scrapi import settings
from scrapi.base import XMLHarvester
from scrapi.util import copy_to_unicode
from scrapi.linter.document import RawDocument
from scrapi.base.helpers import compose, single_result, build_properties, date_formatter
logger = logging.getLogger(__name__)
DEFAULT_ENCODING = 'UTF-8'
DATAONE_SOLR_ENDPOINT = 'https://cn.dataone.org/cn/v1/query/solr/'
def process_doi(service_id, doc_doi):
    """Extract a DOI from the service id or the document DOI field(s);
    returns the bare DOI string, or '' if none is found."""
    doi_re = '10\\.\\d{4}/\\w*\\.\\w*(/\\w*)?'
doi_list = map(lambda x: x.replace('doi', ''), doc_doi) if isinstance(doc_doi, list) else [doc_doi.replace('doi', '')]
for item in [service_id] + doi_list:
try:
return re.search(doi_re, item).group(0)
except AttributeError:
continue
return ''
def process_contributors(author, submitters, contributors,
investigators):
if not author:
author = ''
elif isinstance(author, list):
author = author[0]
if not isinstance(contributors, list):
contributors = [contributors]
if not isinstance(investigators, list):
investigators = [investigators]
unique_contributors = list(set([author] + contributors + investigators))
if len(unique_contributors) < 1:
return []
# this is the index of the author in the unique_contributors list
if author != '':
author_index = unique_contributors.index(author)
else:
author_index = None
# grabs the email if there is one, this should go with the author index
email = ''
for submitter in submitters:
if '@' in submitter:
email = submitter
contributor_list = []
for index, contributor in enumerate(unique_contributors):
if author_index is not None and index == author_index:
# if contributor == NAME and email != '':
# # TODO - maybe add this back in someday
# sometimes this yields really weird names like mjg4
# # TODO - names not always perfectly lined up with emails...
# contributor = name_from_email(email)
name = HumanName(contributor)
contributor_dict = {
'name': contributor,
'givenName': name.first,
'additionalName': name.middle,
'familyName': name.last,
}
if email:
contributor_dict['email'] = email
contributor_list.append(contributor_dict)
else:
name = HumanName(contributor)
contributor_list.append({
'name': contributor,
'givenName': name.first,
'additionalName': name.middle,
'familyName': name.last,
})
return contributor_list
class DataOneHarvester(XMLHarvester):
short_name = 'dataone'
long_name = 'DataONE: Data Observation Network for Earth'
url = 'https://www.dataone.org/'
namespaces = {}
record_encoding = None
schema = {
'otherProperties': build_properties(
('authorGivenName', ("str[@name='authorGivenName']/node()")),
('authorSurName', ("str[@name='authorSurName']/node()")),
('authoritativeMN', ("str[@name='authoritativeMN']/node()")),
('checksum', ("str[@name='checksum']/node()")),
('checksumAlgorithm', ("str[@name='checksumAlgorithm']/node()")),
('datasource', ("str[@name='datasource']/node()")),
('datePublished', ("date[@name='datePublished']/node()")),
('dateUploaded', ("date[@name='dateUploaded']/node()")),
('pubDate', ("date[@name='pubDate']/node()")),
('updateDate', ("date[@name='updateDate']/node()")),
('fileID', ("str[@name='fileID']/node()")),
('formatId', ("str[@name='formatId']/node()")),
('formatType', ("str[@name='formatType']/node()")),
('identifier', ("str[@name='identifier']/node()")),
('readPermission', "arr[@name='readPermission']/str/node()"),
('replicaMN', "arr[@name='replicaMN']/str/node()"),
('replicaVerifiedDate', "arr[@name='replicaVerifiedDate']/date/node()"),
('replicationAllowed', ("bool[@name='replicationAllowed']/node()")),
('numberReplicas', ("int[@name='numberReplicas']/node()")),
('preferredReplicationMN', "arr[@name='preferredReplicationMN']/str/node()"),
('rightsHolder', ("str[@name='rightsHolder']/node()")),
('scientificName', "arr[@name='scientificName']/str/node()"),
('site', "arr[@name='site']/str/node()"),
('size', ("long[@name='size']/node()")),
('isDocumentedBy', "arr[@name='isDocumentedBy']/str/node()"),
('serviceID', "str[@name='id']/node()"),
('sku', "str[@name='sku']/node()")
),
'freeToRead': {
'startDate': ("bool[@name='isPublic']/node()", "date[@name='dateModified']/node()", lambda x, y: parse(y[0]).date().isoformat() if x else None)
},
'contributors': ("str[@name='author']/node()", "str[@name='submitter']/node()", "arr[@name='origin']/str/node()", "arr[@name='investigator']/str/node()", process_contributors),
'uris': {
'canonicalUri': ("str[@name='id']/node()", "//str[@name='dataUrl']/node()", lambda x, y: y[0] if 'http' in single_result(y) else x[0] if 'http' in single_result(x) else ''),
'objectUri': ("arr[@name='resourceMap']/str/node()", compose(lambda x: x.replace('doi:', 'http://dx.doi.org/'), single_result))
},
'tags': ("//arr[@name='keywords']/str/node()", lambda x: x if isinstance(x, list) else [x]),
'providerUpdatedDateTime': ("str[@name='dateModified']/node()", compose(date_formatter, single_result)),
'title': ("str[@name='title']/node()", single_result),
'description': ("str[@name='abstract']/node()", single_result)
}
def harvest(self, start_date=None, end_date=None):
start_date = start_date or date.today() - timedelta(settings.DAYS_BACK)
end_date = end_date or date.today()
records = self.get_records(start_date, end_date)
xml_list = []
for record in records:
doc_id = record.xpath("str[@name='id']")[0].text
record = ElementTree.tostring(record, encoding=self.record_encoding)
xml_list.append(RawDocument({
'doc': record,
'source': self.short_name,
'docID': copy_to_unicode(doc_id),
'filetype': 'xml'
}))
return xml_list
def get_records(self, start_date, end_date):
        ''' Helper generator that queries the DataONE Solr API for records
        modified between start_date and end_date, paging through the result
        set and yielding each <doc> element as an lxml etree element. '''
query = 'dateModified:[{}T00:00:00Z TO {}T00:00:00Z]'.format(start_date.isoformat(), end_date.isoformat())
doc = requests.get(DATAONE_SOLR_ENDPOINT, params={
'q': query,
'start': 0,
'rows': 1
})
doc = etree.XML(doc.content)
rows = int(doc.xpath("//result/@numFound")[0])
n = 0
while n < rows:
data = requests.get(DATAONE_SOLR_ENDPOINT, params={
'q': query,
'start': n,
'rows': 1000
})
docs = etree.XML(data.content).xpath('//doc')
for doc in docs:
yield doc
n += 1000
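# Hypothetical usage sketch (assumes scrapi settings such as DAYS_BACK are
# configured; not part of the normal scrapi pipeline wiring):
#
#     from datetime import date
#
#     harvester = DataOneHarvester()
#     raw_docs = harvester.harvest(start_date=date(2015, 6, 1),
#                                  end_date=date(2015, 6, 2))
#     # each RawDocument carries the raw solr <doc> XML plus docID/filetype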
|
|
#!/usr/bin/env python
# Copyright 2015 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import sys
import tempfile
import urllib2
from common_includes import *
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
fetchspecs = [
"+refs/heads/*:refs/heads/*",
"+refs/pending/*:refs/pending/*",
"+refs/pending-tags/*:refs/pending-tags/*",
]
self.Git("fetch origin %s" % " ".join(fetchspecs))
self.GitCheckout("origin/master")
self.DeleteBranch("work-branch")
class PrepareBranchRevision(Step):
MESSAGE = "Check from which revision to branch off."
def RunStep(self):
self["push_hash"] = (self._options.revision or
self.GitLog(n=1, format="%H", branch="origin/master"))
assert self["push_hash"]
print "Release revision %s" % self["push_hash"]
class IncrementVersion(Step):
MESSAGE = "Increment version number."
def RunStep(self):
latest_version = self.GetLatestVersion()
# The version file on master can be used to bump up major/minor at
# branch time.
self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteMasterBranch())
self.ReadAndPersistVersion("master_")
master_version = self.ArrayToVersion("master_")
# Use the highest version from master or from tags to determine the new
# version.
authoritative_version = sorted(
[master_version, latest_version], key=SortingKey)[1]
self.StoreVersion(authoritative_version, "authoritative_")
# Variables prefixed with 'new_' contain the new version numbers for the
# ongoing candidates push.
self["new_major"] = self["authoritative_major"]
self["new_minor"] = self["authoritative_minor"]
self["new_build"] = str(int(self["authoritative_build"]) + 1)
# Make sure patch level is 0 in a new push.
self["new_patch"] = "0"
# The new version is not a candidate.
self["new_candidate"] = "0"
self["version"] = "%s.%s.%s" % (self["new_major"],
self["new_minor"],
self["new_build"])
print ("Incremented version to %s" % self["version"])
class DetectLastRelease(Step):
MESSAGE = "Detect commit ID of last release base."
def RunStep(self):
self["last_push_master"] = self.GetLatestReleaseBase()
class PrepareChangeLog(Step):
MESSAGE = "Prepare raw ChangeLog entry."
def Reload(self, body):
"""Attempts to reload the commit message from rietveld in order to allow
late changes to the LOG flag. Note: This is brittle to future changes of
the web page name or structure.
"""
match = re.search(r"^Review URL: https://codereview\.chromium\.org/(\d+)$",
body, flags=re.M)
if match:
cl_url = ("https://codereview.chromium.org/%s/description"
% match.group(1))
try:
# Fetch from Rietveld but only retry once with one second delay since
# there might be many revisions.
body = self.ReadURL(cl_url, wait_plan=[1])
except urllib2.URLError: # pragma: no cover
pass
return body
def RunStep(self):
self["date"] = self.GetDate()
output = "%s: Version %s\n\n" % (self["date"], self["version"])
TextToFile(output, self.Config("CHANGELOG_ENTRY_FILE"))
commits = self.GitLog(format="%H",
git_hash="%s..%s" % (self["last_push_master"],
self["push_hash"]))
# Cache raw commit messages.
commit_messages = [
[
self.GitLog(n=1, format="%s", git_hash=commit),
self.Reload(self.GitLog(n=1, format="%B", git_hash=commit)),
self.GitLog(n=1, format="%an", git_hash=commit),
] for commit in commits.splitlines()
]
# Auto-format commit messages.
body = MakeChangeLogBody(commit_messages, auto_format=True)
AppendToFile(body, self.Config("CHANGELOG_ENTRY_FILE"))
msg = (" Performance and stability improvements on all platforms."
"\n#\n# The change log above is auto-generated. Please review if "
"all relevant\n# commit messages from the list below are included."
"\n# All lines starting with # will be stripped.\n#\n")
AppendToFile(msg, self.Config("CHANGELOG_ENTRY_FILE"))
# Include unformatted commit messages as a reference in a comment.
comment_body = MakeComment(MakeChangeLogBody(commit_messages))
AppendToFile(comment_body, self.Config("CHANGELOG_ENTRY_FILE"))
class EditChangeLog(Step):
MESSAGE = "Edit ChangeLog entry."
def RunStep(self):
print ("Please press <Return> to have your EDITOR open the ChangeLog "
"entry, then edit its contents to your liking. When you're done, "
"save the file and exit your EDITOR. ")
self.ReadLine(default="")
self.Editor(self.Config("CHANGELOG_ENTRY_FILE"))
# Strip comments and reformat with correct indentation.
changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE")).rstrip()
changelog_entry = StripComments(changelog_entry)
changelog_entry = "\n".join(map(Fill80, changelog_entry.splitlines()))
changelog_entry = changelog_entry.lstrip()
if changelog_entry == "": # pragma: no cover
self.Die("Empty ChangeLog entry.")
    # Save the new change log for adding it later to the candidates patch.
TextToFile(changelog_entry, self.Config("CHANGELOG_ENTRY_FILE"))
class MakeBranch(Step):
MESSAGE = "Create the branch."
def RunStep(self):
self.Git("reset --hard origin/master")
self.Git("checkout -b work-branch %s" % self["push_hash"])
self.GitCheckoutFile(CHANGELOG_FILE, self["latest_version"])
self.GitCheckoutFile(VERSION_FILE, self["latest_version"])
self.GitCheckoutFile(WATCHLISTS_FILE, self["latest_version"])
class AddChangeLog(Step):
MESSAGE = "Add ChangeLog changes to release branch."
def RunStep(self):
changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
old_change_log = FileToText(os.path.join(self.default_cwd, CHANGELOG_FILE))
new_change_log = "%s\n\n\n%s" % (changelog_entry, old_change_log)
TextToFile(new_change_log, os.path.join(self.default_cwd, CHANGELOG_FILE))
class SetVersion(Step):
MESSAGE = "Set correct version for candidates."
def RunStep(self):
self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
class EnableMergeWatchlist(Step):
MESSAGE = "Enable watchlist entry for merge notifications."
def RunStep(self):
old_watchlist_content = FileToText(os.path.join(self.default_cwd,
WATCHLISTS_FILE))
new_watchlist_content = re.sub("(# 'v8-merges@googlegroups\.com',)",
"'[email protected]',",
old_watchlist_content)
TextToFile(new_watchlist_content, os.path.join(self.default_cwd,
WATCHLISTS_FILE))
class CommitBranch(Step):
MESSAGE = "Commit version and changelog to new branch."
def RunStep(self):
# Convert the ChangeLog entry to commit message format.
text = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
# Remove date and trailing white space.
text = re.sub(r"^%s: " % self["date"], "", text.rstrip())
# Remove indentation and merge paragraphs into single long lines, keeping
# empty lines between them.
def SplitMapJoin(split_text, fun, join_text):
return lambda text: join_text.join(map(fun, text.split(split_text)))
text = SplitMapJoin(
"\n\n", SplitMapJoin("\n", str.strip, " "), "\n\n")(text)
if not text: # pragma: no cover
self.Die("Commit message editing failed.")
self["commit_title"] = text.splitlines()[0]
TextToFile(text, self.Config("COMMITMSG_FILE"))
self.GitCommit(file_name = self.Config("COMMITMSG_FILE"))
os.remove(self.Config("COMMITMSG_FILE"))
os.remove(self.Config("CHANGELOG_ENTRY_FILE"))
class PushBranch(Step):
MESSAGE = "Push changes."
def RunStep(self):
pushspecs = [
"refs/heads/work-branch:refs/pending/heads/%s" % self["version"],
"%s:refs/pending-tags/heads/%s" % (self["push_hash"], self["version"]),
"%s:refs/heads/%s" % (self["push_hash"], self["version"]),
]
cmd = "push origin %s" % " ".join(pushspecs)
if self._options.dry_run:
print "Dry run. Command:\ngit %s" % cmd
else:
self.Git(cmd)
class TagRevision(Step):
MESSAGE = "Tag the new revision."
def RunStep(self):
if self._options.dry_run:
print ("Dry run. Tagging \"%s\" with %s" %
(self["commit_title"], self["version"]))
else:
self.vc.Tag(self["version"],
"origin/%s" % self["version"],
self["commit_title"])
class CleanUp(Step):
MESSAGE = "Done!"
def RunStep(self):
print("Congratulations, you have successfully created version %s."
% self["version"])
self.GitCheckout("origin/master")
self.DeleteBranch("work-branch")
self.Git("gc")
class CreateRelease(ScriptsBase):
def _PrepareOptions(self, parser):
group = parser.add_mutually_exclusive_group()
group.add_argument("-f", "--force",
help="Don't prompt the user.",
default=True, action="store_true")
group.add_argument("-m", "--manual",
help="Prompt the user at every important step.",
default=False, action="store_true")
parser.add_argument("-R", "--revision",
help="The git commit ID to push (defaults to HEAD).")
def _ProcessOptions(self, options): # pragma: no cover
if not options.author or not options.reviewer:
print "Reviewer (-r) and author (-a) are required."
return False
return True
def _Config(self):
return {
"PERSISTFILE_BASENAME": "/tmp/create-releases-tempfile",
"CHANGELOG_ENTRY_FILE":
"/tmp/v8-create-releases-tempfile-changelog-entry",
"COMMITMSG_FILE": "/tmp/v8-create-releases-tempfile-commitmsg",
}
def _Steps(self):
return [
Preparation,
PrepareBranchRevision,
IncrementVersion,
DetectLastRelease,
PrepareChangeLog,
EditChangeLog,
MakeBranch,
AddChangeLog,
SetVersion,
EnableMergeWatchlist,
CommitBranch,
PushBranch,
TagRevision,
CleanUp,
]
if __name__ == "__main__": # pragma: no cover
sys.exit(CreateRelease().Run())
|
|
"""
Test the API of the symtable module.
"""
import symtable
import unittest
TEST_CODE = """
import sys
glob = 42
some_var = 12
class Mine:
instance_var = 24
def a_method(p1, p2):
pass
def spam(a, b, *var, **kw):
global bar
bar = 47
some_var = 10
x = 23
glob
def internal():
return x
def other_internal():
nonlocal some_var
some_var = 3
return some_var
return internal
def foo():
pass
def namespace_test(): pass
def namespace_test(): pass
"""
def find_block(block, name):
for ch in block.get_children():
if ch.get_name() == name:
return ch
class SymtableTest(unittest.TestCase):
top = symtable.symtable(TEST_CODE, "?", "exec")
# These correspond to scopes in TEST_CODE
Mine = find_block(top, "Mine")
a_method = find_block(Mine, "a_method")
spam = find_block(top, "spam")
internal = find_block(spam, "internal")
other_internal = find_block(spam, "other_internal")
foo = find_block(top, "foo")
def test_type(self):
self.assertEqual(self.top.get_type(), "module")
self.assertEqual(self.Mine.get_type(), "class")
self.assertEqual(self.a_method.get_type(), "function")
self.assertEqual(self.spam.get_type(), "function")
self.assertEqual(self.internal.get_type(), "function")
def test_optimized(self):
self.assertFalse(self.top.is_optimized())
self.assertFalse(self.top.has_exec())
self.assertTrue(self.spam.is_optimized())
def test_nested(self):
self.assertFalse(self.top.is_nested())
self.assertFalse(self.Mine.is_nested())
self.assertFalse(self.spam.is_nested())
self.assertTrue(self.internal.is_nested())
def test_children(self):
self.assertTrue(self.top.has_children())
self.assertTrue(self.Mine.has_children())
self.assertFalse(self.foo.has_children())
def test_lineno(self):
self.assertEqual(self.top.get_lineno(), 0)
self.assertEqual(self.spam.get_lineno(), 12)
def test_function_info(self):
func = self.spam
self.assertEqual(sorted(func.get_parameters()), ["a", "b", "kw", "var"])
expected = ['a', 'b', 'internal', 'kw', 'other_internal', 'some_var', 'var', 'x']
self.assertEqual(sorted(func.get_locals()), expected)
self.assertEqual(sorted(func.get_globals()), ["bar", "glob"])
self.assertEqual(self.internal.get_frees(), ("x",))
def test_globals(self):
self.assertTrue(self.spam.lookup("glob").is_global())
self.assertFalse(self.spam.lookup("glob").is_declared_global())
self.assertTrue(self.spam.lookup("bar").is_global())
self.assertTrue(self.spam.lookup("bar").is_declared_global())
self.assertFalse(self.internal.lookup("x").is_global())
self.assertFalse(self.Mine.lookup("instance_var").is_global())
def test_nonlocal(self):
self.assertFalse(self.spam.lookup("some_var").is_nonlocal())
self.assertTrue(self.other_internal.lookup("some_var").is_nonlocal())
expected = ("some_var",)
self.assertEqual(self.other_internal.get_nonlocals(), expected)
def test_local(self):
self.assertTrue(self.spam.lookup("x").is_local())
self.assertFalse(self.internal.lookup("x").is_local())
def test_referenced(self):
self.assertTrue(self.internal.lookup("x").is_referenced())
self.assertTrue(self.spam.lookup("internal").is_referenced())
self.assertFalse(self.spam.lookup("x").is_referenced())
def test_parameters(self):
for sym in ("a", "var", "kw"):
self.assertTrue(self.spam.lookup(sym).is_parameter())
self.assertFalse(self.spam.lookup("x").is_parameter())
def test_symbol_lookup(self):
self.assertEqual(len(self.top.get_identifiers()),
len(self.top.get_symbols()))
self.assertRaises(KeyError, self.top.lookup, "not_here")
def test_namespaces(self):
self.assertTrue(self.top.lookup("Mine").is_namespace())
self.assertTrue(self.Mine.lookup("a_method").is_namespace())
self.assertTrue(self.top.lookup("spam").is_namespace())
self.assertTrue(self.spam.lookup("internal").is_namespace())
self.assertTrue(self.top.lookup("namespace_test").is_namespace())
self.assertFalse(self.spam.lookup("x").is_namespace())
self.assertTrue(self.top.lookup("spam").get_namespace() is self.spam)
ns_test = self.top.lookup("namespace_test")
self.assertEqual(len(ns_test.get_namespaces()), 2)
self.assertRaises(ValueError, ns_test.get_namespace)
def test_assigned(self):
self.assertTrue(self.spam.lookup("x").is_assigned())
self.assertTrue(self.spam.lookup("bar").is_assigned())
self.assertTrue(self.top.lookup("spam").is_assigned())
self.assertTrue(self.Mine.lookup("a_method").is_assigned())
self.assertFalse(self.internal.lookup("x").is_assigned())
def test_annotated(self):
st1 = symtable.symtable('def f():\n x: int\n', 'test', 'exec')
st2 = st1.get_children()[0]
self.assertTrue(st2.lookup('x').is_local())
self.assertTrue(st2.lookup('x').is_annotated())
self.assertFalse(st2.lookup('x').is_global())
st3 = symtable.symtable('def f():\n x = 1\n', 'test', 'exec')
st4 = st3.get_children()[0]
self.assertTrue(st4.lookup('x').is_local())
self.assertFalse(st4.lookup('x').is_annotated())
# Test that annotations in the global scope are valid after the
# variable is declared as global.
st5 = symtable.symtable('global x\nx: int', 'test', 'exec')
self.assertTrue(st5.lookup("x").is_global())
# Test that annotations for nonlocals are valid after the
# variable is declared as nonlocal.
st6 = symtable.symtable('def g():\n'
' x = 2\n'
' def f():\n'
' nonlocal x\n'
' x: int',
'test', 'exec')
def test_imported(self):
self.assertTrue(self.top.lookup("sys").is_imported())
def test_name(self):
self.assertEqual(self.top.get_name(), "top")
self.assertEqual(self.spam.get_name(), "spam")
self.assertEqual(self.spam.lookup("x").get_name(), "x")
self.assertEqual(self.Mine.get_name(), "Mine")
def test_class_info(self):
self.assertEqual(self.Mine.get_methods(), ('a_method',))
def test_filename_correct(self):
### Bug tickler: SyntaxError file name correct whether error raised
### while parsing or building symbol table.
def checkfilename(brokencode, offset):
try:
symtable.symtable(brokencode, "spam", "exec")
except SyntaxError as e:
self.assertEqual(e.filename, "spam")
self.assertEqual(e.lineno, 1)
self.assertEqual(e.offset, offset)
else:
self.fail("no SyntaxError for %r" % (brokencode,))
checkfilename("def f(x): foo)(", 14) # parse-time
checkfilename("def f(x): global x", 11) # symtable-build-time
symtable.symtable("pass", b"spam", "exec")
with self.assertWarns(DeprecationWarning), \
self.assertRaises(TypeError):
symtable.symtable("pass", bytearray(b"spam"), "exec")
with self.assertWarns(DeprecationWarning):
symtable.symtable("pass", memoryview(b"spam"), "exec")
with self.assertRaises(TypeError):
symtable.symtable("pass", list(b"spam"), "exec")
def test_eval(self):
symbols = symtable.symtable("42", "?", "eval")
def test_single(self):
symbols = symtable.symtable("42", "?", "single")
def test_exec(self):
symbols = symtable.symtable("def f(x): return x", "?", "exec")
def test_bytes(self):
top = symtable.symtable(TEST_CODE.encode('utf8'), "?", "exec")
self.assertIsNotNone(find_block(top, "Mine"))
code = b'# -*- coding: iso8859-15 -*-\nclass \xb4: pass\n'
top = symtable.symtable(code, "?", "exec")
self.assertIsNotNone(find_block(top, "\u017d"))
if __name__ == '__main__':
unittest.main()
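# A minimal sketch (not part of the test suite) of driving the symtable API
# exercised above by hand; the source string and names here are illustrative only:
#
#     import symtable
#     top = symtable.symtable("x = 1\ndef f(y):\n    return x + y\n", "<demo>", "exec")
#     f_block = top.get_children()[0]
#     print(f_block.get_name())                  # 'f'
#     print(f_block.get_parameters())            # ('y',)
#     print(f_block.lookup("x").is_global())     # True
#     print(f_block.lookup("y").is_parameter())  # True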
|
|
from sitemessage.models import Message, Dispatch, Subscription, DispatchError
from sitemessage.toolbox import recipients
from sitemessage.utils import Recipient
from .testapp.sitemessages import MessageForTest
class TestSubscriptionModel:
def test_create(self, user):
s = Subscription.create('abc', 'message', 'messenger')
assert s.time_created is not None
assert s.recipient is None
assert s.address == 'abc'
assert s.message_cls == 'message'
assert s.messenger_cls == 'messenger'
s = Subscription.create(user, 'message', 'messenger')
assert s.time_created is not None
assert s.address is None
assert s.recipient_id == user.id
assert s.message_cls == 'message'
assert s.messenger_cls == 'messenger'
def test_cancel(self, user):
Subscription.create('abc', 'message', 'messenger')
Subscription.create('abc', 'message1', 'messenger')
Subscription.create('abc', 'message', 'messenger1')
assert Subscription.objects.filter(address='abc').count() == 3
Subscription.cancel('abc', 'message', 'messenger')
assert Subscription.objects.filter(address='abc').count() == 2
Subscription.create(user, 'message', 'messenger')
assert Subscription.objects.filter(recipient=user).count() == 1
Subscription.cancel(user, 'message', 'messenger')
assert Subscription.objects.filter(recipient=user).count() == 0
def test_replace_for_user(self, user):
new_prefs = [('message3', 'messenger3')]
assert Subscription.replace_for_user(user, new_prefs)
Subscription.create(user, 'message', 'messenger')
Subscription.create(user, 'message2', 'messenger2')
assert Subscription.get_for_user(user).count() == 3
Subscription.replace_for_user(user, new_prefs)
s = Subscription.get_for_user(user)
assert s.count() == 1
s = s[0]
assert s.message_cls == 'message3'
assert s.messenger_cls == 'messenger3'
def test_get_for_user(self, user):
r = Subscription.get_for_user(user)
assert list(r) == []
assert Subscription.get_for_user(user).count() == 0
Subscription.create(user, 'message', 'messenger')
assert Subscription.get_for_user(user).count() == 1
def test_get_for_message_cls(self):
assert Subscription.get_for_message_cls('mymsg').count() == 0
Subscription.create('aaa', 'mymsg', 'messenger')
Subscription.create('bbb', 'mymsg', 'messenger2')
assert Subscription.get_for_message_cls('mymsg').count() == 2
def test_str(self):
s = Subscription()
s.address = 'aaa'
assert 'aaa' in str(s)
class TestDispatchErrorModel:
def test_str(self):
e = DispatchError()
e.dispatch_id = 444
assert '444' in str(e)
class TestDispatchModel:
def test_create(self, user):
message = Message(cls='test_message')
message.save()
recipients_ = recipients('test_messenger', [user])
recipients_ += recipients('buggy', 'idle')
dispatches = Dispatch.create(message, recipients_)
assert isinstance(dispatches[0], Dispatch)
assert isinstance(dispatches[1], Dispatch)
assert dispatches[0].message_id == message.id
assert dispatches[1].message_id == message.id
assert dispatches[0].messenger == 'test_messenger'
assert dispatches[1].messenger == 'buggy'
dispatches = Dispatch.create(message, Recipient('msgr', None, 'address'))
assert len(dispatches) == 1
def test_log_dispatches_errors(self):
assert DispatchError.objects.count() == 0
m = Message()
m.save()
d1 = Dispatch(message_id=m.id)
d1.save()
d1.error_log = 'some_text'
Dispatch.log_dispatches_errors([d1])
errors = DispatchError.objects.all()
assert len(errors) == 1
assert errors[0].error_log == 'some_text'
def test_get_unread(self):
m = Message()
m.save()
d1 = Dispatch(message_id=m.id)
d1.save()
d2 = Dispatch(message_id=m.id)
d2.save()
assert Dispatch.get_unread().count() == 2
d2.read_status = Dispatch.READ_STATUS_READ
d2.save()
assert Dispatch.get_unread().count() == 1
def test_set_dispatches_statuses(self):
m = Message()
m.save()
d = Dispatch(message_id=m.id)
d.save()
Dispatch.set_dispatches_statuses(**{'sent': [d]})
d_ = Dispatch.objects.get(pk=d.id)
assert d_.dispatch_status == Dispatch.DISPATCH_STATUS_SENT
assert d_.retry_count == 1
Dispatch.set_dispatches_statuses(**{'error': [d]})
d_ = Dispatch.objects.get(pk=d.id)
assert d_.dispatch_status == Dispatch.DISPATCH_STATUS_ERROR
assert d_.retry_count == 2
def test_str(self):
d = Dispatch()
d.address = 'tttt'
assert 'tttt' in str(d)
def test_mark_read(self):
d = Dispatch()
assert d.read_status == d.READ_STATUS_UNREAD
d.mark_read()
assert d.read_status == d.READ_STATUS_READ
class TestMessageModel:
def test_create(self, user):
m, _ = Message.create('some', {'abc': 'abc'}, sender=user, priority=22)
assert m.cls == 'some'
assert m.context == {'abc': 'abc'}
assert m.sender == user
assert m.priority == 22
assert not m.dispatches_ready
ctx = {'a': 'a', 'b': 'b'}
m, _ = Message.create('some2', ctx)
assert m.cls == 'some2'
assert m.context == ctx
assert m.sender is None
assert not m.dispatches_ready
def test_deserialize_context(self):
m = Message(cls='some_cls', context={'a': 'a', 'b': 'b', 'c': 'c'})
m.save()
m2 = Message.objects.get(pk=m.pk)
assert m2.context == {'a': 'a', 'b': 'b', 'c': 'c'}
def test_get_type(self):
m = Message(cls='test_message')
assert m.get_type() is MessageForTest
def test_str(self):
m = Message()
m.cls = 'aaa'
assert str(m) == 'aaa'
|
|
import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50) # show up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(cw154))
print(len(trito))
totalfiles = cw154 + trito
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
indexed_matrix = total_matrix ## keep a copy for index of genomic coordinates
total_matrix = total_matrix.drop("index", axis=1)
drop_columns = total_matrix ## keep copy in order to create 0/1/? matrix such that each character is a column
len(drop_columns.columns)
len(total_matrix.columns)
cell_samples = ['RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GACACG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.ACAACC',
'RRBS_trito_pool_1_TAAGGCGA.ACGTGG',
'RRBS_trito_pool_1_TAAGGCGA.ACTCAC',
'RRBS_trito_pool_1_TAAGGCGA.ATAGCG',
'RRBS_trito_pool_1_TAAGGCGA.ATCGAC',
'RRBS_trito_pool_1_TAAGGCGA.CAAGAG',
'RRBS_trito_pool_1_TAAGGCGA.CATGAC',
'RRBS_trito_pool_1_TAAGGCGA.CCTTCG',
'RRBS_trito_pool_1_TAAGGCGA.CGGTAG',
'RRBS_trito_pool_1_TAAGGCGA.CTATTG',
'RRBS_trito_pool_1_TAAGGCGA.GACACG',
'RRBS_trito_pool_1_TAAGGCGA.GCATTC',
'RRBS_trito_pool_1_TAAGGCGA.GCTGCC',
'RRBS_trito_pool_1_TAAGGCGA.GGCATC',
'RRBS_trito_pool_1_TAAGGCGA.GTGAGG',
'RRBS_trito_pool_1_TAAGGCGA.GTTGAG',
'RRBS_trito_pool_1_TAAGGCGA.TAGCGG',
'RRBS_trito_pool_1_TAAGGCGA.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.TGACAG',
'RRBS_trito_pool_1_TAAGGCGA.TGCTGC',
'RRBS_trito_pool_2_CGTACTAG.ACAACC',
'RRBS_trito_pool_2_CGTACTAG.ACGTGG',
'RRBS_trito_pool_2_CGTACTAG.ACTCAC',
'RRBS_trito_pool_2_CGTACTAG.AGGATG',
'RRBS_trito_pool_2_CGTACTAG.ATAGCG',
'RRBS_trito_pool_2_CGTACTAG.ATCGAC',
'RRBS_trito_pool_2_CGTACTAG.CAAGAG',
'RRBS_trito_pool_2_CGTACTAG.CATGAC',
'RRBS_trito_pool_2_CGTACTAG.CCTTCG',
'RRBS_trito_pool_2_CGTACTAG.CGGTAG',
'RRBS_trito_pool_2_CGTACTAG.CTATTG',
'RRBS_trito_pool_2_CGTACTAG.GACACG',
'RRBS_trito_pool_2_CGTACTAG.GCATTC',
'RRBS_trito_pool_2_CGTACTAG.GCTGCC',
'RRBS_trito_pool_2_CGTACTAG.GGCATC',
'RRBS_trito_pool_2_CGTACTAG.GTGAGG',
'RRBS_trito_pool_2_CGTACTAG.GTTGAG',
'RRBS_trito_pool_2_CGTACTAG.TAGCGG',
'RRBS_trito_pool_2_CGTACTAG.TATCTC',
'RRBS_trito_pool_2_CGTACTAG.TCTCTG',
'RRBS_trito_pool_2_CGTACTAG.TGACAG']
total_matrix.columns = cell_samples
print(total_matrix.shape)
## >>> print(total_matrix.shape)
## (6336559, 104)
drop_columns = drop_columns.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
drop_columns = drop_columns.astype(str).apply(''.join)
drop_columns = drop_columns.reset_index()
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
total_matrix = total_matrix.astype(str).apply(''.join)
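## Worked example of the two steps above on a toy column (illustrative values,
## not the real data):
##   toy = pd.DataFrame({'sampleA': [1.0, np.nan, 0.0]})
##   toy = toy.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
##   toy.astype(str).apply(''.join)   # sampleA -> "1?0"
## i.e. total_matrix is now one 0/1/? character string per sample column.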
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/CLL_tests")
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott_drop_columns = pd.Series(drop_columns.index.astype(str).str.cat(total_matrix.astype(str),' ')) ## [104 rows x 6336566 columns]
print(tott.shape)
print(tott_drop_columns.shape)
df_tott_column_position = tott_drop_columns.apply(lambda x: pd.Series(list(x))) ## [104 rows x 6336566 columns]
## extra NaN's here
df_tott_column_position_T = df_tott_column_position.T ## create transpose, and shift on columns [I don't think there is a pandas-efficient way to shift row elements left/right systematically]
for i in range(10): ## 0 to 9
df_tott_column_position_T[i]= df_tott_column_position_T[i].shift(2)
for i in range(90):
j = i + 10 ## 10 to 99
df_tott_column_position_T[j]= df_tott_column_position_T[j].shift(1)
df_tott_column_position = df_tott_column_position_T.T
df_tott_column_position.drop( df_tott_column_position.columns[[i for i in range(7)]], axis=1, inplace=True) ## drop the first 7 columns
### rename columns
indexed_matrixT = indexed_matrix.T
df_tott_column_position.columns = indexed_matrixT.ix[0]
integers_to_sort = df_tott_column_position.columns.to_series().str.extract(r"([a-zA-Z]+)(\d*)_(\d+)", expand=True) # use str.extract to get integers to sort
integers_to_sort[1] = integers_to_sort[1].str.zfill(2)
integers_to_sort[2] = integers_to_sort[2].str.zfill(10)
integers_to_sort["new_coordinates"] = integers_to_sort.apply(lambda x: "{}{}_{}".format(x[0],x[1],x[2]), axis=1)
df_tott_column_position.columns = integers_to_sort["new_coordinates"]
df_tott_column_position.columns.name = None
df_tott_column_position = df_tott_column_position.sort_index(axis=1)
|
|
import os.path
import re
import tornado.web
import tornado.websocket
import logging
import json
from .. import version, filt
class APIError(tornado.web.HTTPError):
pass
class RequestHandler(tornado.web.RequestHandler):
def set_default_headers(self):
super(RequestHandler, self).set_default_headers()
self.set_header("Server", version.NAMEVERSION)
self.set_header("X-Frame-Options", "DENY")
self.add_header("X-XSS-Protection", "1; mode=block")
self.add_header("X-Content-Type-Options", "nosniff")
self.add_header(
"Content-Security-Policy",
"default-src 'self'; "
"connect-src 'self' ws://* ; "
"style-src 'self' 'unsafe-inline'"
)
@property
def json(self):
if not self.request.headers.get("Content-Type", "").startswith("application/json"):
return None
return json.loads(self.request.body)
@property
def state(self):
return self.application.master.state
@property
def master(self):
return self.application.master
@property
def flow(self):
flow_id = str(self.path_kwargs["flow_id"])
flow = self.state.flows.get(flow_id)
if flow:
return flow
else:
raise APIError(400, "Flow not found.")
def write_error(self, status_code, **kwargs):
if "exc_info" in kwargs and isinstance(kwargs["exc_info"][1], APIError):
self.finish(kwargs["exc_info"][1].log_message)
else:
super(RequestHandler, self).write_error(status_code, **kwargs)
class IndexHandler(RequestHandler):
def get(self):
_ = self.xsrf_token # https://github.com/tornadoweb/tornado/issues/645
self.render("index.html")
class FiltHelp(RequestHandler):
def get(self):
self.write(dict(
commands=filt.help
))
class WebSocketEventBroadcaster(tornado.websocket.WebSocketHandler):
# Subclasses must define their own class-level `connections` set; leaving
# this as None makes a missing override fail loudly.
connections = None
def open(self):
self.connections.add(self)
def on_close(self):
self.connections.remove(self)
@classmethod
def broadcast(cls, **kwargs):
message = json.dumps(kwargs, ensure_ascii=False)
for conn in cls.connections:
try:
conn.write_message(message)
except:
logging.error("Error sending message", exc_info=True)
class ClientConnection(WebSocketEventBroadcaster):
connections = set()
class Flows(RequestHandler):
def get(self):
self.write(dict(
data=[f.get_state(short=True) for f in self.state.flows]
))
class ClearAll(RequestHandler):
def post(self):
self.state.clear()
class AcceptFlows(RequestHandler):
def post(self):
self.state.flows.accept_all(self.master)
class AcceptFlow(RequestHandler):
def post(self, flow_id):
self.flow.accept_intercept(self.master)
class FlowHandler(RequestHandler):
def delete(self, flow_id):
self.flow.kill(self.master)
self.state.delete_flow(self.flow)
def put(self, flow_id):
flow = self.flow
flow.backup()
for a, b in self.json.iteritems():
if a == "request":
request = flow.request
for k, v in b.iteritems():
if k in ["method", "scheme", "host", "path", "httpversion"]:
setattr(request, k, str(v))
elif k == "port":
request.port = int(v)
elif k == "headers":
request.headers.load_state(v)
else:
print "Warning: Unknown update {}.{}: {}".format(a, k, v)
elif a == "response":
response = flow.response
for k, v in b.iteritems():
if k == "msg":
response.msg = str(v)
elif k == "code":
response.code = int(v)
elif k == "httpversion":
response.httpversion = str(v)
elif k == "headers":
response.headers.load_state(v)
else:
print "Warning: Unknown update {}.{}: {}".format(a, k, v)
else:
print "Warning: Unknown update {}: {}".format(a, b)
self.state.update_flow(flow)
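# Illustrative (assumed) shape of the JSON body this handler accepts, based
# solely on the keys handled above; the field values are made up:
#
#     {
#         "request":  {"method": "GET", "port": 8080},
#         "response": {"code": 404, "msg": "Not Found"}
#     }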
class DuplicateFlow(RequestHandler):
def post(self, flow_id):
self.master.duplicate_flow(self.flow)
class RevertFlow(RequestHandler):
def post(self, flow_id):
self.state.revert(self.flow)
class ReplayFlow(RequestHandler):
def post(self, flow_id):
self.flow.backup()
self.flow.response = None
self.state.update_flow(self.flow)
r = self.master.replay_request(self.flow)
if r:
raise APIError(400, r)
class FlowContent(RequestHandler):
def get(self, flow_id, message):
message = getattr(self.flow, message)
if not message.content:
raise APIError(400, "No content.")
content_encoding = message.headers.get("Content-Encoding", None)
if content_encoding:
content_encoding = re.sub(r"[^\w]", "", content_encoding)
self.set_header("Content-Encoding", content_encoding)
original_cd = message.headers.get("Content-Disposition", None)
filename = None
if original_cd:
filename = re.search("filename=([\w\" \.\-\(\)]+)", original_cd)
if filename:
filename = filename.group(1)
if not filename:
filename = self.flow.request.path.split("?")[0].split("/")[-1]
filename = re.sub(r"[^\w\" \.\-\(\)]", "", filename)
cd = "attachment; filename={}".format(filename)
self.set_header("Content-Disposition", cd)
self.set_header("Content-Type", "application/text")
self.set_header("X-Content-Type-Options", "nosniff")
self.set_header("X-Frame-Options", "DENY")
self.write(message.content)
class Events(RequestHandler):
def get(self):
self.write(dict(
data=list(self.state.events)
))
class Settings(RequestHandler):
def get(self):
self.write(dict(
data=dict(
version=version.VERSION,
mode=str(self.master.server.config.mode),
intercept=self.state.intercept_txt
)
))
def put(self):
update = {}
for k, v in self.json.iteritems():
if k == "intercept":
self.state.set_intercept(v)
update[k] = v
else:
print("Warning: Unknown setting {}: {}".format(k, v))
ClientConnection.broadcast(
type="settings",
cmd="update",
data=update
)
class Application(tornado.web.Application):
def __init__(self, master, debug):
self.master = master
handlers = [
(r"/", IndexHandler),
(r"/filter-help", FiltHelp),
(r"/updates", ClientConnection),
(r"/events", Events),
(r"/flows", Flows),
(r"/flows/accept", AcceptFlows),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)", FlowHandler),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/accept", AcceptFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/duplicate", DuplicateFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/replay", ReplayFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/revert", RevertFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/(?P<message>request|response)/content", FlowContent),
(r"/settings", Settings),
(r"/clear", ClearAll),
]
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
cookie_secret=os.urandom(256),
debug=debug,
)
super(Application, self).__init__(handlers, **settings)
|
|
from setup.linux.installer import Installer
from benchmark import framework_test
import os
import json
import subprocess
import time
import textwrap
import pprint
import csv
import sys
import logging
import socket
from multiprocessing import Process
from datetime import datetime
class Benchmarker:
##########################################################################################
# Public methods
##########################################################################################
############################################################
# Prints all the available tests
############################################################
def run_list_tests(self):
all_tests = self.__gather_tests
for test in all_tests:
print test.name
self.__finish()
############################################################
# End run_list_tests
############################################################
############################################################
# Prints the metadata for all the available tests
############################################################
def run_list_test_metadata(self):
all_tests = self.__gather_tests
all_tests_json = json.dumps(map(lambda test: {
"name": test.name,
"approach": test.approach,
"classification": test.classification,
"database": test.database,
"framework": test.framework,
"language": test.language,
"orm": test.orm,
"platform": test.platform,
"webserver": test.webserver,
"os": test.os,
"database_os": test.database_os,
"display_name": test.display_name,
"notes": test.notes,
"versus": test.versus
}, all_tests))
with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
f.write(all_tests_json)
self.__finish()
############################################################
# End run_list_test_metadata
############################################################
############################################################
# parse_timestamp
# Re-parses the raw data for a given timestamp
############################################################
def parse_timestamp(self):
all_tests = self.__gather_tests
for test in all_tests:
test.parse_all()
self.__parse_results(all_tests)
self.__finish()
############################################################
# End parse_timestamp
############################################################
############################################################
# Run the tests:
# This process involves setting up the client/server machines
# with any necessary changes, then going through each test:
# running its setup script, verifying its URLs, and
# running benchmarks against it.
############################################################
def run(self):
##########################
# Get a list of all known
# tests that we can run.
##########################
all_tests = self.__gather_tests
##########################
# Setup client/server
##########################
print textwrap.dedent("""
=====================================================
Preparing Server, Database, and Client ...
=====================================================
""")
self.__setup_server()
self.__setup_database()
self.__setup_client()
## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
#if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
# raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")
##########################
# Run tests
##########################
self.__run_tests(all_tests)
##########################
# Parse results
##########################
if self.mode == "benchmark":
print textwrap.dedent("""
=====================================================
Parsing Results ...
=====================================================
""")
self.__parse_results(all_tests)
self.__finish()
############################################################
# End run
############################################################
############################################################
# database_sftp_string(batch_file)
# builds the sftp command string used to reach the database machine
############################################################
def database_sftp_string(self, batch_file):
sftp_string = "sftp -oStrictHostKeyChecking=no "
if batch_file != None: sftp_string += " -b " + batch_file + " "
if self.database_identity_file != None:
sftp_string += " -i " + self.database_identity_file + " "
return sftp_string + self.database_user + "@" + self.database_host
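# For illustration only (hypothetical values), the resulting command is roughly:
#   sftp -oStrictHostKeyChecking=no -b batch.txt -i ~/.ssh/db_key benchuser@db-host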
############################################################
# End database_sftp_string
############################################################
############################################################
# client_sftp_string(batch_file)
# builds the sftp command string used to reach the client machine
############################################################
def client_sftp_string(self, batch_file):
sftp_string = "sftp -oStrictHostKeyChecking=no "
if batch_file != None: sftp_string += " -b " + batch_file + " "
if self.client_identity_file != None:
sftp_string += " -i " + self.client_identity_file + " "
return sftp_string + self.client_user + "@" + self.client_host
############################################################
# End client_sftp_string
############################################################
############################################################
# generate_url(url, port)
# generates a fully qualified URL for accessing a test url
############################################################
def generate_url(self, url, port):
return self.server_host + ":" + str(port) + url
############################################################
# End generate_url
############################################################
############################################################
# output_file(test_name, test_type)
# returns the output file for this test_name and test_type
# timestamp/test_type/test_name/raw
############################################################
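# For example, with a hypothetical run name, timestamp and test:
#   output_file("gemini", "json") -> results/my-run/20131002123456/json/gemini/raw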
def output_file(self, test_name, test_type):
path = os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
############################################################
# End output_file
############################################################
############################################################
# full_results_directory
############################################################
def full_results_directory(self):
path = os.path.join(self.result_directory, self.timestamp)
try:
os.makedirs(path)
except OSError:
pass
return path
############################################################
# End full_results_directory
############################################################
############################################################
# Latest intermediate results directory
############################################################
def latest_results_directory(self):
path = os.path.join(self.result_directory,"latest")
try:
os.makedirs(path)
except OSError:
pass
return path
############################################################
# report_results
############################################################
def report_results(self, framework, test, results):
if test not in self.results['rawData'].keys():
self.results['rawData'][test] = dict()
self.results['rawData'][test][framework.name] = results
############################################################
# End report_results
############################################################
##########################################################################################
# Private methods
##########################################################################################
############################################################
# Gathers all the tests
############################################################
@property
def __gather_tests(self):
tests = []
# Loop through each directory (we assume we're being run from the benchmarking root)
# and look for the files that signify a benchmark test
for dirname, dirnames, filenames in os.walk('.'):
# Look for the benchmark_config file, this will set up our tests.
# Its format looks like this:
#
# {
# "framework": "nodejs",
# "tests": [{
# "default": {
# "setup_file": "setup",
# "json_url": "/json"
# },
# "mysql": {
# "setup_file": "setup",
# "db_url": "/mysql",
# "query_url": "/mysql?queries="
# },
# ...
# }]
# }
if 'benchmark_config' in filenames:
config = None
config_file_name = os.path.join(dirname, 'benchmark_config')
with open(config_file_name, 'r') as config_file:
# Load json file into config object
try:
config = json.load(config_file)
except:
print("Error loading '%s'." % config_file_name)
raise
if config == None:
continue
tests = tests + framework_test.parse_config(config, dirname[2:], self)
tests.sort(key=lambda x: x.name)
return tests
############################################################
# End __gather_tests
############################################################
############################################################
# Gathers all the frameworks
############################################################
def __gather_frameworks(self):
frameworks = []
# Loop through each directory (we assume we're being run from the benchmarking root)
for dirname, dirnames, filenames in os.walk('.'):
# Look for the benchmark_config file, this will contain our framework name
# Its format looks like this:
#
# {
# "framework": "nodejs",
# "tests": [{
# "default": {
# "setup_file": "setup",
# "json_url": "/json"
# },
# "mysql": {
# "setup_file": "setup",
# "db_url": "/mysql",
# "query_url": "/mysql?queries="
# },
# ...
# }]
# }
if 'benchmark_config' in filenames:
config = None
with open(os.path.join(dirname, 'benchmark_config'), 'r') as config_file:
# Load json file into config object
config = json.load(config_file)
if config == None:
continue
frameworks.append(str(config['framework']))
return frameworks
############################################################
# End __gather_frameworks
############################################################
############################################################
# Makes any necessary changes to the server that should be
# made before running the tests. This involves setting kernel
# parameters to allow for more connections and more file
# descriptors.
#
# http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
############################################################
def __setup_server(self):
try:
if os.name == 'nt':
return True
subprocess.check_call(["sudo","bash","-c","cd /sys/devices/system/cpu; ls -d cpu*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
except subprocess.CalledProcessError:
return False
############################################################
# End __setup_server
############################################################
############################################################
# Makes any necessary changes to the database machine that
# should be made before running the tests. Is very similar
# to the server setup, but may also include database specific
# changes.
############################################################
def __setup_database(self):
p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
p.communicate("""
sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
sudo sysctl -w net.core.somaxconn=65535
sudo -s ulimit -n 65535
sudo sysctl net.ipv4.tcp_tw_reuse=1
sudo sysctl net.ipv4.tcp_tw_recycle=1
sudo sysctl -w kernel.shmmax=2147483648
sudo sysctl -w kernel.shmall=2097152
""")
############################################################
# End __setup_database
############################################################
############################################################
# Makes any necessary changes to the client machine that
# should be made before running the tests. Is very similar
# to the server setup, but may also include client specific
# changes.
############################################################
def __setup_client(self):
p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
p.communicate("""
sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
sudo sysctl -w net.core.somaxconn=65535
sudo -s ulimit -n 65535
sudo sysctl net.ipv4.tcp_tw_reuse=1
sudo sysctl net.ipv4.tcp_tw_recycle=1
sudo sysctl -w kernel.shmmax=2147483648
sudo sysctl -w kernel.shmall=2097152
""")
############################################################
# End __setup_client
############################################################
############################################################
# __run_tests
#
# 2013-10-02 ASB  Calls __run_test in a separate process for
#                 each test passed in.  Each test is given a set
#                 amount of time, and if it does not finish in
#                 time the child process (and subsequently all of
#                 its child processes) is killed.  Uses the
#                 multiprocessing module.
############################################################
def __run_tests(self, tests):
logging.debug("Start __run_tests.")
logging.debug("__name__ = %s",__name__)
if self.os.lower() == 'windows':
logging.debug("Executing __run_tests on Windows")
for test in tests:
self.__run_test(test)
else:
logging.debug("Executing __run_tests on Linux")
# These features do not work on Windows
for test in tests:
if __name__ == 'benchmark.benchmarker':
test_process = Process(target=self.__run_test, args=(test,))
test_process.start()
test_process.join(self.run_test_timeout_seconds)
if(test_process.is_alive()):
logging.debug("Child process for %s is still alive. Terminating.",test.name)
self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
test_process.terminate()
logging.debug("End __run_tests.")
############################################################
# End __run_tests
############################################################
############################################################
# __run_test
# 2013-10-02 ASB Previously __run_tests. This code now only
# processes a single test.
#
# Runs a single test: starts it, verifies its URLs, benchmarks it
# (when in benchmark mode), stops it, and records intermediate
# results. Skips the test when it does not apply to the current
# environment or configuration.
############################################################
def __run_test(self, test):
# If the user specified which tests to run, then
# we can skip over tests that are not in that list
if self.test != None and test.name not in self.test:
return
if hasattr(test, 'skip'):
if test.skip.lower() == "true":
logging.info("Test %s benchmark_config specifies to skip this test. Skipping.", test.name)
return
if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
# the operating system requirements of this test for the
# application server or the database server don't match
# our current environment
logging.info("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.")
return
# If the test is in the excludes list, we skip it
if self.exclude != None and test.name in self.exclude:
logging.info("Test %s has been added to the excludes list. Skipping.", test.name)
return
# If the test does not contain an implementation of the current test-type, skip it
if self.type != 'all' and not test.contains_type(self.type):
logging.info("Test type %s does not contain an implementation of the current test-type. Skipping", self.type)
return
logging.debug("test.os.lower() = %s test.database_os.lower() = %s",test.os.lower(),test.database_os.lower())
logging.debug("self.results['frameworks'] != None: " + str(self.results['frameworks'] != None))
logging.debug("test.name: " + str(test.name))
logging.debug("self.results['completed']: " + str(self.results['completed']))
if self.results['frameworks'] != None and test.name in self.results['completed']:
logging.info('Framework %s found in latest saved data. Skipping.',str(test.name))
return
print textwrap.dedent("""
=====================================================
Beginning {name}
-----------------------------------------------------
""".format(name=test.name))
##########################
# Start this test
##########################
print textwrap.dedent("""
-----------------------------------------------------
Starting {name}
-----------------------------------------------------
""".format(name=test.name))
try:
p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
p.communicate("""
sudo restart mysql
sudo restart mongodb
sudo /etc/init.d/postgresql restart
""")
time.sleep(10)
if self.__is_port_bound(test.port):
self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
print textwrap.dedent("""
---------------------------------------------------------
Error: Port {port} is not available before start {name}
---------------------------------------------------------
""".format(name=test.name, port=str(test.port)))
return
result = test.start()
if result != 0:
test.stop()
time.sleep(5)
print "ERROR: Problem starting " + test.name
print textwrap.dedent("""
-----------------------------------------------------
Stopped {name}
-----------------------------------------------------
""".format(name=test.name))
self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
return
time.sleep(self.sleep)
##########################
# Verify URLs
##########################
print textwrap.dedent("""
-----------------------------------------------------
Verifying URLs for {name}
-----------------------------------------------------
""".format(name=test.name))
test.verify_urls()
##########################
# Benchmark this test
##########################
if self.mode == "benchmark":
print textwrap.dedent("""
-----------------------------------------------------
Benchmarking {name} ...
-----------------------------------------------------
""".format(name=test.name))
test.benchmark()
##########################
# Stop this test
##########################
test.stop()
time.sleep(5)
if self.__is_port_bound(test.port):
self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
print textwrap.dedent("""
-----------------------------------------------------
Error: Port {port} was not released by stop {name}
-----------------------------------------------------
""".format(name=test.name, port=str(test.port)))
return
print textwrap.dedent("""
-----------------------------------------------------
Stopped {name}
-----------------------------------------------------
""".format(name=test.name))
time.sleep(5)
##########################################################
# Save results thus far into the latest results directory
##########################################################
print textwrap.dedent("""
----------------------------------------------------
Saving results through {name}
----------------------------------------------------
""".format(name=test.name))
self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
except (OSError, IOError, subprocess.CalledProcessError):
self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
print textwrap.dedent("""
-----------------------------------------------------
Subprocess Error {name}
-----------------------------------------------------
""".format(name=test.name))
try:
test.stop()
except (subprocess.CalledProcessError):
self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
print textwrap.dedent("""
-----------------------------------------------------
Subprocess Error: Test .stop() raised exception {name}
-----------------------------------------------------
""".format(name=test.name))
except (KeyboardInterrupt, SystemExit):
test.stop()
print """
-----------------------------------------------------
Cleaning up....
-----------------------------------------------------
"""
self.__finish()
sys.exit()
############################################################
# End __run_test
############################################################
############################################################
# __is_port_bound
# Check if the requested port is available. If it
# isn't available, then a previous test probably didn't
# shutdown properly.
############################################################
def __is_port_bound(self, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Try to bind this port on all IP addresses
s.bind(("", port))
# If we get here, we were able to bind successfully,
# which means the port is free.
except:
# If we get an exception, it might be because the port is still bound
# which would be bad, or maybe it is a privileged port (<1024) and we
# are not running as root, or maybe the server is gone, but sockets are
# still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
# connect.
try:
s.connect(("127.0.0.1", port))
# If we get here, we were able to connect to something, which means
# that the port is still bound.
return True
except:
# An exception means that we couldn't connect, so a server probably
# isn't still running on the port.
pass
finally:
s.close()
return False
############################################################
# End __is_port_bound
############################################################
############################################################
# __parse_results
# Aggregates the parsed results: runs the commit and source-line
# counts and writes the combined results.json file.
############################################################
def __parse_results(self, tests):
# Run the method to get the commit count of each framework.
self.__count_commits()
# Call the method which counts the sloc for each framework
self.__count_sloc()
# Time to create parsed files
# Aggregate JSON file
with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
f.write(json.dumps(self.results))
############################################################
# End __parse_results
############################################################
#############################################################
# __count_sloc
# This is assumed to be run from the benchmark root directory
#############################################################
def __count_sloc(self):
all_frameworks = self.__gather_frameworks()
jsonResult = {}
for framework in all_frameworks:
try:
command = "cloc --list-file=" + framework['directory'] + "/source_code --yaml"
lineCount = subprocess.check_output(command, shell=True)
# Find the last instance of the word 'code' in the yaml output. This should
# be the line count for the sum of all listed files or just the line count
# for the last file in the case where there's only one file listed.
lineCount = lineCount[lineCount.rfind('code'):len(lineCount)]
lineCount = lineCount.strip('code: ')
lineCount = lineCount[0:lineCount.rfind('comment')]
jsonResult[framework['name']] = int(lineCount)
except:
continue
self.results['rawData']['slocCounts'] = jsonResult
############################################################
# End __count_sloc
############################################################
############################################################
# __count_commits
############################################################
def __count_commits(self):
all_frameworks = self.__gather_frameworks()
jsonResult = {}
for framework in all_frameworks:
try:
command = "git rev-list HEAD -- " + framework + " | sort -u | wc -l"
commitCount = subprocess.check_output(command, shell=True)
jsonResult[framework] = int(commitCount)
except:
continue
self.results['rawData']['commitCounts'] = jsonResult
self.commits = jsonResult
############################################################
# End __count_commits
############################################################
############################################################
# __write_intermediate_results
############################################################
def __write_intermediate_results(self,test_name,status_message):
try:
self.results["completed"][test_name] = status_message
with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
f.write(json.dumps(self.results))
except (IOError):
logging.error("Error writing results.json")
############################################################
# End __write_intermediate_results
############################################################
############################################################
# __finish
############################################################
def __finish(self):
print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
############################################################
# End __finish
############################################################
##########################################################################################
# Constructor
##########################################################################################
############################################################
# Initialize the benchmarker. The args are the arguments
# parsed via argparse.
############################################################
def __init__(self, args):
self.__dict__.update(args)
self.start_time = time.time()
self.run_test_timeout_seconds = 3600
# setup logging
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
# setup some additional variables
if self.database_user == None: self.database_user = self.client_user
if self.database_host == None: self.database_host = self.client_host
if self.database_identity_file == None: self.database_identity_file = self.client_identity_file
# setup results and latest_results directories
self.result_directory = os.path.join("results", self.name)
self.latest_results_directory = self.latest_results_directory()
if self.parse != None:
self.timestamp = self.parse
else:
self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
# Setup the concurrency levels array. This array goes from
# starting_concurrency to max concurrency, doubling each time
self.concurrency_levels = []
concurrency = self.starting_concurrency
while concurrency <= self.max_concurrency:
self.concurrency_levels.append(concurrency)
concurrency = concurrency * 2
# Setup query interval array
# starts at 1, and goes up to max_queries, using the query_interval
self.query_intervals = []
queries = 1
while queries <= self.max_queries:
self.query_intervals.append(queries)
if queries == 1:
queries = 0
queries = queries + self.query_interval
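# Worked example with hypothetical arguments: starting_concurrency=8 and
# max_concurrency=256 give concurrency_levels = [8, 16, 32, 64, 128, 256];
# query_interval=5 and max_queries=20 give query_intervals = [1, 5, 10, 15, 20].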
# Load the latest data
#self.latest = None
#try:
# with open('toolset/benchmark/latest.json', 'r') as f:
# # Load json file into config object
# self.latest = json.load(f)
# logging.info("toolset/benchmark/latest.json loaded to self.latest")
# logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
#except IOError:
# logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
#
#self.results = None
#try:
# if self.latest != None and self.name in self.latest.keys():
# with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
# # Load json file into config object
# self.results = json.load(f)
#except IOError:
# pass
self.results = None
try:
with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
#Load json file into results object
self.results = json.load(f)
except IOError:
logging.warn("results.json for test %s not found.",self.name)
if self.results == None:
self.results = dict()
self.results['name'] = self.name
self.results['concurrencyLevels'] = self.concurrency_levels
self.results['queryIntervals'] = self.query_intervals
self.results['frameworks'] = [t.name for t in self.__gather_tests]
self.results['duration'] = self.duration
self.results['rawData'] = dict()
self.results['rawData']['json'] = dict()
self.results['rawData']['db'] = dict()
self.results['rawData']['query'] = dict()
self.results['rawData']['fortune'] = dict()
self.results['rawData']['update'] = dict()
self.results['rawData']['plaintext'] = dict()
self.results['completed'] = dict()
else:
#for x in self.__gather_tests():
# if x.name not in self.results['frameworks']:
# self.results['frameworks'] = self.results['frameworks'] + [x.name]
# Always overwrite framework list
self.results['frameworks'] = [t.name for t in self.__gather_tests]
# Setup the ssh command string
self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
if self.database_identity_file != None:
self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
if self.client_identity_file != None:
self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
if self.install_software:
install = Installer(self)
install.install_software()
############################################################
# End __init__
############################################################
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from swift.common import utils as swift_utils
from swift.common.middleware import acl as swift_acl
class KeystoneAuth(object):
"""Swift middleware to Keystone authorization system.
In Swift's proxy-server.conf add this middleware to your pipeline::
[pipeline:main]
pipeline = catch_errors cache authtoken keystoneauth proxy-server
Make sure you have the authtoken middleware before the
keystoneauth middleware.
The authtoken middleware will take care of validating the user and
keystoneauth will authorize access.
The authtoken middleware is shipped with keystone; it has no
dependencies other than keystone itself, so you can install it
either by copying the file directly into your Python path or by
installing keystone.
If support is required for unvalidated users (as with anonymous
access) or for tempurl/formpost middleware, authtoken will need
to be configured with delay_auth_decision set to 1. See the
Keystone documentation for more detail on how to configure the
authtoken middleware.
In proxy-server.conf you will need to set account auto-creation
to true::
[app:proxy-server]
account_autocreate = true
And add a swift authorization filter section, such as::
[filter:keystoneauth]
use = egg:swift#keystoneauth
operator_roles = admin, swiftoperator
This maps tenants to accounts in Swift.
The users able to grant ACLs or create containers are those
holding one of the roles listed in the operator_roles setting,
which by default includes the admin and swiftoperator roles.
If the is_admin option is set to true, a user whose username
matches the account name is treated as the owner of that account.
Example: if we have an account called hellocorp with a user
hellocorp, that user will be admin on the account and can grant
ACLs to all other users of hellocorp.
If you need a different reseller_prefix in order to mix
different auth servers, you can configure the reseller_prefix
option in your keystoneauth entry like this::
reseller_prefix = NEWAUTH_
Make sure there is an underscore at the end of your new
reseller_prefix option.
:param app: The next WSGI app in the pipeline
:param conf: The dict of configuration values
"""
def __init__(self, app, conf):
self.app = app
self.conf = conf
self.logger = swift_utils.get_logger(conf, log_route='keystoneauth')
self.reseller_prefix = conf.get('reseller_prefix', 'AUTH_').strip()
self.operator_roles = conf.get('operator_roles',
'admin, swiftoperator')
self.reseller_admin_role = conf.get('reseller_admin_role',
'ResellerAdmin')
config_is_admin = conf.get('is_admin', "false").lower()
self.is_admin = config_is_admin in swift_utils.TRUE_VALUES
cfg_synchosts = conf.get('allowed_sync_hosts', '127.0.0.1')
self.allowed_sync_hosts = [h.strip() for h in cfg_synchosts.split(',')
if h.strip()]
config_overrides = conf.get('allow_overrides', 't').lower()
self.allow_overrides = config_overrides in swift_utils.TRUE_VALUES
def __call__(self, environ, start_response):
identity = self._keystone_identity(environ)
# Check whether middleware such as tempurl or formpost has set
# the swift.authorize_override environ key and wants to control
# the authorization itself
if (self.allow_overrides and
environ.get('swift.authorize_override', False)):
msg = 'Authorizing from an overriding middleware (i.e: tempurl)'
self.logger.debug(msg)
return self.app(environ, start_response)
if identity:
self.logger.debug('Using identity: %r' % (identity))
environ['keystone.identity'] = identity
environ['REMOTE_USER'] = identity.get('tenant')
environ['swift.authorize'] = self.authorize
else:
self.logger.debug('Authorizing as anonymous')
environ['swift.authorize'] = self.authorize_anonymous
environ['swift.clean_acl'] = swift_acl.clean_acl
return self.app(environ, start_response)
def _keystone_identity(self, environ):
"""Extract the identity from the Keystone auth component."""
if environ.get('HTTP_X_IDENTITY_STATUS') != 'Confirmed':
return
roles = []
if 'HTTP_X_ROLES' in environ:
roles = environ['HTTP_X_ROLES'].split(',')
identity = {'user': environ.get('HTTP_X_USER_NAME'),
'tenant': (environ.get('HTTP_X_TENANT_ID'),
environ.get('HTTP_X_TENANT_NAME')),
'roles': roles}
return identity
def _get_account_for_tenant(self, tenant_id):
return '%s%s' % (self.reseller_prefix, tenant_id)
def _reseller_check(self, account, tenant_id):
"""Check reseller prefix."""
return account == self._get_account_for_tenant(tenant_id)
def authorize(self, req):
env = req.environ
env_identity = env.get('keystone.identity', {})
tenant_id, tenant_name = env_identity.get('tenant')
try:
part = swift_utils.split_path(req.path, 1, 4, True)
version, account, container, obj = part
except ValueError:
return webob.exc.HTTPNotFound(request=req)
user_roles = env_identity.get('roles', [])
# Give unconditional access to a user with the reseller_admin
# role.
if self.reseller_admin_role in user_roles:
msg = 'User %s has reseller admin authorizing'
self.logger.debug(msg % tenant_id)
req.environ['swift_owner'] = True
return
# Check if a user tries to access an account that does not match their
# token
if not self._reseller_check(account, tenant_id):
log_msg = 'tenant mismatch: %s != %s' % (account, tenant_id)
self.logger.debug(log_msg)
return self.denied_response(req)
        # Check the roles the user belongs to. If the user has one of the
        # roles defined in the config variable operator_roles (like
        # admin), they are promoted to admin of the account/tenant.
for role in self.operator_roles.split(','):
role = role.strip()
if role in user_roles:
log_msg = 'allow user with role %s as account admin' % (role)
self.logger.debug(log_msg)
req.environ['swift_owner'] = True
return
        # If the user has the same name as the tenant, make them the owner.
user = env_identity.get('user', '')
if self.is_admin and user == tenant_name:
req.environ['swift_owner'] = True
return
referrers, roles = swift_acl.parse_acl(getattr(req, 'acl', None))
authorized = self._authorize_unconfirmed_identity(req, obj, referrers,
roles)
if authorized:
return
elif authorized is not None:
return self.denied_response(req)
# Allow ACL at individual user level (tenant:user format)
# For backward compatibility, check for ACL in tenant_id:user format
if ('%s:%s' % (tenant_name, user) in roles
or '%s:%s' % (tenant_id, user) in roles):
log_msg = 'user %s:%s or %s:%s allowed in ACL authorizing'
self.logger.debug(log_msg % (tenant_name, user, tenant_id, user))
return
        # Check if one of the user's roles is listed in the ACL and allow it
for user_role in user_roles:
if user_role in roles:
log_msg = 'user %s:%s allowed in ACL: %s authorizing'
self.logger.debug(log_msg % (tenant_name, user, user_role))
return
return self.denied_response(req)
def authorize_anonymous(self, req):
"""
Authorize an anonymous request.
:returns: None if authorization is granted, an error page otherwise.
"""
try:
part = swift_utils.split_path(req.path, 1, 4, True)
version, account, container, obj = part
except ValueError:
return webob.exc.HTTPNotFound(request=req)
is_authoritative_authz = (account and
account.startswith(self.reseller_prefix))
if not is_authoritative_authz:
return self.denied_response(req)
referrers, roles = swift_acl.parse_acl(getattr(req, 'acl', None))
authorized = self._authorize_unconfirmed_identity(req, obj, referrers,
roles)
if not authorized:
return self.denied_response(req)
def _authorize_unconfirmed_identity(self, req, obj, referrers, roles):
""""
Perform authorization for access that does not require a
confirmed identity.
:returns: A boolean if authorization is granted or denied. None if
a determination could not be made.
"""
# Allow container sync.
if (req.environ.get('swift_sync_key')
and req.environ['swift_sync_key'] ==
req.headers.get('x-container-sync-key', None)
and 'x-timestamp' in req.headers
and (req.remote_addr in self.allowed_sync_hosts
or swift_utils.get_remote_client(req)
in self.allowed_sync_hosts)):
log_msg = 'allowing proxy %s for container-sync' % req.remote_addr
self.logger.debug(log_msg)
return True
# Check if referrer is allowed.
if swift_acl.referrer_allowed(req.referer, referrers):
if obj or '.rlistings' in roles:
log_msg = 'authorizing %s via referer ACL' % req.referrer
self.logger.debug(log_msg)
return True
return False
def denied_response(self, req):
"""Deny WSGI Response.
Returns a standard WSGI response callable with the status of 403 or 401
depending on whether the REMOTE_USER is set or not.
"""
if req.remote_user:
return webob.exc.HTTPForbidden(request=req)
else:
return webob.exc.HTTPUnauthorized(request=req)
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return KeystoneAuth(app, conf)
return auth_filter
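# Hypothetical wiring sketch (not part of the original middleware): this mirrors
# what paste.deploy does after reading the [filter:keystoneauth] section shown in
# the class docstring. `fake_app` and the conf values below are assumptions made
# for illustration only; running it still requires swift to be installed.
if __name__ == '__main__':
    def fake_app(environ, start_response):
        # Minimal stand-in for the rest of the proxy pipeline.
        start_response('204 No Content', [])
        return [b'']
    auth_filter = filter_factory({}, operator_roles='admin, swiftoperator')
    wrapped = auth_filter(fake_app)  # `wrapped` is a KeystoneAuth instance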
|
|
#!/usr/bin/env python
# Dan Blankenberg
from __future__ import print_function
import base64
import datetime
import logging
import optparse
import os
import shutil
import subprocess
import tempfile
from json import (
dumps,
loads
)
from os.path import basename
from xml.etree.ElementTree import tostring
try:
# For Python 3.0 and later
from urllib.request import urlopen
except ImportError:
# Fall back to Python 2 imports
from urllib2 import urlopen
_log_name = __name__
if _log_name == '__builtin__':
_log_name = 'toolshed.installed.g2.rsync.data.manager'
log = logging.getLogger(_log_name)
# Get the Data from the Galaxy Project rsync server
RSYNC_CMD = 'rsync'
RSYNC_SERVER = "rsync://datacache.g2.bx.psu.edu/"
LOCATION_DIR = "location"
INDEX_DIR = "indexes"
# Pull the Tool Data Table files from github
# FIXME: These files should be accessible from the rsync server directly.
TOOL_DATA_TABLE_CONF_XML_URLS = {'main': "https://raw.githubusercontent.com/galaxyproject/usegalaxy-playbook/master/env/main/files/galaxy/config/tool_data_table_conf.xml",
'test': "https://raw.githubusercontent.com/galaxyproject/usegalaxy-playbook/master/env/test/files/galaxy/config/tool_data_table_conf.xml"}
# Replace data table source entries with local temporary location
GALAXY_DATA_CANONICAL_PATH = "/galaxy/data/"
TOOL_DATA_TABLE_CONF_XML_REPLACE_SOURCE = '<file path="%slocation/' % (GALAXY_DATA_CANONICAL_PATH)
TOOL_DATA_TABLE_CONF_XML_REPLACE_TARGET = '<file path="%s/'
# Some basic Caching, so we don't have to reload and download everything every time
CACHE_TIME = datetime.timedelta(minutes=10)
TOOL_DATA_TABLES_LOADED_BY_URL = {}
# Entries will not be selected by default
DEFAULT_SELECTED = False
# Exclude data tables without a 'path' column or that are in the manual exclude list
PATH_COLUMN_NAMES = ['path']
EXCLUDE_DATA_TABLES = []
# TODO: Make additional handler actions available for tables that can't fit into the basic
# "take the value of path as a dir and copy its contents" approach,
# e.g. mafs. The maf table is goofy and doesn't have path defined in the <table> def,
# but it does exist in the .loc.
# --- These methods are called by/within the Galaxy Application
def exec_before_job(app, inp_data, out_data, param_dict, tool=None, **kwd):
# Look for any data tables that haven't been defined for this data manager before and dynamically add them to Galaxy
param_dict = dict(**param_dict)
param_dict['data_table_entries'] = param_dict.get('data_table_entries', [])
if not isinstance(param_dict['data_table_entries'], list):
param_dict['data_table_entries'] = [param_dict['data_table_entries']]
param_dict['data_table_entries'] = ",".join(param_dict['data_table_entries'])
if tool:
tool_shed_repository = tool.tool_shed_repository
else:
tool_shed_repository = None
tdtm = None
data_manager = app.data_managers.get_manager(tool.data_manager_id, None)
data_table_entries = get_data_table_entries(param_dict)
data_tables = load_data_tables_from_url(data_table_class=app.tool_data_tables.__class__).get('data_tables')
for data_table_name, entries in data_table_entries.items():
# get data table managed by this data Manager
has_data_table = app.tool_data_tables.get_tables().get(str(data_table_name))
if has_data_table:
has_data_table = bool(has_data_table.get_filename_for_source(data_manager, None))
if not has_data_table:
if tdtm is None:
from tool_shed.tools import data_table_manager
tdtm = data_table_manager.ToolDataTableManager(app)
target_dir, tool_path, relative_target_dir = tdtm.get_target_install_dir(tool_shed_repository)
# Dynamically add this data table
log.debug("Attempting to dynamically create a missing Tool Data Table named %s." % data_table_name)
data_table = data_tables[data_table_name]
repo_info = tdtm.generate_repository_info_elem_from_repository(tool_shed_repository, parent_elem=None)
if repo_info is not None:
repo_info = tostring(repo_info)
tmp_file = tempfile.NamedTemporaryFile(mode="w")
tmp_file.write(get_new_xml_definition(app, data_table, data_manager, repo_info, target_dir))
tmp_file.flush()
app.tool_data_tables.add_new_entries_from_config_file(tmp_file.name, None, app.config.shed_tool_data_table_config, persist=True)
tmp_file.close()
def galaxy_code_get_available_data_tables(trans):
# list of data tables
found_tables = get_available_tables(trans)
rval = [(x, x, DEFAULT_SELECTED) for x in found_tables]
return rval
def galaxy_code_get_available_data_tables_entries(trans, dbkey, data_table_names):
# available entries, optionally filtered by dbkey and table names
if dbkey in [None, '', '?']:
dbkey = None
if data_table_names in [None, '', '?']:
data_table_names = None
found_tables = get_available_tables_for_dbkey(trans, dbkey, data_table_names)
dbkey_text = '(%s) ' % (dbkey) if dbkey else ''
rval = [("%s%s" % (dbkey_text, x[0]), base64.b64encode(dumps(dict(name=x[0].split(': ')[0], entry=x[1]), sort_keys=True).rstrip().encode('utf-8')).decode('utf-8'), DEFAULT_SELECTED) for x in found_tables.items()]
return rval
# --- End Galaxy called Methods ---
def rsync_urljoin(base, url):
    # urlparse.urljoin doesn't work correctly for our use case,
    # probably because it doesn't recognize the rsync scheme
base = base.rstrip('/')
url = url.lstrip('/')
return "%s/%s" % (base, url)
def rsync_list_dir(server, dir=None, skip_names=[]):
# drwxr-xr-x 50 2014/05/16 20:58:11 .
if dir:
dir = rsync_urljoin(server, dir)
else:
dir = server
rsync_response = tempfile.NamedTemporaryFile()
rsync_stderr = tempfile.NamedTemporaryFile()
rsync_cmd = [RSYNC_CMD, '--list-only', dir]
return_code = subprocess.call(rsync_cmd, stdout=rsync_response, stderr=rsync_stderr)
rsync_response.flush()
rsync_response.seek(0)
rsync_stderr.flush()
rsync_stderr.seek(0)
if return_code:
msg = "stdout:\n%s\nstderr:\n%s" % (rsync_response.read(), rsync_stderr.read())
rsync_response.close()
rsync_stderr.close()
raise Exception('Failed to execute rsync command (%s), returncode=%s. Rsync_output:\n%s' % (rsync_cmd, return_code, msg))
rsync_stderr.close()
rval = {}
for line in rsync_response:
perms, line = line.split(None, 1)
line = line.strip()
size, line = line.split(None, 1)
line = line.strip()
date, line = line.split(None, 1)
line = line.strip()
time, line = line.split(None, 1)
name = line.strip()
if name in skip_names:
continue
rval[name] = dict(name=name, permissions=perms, bytes=size, date=date, time=time)
rsync_response.close()
return rval
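# rsync_list_dir() maps each listed name to its parsed attributes, e.g.
# (illustrative entry, fields per the listing format shown above):
#     {'hg19': {'name': 'hg19', 'permissions': 'drwxr-xr-x', 'bytes': '50',
#               'date': '2014/05/16', 'time': '20:58:11'}}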
def rsync_sync_to_dir(source, target):
rsync_response = tempfile.NamedTemporaryFile()
rsync_stderr = tempfile.NamedTemporaryFile()
rsync_cmd = [RSYNC_CMD, '-avzP', source, target]
return_code = subprocess.call(rsync_cmd, stdout=rsync_response, stderr=rsync_stderr)
rsync_response.flush()
rsync_response.seek(0)
rsync_stderr.flush()
rsync_stderr.seek(0)
if return_code:
msg = "stdout:\n%s\nstderr:\n%s" % (rsync_response.read(), rsync_stderr.read())
rsync_response.close()
rsync_stderr.close()
raise Exception('Failed to execute rsync command (%s), returncode=%s. Rsync_output:\n%s' % (rsync_cmd, return_code, msg))
rsync_response.close()
rsync_stderr.close()
return return_code
def data_table_needs_refresh(cached_data_table, url):
if cached_data_table is None:
return True, {}
if datetime.datetime.now() - cached_data_table.get('time_loaded') > CACHE_TIME:
data_table_text = urlopen(url).read().decode('utf-8')
if cached_data_table.get('data_table_text', None) != data_table_text:
return True, {'data_table_text': data_table_text}
loc_file_attrs = rsync_list_dir(RSYNC_SERVER, LOCATION_DIR)
if cached_data_table.get('loc_file_attrs', None) != loc_file_attrs:
return True, {'loc_file_attrs': loc_file_attrs}
return False, {}
def load_data_tables_from_url(url=None, site='main', data_table_class=None):
if not url:
url = TOOL_DATA_TABLE_CONF_XML_URLS.get(site, None)
assert url, ValueError('You must provide either a URL or a site=name.')
cached_data_table = TOOL_DATA_TABLES_LOADED_BY_URL.get(url, None)
refresh, attribs = data_table_needs_refresh(cached_data_table, url)
if refresh:
data_table_text = attribs.get('data_table_text') or urlopen(url).read().decode('utf-8')
loc_file_attrs = attribs.get('loc_file_attrs') or rsync_list_dir(RSYNC_SERVER, LOCATION_DIR)
tmp_dir = tempfile.mkdtemp(prefix='rsync_g2_')
tmp_loc_dir = os.path.join(tmp_dir, 'location')
os.mkdir(tmp_loc_dir)
rsync_sync_to_dir(rsync_urljoin(RSYNC_SERVER, LOCATION_DIR), os.path.abspath(tmp_loc_dir))
new_data_table_text = data_table_text.replace(TOOL_DATA_TABLE_CONF_XML_REPLACE_SOURCE, TOOL_DATA_TABLE_CONF_XML_REPLACE_TARGET % (tmp_loc_dir))
data_table_fh = tempfile.NamedTemporaryFile(dir=tmp_dir, prefix='rysnc_data_manager_data_table_conf_', mode="w")
data_table_fh.write(new_data_table_text)
data_table_fh.flush()
tmp_data_dir = os.path.join(tmp_dir, 'tool-data')
os.mkdir(tmp_data_dir)
data_tables = data_table_class(tmp_data_dir, config_filename=data_table_fh.name)
for name, data_table in list(data_tables.data_tables.items()):
if name in EXCLUDE_DATA_TABLES or not data_table_has_path_column(data_table):
log.debug('Removing data table "%s" because it is excluded by name or does not have a defined "path" column.', name)
del data_tables.data_tables[name]
cached_data_table = {'data_tables': data_tables, 'tmp_dir': tmp_dir, 'data_table_text': data_table_text, 'tmp_loc_dir': tmp_loc_dir, 'loc_file_attrs': loc_file_attrs, 'time_loaded': datetime.datetime.now()}
TOOL_DATA_TABLES_LOADED_BY_URL[url] = cached_data_table
# delete the files
data_table_fh.close()
cleanup_before_exit(tmp_dir)
return cached_data_table
def data_table_has_path_column(data_table):
col_names = data_table.get_column_name_list()
for name in PATH_COLUMN_NAMES:
if name in col_names:
return True
return False
def get_available_tables(trans):
# list of data tables
data_tables = load_data_tables_from_url(data_table_class=trans.app.tool_data_tables.__class__)
return data_tables.get('data_tables').get_tables().keys()
def get_new_xml_definition(app, data_table, data_manager, repo_info=None, location_file_dir=None):
sub_dict = {'table_name': data_table.name, 'comment_char': '', 'columns': '', 'file_path': ''}
sub_dict.update(data_manager.get_tool_shed_repository_info_dict())
if data_table.comment_char:
sub_dict['comment_char'] = 'comment_char="%s"' % (data_table.comment_char)
for i, name in enumerate(data_table.get_column_name_list()):
if name is not None:
sub_dict['columns'] = "%s\n%s" % (sub_dict['columns'], '<column name="%s" index="%s" />' % (name, i))
location_file_dir = location_file_dir or app.config.galaxy_data_manager_data_path
for filename in data_table.filenames.keys():
sub_dict['file_path'] = basename(filename)
sub_dict['file_path'] = os.path.join(location_file_dir, sub_dict['file_path']) # os.path.abspath?
if not os.path.exists(sub_dict['file_path']):
# Create empty file
open(sub_dict['file_path'], 'wb+').close()
break
sub_dict['repo_info'] = repo_info or ''
return """
<tables><table name="%(table_name)s" %(comment_char)s>
%(columns)s
<file path="%(file_path)s" />
%(repo_info)s
</table></tables>
""" % sub_dict
def get_available_tables_for_dbkey(trans, dbkey, data_table_names):
data_tables = load_data_tables_from_url(data_table_class=trans.app.tool_data_tables.__class__)
rval = {}
for name, data_table in data_tables.get('data_tables').get_tables().items():
if (not data_table_names or name in data_table_names):
            # TODO: check that the columns are similar
if not dbkey:
entry_getter = data_table.get_named_fields_list()
else:
entry_getter = data_table.get_entries('dbkey', dbkey, None, default=[])
for entry in entry_getter:
name = "%s: %s" % (data_table.name, dumps(entry))
rval[name] = entry
return rval
def split_path_all(path):
rval = []
path = path.rstrip('/')
while True:
head, tail = os.path.split(path)
if tail:
rval.append(tail)
path = head
elif head:
rval.append(head)
break
else:
break
rval.reverse()
return rval
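# e.g. split_path_all('genomes/hg19/sam_index/') -> ['genomes', 'hg19', 'sam_index']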
def get_data_for_path(path, data_root_dir):
    # We list the dir with a trailing /, but copy the data without one:
    # listing with a / gives a '.' entry when it's a dir, while
    # syncing without the / copies the whole directory into the target
    # instead of just that directory's contents.
if path.startswith(GALAXY_DATA_CANONICAL_PATH):
path = path[len(GALAXY_DATA_CANONICAL_PATH):]
make_path = path
rsync_source = rsync_urljoin(rsync_urljoin(RSYNC_SERVER, INDEX_DIR), path)
if rsync_source.endswith('/'):
rsync_source = rsync_source[:-1]
try:
dir_list = rsync_list_dir(rsync_source + "/")
except Exception:
dir_list = None
while not dir_list or '.' not in dir_list:
head, tail = os.path.split(make_path)
if not head:
head = tail
make_path = head
rsync_source = rsync_urljoin(rsync_urljoin(RSYNC_SERVER, INDEX_DIR), head) # if we error here, likely due to a connection issue
if rsync_source.endswith('/'):
rsync_source = rsync_source[:-1]
dir_list = rsync_list_dir(rsync_source + "/")
split_path = split_path_all(make_path)
target_path = data_root_dir
for p in split_path[:-1]:
target_path = os.path.join(target_path, p)
if not os.path.exists(target_path):
os.mkdir(target_path)
rsync_sync_to_dir(rsync_source, target_path)
return path
def get_data_and_munge_path(data_table_name, data_table_entry, data_root_dir):
path_cols = []
for key, value in data_table_entry.items():
if key in PATH_COLUMN_NAMES:
path_cols.append((key, value))
if path_cols:
for col_name, value in path_cols:
if value.startswith(GALAXY_DATA_CANONICAL_PATH):
data_table_entry[col_name] = get_data_for_path(value, data_root_dir)
else:
print('unable to determine location of rsync data for', data_table_name, data_table_entry)
return data_table_entry
def fulfill_data_table_entries(data_table_entries, data_manager_dict, data_root_dir):
for data_table_name, entries in data_table_entries.items():
for entry in entries:
entry = get_data_and_munge_path(data_table_name, entry, data_root_dir)
_add_data_table_entry(data_manager_dict, data_table_name, entry)
return data_manager_dict
def _add_data_table_entry(data_manager_dict, data_table_name, data_table_entry):
data_manager_dict['data_tables'] = data_manager_dict.get('data_tables', {})
data_manager_dict['data_tables'][data_table_name] = data_manager_dict['data_tables'].get(data_table_name, [])
data_manager_dict['data_tables'][data_table_name].append(data_table_entry)
return data_manager_dict
def cleanup_before_exit(tmp_dir):
if tmp_dir and os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
def get_data_table_entries(params):
rval = {}
data_table_entries = params.get('data_table_entries', None)
if data_table_entries:
for entry_text in data_table_entries.split(','):
entry_text = base64.b64decode(entry_text.strip().encode('utf-8'))
entry_dict = loads(entry_text)
data_table_name = entry_dict['name']
data_table_entry = entry_dict['entry']
rval[data_table_name] = rval.get(data_table_name, [])
rval[data_table_name].append(data_table_entry)
return rval
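# The comma-separated entries decoded above are the base64 strings produced by
# galaxy_code_get_available_data_tables_entries(), roughly (illustrative values):
#     base64.b64encode(dumps({'name': 'all_fasta', 'entry': {...}},
#                            sort_keys=True).encode('utf-8')).decode('utf-8')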
def main():
parser = optparse.OptionParser()
(options, args) = parser.parse_args()
filename = args[0]
params = loads(open(filename).read())
target_directory = params['output_data'][0]['extra_files_path']
os.mkdir(target_directory)
data_manager_dict = {}
data_table_entries = get_data_table_entries(params['param_dict'])
# Populate the data Tables
data_manager_dict = fulfill_data_table_entries(data_table_entries, data_manager_dict, target_directory)
# save info to json file
open(filename, 'w').write(dumps(data_manager_dict, sort_keys=True))
if __name__ == "__main__":
main()
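# Sketch of the JSON params file handed to main() by Galaxy (keys taken from the
# code above, values illustrative):
#     {"param_dict": {"data_table_entries": "<base64>,<base64>"},
#      "output_data": [{"extra_files_path": "/path/to/extra_files"}]}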
|
|
"""
Support for deCONZ light.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/light.deconz/
"""
from homeassistant.components.deconz.const import (
CONF_ALLOW_DECONZ_GROUPS, DOMAIN as DATA_DECONZ,
DATA_DECONZ_ID, DATA_DECONZ_UNSUB, DECONZ_DOMAIN,
COVER_TYPES, SWITCH_TYPES)
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_FLASH, ATTR_HS_COLOR,
ATTR_TRANSITION, EFFECT_COLORLOOP, FLASH_LONG, FLASH_SHORT,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT,
SUPPORT_FLASH, SUPPORT_TRANSITION, Light)
from homeassistant.core import callback
from homeassistant.helpers.device_registry import CONNECTION_ZIGBEE
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.color as color_util
DEPENDENCIES = ['deconz']
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Old way of setting up deCONZ lights and group."""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the deCONZ lights and groups from a config entry."""
@callback
def async_add_light(lights):
"""Add light from deCONZ."""
entities = []
for light in lights:
if light.type not in COVER_TYPES + SWITCH_TYPES:
entities.append(DeconzLight(light))
async_add_entities(entities, True)
hass.data[DATA_DECONZ_UNSUB].append(
async_dispatcher_connect(hass, 'deconz_new_light', async_add_light))
@callback
def async_add_group(groups):
"""Add group from deCONZ."""
entities = []
allow_group = config_entry.data.get(CONF_ALLOW_DECONZ_GROUPS, True)
for group in groups:
if group.lights and allow_group:
entities.append(DeconzLight(group))
async_add_entities(entities, True)
hass.data[DATA_DECONZ_UNSUB].append(
async_dispatcher_connect(hass, 'deconz_new_group', async_add_group))
async_add_light(hass.data[DATA_DECONZ].lights.values())
async_add_group(hass.data[DATA_DECONZ].groups.values())
class DeconzLight(Light):
"""Representation of a deCONZ light."""
def __init__(self, light):
"""Set up light and add update callback to get data from websocket."""
self._light = light
self._features = SUPPORT_BRIGHTNESS
self._features |= SUPPORT_FLASH
self._features |= SUPPORT_TRANSITION
if self._light.ct is not None:
self._features |= SUPPORT_COLOR_TEMP
if self._light.xy is not None:
self._features |= SUPPORT_COLOR
if self._light.effect is not None:
self._features |= SUPPORT_EFFECT
async def async_added_to_hass(self):
"""Subscribe to lights events."""
self._light.register_async_callback(self.async_update_callback)
self.hass.data[DATA_DECONZ_ID][self.entity_id] = self._light.deconz_id
async def async_will_remove_from_hass(self) -> None:
"""Disconnect light object when removed."""
self._light.remove_callback(self.async_update_callback)
self._light = None
@callback
def async_update_callback(self, reason):
"""Update the light's state."""
self.async_schedule_update_ha_state()
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._light.brightness
@property
def effect_list(self):
"""Return the list of supported effects."""
return [EFFECT_COLORLOOP]
@property
def color_temp(self):
"""Return the CT color value."""
if self._light.colormode != 'ct':
return None
return self._light.ct
@property
def hs_color(self):
"""Return the hs color value."""
if self._light.colormode in ('xy', 'hs') and self._light.xy:
return color_util.color_xy_to_hs(*self._light.xy)
return None
@property
def is_on(self):
"""Return true if light is on."""
return self._light.state
@property
def name(self):
"""Return the name of the light."""
return self._light.name
@property
def unique_id(self):
"""Return a unique identifier for this light."""
return self._light.uniqueid
@property
def supported_features(self):
"""Flag supported features."""
return self._features
@property
def available(self):
"""Return True if light is available."""
return self._light.reachable
@property
def should_poll(self):
"""No polling needed."""
return False
async def async_turn_on(self, **kwargs):
"""Turn on light."""
data = {'on': True}
if ATTR_COLOR_TEMP in kwargs:
data['ct'] = kwargs[ATTR_COLOR_TEMP]
if ATTR_HS_COLOR in kwargs:
data['xy'] = color_util.color_hs_to_xy(*kwargs[ATTR_HS_COLOR])
if ATTR_BRIGHTNESS in kwargs:
data['bri'] = kwargs[ATTR_BRIGHTNESS]
if ATTR_TRANSITION in kwargs:
data['transitiontime'] = int(kwargs[ATTR_TRANSITION] * 10)
if ATTR_FLASH in kwargs:
if kwargs[ATTR_FLASH] == FLASH_SHORT:
data['alert'] = 'select'
del data['on']
elif kwargs[ATTR_FLASH] == FLASH_LONG:
data['alert'] = 'lselect'
del data['on']
if ATTR_EFFECT in kwargs:
if kwargs[ATTR_EFFECT] == EFFECT_COLORLOOP:
data['effect'] = 'colorloop'
else:
data['effect'] = 'none'
await self._light.async_set_state(data)
async def async_turn_off(self, **kwargs):
"""Turn off light."""
data = {'on': False}
if ATTR_TRANSITION in kwargs:
data['bri'] = 0
data['transitiontime'] = int(kwargs[ATTR_TRANSITION] * 10)
if ATTR_FLASH in kwargs:
if kwargs[ATTR_FLASH] == FLASH_SHORT:
data['alert'] = 'select'
del data['on']
elif kwargs[ATTR_FLASH] == FLASH_LONG:
data['alert'] = 'lselect'
del data['on']
await self._light.async_set_state(data)
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attributes = {}
attributes['is_deconz_group'] = self._light.type == 'LightGroup'
if self._light.type == 'LightGroup':
attributes['all_on'] = self._light.all_on
return attributes
@property
def device_info(self):
"""Return a device description for device registry."""
if (self._light.uniqueid is None or
self._light.uniqueid.count(':') != 7):
return None
serial = self._light.uniqueid.split('-', 1)[0]
bridgeid = self.hass.data[DATA_DECONZ].config.bridgeid
return {
'connections': {(CONNECTION_ZIGBEE, serial)},
'identifiers': {(DECONZ_DOMAIN, serial)},
'manufacturer': self._light.manufacturer,
'model': self._light.modelid,
'name': self._light.name,
'sw_version': self._light.swversion,
'via_hub': (DECONZ_DOMAIN, bridgeid),
}
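# Example of the state payload DeconzLight.async_turn_on assembles for a two
# second fade to brightness 180 at color temperature 400 (illustrative values):
#     {'on': True, 'bri': 180, 'ct': 400, 'transitiontime': 20}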
|
|
import logging
log = logging.getLogger('fabric.fabalicious.scripts')
from base import BaseMethod
from fabric.api import *
from fabric.contrib.files import exists
from fabric.network import *
from fabric.context_managers import settings as _settings
from lib import configuration
import os, re, copy
class ScriptMethod(BaseMethod):
@staticmethod
def supports(methodName):
return methodName == 'script'
@staticmethod
def getGlobalSettings():
return {
'scripts': {},
'executables': {}
}
def printReplacements(self, replacements):
for key in sorted(replacements.keys()):
value = replacements[key]
print "{key:<40} | {value}".format(key = key, value=value)
def cd(self, folder, runLocally):
if runLocally:
return lcd(folder)
else:
return cd(folder)
def runScriptImpl(self, rootFolder, commands, config, runLocally, callbacks= {}, environment = {}, replacements = {}):
pattern = re.compile('\%(\S*)\%')
state = {
'warnOnly': False,
'config': config,
'return_code': 0,
'runLocally': runLocally
}
# preflight
ok = True
for line in commands:
if pattern.search(line) != None:
log.error('Found replacement-pattern in script-line "%s", aborting ...' % line)
ok = False
for key in environment:
if pattern.search(environment[key]) != None:
log.error('Found replacement-pattern in environment "%s:%s", aborting ...' % (key, environment[key]))
ok = False
if not ok:
self.printReplacements(replacements)
exit(1)
saved_output_prefix = env.output_prefix
env.output_prefix = False
for line in commands:
with self.cd(rootFolder, runLocally), shell_env(**environment), hide('running'), show('output'):
handled = False
start_p = line.find('(')
end_p = line.rfind(')')
if start_p >= 0 and end_p > 0:
func_name = line[0:start_p]
if func_name in callbacks:
arguments = False
func_args = line[start_p+1: end_p]
if func_args.strip() != '':
arguments = func_args.split(',')
arguments = map(lambda x: x.strip(), arguments)
log.debug('Executing "%s"' % func_name)
if arguments:
callbacks[func_name](state, *arguments)
else:
callbacks[func_name](state)
handled = True
if not handled:
line = self.expandCommand(line)
log.debug('Running "%s"' % line)
if state['warnOnly']:
with warn_only():
result = local(line) if runLocally else run(line)
state['return_code'] = state['return_code'] or result.return_code
else:
result = local(line) if runLocally else run(line)
state['return_code'] = state['return_code'] or result.return_code
env.output_prefix = saved_output_prefix
return state['return_code']
def expandEnvironment(self, environment, replacements):
parsed_environment = {}
pattern = re.compile('|'.join(re.escape(key) for key in replacements.keys()))
for key in environment:
parsed_environment[key] = pattern.sub(lambda x: replacements[x.group()], environment[key])
return parsed_environment
def executeCallback(self, context, command, *args, **kwargs):
config = context['config']
if not config['runLocally']:
host_string = join_host_strings(config['user'], config['host'], config['port'])
kwargs['host'] = host_string
execute(command, *args, **kwargs)
def runTaskCallback(self, context, *args, **kwargs):
log.error('run_task is not supported anymore, use "execute(docker, <your_task>)"');
def failOnErrorCallback(self, context, flag):
if flag == '1':
context['warnOnly'] = False
else:
context['warnOnly'] = True
def failOnMissingDirectory(self, context, directory, message):
folder_exists = True
if context['runLocally']:
folder_exists = os.path.exists(directory)
else:
folder_exists = exists(directory)
if not folder_exists:
log.error(message)
log.error('Missing: %s' % directory)
exit(1);
def runScript(self, config, **kwargs):
self.setRunLocally(config)
script = kwargs['script']
callbacks = kwargs['callbacks'] if 'callbacks' in kwargs else {}
variables = kwargs['variables'] if 'variables' in kwargs else {}
environment = kwargs['environment'] if 'environment' in kwargs else {}
root_folder = kwargs['rootFolder'] if 'rootFolder' in kwargs else config['siteFolder'] if 'siteFolder' in config else '.'
runLocally = kwargs['runLocally'] if 'runLocally' in kwargs else self.run_locally
if 'environment' in config:
environment = configuration.data_merge(config['environment'], environment)
variables['host'] = config
settings = copy.deepcopy(configuration.getSettings())
map(lambda x: settings.pop(x,None), ['hosts', 'dockerHosts'])
variables['settings'] = settings
callbacks['execute'] = self.executeCallback
callbacks['run_task'] = self.runTaskCallback
callbacks['fail_on_error'] = self.failOnErrorCallback
callbacks['fail_on_missing_directory'] = self.failOnMissingDirectory
replacements = self.expandVariables(variables);
commands = self.expandCommands(script, replacements)
# Do it again to support replacements which needs to be replaced again.
commands = self.expandCommands(commands, replacements)
environment = self.expandEnvironment(environment, replacements)
for need in config['needs']:
environment[need.upper() + '_AVAILABLE'] = "1"
return_code = self.runScriptImpl(root_folder, commands, config, runLocally, callbacks, environment, replacements)
if return_code:
log.error('Due to earlier errors quitting now.')
exit(return_code)
def runTaskSpecificScript(self, taskName, config, **kwargs):
common_scripts = configuration.getSettings('common')
type = config['type']
if type in common_scripts and isinstance(common_scripts[type], list):
log.error("Found old-style common-scripts. Please regroup by common > taskName > type > commands.")
if taskName in common_scripts:
if type in common_scripts[taskName]:
script = common_scripts[taskName][type]
log.info('Running common script for task %s and type %s' % (taskName, type))
self.runScript(config, script=script)
if taskName in config:
script = config[taskName]
log.info('Running host-script for task %s and type %s' % (taskName, type))
self.runScript(config, script=script)
def fallback(self, taskName, configuration, **kwargs):
self.runTaskSpecificScript(taskName, configuration, **kwargs)
def preflight(self, taskName, configuration, **kwargs):
self.runTaskSpecificScript(taskName + "Prepare", configuration, **kwargs)
def postflight(self, taskName, configuration, **kwargs):
self.runTaskSpecificScript(taskName + "Finished", configuration, **kwargs)
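# Illustrative script expansion (the variable name is an assumption): a line like
#     cd %host.rootFolder% && ls
# is expanded via expandVariables()/expandCommands() before execution; any
# unresolved %...% placeholder that survives expansion makes runScriptImpl()
# print the available replacements and abort.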
|
|
# -*- coding: utf-8 -*-
from copy import copy
import sympy
from sympy import Matrix
from pysymoro.geometry import compute_screw_transform
from pysymoro.geometry import compute_rot_trans
from pysymoro.kinematics import compute_vel_acc
from pysymoro.kinematics import compute_omega
from symoroutils import tools
from symoroutils.paramsinit import ParamsInit
def inertia_spatial(inertia, ms_tensor, mass):
"""
Compute spatial inertia matrix (internal function).
"""
return Matrix([
(mass * sympy.eye(3)).row_join(tools.skew(ms_tensor).transpose()),
tools.skew(ms_tensor).row_join(inertia)
])
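# The 6x6 spatial inertia assembled above has the block structure
#     [ m * I3      skew(ms)^T ]
#     [ skew(ms)    J          ]
# where J is the 3x3 inertia tensor, ms the first moment and m the mass; this is
# the form consumed by the composite/star recursions below.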
def compute_torque(robo, symo, j, jaj, react_wrench, torque):
"""
Compute torque (internal function).
Note:
torque is the output parameter.
"""
if robo.sigma[j] == 2:
tau_total = 0
else:
tau = react_wrench[j].transpose() * jaj[j]
fric_rotor = robo.fric_s(j) + robo.fric_v(j) + robo.tau_ia(j)
tau_total = tau[0, 0] + fric_rotor
torque[j] = symo.replace(tau_total, 'GAM', j, forced=True)
def compute_joint_torque(robo, symo, j, Fjnt, Njnt, torque):
"""
Compute actuator torques - projection of joint wrench on the joint
axis (internal function).
Note:
torque is the output parameter.
"""
if robo.sigma[j] == 2:
tau_total = 0
else:
tau = (robo.sigma[j] * Fjnt[j]) + ((1 - robo.sigma[j]) * Njnt[j])
fric_rotor = robo.fric_s(j) + robo.fric_v(j) + robo.tau_ia(j)
tau_total = tau[2] + fric_rotor
torque[j] = symo.replace(tau_total, 'GAM', j, forced=True)
def compute_dynamic_wrench(robo, symo, j, w, wdot, U, vdot, F, N):
"""
Compute total wrench of link j (internal function).
Note:
F, N are the output parameters
"""
F[j] = (robo.M[j] * vdot[j]) + (U[j] * robo.MS[j])
F[j] = symo.mat_replace(F[j], 'F', j)
Psi = robo.J[j] * w[j]
Psi = symo.mat_replace(Psi, 'PSI', j)
N[j] = (robo.J[j] * wdot[j]) + (tools.skew(w[j]) * Psi)
N[j] = symo.mat_replace(N[j], 'No', j)
def compute_joint_wrench(
robo, symo, j, antRj, antPj, vdot, F, N, Fjnt, Njnt, Fex, Nex
):
"""
Compute reaction wrench (for default Newton-Euler) of joint j
(internal function).
Note:
Fjnt, Njnt, Fex, Nex are the output parameters
"""
forced = True if j == 0 else False
i = robo.ant[j]
Fjnt[j] = F[j] + Fex[j]
Fjnt[j] = symo.mat_replace(Fjnt[j], 'E', j, forced=forced)
Njnt[j] = N[j] + Nex[j] + (tools.skew(robo.MS[j]) * vdot[j])
Njnt[j] = symo.mat_replace(Njnt[j], 'N', j, forced=forced)
f_ant = antRj[j] * Fjnt[j]
f_ant = symo.mat_replace(f_ant, 'FDI', j)
if i != -1:
Fex[i] = Fex[i] + f_ant
Nex[i] = Nex[i] + \
(antRj[j] * Njnt[j]) + (tools.skew(antPj[j]) * f_ant)
def compute_beta(robo, symo, j, w, beta):
"""
Compute beta wrench which is a combination of coriolis forces,
centrifugal forces and external forces (internal function).
Note:
beta is the output parameter
"""
expr1 = robo.J[j] * w[j]
expr1 = symo.mat_replace(expr1, 'JW', j)
expr2 = tools.skew(w[j]) * expr1
expr2 = symo.mat_replace(expr2, 'KW', j)
expr3 = tools.skew(w[j]) * robo.MS[j]
expr4 = tools.skew(w[j]) * expr3
expr4 = symo.mat_replace(expr4, 'SW', j)
expr5 = -robo.Nex[j] - expr2
expr6 = -robo.Fex[j] - expr4
beta[j] = Matrix([expr6, expr5])
beta[j] = symo.mat_replace(beta[j], 'BETA', j)
def compute_gamma(robo, symo, j, antRj, antPj, w, wi, gamma):
"""
Compute gyroscopic acceleration (internal function).
Note:
gamma is the output parameter
"""
i = robo.ant[j]
expr1 = tools.skew(wi[j]) * Matrix([0, 0, robo.qdot[j]])
expr1 = symo.mat_replace(expr1, 'WQ', j)
expr2 = (1 - robo.sigma[j]) * expr1
expr3 = 2 * robo.sigma[j] * expr1
expr4 = tools.skew(w[i]) * antPj[j]
expr5 = tools.skew(w[i]) * expr4
expr6 = antRj[j].transpose() * expr5
expr7 = expr6 + expr3
expr7 = symo.mat_replace(expr7, 'LW', j)
gamma[j] = Matrix([expr7, expr2])
gamma[j] = symo.mat_replace(gamma[j], 'GYACC', j)
def compute_zeta(robo, symo, j, gamma, jaj, zeta, qddot=None):
"""
Compute relative acceleration (internal function).
Note:
zeta is the output parameter
"""
    if qddot is None:
qddot = robo.qddot
expr = gamma[j] + (qddot[j] * jaj[j])
zeta[j] = symo.mat_replace(expr, 'ZETA', j)
def compute_composite_inertia(
robo, symo, j, antRj, antPj,
comp_inertia3, comp_ms, comp_mass, composite_inertia
):
"""
Compute composite inertia (internal function).
Note:
comp_inertia3, comp_ms, comp_mass, composite_inertia are the
output parameters.
"""
i = robo.ant[j]
# update inertia3, ms, mass from inertia in order to have the
# intermediate variables
comp_inertia3[i] = composite_inertia[i][3:, 3:]
comp_ms[i] = tools.skew2vec(composite_inertia[i][3:, 0:3])
comp_mass[i] = composite_inertia[i][0, 0]
comp_inertia3[j] = composite_inertia[j][3:, 3:]
comp_ms[j] = tools.skew2vec(composite_inertia[j][3:, 0:3])
comp_mass[j] = composite_inertia[j][0, 0]
# actual computation
i_ms_j_c = antRj[j] * comp_ms[j]
i_ms_j_c = symo.mat_replace(i_ms_j_c, 'AS', j)
expr1 = antRj[j] * comp_inertia3[j]
expr1 = symo.mat_replace(expr1, 'AJ', j)
expr2 = expr1 * antRj[j].transpose()
expr2 = symo.mat_replace(expr2, 'AJA', j)
expr3 = tools.skew(antPj[j]) * tools.skew(i_ms_j_c)
expr3 = symo.mat_replace(expr3, 'PAS', j)
i_comp_inertia3_j = expr2 - (expr3 + expr3.transpose()) + \
(comp_mass[j] * tools.skew(antPj[j]) * \
tools.skew(antPj[j]).transpose())
i_comp_inertia3_j = symo.mat_replace(i_comp_inertia3_j, 'JJI', j)
comp_inertia3[i] = comp_inertia3[i] + i_comp_inertia3_j
i_comp_ms_j = i_ms_j_c + (antPj[j] * comp_mass[j])
i_comp_ms_j = symo.mat_replace(i_comp_ms_j, 'MSJI', j)
comp_ms[i] = comp_ms[i] + i_comp_ms_j
i_comp_mass_j = symo.replace(comp_mass[j], 'MJI', j)
comp_mass[i] = comp_mass[i] + i_comp_mass_j
composite_inertia[i] = inertia_spatial(
comp_inertia3[i], comp_ms[i], comp_mass[i]
)
def compute_composite_beta(
robo, symo, j, jTant, zeta, composite_inertia, composite_beta
):
"""
Compute composite beta (internal function).
Note:
composite_beta is the output parameter
"""
i = robo.ant[j]
expr1 = composite_inertia[j] * zeta[j]
expr1 = symo.mat_replace(expr1, 'IZ', j)
expr2 = jTant[j].transpose() * expr1
expr2 = symo.mat_replace(expr2, 'SIZ', j)
expr3 = jTant[j].transpose() * composite_beta[j]
expr3 = symo.mat_replace(expr3, 'SBE', j)
composite_beta[i] = composite_beta[i] + expr3 - expr2
def replace_composite_terms(
symo, grandJ, beta, j, composite_inertia,
composite_beta, replace=False
):
"""
Replace composite inertia and beta (internal function).
Note:
        composite_inertia and composite_beta are the output parameters
"""
forced = False
if replace and j == 0: forced = False
composite_inertia[j] = symo.mat_replace(
grandJ[j], 'MJE', j, symmet=True, forced=forced
)
composite_beta[j] = symo.mat_replace(
beta[j], 'VBE', j, forced=forced
)
def replace_star_terms(
symo, grandJ, beta, j, star_inertia, star_beta, replace=False
):
"""
Replace star inertia and beta (internal function).
Note:
        star_inertia and star_beta are the output parameters
"""
forced = False
if replace and j == 0: forced = False
star_inertia[j] = symo.mat_replace(
grandJ[j], 'MJE', j, symmet=True, forced=forced
)
star_beta[j] = symo.mat_replace(beta[j], 'VBE', j, forced=forced)
def compute_composite_terms(
robo, symo, j, jTant, zeta,
composite_inertia, composite_beta
):
"""
Compute composite inertia and beta (internal function).
Note:
        composite_inertia and composite_beta are the output parameters
"""
i = robo.ant[j]
expr1 = jTant[j].transpose() * composite_inertia[j]
expr1 = symo.mat_replace(expr1, 'GX', j)
expr2 = expr1 * jTant[j]
expr2 = symo.mat_replace(expr2, 'TKT', j, symmet=True)
expr3 = expr1 * zeta[j]
expr3 = symo.mat_replace(expr3, 'SIZ', j)
expr4 = jTant[j].transpose() * composite_beta[j]
expr4 = symo.mat_replace(expr4, 'SBE', j)
composite_inertia[i] = composite_inertia[i] + expr2
composite_beta[i] = composite_beta[i] + expr4 - expr3
def compute_hinv(
robo, symo, j, jaj, star_inertia, jah, h_inv, flex=False
):
"""
Note:
h_inv and jah are the output parameters
"""
inertia_jaj = star_inertia[j] * jaj[j]
inertia_jaj = symo.mat_replace(inertia_jaj, 'JA', j)
h = jaj[j].dot(inertia_jaj)
if not flex:
h = h + robo.IA[j]
h_inv[j] = 1 / h
h_inv[j] = symo.replace(h_inv[j], 'JD', j)
jah[j] = inertia_jaj * h_inv[j]
jah[j] = symo.mat_replace(jah[j], 'JU', j)
def compute_tau(robo, symo, j, jaj, star_beta, tau, flex=False):
"""
Note:
tau is the output parameter
"""
if robo.sigma[j] == 2:
tau[j] = 0
else:
if flex:
joint_friction = 0
else:
joint_friction = robo.fric_s(j) + robo.fric_v(j)
tau[j] = jaj[j].dot(star_beta[j]) + robo.GAM[j] - joint_friction
tau[j] = symo.replace(tau[j], 'GW', j)
def compute_star_terms(
robo, symo, j, jaj, jTant, gamma, tau,
h_inv, jah, star_inertia, star_beta, flex=False
):
"""
Note:
h_inv, jah, star_inertia, star_beta are the output parameters
"""
i = robo.ant[j]
inertia_jaj = star_inertia[j] * jaj[j]
inertia_jaj = symo.mat_replace(inertia_jaj, 'JA', j)
h = jaj[j].dot(inertia_jaj)
if not flex:
h = h + robo.IA[j]
if not flex or robo.eta[j]:
h_inv[j] = 1 / h
h_inv[j] = symo.replace(h_inv[j], 'JD', j)
jah[j] = inertia_jaj * h_inv[j]
jah[j] = symo.mat_replace(jah[j], 'JU', j)
k_inertia = star_inertia[j] - (jah[j] * inertia_jaj.transpose())
k_inertia = symo.mat_replace(k_inertia, 'GK', j)
else:
k_inertia = star_inertia[j]
expr1 = k_inertia * gamma[j]
expr1 = symo.mat_replace(expr1, 'NG', j)
if not flex or robo.eta[j]:
expr2 = expr1 + (jah[j] * tau[j])
else:
expr2 = expr1 + (star_inertia[j] * jaj[j] * robo.qddot[j])
expr2 = symo.mat_replace(expr2, 'VS', j)
alpha = expr2 - star_beta[j]
alpha = symo.mat_replace(alpha, 'AP', j)
expr3 = jTant[j].transpose() * k_inertia
expr3 = symo.mat_replace(expr3, 'GX', j)
expr4 = expr3 * jTant[j]
expr4 = symo.mat_replace(expr4, 'TKT', j, symmet=True)
expr5 = jTant[j].transpose() * alpha
expr5 = symo.mat_replace(expr5, 'ALJI', j)
star_inertia[i] = star_inertia[i] + expr4
star_beta[i] = star_beta[i] - expr5
def compute_joint_accel(
robo, symo, j, jaj, jTant, h_inv, jah, gamma,
tau, grandVp, star_beta, star_inertia, qddot
):
"""
Compute joint acceleration (internal function)
Note:
qddot is the output parameter
"""
i = robo.ant[j]
expr1 = (jTant[j] * grandVp[i]) + gamma[j]
expr1 = symo.mat_replace(expr1, 'VR', j)
expr2 = jah[j].dot(expr1)
expr2 = symo.replace(expr2, 'GU', j)
if robo.sigma[j] == 2:
qddot[j] = 0
else:
qddot[j] = (h_inv[j] * tau[j]) - expr2
qddot[j] = symo.replace(qddot[j], 'QDP', j, forced=True)
def compute_link_accel(robo, symo, j, jTant, zeta, grandVp):
"""
Compute link acceleration (internal function).
Note:
grandVp is the output parameter
"""
i = robo.ant[j]
grandVp[j] = (jTant[j] * grandVp[i]) + zeta[j]
grandVp[j][:3, 0] = symo.mat_replace(grandVp[j][:3, 0], 'VP', j)
grandVp[j][3:, 0] = symo.mat_replace(grandVp[j][3:, 0], 'WP', j)
def write_numerical_base_acc(symo, inertia, beta_wrench, symmet=False):
"""
Write the base acceleration (6x1) vector to be computed numerically
using numpy in the output file.
"""
    # write starting comments
symo.write_line("# SOLVE NUMERICALLY FOR BASE ACCELERATION - START")
symo.write_line("# REQUIRES numpy")
# setup matrix numMJE0
symo.write_line("# setup numMJE0 matrix in numpy format")
symo.write_equation('numMJE0', 'numpy.zeros((6, 6))')
for i in xrange(inertia.rows):
for j in xrange(inertia.cols):
if inertia[i, j] != 0:
symo.write_equation(
'numMJE0[{row}, {col}]'.format(row=i, col=j),
str(inertia[i, j])
)
# setup matrix numVBE0
symo.write_line("# setup numVBE0 matrix in numpy format")
symo.write_equation('numVBE0', 'numpy.zeros((6, 1))')
for i in xrange(beta_wrench.rows):
if beta_wrench[i, 0] != 0:
symo.write_equation(
'numVBE0[{row}, 0]'.format(row=i),
str(beta_wrench[i, 0])
)
# numVP0 = numpy.linalg.solve(numMJE0, numVBE0)
symo.write_line("# compute solution")
symo.write_line("# In Matlab use")
symo.write_line("# numVP0 = numMJE0 \ numVBE0")
symo.write_equation(
'numVP0',
'numpy.linalg.solve(numMJE0, numVBE0)'
)
# assign elements of the computed solution vector
symo.write_line("# assign each element of the computed solution")
symo.write_line("# vector to be compatible with future computation")
for i in xrange(beta_wrench.rows):
idx = i + 1
vp_sym = 'VP{row}0'.format(row=idx)
if i > 2:
idx = idx - 3
vp_sym = 'WP{row}0'.format(row=idx)
symo.write_equation(vp_sym, 'numVP0[{row}, 0]'.format(row=i))
# write ending comments
symo.write_line("# SOLVE NUMERICALLY FOR BASE ACCELERATION - END")
def get_numerical_base_acc_out(base_acc):
"""
Return the base acceleration as formed by strings.
"""
base_acc = sympy.zeros(base_acc.rows, base_acc.cols)
for i in xrange(base_acc.rows):
idx = i + 1
vp_sym = 'VP{row}0'.format(row=idx)
if i > 2:
idx = idx - 3
vp_sym = 'WP{row}0'.format(row=idx)
base_acc[i, 0] = sympy.var(vp_sym)
return base_acc
def compute_base_accel(robo, symo, star_inertia, star_beta, grandVp):
"""
Compute base acceleration (internal function).
Note:
grandVp is the output parameter
"""
forced = False
grandVp[0] = Matrix([robo.vdot0 - robo.G, robo.w0])
if robo.is_floating:
symo.flushout()
write_numerical_base_acc(
symo, star_inertia[0], star_beta[0], symmet=True
)
grandVp[0] = get_numerical_base_acc_out(grandVp[0])
grandVp[0][:3, 0] = symo.mat_replace(
grandVp[0][:3, 0], 'VP', 0, forced=forced
)
grandVp[0][3:, 0] = symo.mat_replace(
grandVp[0][3:, 0], 'WP', 0, forced=forced
)
def compute_base_accel_composite(
robo, symo, composite_inertia, composite_beta, grandVp
):
"""
Compute base acceleration when using composite inertia matrix
(internal function).
Note:
grandVp is the output parameter
"""
forced = False
grandVp[0] = Matrix([robo.vdot0 - robo.G, robo.w0])
if robo.is_floating:
symo.flushout()
write_numerical_base_acc(
symo, composite_inertia[0], composite_beta[0], symmet=True
)
grandVp[0] = get_numerical_base_acc_out(grandVp[0])
grandVp[0][:3, 0] = symo.mat_replace(
grandVp[0][:3, 0], 'VP', 0, forced=forced
)
grandVp[0][3:, 0] = symo.mat_replace(
grandVp[0][3:, 0], 'WP', 0, forced=forced
)
def compute_reaction_wrench(
robo, symo, j, grandVp, inertia, beta_wrench, react_wrench
):
"""
Compute reaction wrench (internal function).
Note:
react_wrench is the output parameter
"""
expr = inertia[j] * grandVp[j]
expr = symo.mat_replace(expr, 'DY', j)
wrench = expr - beta_wrench[j]
react_wrench[j][:3, 0] = symo.mat_replace(wrench[:3, 0], 'E', j)
react_wrench[j][3:, 0] = symo.mat_replace(wrench[3:, 0], 'N', j)
def fixed_inverse_dynmodel(robo, symo):
"""
Compute the Inverse Dynamic Model using Newton-Euler algorithm for
tree structure robots with fixed base.
Parameters:
robo: Robot - instance of robot description container
symo: symbolmgr.SymbolManager - instance of symbolic manager
"""
# init external forces
Fex = copy(robo.Fex)
Nex = copy(robo.Nex)
# init transformation
antRj, antPj = compute_rot_trans(robo, symo)
# init velocities and accelerations
w, wdot, vdot, U = compute_vel_acc(robo, symo, antRj, antPj)
# init forces vectors
F = ParamsInit.init_vec(robo)
N = ParamsInit.init_vec(robo)
Fjnt = ParamsInit.init_vec(robo)
Njnt = ParamsInit.init_vec(robo)
# init torque list
torque = ParamsInit.init_scalar(robo)
for j in xrange(1, robo.NL):
compute_dynamic_wrench(robo, symo, j, w, wdot, U, vdot, F, N)
for j in reversed(xrange(1, robo.NL)):
compute_joint_wrench(
robo, symo, j, antRj, antPj, vdot,
F, N, Fjnt, Njnt, Fex, Nex
)
for j in xrange(1, robo.NL):
compute_joint_torque(robo, symo, j, Fjnt, Njnt, torque)
def mobile_inverse_dynmodel(robo, symo):
"""
Compute the Inverse Dynamic Model using Newton-Euler algorithm for
mobile robots.
Parameters:
robo: Robot - instance of robot description container
symo: symbolmgr.SymbolManager - instance of symbol manager
"""
# init external forces
Fex = copy(robo.Fex)
Nex = copy(robo.Nex)
# init transformation
antRj, antPj = compute_rot_trans(robo, symo)
# init velocities and accelerations
w, wdot, vdot, U = compute_vel_acc(robo, symo, antRj, antPj)
# init forces vectors
F = ParamsInit.init_vec(robo)
N = ParamsInit.init_vec(robo)
Fjnt = ParamsInit.init_vec(robo)
Njnt = ParamsInit.init_vec(robo)
# init torque list
torque = ParamsInit.init_scalar(robo)
for j in xrange(0, robo.NL):
compute_dynamic_wrench(robo, symo, j, w, wdot, U, vdot, F, N)
for j in reversed(xrange(0, robo.NL)):
compute_joint_wrench(
robo, symo, j, antRj, antPj, vdot,
F, N, Fjnt, Njnt, Fex, Nex
)
for j in xrange(1, robo.NL):
compute_joint_torque(robo, symo, j, Fjnt, Njnt, torque)
def composite_inverse_dynmodel(robo, symo):
"""
Compute the Inverse Dynamic Model using Composite link Newton-Euler
algorithm for tree structure robots with fixed and floating base.
Parameters:
robo: Robot - instance of robot description container
symo: symbolmgr.SymbolManager - instance of symbol manager
"""
# antecedent angular velocity, projected into jth frame
# j^omega_i
wi = ParamsInit.init_vec(robo)
# j^omega_j
w = ParamsInit.init_w(robo)
# j^a_j -- joint axis in screw form
jaj = ParamsInit.init_vec(robo, 6)
# Twist transform list of Matrices 6x6
grandJ = ParamsInit.init_mat(robo, 6)
jTant = ParamsInit.init_mat(robo, 6)
gamma = ParamsInit.init_vec(robo, 6)
beta = ParamsInit.init_vec(robo, 6)
zeta = ParamsInit.init_vec(robo, 6)
composite_inertia = ParamsInit.init_mat(robo, 6)
composite_beta = ParamsInit.init_vec(robo, 6)
comp_inertia3, comp_ms, comp_mass = ParamsInit.init_jplus(robo)
grandVp = ParamsInit.init_vec(robo, 6)
react_wrench = ParamsInit.init_vec(robo, 6)
torque = ParamsInit.init_scalar(robo)
# init transformation
antRj, antPj = compute_rot_trans(robo, symo)
# first forward recursion
for j in xrange(1, robo.NL):
# compute spatial inertia matrix for use in backward recursion
grandJ[j] = inertia_spatial(robo.J[j], robo.MS[j], robo.M[j])
# set jaj vector
if robo.sigma[j] == 0:
jaj[j] = Matrix([0, 0, 0, 0, 0, 1])
elif robo.sigma[j] == 1:
jaj[j] = Matrix([0, 0, 1, 0, 0, 0])
# compute j^omega_j and j^omega_i
compute_omega(robo, symo, j, antRj, w, wi)
# compute j^S_i : screw transformation matrix
compute_screw_transform(robo, symo, j, antRj, antPj, jTant)
# first forward recursion (still)
for j in xrange(1, robo.NL):
# compute j^gamma_j : gyroscopic acceleration (6x1)
compute_gamma(robo, symo, j, antRj, antPj, w, wi, gamma)
# compute j^beta_j : external+coriolis+centrifugal wrench (6x1)
compute_beta(robo, symo, j, w, beta)
# compute j^zeta_j : relative acceleration (6x1)
compute_zeta(robo, symo, j, gamma, jaj, zeta)
# first backward recursion - initialisation step
for j in reversed(xrange(0, robo.NL)):
if j == 0:
# compute spatial inertia matrix for base
grandJ[j] = inertia_spatial(robo.J[j], robo.MS[j], robo.M[j])
# compute 0^beta_0
compute_beta(robo, symo, j, w, beta)
replace_composite_terms(
symo, grandJ, beta, j, composite_inertia, composite_beta
)
# second backward recursion - compute composite term
for j in reversed(xrange(0, robo.NL)):
replace_composite_terms(
symo, composite_inertia, composite_beta, j,
composite_inertia, composite_beta, replace=True
)
if j == 0:
continue
compute_composite_inertia(
robo, symo, j, antRj, antPj,
comp_inertia3, comp_ms, comp_mass, composite_inertia
)
compute_composite_beta(
robo, symo, j, jTant, zeta, composite_inertia, composite_beta
)
# compute base acceleration : this returns the correct value for
# fixed base and floating base robots
compute_base_accel_composite(
robo, symo, composite_inertia, composite_beta, grandVp
)
# second forward recursion
for j in xrange(1, robo.NL):
# compute j^Vdot_j : link acceleration
compute_link_accel(robo, symo, j, jTant, zeta, grandVp)
# compute j^F_j : reaction wrench
compute_reaction_wrench(
robo, symo, j, grandVp,
composite_inertia, composite_beta, react_wrench
)
# second forward recursion still - to make the output pretty
for j in xrange(1, robo.NL):
# compute torque
compute_torque(robo, symo, j, jaj, react_wrench, torque)
def flexible_inverse_dynmodel(robo, symo):
"""
Compute the Inverse Dynamic Model using Newton-Euler algorithm for
robots with flexible joints (fixed and floating base).
Parameters:
robo: Robot - instance of robot description container
symo: symbolmgr.SymbolManager - instance of symbol manager
"""
# antecedent angular velocity, projected into jth frame
# j^omega_i
wi = ParamsInit.init_vec(robo)
# j^omega_j
w = ParamsInit.init_w(robo)
# j^a_j -- joint axis in screw form
jaj = ParamsInit.init_vec(robo, 6)
# Twist transform list of Matrices 6x6
grandJ = ParamsInit.init_mat(robo, 6)
jTant = ParamsInit.init_mat(robo, 6)
gamma = ParamsInit.init_vec(robo, 6)
beta = ParamsInit.init_vec(robo, 6)
zeta = ParamsInit.init_vec(robo, 6)
h_inv = ParamsInit.init_scalar(robo)
jah = ParamsInit.init_vec(robo, 6) # Jj*aj*Hinv_j
tau = ParamsInit.init_scalar(robo)
star_inertia = ParamsInit.init_mat(robo, 6)
star_beta = ParamsInit.init_vec(robo, 6)
comp_inertia3, comp_ms, comp_mass = ParamsInit.init_jplus(robo)
qddot = ParamsInit.init_scalar(robo)
grandVp = ParamsInit.init_vec(robo, 6)
react_wrench = ParamsInit.init_vec(robo, 6)
torque = ParamsInit.init_scalar(robo)
# flag variables
use_composite = True
# init transformation
antRj, antPj = compute_rot_trans(robo, symo)
# first forward recursion
for j in xrange(1, robo.NL):
# compute spatial inertia matrix for use in backward recursion
grandJ[j] = inertia_spatial(robo.J[j], robo.MS[j], robo.M[j])
# set jaj vector
if robo.sigma[j] == 0:
jaj[j] = Matrix([0, 0, 0, 0, 0, 1])
elif robo.sigma[j] == 1:
jaj[j] = Matrix([0, 0, 1, 0, 0, 0])
# compute j^omega_j and j^omega_i
compute_omega(robo, symo, j, antRj, w, wi)
# compute j^S_i : screw transformation matrix
compute_screw_transform(robo, symo, j, antRj, antPj, jTant)
# compute j^gamma_j : gyroscopic acceleration (6x1)
compute_gamma(robo, symo, j, antRj, antPj, w, wi, gamma)
# compute j^beta_j : external+coriolis+centrifugal wrench (6x1)
compute_beta(robo, symo, j, w, beta)
if not robo.eta[j]:
# when rigid
# compute j^zeta_j : relative acceleration (6x1)
compute_zeta(robo, symo, j, gamma, jaj, zeta)
# decide first link
first_link = 0 if robo.is_floating else 1
# first backward recursion - initialisation step
for j in reversed(xrange(first_link, robo.NL)):
if j == first_link and robo.is_floating:
# compute spatial inertia matrix for base
grandJ[j] = inertia_spatial(robo.J[j], robo.MS[j], robo.M[j])
# compute 0^beta_0
compute_beta(robo, symo, j, w, beta)
replace_star_terms(
symo, grandJ, beta, j, star_inertia, star_beta
)
# second backward recursion - compute star terms
for j in reversed(xrange(first_link, robo.NL)):
replace_star_terms(
symo, star_inertia, star_beta, j,
star_inertia, star_beta
)
if j == first_link:
continue
# set composite flag to false when flexible
if robo.eta[j]: use_composite = False
if use_composite:
# use composite
compute_composite_inertia(
robo, symo, j, antRj, antPj,
comp_inertia3, comp_ms, comp_mass, star_inertia
)
compute_composite_beta(
robo, symo, j, jTant, zeta, star_inertia, star_beta
)
else:
# use star
if robo.eta[j]:
compute_tau(
robo, symo, j, jaj, star_beta, tau, flex=True
)
compute_star_terms(
robo, symo, j, jaj, jTant, gamma, tau,
h_inv, jah, star_inertia, star_beta, flex=True
)
# compute base acceleration : this returns the correct value for
# fixed base and floating base robots
compute_base_accel(
robo, symo, star_inertia, star_beta, grandVp
)
# second forward recursion
for j in xrange(1, robo.NL):
if robo.eta[j]:
# when flexible
# compute qddot_j : joint acceleration
compute_joint_accel(
robo, symo, j, jaj, jTant, h_inv, jah, gamma,
tau, grandVp, star_beta, star_inertia, qddot
)
# compute j^zeta_j : relative acceleration (6x1)
compute_zeta(robo, symo, j, gamma, jaj, zeta, qddot)
# compute j^Vdot_j : link acceleration
compute_link_accel(robo, symo, j, jTant, zeta, grandVp)
# compute j^F_j : reaction wrench
compute_reaction_wrench(
robo, symo, j, grandVp,
star_inertia, star_beta, react_wrench
)
if not robo.eta[j]:
# when rigid compute torque
compute_torque(robo, symo, j, jaj, react_wrench, torque)
def direct_dynmodel(robo, symo):
"""
Compute the Direct Dynamic Model using Newton-Euler algorithm for
robots with floating and fixed base.
Parameters:
robo: Robot - instance of robot description container
symo: symbolmgr.SymbolManager - instance of symbol manager
"""
# antecedent angular velocity, projected into jth frame
# j^omega_i
wi = ParamsInit.init_vec(robo)
# j^omega_j
w = ParamsInit.init_w(robo)
# j^a_j -- joint axis in screw form
jaj = ParamsInit.init_vec(robo, 6)
# Twist transform list of Matrices 6x6
grandJ = ParamsInit.init_mat(robo, 6)
jTant = ParamsInit.init_mat(robo, 6)
gamma = ParamsInit.init_vec(robo, 6)
beta = ParamsInit.init_vec(robo, 6)
zeta = ParamsInit.init_vec(robo, 6)
h_inv = ParamsInit.init_scalar(robo)
jah = ParamsInit.init_vec(robo, 6) # Jj*aj*Hinv_j
tau = ParamsInit.init_scalar(robo)
star_inertia = ParamsInit.init_mat(robo, 6)
star_beta = ParamsInit.init_vec(robo, 6)
qddot = ParamsInit.init_scalar(robo)
grandVp = ParamsInit.init_vec(robo, 6)
react_wrench = ParamsInit.init_vec(robo, 6)
torque = ParamsInit.init_scalar(robo)
# init transformation
antRj, antPj = compute_rot_trans(robo, symo)
# first forward recursion
for j in xrange(1, robo.NL):
# compute spatial inertia matrix for use in backward recursion
grandJ[j] = inertia_spatial(robo.J[j], robo.MS[j], robo.M[j])
# set jaj vector
if robo.sigma[j] == 0:
jaj[j] = Matrix([0, 0, 0, 0, 0, 1])
elif robo.sigma[j] == 1:
jaj[j] = Matrix([0, 0, 1, 0, 0, 0])
# compute j^omega_j and j^omega_i
compute_omega(robo, symo, j, antRj, w, wi)
# compute j^S_i : screw transformation matrix
compute_screw_transform(robo, symo, j, antRj, antPj, jTant)
# compute j^gamma_j : gyroscopic acceleration (6x1)
compute_gamma(robo, symo, j, antRj, antPj, w, wi, gamma)
# compute j^beta_j : external+coriolis+centrifugal wrench (6x1)
compute_beta(robo, symo, j, w, beta)
# decide first link
first_link = 0 if robo.is_floating else 1
# first backward recursion - initialisation step
for j in reversed(xrange(first_link, robo.NL)):
if j == first_link and robo.is_floating:
# compute spatial inertia matrix for base
grandJ[j] = inertia_spatial(robo.J[j], robo.MS[j], robo.M[j])
# compute 0^beta_0
compute_beta(robo, symo, j, w, beta)
replace_star_terms(
symo, grandJ, beta, j, star_inertia, star_beta
)
# second backward recursion - compute star terms
for j in reversed(xrange(first_link, robo.NL)):
replace_star_terms(
symo, star_inertia, star_beta, j,
star_inertia, star_beta, replace=True
)
if j == 0:
continue
compute_tau(robo, symo, j, jaj, star_beta, tau)
compute_star_terms(
robo, symo, j, jaj, jTant, gamma, tau,
h_inv, jah, star_inertia, star_beta
)
if j == first_link:
continue
# compute base acceleration : this returns the correct value for
# fixed base and floating base robots
compute_base_accel(
robo, symo, star_inertia, star_beta, grandVp
)
# second forward recursion
for j in xrange(1, robo.NL):
# compute qddot_j : joint acceleration
compute_joint_accel(
robo, symo, j, jaj, jTant, h_inv, jah, gamma,
tau, grandVp, star_beta, star_inertia, qddot
)
# compute j^zeta_j : relative acceleration (6x1)
compute_zeta(robo, symo, j, gamma, jaj, zeta, qddot)
# compute j^Vdot_j : link acceleration
compute_link_accel(robo, symo, j, jTant, zeta, grandVp)
# compute j^F_j : reaction wrench
compute_reaction_wrench(
robo, symo, j, grandVp,
star_inertia, star_beta, react_wrench
)
|
|
#!/usr/bin/env python3
# jsontotable5.py
# Written December, 2015. Rewritten June 2017.
# Ted Underwood.
# This translates the jsons produced by
# David Bamman's BookNLP pipeline into a simpler text file.
# As the telltale 5 implies, this is the last in a series
# of versions. This version is designed to work only with
# HathiTrust data. Earlier versions also used Chicago.
# It unpacks each json into a string of words
# separated by spaces, folding grammatical roles together except
# that passive verbs get prefixed by "was-" and dialogue
# gets prefixed by "said-". My observation is that in practice
# mod and poss are pretty legible from the part of speech.
# usage python jsontotable5.py -folder infolder outfile
# or
# python jsontotable5.py -jsonl infile outfile
#
# the first expects separate volume jsons in a folder
# the second expects a JSON-lines file with one volume
# json per line
import pandas as pd
import ujson, csv, sys, os
# date functions taken from https://github.com/tedunderwood/library/blob/master/SonicScrewdriver.py
def infer_date(datetype, firstdate, seconddate, textdate):
'''Receives a date type and three dates, as strings, with no guarantee that any
of the dates will be numeric. The logic of the data here is defined by
MARC standards for controlfield 008:
http://www.loc.gov/marc/bibliographic/concise/bd008a.html
Returns a date that represents either a shaky consensus
about the earliest attested date for this item, or 0, indicating no
consensus.
'''
try:
intdate = int(firstdate)
except:
# No readable date
if firstdate.endswith('uu'):
# Two missing places is too many.
intdate = 0
elif firstdate.endswith('u'):
# but one is okay
try:
decade = int(firstdate[0:3])
intdate = decade * 10
except:
# something's weird. fail.
intdate = 0
else:
intdate = 0
if intdate == 0:
try:
intdate = int(textdate)
except:
intdate = 0
try:
intsecond = int(seconddate)
except:
intsecond = 0
if intsecond - intdate > 80 and intsecond < 2100:
# A gap of more than eighty years is too much.
# This is usually an estimated date that could be anywhere within
# the nineteenth century.
# note that we specify intsecond < 2100 because otherwise things
# dated 9999 throw an error
intdate = 0
if datetype == 't' and intsecond > 0 and intsecond < intdate:
intdate = intsecond
# This is a case where we have both a publication date and
# a copyright date. Accept the copyright date. We're going
# for 'the earliest attested date for the item.'
if intdate < 1000 and intsecond > 1700 and intsecond < 2100:
intdate = intsecond
return intdate
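# A minimal sketch (illustrative values, not taken from the metadata files) of
# how infer_date resolves partially-known MARC dates:
assert infer_date('s', '184u', '', '') == 1840      # one missing digit -> decade * 10
assert infer_date('s', '18uu', '', '1850') == 1850  # unusable startdate -> fall back to textdate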
def date_row(row):
datetype = row["datetype"]
firstdate = row["startdate"]
secondate = row["enddate"]
if "imprintdate" in row:
textdate = row["imprintdate"]
else:
textdate = row["textdate"]
intdate = infer_date(datetype, firstdate, secondate, textdate)
return intdate
##bio_df = pd.read_csv('./biographies/metadata/hathi_ic_biog.tsv', sep='\t')
##
### add infer_date to the df
##bio_dict = bio_df.to_dict(orient='records')
##for row in bio_dict:
## row['inferreddate'] = date_row(row)
##
##new_bioindex = pd.DataFrame()
##new_bioindex = new_bioindex.from_records(bio_dict)
##
##new_bioindex.to_csv('hathi_ic_date.tsv', index=False, sep='\t')
##
def add_dicts_to_list(alistofdicts, alist, prefix):
global variants
for word in alistofdicts:
wordval = word["w"].lower()
if wordval in variants:
wordval = variants[wordval]
if len(prefix) > 1:
wordval = prefix + '-' + wordval
alist.append(wordval)
return alist
outlist = list()
counter = 0
usedalready = set()
# GLOBAL VARIABLES below
# Python doesn't make us declare globals, but I wish it did.
unknowndate = 0 # GLOBAL counter for number of characters without dates
thingsunknown = set()
id2date = dict() # GLOBAL dict translating docids to dates
# One of the main functions of this transformation is to pair each
# character with a publication date. That will allow us to easily
# select subsets of characters
with open('../metadata/pre23biometa.tsv', encoding = 'utf-8') as f:
reader = csv.DictReader(f, delimiter = '\t')
for row in reader:
# there are two different forms of id volumes can have,
# because 19c stories are often multi-volume
# we're going to treat them all as valid
#docid = row['docid']
#recordid = row['recordid']
htid = row['HTid']
if row['inferreddate'] != '':
inferreddate = int(row['inferreddate'].split('.')[0])
id2date[htid] = inferreddate
#id2date[docid] = inferreddate
#id2date[recordid] = inferreddate
variants = dict() # GLOBAL dictionary translating US -> UK spelling.
# We don't want classification accuracy to be thrown off by deviant American spellings;
# the VariantSpellings file will normalise them all to a standard centred on the UK.
with open('../lexicons/VariantSpellings.txt', encoding = 'utf-8') as f:
for line in f:
fields = line.strip().split('\t')
if len(fields) < 2:
continue
else:
variants[fields[0]] = fields[1]
stopwords = set() # GLOBAL list of stopwords only used to filter dialogue
stoppath = '../lexicons/stopwords.txt'
with open(stoppath, encoding = 'utf-8') as f:
for line in f:
stopwords.add(line.lower().strip())
morestops = {'just', 'right', 'mr', 'mr.', "n't", "ca"}
for stop in morestops:
stopwords.add(stop)
def append_characters(jsonstring, outfile, expectedid):
''' Given a single json string produced by BookNLP, this extracts the
characters, and prints them in simpler format to outfile.
jsonstring: what we will parse
outfile: where it gets written
expectedid: a docid implied by the filename where we got jsonstring
'''
global id2date, unknowndate, stopwords, thingsunknown
jsonobject = ujson.loads(jsonstring)
storyid = jsonobject["id"]
if storyid in id2date:
date = id2date[storyid]
docid = storyid
elif expectedid in id2date:
date = id2date[expectedid]
docid = expectedid
else:
unknowndate += 1
thingsunknown.add(expectedid)
date = -1
return 0
# I'm not writing books I can't date
characterlist = jsonobject["characters"]
usedalready = set()
# just to confirm that there are no duplicate names
# within a single book
outlist = [] # gather lines to write to file
writtenchars = 0 # num actually written may not == num in characterlist
for character in characterlist:
# what is this character's name?
# take the most common name
names = character["names"]
maxcount = 0
thename = "nobody"
for name in names:
if name["c"] > maxcount:
maxcount = name["c"]
thename = name["n"].replace(" ", "")
namestring = thename
while namestring in usedalready:
namestring = namestring + "*"
usedalready.add(namestring)
gender = character["g"]
if gender == 1:
genderstring = "f"
elif gender == 2:
genderstring = "m"
else:
genderstring = 'u'
thesewords = [] # gathering all words for this character
thesewords = add_dicts_to_list(character["agent"], thesewords, '')
thesewords = add_dicts_to_list(character["poss"], thesewords, '')
thesewords = add_dicts_to_list(character["mod"], thesewords, '')
thesewords = add_dicts_to_list(character["patient"], thesewords, 'was')
for spoken in character["speaking"]:
wlist = spoken['w'].lower().split()
for w in wlist:
if w in stopwords:
continue
if len(w) > 0 and w[0].isalpha():
word = 'said-' + w
thesewords.append(word)
if len(thesewords) > 10:
# we only write characters that have more than ten
# words associated
writtenchars += 1
textstring = ' '.join(thesewords)
outline = '\t'.join([docid, namestring, docid + '|' + namestring, genderstring, str(date), textstring])
outlist.append(outline)
    with open(outfile, mode="a", encoding="utf-8") as out:
        for line in outlist:
            out.write(line + '\n')
return writtenchars
## MAIN EXECUTION BEGINS HERE
arguments = sys.argv
datasource = arguments[2]
outfile = arguments[3]
command = arguments[1]
# Whichever command / data source is being used,
# we need to create a header for the outfile if
# one does not yet exist.
if os.path.isfile(outfile):
print('Hey, you know ' + outfile)
print('already exists, right? Just confirming.')
print('I will append to it.')
else:
with open(outfile, mode = 'w', encoding = 'utf-8') as f:
f.write('docid\tcharname\tcharid\tgender\tpubdate\twords\n')
        # tab-separated, six fields
ctr = 0
totalchars = 0
if command == '-folder':
assert os.path.isdir(datasource)
# otherwise we have an error
sourcefiles = [x for x in os.listdir(datasource) if x.endswith('.book')]
# the data files produced by BookNLP all end with '.book'
for sf in sourcefiles:
path = os.path.join(datasource, sf)
with open(path, encoding = 'utf-8') as f:
jsonstring = f.read()
expectedid = sf.replace('.book', '')
totalchars += append_characters(jsonstring, outfile, expectedid)
ctr += 1
if ctr % 100 == 1:
print(ctr)
elif command == '-jsonl':
assert os.path.isfile(datasource)
# otherwise we have an error
with open(datasource, encoding = 'utf-8') as f:
for jsonstring in f:
totalchars += append_characters(jsonstring, outfile, 'dummystring')
ctr += 1
if ctr % 100 == 1:
print(ctr)
else:
print("Usage for jsontotable5 is either:")
print("python jsontotable5.py -folder infolder outfile")
print("or")
print("python jsontotable5.py -jsonl infile outfile")
## DONE.
print('Unknown dates: ' + str(unknowndate))
|
|
import logging
import re
from google.appengine.api import mail
from twilio import twiml
import webapp2
# Update these values as appropriate
# These are the different numbers that the caller can choose from.
# The array index corresponds to the digit that the caller enters on
# their keypad. If there is only one number in the list, the
# OPTION_MESSAGE is not played and the caller is directly connected
# to the only outgoing number.
OUTGOING_NUMBERS = ["sip:*[email protected]", "+18005558355"]
# This is the message that a caller will hear.
OPTION_MESSAGE = "To use SIP, press 0. To use D.I.D., press 1. Or, please hold a moment."
# A notification email is sent to the configured email address(es). If you
# want to send to more than one email address, separate the addresses
# with a comma and a space, like this: "[email protected], [email protected]"
VOICEMAIL_EMAIL_NOTIFICATION = "[email protected]"
# Source email address for the voicemail notification email.
VOICEMAIL_EMAIL_NOTIFICATION_SENDER = "[email protected]"
# constants
SIP_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
class Utility():
@staticmethod
def is_sip(phone):
"""Determine if the phone number is a SIP number.
Args:
phone: The phone number to check.
Returns:
            A regex match object if the phone number is a SIP address, None otherwise.
"""
return SIP_REGEX.match(phone)
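# A minimal sanity sketch (illustrative values only, reusing the defaults above):
# the regex treats anything shaped like user@host.domain as SIP, while a plain
# E.164 number falls through to an ordinary dialed number.
assert Utility.is_sip("sip:*[email protected]") is not None
assert Utility.is_sip("+18005558355") is None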
class IncomingCall(webapp2.RequestHandler):
def post(self):
r = twiml.Response()
logging.debug('IncomingCall, CallSid[{}],AccountSid[{}],From[{}],To[{}],CallStatus[{}],ApiVersion[{}],Direction[{}],ForwardedFrom[{}],FromCity[{}],FromState[{}],FromZip[{}],FromCountry[{}],ToCity[{}],ToState[{}],ToZip[{}],ToCountry[{}]'.format(
self.request.get('CallSid'),
self.request.get('AccountSid'),
self.request.get('From'),
self.request.get('To'),
self.request.get('CallStatus'),
self.request.get('ApiVersion'),
self.request.get('Direction'),
self.request.get('ForwardedFrom'),
self.request.get('FromCity'),
self.request.get('FromState'),
self.request.get('FromZip'),
self.request.get('FromCountry'),
self.request.get('ToCity'),
self.request.get('ToState'),
self.request.get('ToZip'),
self.request.get('ToCountry')))
to = self.request.get('To')
logging.debug('IncomingCall, to: %s', to)
if to:
if OUTGOING_NUMBERS is None:
r.say('There is no configured treatment for the called number: {number}.'.format(number=to))
else:
# determine if there are enough numbers to provide option treatment
if (len(OUTGOING_NUMBERS) > 1):
# option treatment is the only current treatment
with r.gather(action=webapp2.uri_for('choice-selection'), timeout=30, numDigits=1) as g:
g.say(OPTION_MESSAGE)
dial = r.dial(action=webapp2.uri_for('call-end'), timeout=30)
if (Utility.is_sip(OUTGOING_NUMBERS[0])):
dial.sip(OUTGOING_NUMBERS[0])
else:
dial.number(OUTGOING_NUMBERS[0])
else:
r.say('There was no indicated called number.')
self.response.headers['Content-Type'] = 'text/xml'
self.response.write(str(r))
class ChoiceSelection(webapp2.RequestHandler):
def post(self):
r = twiml.Response()
        logging.debug('ChoiceSelection, CallSid[{}],AccountSid[{}],From[{}],To[{}],CallStatus[{}],ApiVersion[{}],Direction[{}],ForwardedFrom[{}],FromCity[{}],FromState[{}],FromZip[{}],FromCountry[{}],ToCity[{}],ToState[{}],ToZip[{}],ToCountry[{}],Digits[{}]'.format(
self.request.get('CallSid'),
self.request.get('AccountSid'),
self.request.get('From'),
self.request.get('To'),
self.request.get('CallStatus'),
self.request.get('ApiVersion'),
self.request.get('Direction'),
self.request.get('ForwardedFrom'),
self.request.get('FromCity'),
self.request.get('FromState'),
self.request.get('FromZip'),
self.request.get('FromCountry'),
self.request.get('ToCity'),
self.request.get('ToState'),
self.request.get('ToZip'),
self.request.get('ToCountry'),
self.request.get('Digits')))
digits = int(self.request.get('Digits'))
logging.debug('ChoiceSelection, digits: %s', digits)
dial = r.dial(action=webapp2.uri_for('call-end'), timeout=30)
        if 0 <= digits < len(OUTGOING_NUMBERS) and OUTGOING_NUMBERS[digits]:
if (Utility.is_sip(OUTGOING_NUMBERS[digits])):
dial.sip(OUTGOING_NUMBERS[digits])
else:
dial.number(OUTGOING_NUMBERS[digits])
else:
# no phone number for that choice, send to default
if (Utility.is_sip(OUTGOING_NUMBERS[0])):
dial.sip(OUTGOING_NUMBERS[0])
else:
dial.number(OUTGOING_NUMBERS[0])
self.response.headers['Content-Type'] = 'text/xml'
self.response.write(str(r))
class CallEnd(webapp2.RequestHandler):
def post(self):
r = twiml.Response()
logging.debug('CallEnd, CallSid[{}],AccountSid[{}],From[{}],To[{}],CallStatus[{}],ApiVersion[{}],Direction[{}],ForwardedFrom[{}],FromCity[{}],FromState[{}],FromZip[{}],FromCountry[{}],ToCity[{}],ToState[{}],ToZip[{}],ToCountry[{}],DialCallStatus[{}],DialCallSid[{}],DialCallDuration[{}]'.format(
self.request.get('CallSid'),
self.request.get('AccountSid'),
self.request.get('From'),
self.request.get('To'),
self.request.get('CallStatus'),
self.request.get('ApiVersion'),
self.request.get('Direction'),
self.request.get('ForwardedFrom'),
self.request.get('FromCity'),
self.request.get('FromState'),
self.request.get('FromZip'),
self.request.get('FromCountry'),
self.request.get('ToCity'),
self.request.get('ToState'),
self.request.get('ToZip'),
self.request.get('ToCountry'),
self.request.get('DialCallStatus'),
self.request.get('DialCallSid'),
self.request.get('DialCallDuration')))
dial_call_status = self.request.get('DialCallStatus')
if dial_call_status != 'completed':
r.say('You have reached my telco. Please leave a message after the tone.')
            r.record(action=webapp2.uri_for('voicemail'))
r.say('I did not receive a recording')
else:
# TODO: put call record
pass
self.response.headers['Content-Type'] = 'text/xml'
self.response.write(str(r))
class Voicemail(webapp2.RequestHandler):
def post(self):
r = twiml.Response()
        logging.debug('Voicemail, CallSid[{}],AccountSid[{}],From[{}],To[{}],CallStatus[{}],ApiVersion[{}],Direction[{}],ForwardedFrom[{}],FromCity[{}],FromState[{}],FromZip[{}],FromCountry[{}],ToCity[{}],ToState[{}],ToZip[{}],ToCountry[{}],RecordingUrl[{}],RecordingDuration[{}],Digits[{}]'.format(
self.request.get('CallSid'),
self.request.get('AccountSid'),
self.request.get('From'),
self.request.get('To'),
self.request.get('CallStatus'),
self.request.get('ApiVersion'),
self.request.get('Direction'),
self.request.get('ForwardedFrom'),
self.request.get('FromCity'),
self.request.get('FromState'),
self.request.get('FromZip'),
self.request.get('FromCountry'),
self.request.get('ToCity'),
self.request.get('ToState'),
self.request.get('ToZip'),
self.request.get('ToCountry'),
self.request.get('RecordingUrl'),
self.request.get('RecordingDuration'),
self.request.get('Digits')))
# send notification
        if VOICEMAIL_EMAIL_NOTIFICATION:
mail.send_mail(sender="myTelco <{}>".format(VOICEMAIL_EMAIL_NOTIFICATION_SENDER),
to=VOICEMAIL_EMAIL_NOTIFICATION,
subject="myTelco New Voicemail",
body="""
You have a new voicemail message from {}:
{}.mp3
""".format(self.request.get('From'),
self.request.get('RecordingUrl')))
self.response.headers['Content-Type'] = 'text/xml'
self.response.write(str(r))
application = webapp2.WSGIApplication([
webapp2.Route('/twiml/incomingCall', handler=IncomingCall, name='incoming-call'),
webapp2.Route('/twiml/choiceSelection', handler=ChoiceSelection, name='choice-selection'),
webapp2.Route('/twiml/callEnd', handler=CallEnd, name='call-end'),
webapp2.Route('/twiml/voicemail', handler=Voicemail, name='voicemail'),
], debug=True)
|
|
import Bybop_NetworkAL
import struct
import threading
class NetworkStatus:
OK = 0
ERROR = 1
TIMEOUT = 2
class Network(object):
"""
Simple implementation of the ARNetwork protocol.
    This implementation does not support internal fifos. If multiple threads
    try to send data on the same buffer at the same time, the actual send
    order is undefined.
    The 'send_data' call is blocking to allow a simpler implementation, but it
    does not busy-wait, so it can be called from a thread without hogging the
    GIL in Python implementations that have one.
    This implementation uses a listener to warn the application of newly
    received data. The listener should implement a 'data_received' function
accepting the following arguments:
- buf : The buffer on which this data was retrieved
- recv_data : The actual data, as a packed string (use the struct module
to unpack)
And a 'did_disconnect' function, without arguments, which will be called
if the product does not send any data on the network (probably because we
lost the network link, or because the product has run out of battery)
"""
def __init__(self, ip, c2d_port, d2c_port,
send_buffers, recv_buffers, listener):
"""
Create a new instance of ARNetwork.
The instance will manage internally its ARNetworkAL backend.
Arguments:
- ip (string) : The device address
        - c2d_port : The remote reading port
- d2c_port : The local reading port
- send_buffers : List of buffers which should accept data from the
application (i.e. which will be given to the send_data
function)
- recv_buffers : List of buffers which should accept incoming data
"""
self._netal = Bybop_NetworkAL.NetworkAL(ip, c2d_port, d2c_port, self)
self._listener = listener
        # The application writes to these (send to network)
self._send_buffers = list(send_buffers)
# The application reads from these (read from network)
self._recv_buffers = list(recv_buffers)
self._send_seq = {}
self._recv_seq = {}
self._ack_events = {}
self._ack_seq = {}
self._buf_locks = {}
self._ack_events_lock = threading.Lock()
for sndb in self._send_buffers:
self._send_seq[sndb] = 0
self._buf_locks[sndb] = threading.Lock()
self._ack_events[sndb] = threading.Event()
self._ack_seq[sndb] = 0
for rcvb in self._recv_buffers:
self._recv_seq[rcvb] = 255
def stop(self):
"""
Stop the ARNetwork instance.
This also stops the ARNetworkAL backend.
This function has no effect on a stopped instance.
"""
self._netal.stop()
def restart(self):
"""
Restart the ARNetwork instance.
This also restarts the ARNetworkAL backend.
This function has no effect on a started instance.
"""
self._netal.start()
def _get_seq(self, buf):
if buf not in self._send_seq:
self._send_seq[buf] = 0
ret = self._send_seq[buf]
self._send_seq[buf] += 1
self._send_seq[buf] %= 256
return ret
def send_data(self, buf, data, type, timeout=0.15, tries=5):
"""
Send some data over the network, and return an ARNetworkStatus.
The keyword arguments are only used for acknowledged data.
For other data, the timeout is irrelevant, and only one try will be
made.
For acknowledged data, this function will block until either the
acknowledge is received, or all the tries have been consumed in
timeouts. For other data, this function returns almost immediately.
Arguments:
- buf : The target buffer for the data (must be part of the
send_buffers list given to __init__)
- data : The data to send
- type : The type of the data (needs ack or not)
Keyword arguments:
- timeout : Timeout in floating point number of seconds, or None if no
timeout (default 0.15)
- tries : Total number of tries before considering a data as lost
(default 5)
"""
if buf not in self._send_buffers:
return NetworkStatus.ERROR
seqnum = self._get_seq(buf)
needack = type == Bybop_NetworkAL.DataType.DATA_WITH_ACK
status = NetworkStatus.TIMEOUT
with self._buf_locks[buf]:
# If we need an ack, clear any pending ack event,
# and set the requested seqnum
if needack:
with self._ack_events_lock:
self._ack_events[buf].clear()
self._ack_seq[buf] = seqnum
            # Try 'tries' times in case of timeouts
while tries > 0 and status == NetworkStatus.TIMEOUT:
tries -= 1
status = NetworkStatus.OK if self._netal.send_data(
type, buf, seqnum, data) else NetworkStatus.ERROR
# We only set TIMEOUT status for acknowledged data
if needack and status == NetworkStatus.OK:
# Data with ack properly sent
status = NetworkStatus.OK if self._ack_events[buf].wait(
timeout) else NetworkStatus.TIMEOUT
return status
def _send_ack(self, buf, seq):
answer = struct.pack('<B', seq)
abuf = buf + 128
self._netal.send_data(Bybop_NetworkAL.DataType.ACK,
abuf, self._get_seq(abuf), answer)
def _send_pong(self, data):
self._netal.send_data(Bybop_NetworkAL.DataType.DATA,
1, self._get_seq(1), data)
def _should_accept(self, buf, seq):
if buf not in self._recv_seq:
return False
prev = self._recv_seq[buf]
diff = seq - prev
ok = diff >= 0 or diff <= -10
if ok:
self._recv_seq[buf] = seq
return ok
def data_received(self, type, buf, seq, recv_data):
"""
Implementation of the NetworkAL listener.
        This function should not be called directly by application code !
"""
if buf == 0: # This is a ping, send a pong !
self._send_pong(recv_data)
if type == Bybop_NetworkAL.DataType.ACK:
ackbuf = buf - 128
if ackbuf in self._send_buffers:
seq = struct.unpack('<B', recv_data)[0]
with self._ack_events_lock:
if seq == self._ack_seq[ackbuf]:
self._ack_events[ackbuf].set()
elif type == Bybop_NetworkAL.DataType.DATA:
self._process_data(buf, seq, recv_data)
elif type == Bybop_NetworkAL.DataType.DATA_LOW_LATENCY:
self._process_data(buf, seq, recv_data)
elif type == Bybop_NetworkAL.DataType.DATA_WITH_ACK:
self._process_data(buf, seq, recv_data)
# And send ack !
self._send_ack(buf, seq)
def _process_data(self, buf, seq, recv_data):
if self._should_accept(buf, seq):
self._listener.data_received(buf, recv_data)
def did_disconnect(self):
"""
Implementation of the NetworkAL listener.
This function should not be called directly by application code !
"""
self._listener.did_disconnect()
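# A minimal sketch (hypothetical class, not part of this module) of the listener
# interface described in the Network docstring: one callback per received
# buffer, plus a disconnect notification.
class ExampleListener(object):
    def data_received(self, buf, recv_data):
        # buf is the ARNetwork buffer id, recv_data the raw packed payload
        print('buffer %d: %d bytes received' % (buf, len(recv_data)))
    def did_disconnect(self):
        print('device stopped responding')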
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating random numbers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_random_ops import *
# pylint: enable=wildcard-import
def _ShapeTensor(shape):
"""Convert to an int32 or int64 tensor, defaulting to int32 if empty."""
if isinstance(shape, (tuple, list)) and not shape:
dtype = dtypes.int32
else:
dtype = None
return ops.convert_to_tensor(shape, dtype=dtype, name="shape")
# pylint: disable=protected-access
def random_normal(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a normal distribution.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
@{tf.set_random_seed}
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random normal values.
"""
with ops.name_scope(name, "random_normal", [shape, mean, stddev]) as name:
shape_tensor = _ShapeTensor(shape)
mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
seed1, seed2 = random_seed.get_seed(seed)
rnd = gen_random_ops._random_standard_normal(
shape_tensor, dtype, seed=seed1, seed2=seed2)
mul = rnd * stddev_tensor
value = math_ops.add(mul, mean_tensor, name=name)
return value
ops.NotDifferentiable("RandomStandardNormal")
def parameterized_truncated_normal(shape,
means=0.0,
stddevs=1.0,
minvals=-2.0,
maxvals=2.0,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
means: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddevs: A 0-D Tensor or Python value of type `dtype`. The standard
deviation of the truncated normal distribution.
minvals: A 0-D Tensor or Python value of type `dtype`. The minimum value of
the truncated normal distribution.
maxvals: A 0-D Tensor or Python value of type `dtype`. The maximum value of
the truncated normal distribution.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
@{tf.set_random_seed}
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.name_scope(name, "parameterized_truncated_normal",
[shape, means, stddevs, minvals, maxvals]) as name:
shape_tensor = _ShapeTensor(shape)
means_tensor = ops.convert_to_tensor(means, dtype=dtype, name="means")
stddevs_tensor = ops.convert_to_tensor(stddevs, dtype=dtype, name="stddevs")
minvals_tensor = ops.convert_to_tensor(minvals, dtype=dtype, name="minvals")
maxvals_tensor = ops.convert_to_tensor(maxvals, dtype=dtype, name="maxvals")
seed1, seed2 = random_seed.get_seed(seed)
rnd = gen_random_ops._parameterized_truncated_normal(
shape_tensor,
means_tensor,
stddevs_tensor,
minvals_tensor,
maxvals_tensor,
seed=seed1,
seed2=seed2)
return rnd
def truncated_normal(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the truncated normal distribution.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
@{tf.set_random_seed}
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.name_scope(name, "truncated_normal", [shape, mean, stddev]) as name:
shape_tensor = _ShapeTensor(shape)
mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
seed1, seed2 = random_seed.get_seed(seed)
rnd = gen_random_ops._truncated_normal(
shape_tensor, dtype, seed=seed1, seed2=seed2)
mul = rnd * stddev_tensor
value = math_ops.add(mul, mean_tensor, name=name)
return value
ops.NotDifferentiable("ParameterizedTruncatedNormal")
ops.NotDifferentiable("TruncatedNormal")
def random_uniform(shape,
minval=0,
maxval=None,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a uniform distribution.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range, while
the upper bound `maxval` is excluded.
For floats, the default range is `[0, 1)`. For ints, at least `maxval` must
be specified explicitly.
In the integer case, the random integers are slightly biased unless
`maxval - minval` is an exact power of two. The bias is small for values of
`maxval - minval` significantly smaller than the range of the output (either
`2**32` or `2**64`).
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the
range of random values to generate. Defaults to 0.
maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on
the range of random values to generate. Defaults to 1 if `dtype` is
floating point.
dtype: The type of the output: 'float16`, `float32`, `float64`, `int32`,
or `int64`.
seed: A Python integer. Used to create a random seed for the distribution.
See @{tf.set_random_seed}
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random uniform values.
Raises:
ValueError: If `dtype` is integral and `maxval` is not specified.
"""
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.int64):
raise ValueError("Invalid dtype %r" % dtype)
if maxval is None:
if dtype.is_integer:
raise ValueError("Must specify maxval for integer dtype %r" % dtype)
maxval = 1
with ops.name_scope(name, "random_uniform", [shape, minval, maxval]) as name:
shape = _ShapeTensor(shape)
minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
seed1, seed2 = random_seed.get_seed(seed)
if dtype.is_integer:
return gen_random_ops._random_uniform_int(
shape, minval, maxval, seed=seed1, seed2=seed2, name=name)
else:
rnd = gen_random_ops._random_uniform(
shape, dtype, seed=seed1, seed2=seed2)
return math_ops.add(rnd * (maxval - minval), minval, name=name)
ops.NotDifferentiable("RandomUniform")
def random_shuffle(value, seed=None, name=None):
"""Randomly shuffles a tensor along its first dimension.
The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
to one and only one `output[i]`. For example, a mapping that might occur for a
3x2 tensor is:
```python
[[1, 2], [[5, 6],
[3, 4], ==> [1, 2],
[5, 6]] [3, 4]]
```
Args:
value: A Tensor to be shuffled.
seed: A Python integer. Used to create a random seed for the distribution.
See
@{tf.set_random_seed}
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of same shape and type as `value`, shuffled along its first
dimension.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_random_ops._random_shuffle(
value, seed=seed1, seed2=seed2, name=name)
def random_crop(value, size, seed=None, name=None):
"""Randomly crops a tensor to a given size.
Slices a shape `size` portion out of `value` at a uniformly chosen offset.
Requires `value.shape >= size`.
If a dimension should not be cropped, pass the full size of that dimension.
For example, RGB images can be cropped with
`size = [crop_height, crop_width, 3]`.
Args:
value: Input tensor to crop.
size: 1-D tensor with size the rank of `value`.
seed: Python integer. Used to create a random seed. See
@{tf.set_random_seed}
for behavior.
name: A name for this operation (optional).
Returns:
A cropped tensor of the same rank as `value` and shape `size`.
"""
# TODO(shlens): Implement edge case to guarantee output size dimensions.
# If size > value.shape, zero pad the result so that it always has shape
# exactly size.
with ops.name_scope(name, "random_crop", [value, size]) as name:
value = ops.convert_to_tensor(value, name="value")
size = ops.convert_to_tensor(size, dtype=dtypes.int32, name="size")
shape = array_ops.shape(value)
check = control_flow_ops.Assert(
math_ops.reduce_all(shape >= size),
["Need value.shape >= size, got ", shape, size],
summarize=1000)
shape = control_flow_ops.with_dependencies([check], shape)
limit = shape - size + 1
offset = random_uniform(
array_ops.shape(shape),
dtype=size.dtype,
maxval=size.dtype.max,
seed=seed) % limit
return array_ops.slice(value, offset, size, name=name)
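# A minimal NumPy sketch (illustrative only, not part of the public API) of the
# uniform-offset crop idea used by random_crop above: pick one offset per
# dimension in [0, shape - size] and slice.
def _np_random_crop_sketch(value, size):
  value = np.asarray(value)
  offset = np.random.randint(0, np.array(value.shape) - np.array(size) + 1)
  return value[tuple(slice(o, o + s) for o, s in zip(offset, size))]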
def multinomial(logits, num_samples, seed=None, name=None):
"""Draws samples from a multinomial distribution.
Example:
```python
# samples has shape [1, 5], where each value is either 0 or 1 with equal
# probability.
samples = tf.multinomial(tf.log([[10., 10.]]), 5)
```
Args:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice
`[i, :]` represents the unnormalized log-probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A Python integer. Used to create a random seed for the distribution.
See
@{tf.set_random_seed}
for behavior.
name: Optional name for the operation.
Returns:
The drawn samples of shape `[batch_size, num_samples]`.
"""
with ops.name_scope(name, "multinomial", [logits]):
logits = ops.convert_to_tensor(logits, name="logits")
seed1, seed2 = random_seed.get_seed(seed)
return gen_random_ops.multinomial(
logits, num_samples, seed=seed1, seed2=seed2)
ops.NotDifferentiable("Multinomial")
def random_gamma(shape,
alpha,
beta=None,
dtype=dtypes.float32,
seed=None,
name=None):
"""Draws `shape` samples from each of the given Gamma distribution(s).
`alpha` is the shape parameter describing the distribution(s), and `beta` is
the inverse scale parameter(s).
Example:
samples = tf.random_gamma([10], [0.5, 1.5])
# samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents
# the samples drawn from each distribution
samples = tf.random_gamma([7, 5], [0.5, 1.5])
# samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]
# represents the 7x5 samples drawn from each of the two distributions
samples = tf.random_gamma([30], [[1.],[3.],[5.]], beta=[[3., 4.]])
# samples has shape [30, 3, 2], with 30 samples each of 3x2 distributions.
Note: Because internal calculations are done using `float64` and casting has
`floor` semantics, we must manually map zero outcomes to the smallest
possible positive floating-point value, i.e., `np.finfo(dtype).tiny`. This
means that `np.finfo(dtype).tiny` occurs more frequently than it otherwise
should. This bias can only happen for small values of `alpha`, i.e.,
`alpha << 1` or large values of `beta`, i.e., `beta >> 1`.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output samples
to be drawn per alpha/beta-parameterized distribution.
alpha: A Tensor or Python value or N-D array of type `dtype`. `alpha`
provides the shape parameter(s) describing the gamma distribution(s) to
sample. Must be broadcastable with `beta`.
beta: A Tensor or Python value or N-D array of type `dtype`. Defaults to 1.
`beta` provides the inverse scale parameter(s) of the gamma
distribution(s) to sample. Must be broadcastable with `alpha`.
dtype: The type of alpha, beta, and the output: `float16`, `float32`, or
`float64`.
seed: A Python integer. Used to create a random seed for the distributions.
See
@{tf.set_random_seed}
for behavior.
name: Optional name for the operation.
Returns:
samples: a `Tensor` of shape `tf.concat(shape, tf.shape(alpha + beta))`
with values of type `dtype`.
"""
with ops.name_scope(name, "random_gamma", [shape, alpha, beta]):
shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32)
alpha = ops.convert_to_tensor(alpha, name="alpha", dtype=dtype)
beta = ops.convert_to_tensor(
beta if beta is not None else 1, name="beta", dtype=dtype)
alpha_broadcast = alpha + array_ops.zeros_like(beta)
seed1, seed2 = random_seed.get_seed(seed)
return math_ops.maximum(
np.finfo(dtype.as_numpy_dtype).tiny,
gen_random_ops._random_gamma(
shape, alpha_broadcast, seed=seed1, seed2=seed2) / beta)
ops.NotDifferentiable("RandomGamma")
def random_poisson(lam, shape, dtype=dtypes.float32, seed=None, name=None):
"""Draws `shape` samples from each of the given Poisson distribution(s).
`lam` is the rate parameter describing the distribution(s).
Example:
samples = tf.random_poisson([0.5, 1.5], [10])
# samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents
# the samples drawn from each distribution
samples = tf.random_poisson([12.2, 3.3], [7, 5])
# samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]
# represents the 7x5 samples drawn from each of the two distributions
Args:
lam: A Tensor or Python value or N-D array of type `dtype`.
`lam` provides the rate parameter(s) describing the poisson
distribution(s) to sample.
shape: A 1-D integer Tensor or Python array. The shape of the output samples
to be drawn per "rate"-parameterized distribution.
dtype: The type of the output: `float16`, `float32`, `float64`, `int32` or
`int64`.
seed: A Python integer. Used to create a random seed for the distributions.
See
@{tf.set_random_seed}
for behavior.
name: Optional name for the operation.
Returns:
samples: a `Tensor` of shape `tf.concat(shape, tf.shape(lam))` with
values of type `dtype`.
"""
with ops.name_scope(name, "random_poisson", [lam, shape]):
shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32)
seed1, seed2 = random_seed.get_seed(seed)
return gen_random_ops.random_poisson_v2(
shape, lam, dtype=dtype, seed=seed1, seed2=seed2)
|
|
import glob
import logging
import os
import numpy as np
import re
import soundfile
from keras.models import model_from_json
from numpy.lib.stride_tricks import as_strided
from python_speech_features import mfcc
from char_map import char_map_en, index_map_en, char_map_lt, index_map_lt
logger = logging.getLogger(__name__)
def calc_feat_dim(window, max_freq):
return int(0.001 * window * max_freq) + 1
def conv_output_length(input_length, filter_size, border_mode, stride,
dilation=1):
""" Compute the length of the output sequence after 1D convolution along
time. Note that this function is in line with the function used in
Convolution1D class from Keras.
Params:
input_length (int): Length of the input sequence.
filter_size (int): Width of the convolution kernel.
        border_mode (str): Only supports `same` or `valid`.
stride (int): Stride size used in 1D convolution.
dilation (int)
"""
if input_length is None:
return None
assert border_mode in {'same', 'valid'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if border_mode == 'same':
output_length = input_length
elif border_mode == 'valid':
output_length = input_length - dilated_filter_size + 1
return (output_length + stride - 1) // stride
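# A quick worked check (illustrative numbers only): a length-100 sequence, an
# 11-wide kernel, 'valid' padding and stride 2 give (100 - 11 + 1 + 2 - 1) // 2
# = 45 output steps.
assert conv_output_length(100, 11, 'valid', 2) == 45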
def spectrogram(samples, fft_length=256, sample_rate=2, hop_length=128):
"""
Compute the spectrogram for a real signal.
The parameters follow the naming convention of
matplotlib.mlab.specgram
Args:
samples (1D array): input audio signal
fft_length (int): number of elements in fft window
sample_rate (scalar): sample rate
hop_length (int): hop length (relative offset between neighboring
fft windows).
Returns:
x (2D array): spectrogram [frequency x time]
freq (1D array): frequency of each row in x
Note:
This is a truncating computation e.g. if fft_length=10,
hop_length=5 and the signal has 23 elements, then the
last 3 elements will be truncated.
"""
assert not np.iscomplexobj(samples), "Must not pass in complex numbers"
window = np.hanning(fft_length)[:, None]
window_norm = np.sum(window**2)
# The scaling below follows the convention of
# matplotlib.mlab.specgram which is the same as
    # MATLAB's specgram.
scale = window_norm * sample_rate
trunc = (len(samples) - fft_length) % hop_length
x = samples[:len(samples) - trunc]
# "stride trick" reshape to include overlap
nshape = (fft_length, (len(x) - fft_length) // hop_length + 1)
nstrides = (x.strides[0], x.strides[0] * hop_length)
x = as_strided(x, shape=nshape, strides=nstrides)
# window stride sanity check
assert np.all(x[:, 1] == samples[hop_length:(hop_length + fft_length)])
# broadcast window, compute fft over columns and square mod
x = np.fft.rfft(x * window, axis=0)
x = np.absolute(x)**2
# scale, 2.0 for everything except dc and fft_length/2
x[1:-1, :] *= (2.0 / scale)
x[(0, -1), :] /= scale
freqs = float(sample_rate) / fft_length * np.arange(x.shape[0])
return x, freqs
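# A minimal sketch (synthetic signal, illustrative parameters only, never called
# at import time) of the truncation behaviour described in the docstring above.
def _spectrogram_shape_sketch():
    """23 samples with fft_length=10 and hop_length=5 keep 20 samples and
    yield (10 // 2 + 1) = 6 frequency rows over 3 frames."""
    sig = np.arange(23, dtype='float64')
    x, freqs = spectrogram(sig, fft_length=10, sample_rate=100, hop_length=5)
    assert x.shape == (6, 3) and freqs.shape == (6,)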
def spectrogram_from_file(filename, step=10, window=20, max_freq=None,
eps=1e-14):
""" Calculate the log of linear spectrogram from FFT energy
Params:
filename (str): Path to the audio file
step (int): Step size in milliseconds between windows
window (int): FFT window size in milliseconds
max_freq (int): Only FFT bins corresponding to frequencies between
[0, max_freq] are returned
eps (float): Small value to ensure numerical stability (for ln(x))
"""
with soundfile.SoundFile(filename) as sound_file:
audio = sound_file.read(dtype='float32')
sample_rate = sound_file.samplerate
if audio.ndim >= 2:
audio = np.mean(audio, 1)
if max_freq is None:
max_freq = sample_rate / 2
if max_freq > sample_rate / 2:
raise ValueError("max_freq must not be greater than half of "
" sample rate")
if step > window:
raise ValueError("step size must not be greater than window size")
hop_length = int(0.001 * step * sample_rate)
fft_length = int(0.001 * window * sample_rate)
pxx, freqs = spectrogram(
audio, fft_length=fft_length, sample_rate=sample_rate,
hop_length=hop_length)
ind = np.where(freqs <= max_freq)[0][-1] + 1
return np.transpose(np.log(pxx[:ind, :] + eps))
def save_model(save_dir, model, train_costs, validation_costs, index=None):
""" Save the model and costs into a directory
Params:
save_dir (str): Directory used to store the model
model (keras.models.Model)
train_costs (list(float))
validation_costs (list(float))
index (int): If this is provided, add this index as a suffix to
the weights (useful for checkpointing during training)
"""
logger.info("Checkpointing model to: {}".format(save_dir))
model_config_path = os.path.join(save_dir, 'model_config.json')
with open(model_config_path, 'w') as model_config_file:
model_json = model.to_json()
model_config_file.write(model_json)
if index is None:
weights_format = 'model_weights.h5'
else:
weights_format = 'model_{}_weights.h5'.format(index)
model_weights_file = os.path.join(save_dir, weights_format)
model.save_weights(model_weights_file, overwrite=True)
np.savez(os.path.join(save_dir, 'costs.npz'), train=train_costs,
validation=validation_costs)
def load_model(load_dir, weights_file=None):
""" Load a model and its weights from a directory
Params:
load_dir (str): Path the model directory
weights_file (str): If this is not passed in, try to load the latest
model_*weights.h5 file in the directory
Returns:
model (keras.models.Model)
"""
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
# From http://stackoverflow.com/questions/5967500
        return [atoi(c) for c in re.split(r'(\d+)', text)]
model_config_file = os.path.join(load_dir, 'model_config.json')
model_config = open(model_config_file).read()
model = model_from_json(model_config)
if weights_file is None:
# This will find all files of name model_*weights.h5
# We try to use the latest one saved
weights_files = glob.glob(os.path.join(load_dir, 'model_*weights.h5'))
weights_files.sort(key=natural_keys)
model_weights_file = weights_files[-1] # Use the latest model
else:
model_weights_file = weights_file
model.load_weights(model_weights_file)
return model
def argmax_decode(prediction, language):
""" Decode a prediction using the highest probable character at each
timestep. Then, simply convert the integer sequence to text
Params:
prediction (np.array): timestep * num_characters
"""
int_sequence = []
for timestep in prediction:
int_sequence.append(np.argmax(timestep))
tokens = []
c_prev = -1
for c in int_sequence:
if c == c_prev:
continue
if c != 0: # Blank
tokens.append(c)
c_prev = c
if(language=="lt"):
text = ''.join([index_map_lt[i] for i in tokens])
else:
text = ''.join([index_map_en[i] for i in tokens])
return text
def text_to_int_sequence(text, language):
""" Use a character map and convert text to an integer sequence """
char_map = char_map_en
if(language=="lt"):
char_map = char_map_lt
int_sequence = []
for c in text:
if c == ' ':
ch = char_map['<SPACE>']
else:
ch = char_map[c]
        if not ch:
raise NameError('There is no such character: ' + c)
int_sequence.append(ch)
return int_sequence
def configure_logging(console_log_level=logging.INFO,
console_log_format=None,
file_log_path=None,
file_log_level=logging.INFO,
file_log_format=None,
clear_handlers=False):
"""Setup logging.
This configures either a console handler, a file handler, or both and
adds them to the root logger.
Args:
console_log_level (logging level): logging level for console logger
console_log_format (str): log format string for console logger
file_log_path (str): full filepath for file logger output
file_log_level (logging level): logging level for file logger
file_log_format (str): log format string for file logger
clear_handlers (bool): clear existing handlers from the root logger
Note:
A logging level of `None` will disable the handler.
"""
if file_log_format is None:
file_log_format = \
'%(asctime)s %(levelname)-7s (%(name)s) %(message)s'
if console_log_format is None:
console_log_format = \
'%(asctime)s %(levelname)-7s (%(name)s) %(message)s'
# configure root logger level
root_logger = logging.getLogger()
root_level = root_logger.level
if console_log_level is not None:
root_level = min(console_log_level, root_level)
if file_log_level is not None:
root_level = min(file_log_level, root_level)
root_logger.setLevel(root_level)
# clear existing handlers
if clear_handlers and len(root_logger.handlers) > 0:
print("Clearing {} handlers from root logger."
.format(len(root_logger.handlers)))
for handler in root_logger.handlers:
root_logger.removeHandler(handler)
# file logger
if file_log_path is not None and file_log_level is not None:
log_dir = os.path.dirname(os.path.abspath(file_log_path))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
file_handler = logging.FileHandler(file_log_path)
file_handler.setLevel(file_log_level)
file_handler.setFormatter(logging.Formatter(file_log_format))
root_logger.addHandler(file_handler)
# console logger
if console_log_level is not None:
console_handler = logging.StreamHandler()
console_handler.setLevel(console_log_level)
console_handler.setFormatter(logging.Formatter(console_log_format))
root_logger.addHandler(console_handler)
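# A minimal usage sketch (hypothetical log path, not called anywhere in this
# module): INFO and above to the console, DEBUG and above to a file, clearing
# any handlers that were installed earlier.
def _example_configure_logging(log_path='/tmp/train.log'):
    configure_logging(console_log_level=logging.INFO,
                      file_log_path=log_path,
                      file_log_level=logging.DEBUG,
                      clear_handlers=True)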
|
|
"""
Agent smoketest code.
This python script is meant to be invoked within a docker image in which the proper python version is activated (e.g.
via pyenv). In this way, the agent can be validated against different python versions.
Concept:
This code serves as a common code-base for different types of smoketest "processes" (i.e. same code runs in
different modes). Examples of modes are (uploader, verifier).
Uploader (a.k.a Producer):
Waits for Scalyr agent to be up and running (by querying scalyr backend).
Produces 1000 lines of dummy data very quickly, then produces one additional line of data every second.
If the agent is working correctly, this data will be correctly ingested and uploaded to Scalyr (by the agent)
and can subsequently be verified (by the Verifier).
Verifier:
Waits for Scalyr agent to be up and running.
Keeps polling until max_wait for the expected uploader data.
Usage:
smoketest.py ${process_name} ${max_wait} \
--mode verifier \
--scalyr_server ${SCALYR_SERVER} \
--read_api_key ${READ_API_KEY} \
--agent_hostname ${agent_hostname} \
--uploader_hostname ${uploader_hostname} \
--debug true"
where:
process_name: A means by which the invoker script can inform this script what the current process name is.
The process_name is important as it is parsed/modified to construct verifying queries.
E.g. process_name is used to construct a logfile to be queried such as "/docker/<process_name>-uploader.log".
Moreover, any given CI build should not conflict with other builds and therefore should have a unique
process name (e.g. /docker/ci-agent-docker-json-5986-uploader.log where "ci-agent-docker-json-5986" is a unique
        identifier specific to a CI build).
Additionally, the process name determines which class to instantiate (see
CONTAINER_PREFIX_2_VERIFIER_CLASS). The invoker can choose different implementations (e.g. for LogStash)
        by using one of the prefixes defined in CONTAINER_PREFIX_2_VERIFIER_CLASS. An object of that class is then
instantiated and begins running in the specified mode (either as an Uploader or Verifier).
max_wait: Maximum time to run until exiting with failure.
mode: Operational mode which determines what this process does. Must be one of (uploader, verifier, agent).
scalyr_server: Scalyr backend server to connect to (typically qatesting.scalyr.com when testing)
monitored_logfile: Absolute path of the data file to write which the agent then ingests. Logstash producers also
        write to this file, which is then configured as an input to the Logstash aggregator.
python_version: Python version that the agent is running on (becomes part of the Uploader data)
read_api_key: Read API key to use when querying the Scalyr backend to verify expected data has been uploaded.
agent_hostname: Uploaders and Verifiers need to know the agent_hostname of the agent process in order to construct
a proper verifying query (because they query for a log line uploaded by the agent in order to know when it has
        successfully started). This agent_hostname is typically passed in by the invoker script that starts the Uploader
or Verifier.
uploader_hostname: Similar to agent_hostname, Verifiers need to wait for Uploaders to finish uploading before
performing their verifying queries. The uploader_hostname is a necessary piece of information typically passed
in by the invoker script that starts the Uploader and Verifier.
debug: true|false . If true, prints out all Scalyr api queries (useful for debugging)
Note:
    This test code requires Python 3 with specific packages installed (e.g. requests)
"""
from __future__ import print_function
from __future__ import absolute_import
__author__ = "[email protected]"
import argparse
import os
import json
import time
import requests
import socket
import sys
import threading
import shlex
from io import open
from copy import deepcopy
from pprint import pprint
try:
from urllib.parse import urlencode, quote_plus, unquote_plus
except ImportError:
from urllib import urlencode, quote_plus, unquote_plus
NAME_SUFFIX_UPLOADER = "uploader"
NAME_SUFFIX_VERIFIER = "verifier"
# no actual Actor will run under this name, but the constant is needed for logic that checks on the Agent container
NAME_SUFFIX_AGENT = "agent"
NAME_SUFFIXES = [NAME_SUFFIX_UPLOADER, NAME_SUFFIX_VERIFIER, NAME_SUFFIX_AGENT]
def _pretty_print(header="", message="", file=sys.stdout):
if header:
print("", file=file)
print("=" * 79, file=file)
print(header, file=file)
print("=" * 79, file=file)
if len(message) > 0: # message can be spaces
print(message, file=file)
def _exit(code, show_agent_status=True, header="", message=""):
"""Prints agent status before exiting"""
file = sys.stdout if code == 0 else sys.stderr
if show_agent_status:
_pretty_print(header="BEGIN AGENT STATUS")
agent_exec = "/usr/share/scalyr-agent-2/bin/scalyr-agent-2"
if os.path.isfile(agent_exec):
os.system("{} status -v".format(shlex.quote(agent_exec)))
_pretty_print(header="END AGENT STATUS")
_pretty_print(message=" ")
_pretty_print(header, message, file=file)
# exit even if other threads are running
os._exit(code)
class SmokeTestActor(object):
"""
Abstract base class for all verifiers.
Some objects may only upload.
Others may only verify.
Some may do both, in which case we may need a barrier
"""
DEFAULT_POLL_INTERVAL_SEC = 10
def __init__(self, **kwargs):
self._process_name = kwargs.get("process_name")
self._scalyr_server = kwargs.get("scalyr_server")
self._read_api_key = kwargs.get("read_api_key")
self._max_wait = float(kwargs.get("max_wait"))
self._localhostname = socket.gethostname()
self._barrier = None
self._barrier_lock = threading.Lock()
self._lines_to_upload = 1000
self.__init_time = time.time()
self._agent_started_lock = threading.Lock()
self._agent_started = False
self._debug = (kwargs.get("debug") or "").lower() in (
"true",
"y",
"yes",
"t",
"1",
)
def _get_uploader_output_streams(self):
"""Returns list of streams to write log data"""
raise NotImplementedError
def _get_uploader_stream_names(self):
"""Returns list of streams to write log data"""
raise NotImplementedError
def _get_stream_name_from_stream(self, stream):
return stream.name[1:-1]
def get_hard_kill_time(self):
"""Returns time in epoch seconds for when this process must terminate"""
return self.__init_time + self._max_wait
def verifier_type(self):
raise NotImplementedError
def is_verifier(self):
raise NotImplementedError
def is_uploader(self):
raise NotImplementedError
def _get_barrier(self, parties=2):
"""Lazy-instantiate a barrier"""
with self._barrier_lock:
if not self._barrier:
self._barrier = threading.Barrier(parties, timeout=self._max_wait)
return self._barrier
def __wait_at_barrier(self):
"""
For coordinating processes.
Currently only used to prevent uploader OR verifier from proceeding until agent is verified up and running.
Note: uploader and verifier do not block each other, regardless of whether they
run within same process or in different processes.
"""
barrier = self._get_barrier()
if barrier:
print("... Blocking at barrier")
barrier.wait()
print("... Unblocked")
def exit(self, code, **kwargs):
_exit(code, **kwargs)
def verify_logs_uploaded(self):
"""Query scalyr to verify presence of uploaded data"""
raise NotImplementedError
def verify_agent_started_or_die(self):
"""Verify state or processes that should be present or running if agent is running"""
raise NotImplementedError
def wait_for_agent_to_start(self):
"""Both upload or verification should not begin until agent is confirmed started"""
with self._agent_started_lock:
if not self._agent_started:
self.verify_agent_started_or_die()
self._agent_started = True
def verify_or_die(self):
"""
Query the Scalyr backend in search for what we know we uploaded.
Error out after a certain time limit.
Returns:
Nothing. Exits with status 0 or 1
"""
self.wait_for_agent_to_start()
self.verify_logs_uploaded()
def _make_log_line(self, count, stream):
"""Return a line of text to be written to the log. Don't include trailing newline
Args:
count: line number (concrete class may choose to incorporate into line content for verification)
stream: output stream (concrete class may choose to incorporate into line content for verification)
"""
raise NotImplementedError
def trigger_log_upload(self):
self.wait_for_agent_to_start()
streams = self._get_uploader_output_streams()
count = 0
while time.time() < self.get_hard_kill_time():
for stream in streams:
stream.write(self._make_log_line(count, stream))
stream.write("\n")
stream.flush()
if count >= self._lines_to_upload:
time.sleep(1) # slow down if threshold is reached
# Write to all streams for a given count
count += 1
def _make_query_url(
self,
filter_dict=None,
message="",
override_serverHost=None,
override_log=None,
override_log_regex=None,
override_max_count=1,
):
"""
Make url for querying Scalyr server. Any str filter values will be url-encoded
"""
base_params = sorted(self._get_base_query_params(override_max_count).items())
url = "https://" if not self._scalyr_server.startswith("http") else ""
url += "{0}/api/query?queryType=log&{1}".format(
self._scalyr_server, urlencode(base_params)
)
# NOTE: In theory we could also escape $, but API doesn't require it. It does appear to work
# both ways though.
# Set serverHost/logfile from object state if not overridden
if not filter_dict:
filter_dict = {}
filter_dict["$serverHost"] = override_serverHost or self._process_name
# only if no log regex is provided do we then add an exact logfile match
if not override_log_regex:
filter_dict["$logfile"] = (
override_log or self._logfile # pylint: disable=no-member
)
filter_frags = []
for k, v in sorted(filter_dict.items()):
if type(v) == str:
v = quote_plus('"{0}"'.format(v))
elif type(v) == bool:
v = quote_plus('"{0}"'.format(str(v).lower()))
filter_frags.append("{0}=={1}".format(k, v))
# If log regex is provided, add a regex matches clause
if override_log_regex:
filter_frags.append(
'{0} matches "{1}"'.format("$logfile", override_log_regex)
)
# Add message
if message:
filter_frags.append(
"$message{0}".format(quote_plus(' contains "{0}"'.format(message)))
)
url += "&filter={0}".format("+and+".join(filter_frags))
if self._debug:
print("\nURL quoted: {0}".format(url))
print(" unquoted: {0}".format(unquote_plus(url)))
print(" curl command: curl -v '{0}'".format(url))
return url
def _get_base_query_params(self, max_count):
"""Get base query params (not including filter)"""
params = {
"maxCount": max_count,
"startTime": "10m",
"token": self._read_api_key,
}
return params
def poll_until_max_wait(
self,
verify_func,
description,
success_mesg,
fail_mesg,
exit_on_success=False,
exit_on_fail=False,
poll_interval=None,
):
"""
Template design pattern method for polling until a maximum time. Each poll executes the provided verify_func().
fail/success messages are parameterized, as well as whether to exit.
Args:
verify_func: Function to execute for each check. Must return True/False
description: Text to print at beginning of check
success_mesg: Text to print on success
fail_mesg: Text to print on failure
exit_on_success: If success, exit (with code 0)
exit_on_fail: If fail, exit (with code 1)
"""
_pretty_print(description)
verified = False
prev = time.time()
while time.time() < self.get_hard_kill_time():
# Try to verify upload by querying Scalyr server
sys.stdout.write(". ")
sys.stdout.flush()
verified = verify_func()
# query backend to confirm.
if verified:
success_mesg = "\nSUCCESS !!. " + success_mesg
if exit_on_success:
self.exit(0, message=success_mesg)
else:
_pretty_print(message=success_mesg, file=sys.stdout)
break
# Sleep a bit before trying again
time.sleep(poll_interval or SmokeTestActor.DEFAULT_POLL_INTERVAL_SEC)
cur = time.time()
if cur - prev > 10:
print(
"{} seconds remaining".format(int(self.get_hard_kill_time() - cur))
)
prev = cur
else:
fail_mesg = "FAILED. Time limit reached. " + fail_mesg
if exit_on_fail:
self.exit(1, message=fail_mesg)
else:
_pretty_print(message=fail_mesg, file=sys.stderr)
class StandaloneSmokeTestActor(SmokeTestActor):
"""
Standalone agent verifier.
A single process performs both Uploader and Verifier tasks.
Therefore, the logfile that we Upload to is the same file that is verified (filename queried for verification).
Waits for same-host Agent to be up and running (by watching for local agent.pid/log files).
    Then writes to a JSON file which is picked up by the Agent.
    Finally, queries the Scalyr backend to confirm the JSON file was uploaded.
"""
VERIFIER_TYPE = "Standalone"
def __init__(self, **kwargs):
super(StandaloneSmokeTestActor, self).__init__(**kwargs)
self._logfile = kwargs.get("monitored_logfile")
self._python_version = kwargs.get("python_version")
def is_verifier(self):
return True
def is_uploader(self):
return True
def _get_uploader_output_streams(self):
"""Returns stream to write log data into"""
return [open(self._logfile, "w+")]
def _get_uploader_stream_names(self):
"""Returns stream to read log data from"""
return [self._logfile]
def _make_log_line(self, count, stream):
"""Return a line of JSON for data.json (which is uploaded by the Agent)"""
obj = {
"verifier_type": self.VERIFIER_TYPE,
"count": count,
"hostname": self._localhostname,
"python_version": "python{}".format(self._python_version),
"line_stream": stream.name,
}
return json.dumps(obj)
def verify_agent_started_or_die(self):
"""Poll for agent pid and log file"""
def _check_agent_pid_and_log_files():
# If agent is not started, print agent.log if it exists
agent_logfile = "/var/log/scalyr-agent-2/agent.log"
agent_pid_file = "/var/log/scalyr-agent-2/agent.pid"
if not os.path.isfile(agent_pid_file) or not os.path.isfile(agent_logfile):
return False
return True
self.poll_until_max_wait(
_check_agent_pid_and_log_files,
"Checking for agent pid and log files",
"Agent is running.",
"No agent running.",
poll_interval=1,
)
def verify_logs_uploaded(self):
"""
        For the standalone agent, confirmation of log upload hinges on a successful poll
of a single matching row as follows:
python_version matches the standalone agent python version
hostname matches the docker container hostname running the standalone agent
"""
def _query_scalyr_for_monitored_log_upload():
# TODO: This should be self._lines_to_upload (i.e. 1000, but it doesn't work
# for logstash where for some reason only 300-600 lines are uploaded most
# of the time. Once that bug is fixed, change this back to self._lines_to_upload
expected_count = 1000
resp = requests.get(
self._make_query_url(
{
"$verifier_type": self.VERIFIER_TYPE,
"$python_version": "python{}".format(self._python_version),
"$hostname": self._localhostname,
"$count": expected_count,
}
)
)
if resp.ok:
data = json.loads(resp.content)
if "matches" not in data:
print('API response doesn\'t contain "matches" attribute')
print("API response: %s" % (str(data)))
return False
matches = data["matches"]
if len(matches) == 0:
print("Found 0 matches")
return False
print("")
print("Sample response for matches[0]")
pprint(matches[0])
print("")
att = matches[0]["attributes"]
verifier_type = att["verifier_type"]
python_version = att["python_version"]
hostname = att["hostname"]
cnt = att["count"]
if all(
[
verifier_type == self.VERIFIER_TYPE,
python_version == "python{}".format(self._python_version),
hostname == self._localhostname,
cnt == expected_count,
]
):
return True
print("Received non-OK (200) response")
print("Response status code: %s" % (resp.status_code))
print("Response text: %s" % (resp.text))
return False
self.poll_until_max_wait(
_query_scalyr_for_monitored_log_upload,
"Querying server to verify monitored logfile was uploaded.",
"Monitored logfile upload verified",
"Monitored logfile upload not verified",
exit_on_success=True,
exit_on_fail=True,
)
class DockerSmokeTestActor(SmokeTestActor):
"""
Base Docker actor.
Some containers will write logs to Scalyr but only one container will verify.
(The current setup has only one uploader + one verifier)
Because there are multiple processes (containers) running, it is necessary to synchronize them for the Smoketest
to correctly work.
Upload / Verify will not begin until the remote agent is confirmed to be up and running. This is done by querying
Scalyr.
For clarity/maintainability of the Upload/Verifier code, an actor should only upload or verify, not both. (This is
different from the Standalone actor where a single process runs both upload and verify and checks the local agent
via file system).
"""
def __init__(self, **kwargs):
"""
:param max_wait: Max seconds before exiting
        :param mode: One of NAME_SUFFIXES ('uploader', 'verifier' or 'agent')
"""
super().__init__(**kwargs)
self.mode = kwargs.get("mode")
self._logfile = "/docker/{}.log".format(self._process_name)
self._agent_hostname = kwargs.get("agent_hostname")
self._uploader_hostname = kwargs.get("uploader_hostname")
_pretty_print('Agent hostname="{}"'.format(self._agent_hostname))
_pretty_print('Uploader hostname="{}"'.format(self._uploader_hostname))
def is_verifier(self):
return self.mode == NAME_SUFFIX_VERIFIER
def is_uploader(self):
return self.mode == NAME_SUFFIX_UPLOADER
def _serialize_row(self, obj):
"""Write a single row of key=value, separated by commas. Standardize by sorting keys"""
keyvals = [(key, obj.get(key)) for key in sorted(obj.keys())]
return ",".join(["{}={}".format(k, v) for k, v in keyvals])
def _make_log_line(self, count, stream):
return self._serialize_row(
{
"verifier_type": self.VERIFIER_TYPE, # pylint: disable=no-member
"count": count,
"line_stream": self._get_stream_name_from_stream(stream),
                # No need to include the hostname in the log line: the agent_container_id and the
                # remote-container-logfile name uniquely identify the correct log.
# "hostname": self._localhostname,
}
)
def _get_process_name_for_suffix(self, suffix):
assert suffix in [
NAME_SUFFIX_AGENT,
NAME_SUFFIX_UPLOADER,
NAME_SUFFIX_VERIFIER,
]
parts = self._process_name.split("-")[:-1]
parts.append(suffix)
return "-".join(parts)
def _get_stream_name_from_stream(self, stream):
return stream.name[1:-1]
def _get_uploader_output_streams(self):
return [sys.stderr, sys.stdout]
def _get_uploader_stream_names(self):
"""Docker and k8s subclasses all verify by querying stream names of 'stderr' and 'stdout'"""
return [stream.name[1:-1] for stream in [sys.stderr, sys.stdout]]
def verify_agent_started_or_die(self):
"""
Docker agent is not running in same container as Verifier.
Verifier must query Scalyr to determine presence of these 2 files:
serverHost=<agent_short_container_id>, logfile=/var/log/scalyr-agent-2/agent.log
serverHost=<agent_short_container_id>, logfile=/var/log/scalyr-agent-2/docker_monitor.log
filter="Starting monitor docker_monitor()"
"""
def _query_scalyr_for_agent_logfile(logfile):
def _func():
resp = requests.get(
self._make_query_url(
override_serverHost=self._agent_hostname,
override_log=logfile,
)
)
if resp.ok:
data = json.loads(resp.content)
if "matches" not in data:
return False
matches = data["matches"]
if len(matches) == 0:
return False
return True
print("Received non-OK (200) response")
print("Response status code: %s" % (resp.status_code))
print("Response text: %s" % (resp.text))
return False
return _func
for filename in self._get_expected_agent_logfiles():
self.poll_until_max_wait(
_query_scalyr_for_agent_logfile(filename),
"Check if Agent is running: query scalyr for agent container file: {}".format(
filename
),
"{} found".format(filename),
"Time limit reached. Could not verify liveness of Agent Docker Container.",
exit_on_success=False,
exit_on_fail=True,
)
def _get_expected_agent_logfiles(self):
return [
"/var/log/scalyr-agent-2/agent.log",
"/var/log/scalyr-agent-2/docker_monitor.log",
]
def _get_uploader_override_logfilename_regex(self, process_name):
"""All logfile filters are exact and therefore we return None in the general case"""
return None
def _get_mapped_logfile_prefix(self):
raise NotImplementedError
def _get_extra_query_attributes(self, stream_name, process_name):
"""Dictionary of query field key-vals (besides serverHost, logfile, filters)"""
raise NotImplementedError
def _verify_queried_attributes(self, att, stream_name, process_name):
if att.get("containerName") != process_name:
print(
"containerName attribute doesn't match process name. Expected '%s' got '%s'"
% (process_name, att.get("containerName"))
)
return False
return True
def verify_logs_uploaded(self):
"""
        For the docker agent, confirmation requires verification that all uploaders were able to upload.
There are 2 separate types of containers.
1. uploader: uploads data to Scalyr (can easily support multiple but for now, just 1)
2. verifier: verifies data was uploaded by uploader
"""
def _query_scalyr_for_upload_activity(contname_suffix, stream_name):
def _func():
process_name = self._get_process_name_for_suffix(contname_suffix)
resp = requests.get(
self._make_query_url(
self._get_extra_query_attributes(stream_name, process_name),
override_serverHost=self._agent_hostname,
override_log="{}/{}.log".format(
self._get_mapped_logfile_prefix(), process_name
),
override_log_regex=self._get_uploader_override_logfilename_regex(
process_name
),
message=self._serialize_row(
{
"verifier_type": self.VERIFIER_TYPE, # pylint: disable=no-member
"count": self._lines_to_upload,
"line_stream": stream_name,
}
),
)
)
if resp.ok:
data = json.loads(resp.content)
if "matches" not in data:
print('API response doesn\'t contain "matches" attribute')
print("API response: %s" % (str(data)))
return False
matches = data["matches"]
if len(matches) == 0:
print("Found 0 matches")
return False
print("")
print("Sample response for matches[0]")
pprint(matches[0])
print("")
att = matches[0]["attributes"]
return self._verify_queried_attributes(
att, stream_name, process_name
)
print("Received non-OK (200) response")
print("Response status code: %s" % (resp.status_code))
print("Response text: %s" % (resp.text))
return False # Non-ok response
return _func
suffixes_to_check = [NAME_SUFFIX_UPLOADER]
for count, suffix in enumerate(suffixes_to_check):
for stream_name in self._get_uploader_stream_names():
self.poll_until_max_wait(
_query_scalyr_for_upload_activity(suffix, stream_name),
"Querying server to verify upload: container[stream]='{}[{}].".format(
self._get_process_name_for_suffix(suffix), stream_name
),
"Upload verified for {}[{}].".format(suffix, stream_name),
"Upload not verified for {}[{}].".format(suffix, stream_name),
exit_on_success=count == len(suffixes_to_check),
exit_on_fail=True,
)
class DockerJsonActor(DockerSmokeTestActor):
"""These subclasses capture differences between JSON and Syslog implementations"""
VERIFIER_TYPE = "Docker JSON"
def _get_mapped_logfile_prefix(self):
return "/docker"
def _get_extra_query_attributes(self, stream_name, process_name):
return {"$stream": stream_name}
def _verify_queried_attributes(self, att, stream_name, process_name):
if not super()._verify_queried_attributes(att, stream_name, process_name):
return False
if not all(
[att.get("stream") in stream_name, att.get("monitor") == "agentDocker"]
):
return False
return True
class DockerAPIActor(DockerSmokeTestActor):
"""
    Verifier to be used when the Docker monitor utilizes Docker API mode for ingesting logs (aka
docker_raw_logs config option is False).
It verifies both streams - stdout and stderr.
"""
VERIFIER_TYPE = "Docker API (docker_raw_logs: false)"
def __init__(self, *args, **kwargs):
super(DockerAPIActor, self).__init__(*args, **kwargs)
        # Stores the set of matching log lines we have seen so far
self._seen_matching_lines = set()
self._last_seen_timestamp = 0
def _get_base_query_params(self, max_count=100):
# NOTE: We can't really use last timestamp based querying since sometimes data appears to
        # come in out of order, so we would miss messages that way
if self._last_seen_timestamp:
start_time = str(self._last_seen_timestamp)
else:
start_time = "10m"
params = {
"maxCount": max_count,
"startTime": start_time,
"token": self._read_api_key,
}
return params
def verify_logs_uploaded(self):
"""
Function which verifies container logs were indeed correctly ingested into Scalyr.
"""
def _query_scalyr_for_monitored_log_upload(contname_suffix, stream_name):
def _func():
process_name = self._get_process_name_for_suffix(contname_suffix)
resp = requests.get(
self._make_query_url(
self._get_extra_query_attributes(stream_name, process_name),
override_serverHost=self._agent_hostname,
override_log="{}/{}.log".format(
self._get_mapped_logfile_prefix(), process_name
),
override_log_regex=self._get_uploader_override_logfilename_regex(
stream_name=stream_name, process_name=process_name
),
override_max_count=100,
message=None,
)
)
if resp.ok:
data = json.loads(resp.content)
if "matches" not in data:
print('API response doesn\'t contain "matches" attribute')
print("API response: %s" % (str(data)))
return False
matches = data["matches"]
if len(matches) == 0:
print("Found 0 matches")
return False
print("")
print("Sample response for matches[0]")
pprint(matches[0])
print("")
self._last_seen_timestamp = int(matches[0]["timestamp"])
return self._verify_response_matches(
matches=matches,
stream_name=stream_name,
process_name=process_name,
)
print("Received non-OK (200) response")
print("Response status code: %s" % (resp.status_code))
print("Response text: %s" % (resp.text))
return False # Non-ok response
return _func
self.poll_until_max_wait(
_query_scalyr_for_monitored_log_upload("agent", "stdout"),
"Querying server to verify monitored logfile was uploaded.",
"Monitored logfile upload verified",
"Monitored logfile upload not verified",
exit_on_success=True,
exit_on_fail=True,
)
def _get_uploader_stream_names(self):
return ["stdout", "stderr"]
def _get_mapped_logfile_prefix(self):
return "/var/log/scalyr-agent-2"
def _serialize_row(self, obj):
return ""
def _get_uploader_override_logfilename_regex(self, stream_name, process_name):
# $logfile will look something like this:
# "/var/log/scalyr-agent-2/docker-ci-agent-docker-api-56640-agent-stdout.log"
# process name will contain a value similar to this one:
# ci-agent-docker-api-56644-agent
logname_suffix = process_name + "-" + stream_name
return "{}/docker-{}.log".format(
self._get_mapped_logfile_prefix(), logname_suffix
)
def _get_extra_query_attributes(self, stream_name, process_name):
return {}
def _verify_response_matches(self, matches, stream_name, process_name):
for item in matches:
attributes = item["attributes"]
message = item.get("message", "") or ""
self._verify_queried_attributes(
att=attributes,
message=message,
stream_name=stream_name,
process_name=process_name,
)
success = len(self._seen_matching_lines) >= 1 + 2 + 2
if success:
print(
"Found all the required log lines (%s)"
% (str(self._seen_matching_lines))
)
return success
def _verify_queried_attributes(self, att, message, stream_name, process_name):
log_path = self._get_uploader_override_logfilename_regex(
stream_name=stream_name, process_name=process_name
)
if (
"Docker API (docker_raw_logs: false)" in message
or "Starting docker monitor (raw_logs=False)" in message
):
self._seen_matching_lines.add(message)
return
log_path = self._get_uploader_override_logfilename_regex(
stream_name=stream_name, process_name=process_name
)
# Message should look something like this:
# INFO [core] [copying_manager.py:423] Adding new log file
# '/var/log/scalyr-agent-2/docker-ci-agent-docker-api-57068-agent-stdout.log' for monitor
# 'scalyr_agent.builtin_monitors.docker_monitor'
if (
"Adding new log file" in message
and log_path in message
and "-stdout.log" in message
):
self._seen_matching_lines.add(message)
return
if (
"Adding new log file" in message
and log_path in message
and "-stderr.log" in message
):
self._seen_matching_lines.add(message)
return
# Message should look something like this:
# INFO [monitor:docker_monitor] [docker_monitor.py:1308] File
# /var/log/scalyr-agent-2/docker-ci-agent-docker-api-57087-verifier-stdout.log doesn't
# exist on disk. This likely means a new container has been started and no existing logs
# are available for it on disk. Original error: [Errno 2] No such file or directory:
# '/var/log/scalyr-agent-2/docker-ci-agent-docker-api-57087-verifier-stdout.log'
if (
"-stdout.log doesn't exist on disk. This likely means a new container has been started"
in message
):
self._seen_matching_lines.add(message)
return
if (
"-stderr.log doesn't exist on disk. This likely means a new container has been started"
in message
):
self._seen_matching_lines.add(message)
return
class DockerSyslogActor(DockerSmokeTestActor):
VERIFIER_TYPE = "Docker Syslog"
def _get_extra_query_attributes(self, stream_name, process_name):
return {}
def _get_mapped_logfile_prefix(self):
return "/var/log/scalyr-agent-2/containers"
def _verify_queried_attributes(self, att, stream_name, process_name):
if not super()._verify_queried_attributes(att, stream_name, process_name):
return False
expected_monitor = "agentSyslog"
# expected_parser = "agentSyslogDocker"
actual_monitor = att.get("monitor")
# actual_parser = att.get("parser")
# NOTE: "parser" attribute is not returned by the API anymore since early July 2020 so we
# only assert on the monitor name
if actual_monitor != expected_monitor:
print(
"Expected(monitor): '%s', got '%s'" % (expected_monitor, actual_monitor)
)
return False
return True
class K8sActor(DockerSmokeTestActor):
"""
Uploaders write to std output/error
Verifiers query for 'stdout', 'stderr'
"""
VERIFIER_TYPE = "Kubernetes"
EXPECTED_DOCKER_METRICS = [
"docker.cpu.throttling.throttled_time",
"docker.cpu.throttling.periods",
"docker.cpu.throttling.throttled_periods",
"docker.cpu.system_cpu_usage",
"docker.cpu.total_usage",
"docker.cpu.usage_in_usermode",
"docker.cpu.usage_in_kernelmode",
"docker.mem.limit",
"docker.mem.usage",
"docker.mem.max_usage",
"docker.mem.stat.active_file",
"docker.mem.stat.total_writeback",
"docker.mem.stat.active_anon",
"docker.mem.stat.total_pgpgout",
"docker.mem.stat.total_pgmajfault",
"docker.mem.stat.total_rss_huge",
"docker.mem.stat.total_inactive_file",
"docker.mem.stat.inactive_file",
"docker.mem.stat.pgfault",
"docker.mem.stat.total_cache",
"docker.mem.stat.total_pgfault",
"docker.mem.stat.total_mapped_file",
"docker.mem.stat.inactive_anon",
"docker.mem.stat.pgmajfault",
"docker.mem.stat.pgpgin",
"docker.mem.stat.rss_huge",
"docker.mem.stat.rss",
"docker.mem.stat.hierarchical_memory_limit",
"docker.mem.stat.unevictable",
"docker.mem.stat.total_unevictable",
"docker.mem.stat.cache",
"docker.mem.stat.mapped_file",
"docker.mem.stat.total_rss",
"docker.mem.stat.total_active_anon",
"docker.mem.stat.total_active_file",
"docker.mem.stat.writeback",
"docker.mem.stat.pgpgout",
"docker.mem.stat.total_inactive_anon",
"docker.mem.stat.total_pgpgin",
"docker.net.rx_packets",
"docker.net.tx_packets",
"docker.net.rx_bytes",
"docker.net.tx_errors",
"docker.net.rx_errors",
"docker.net.tx_bytes",
"docker.net.rx_dropped",
"docker.net.tx_dropped",
]
def _get_expected_agent_logfiles(self):
return [
"/var/log/scalyr-agent-2/agent.log",
"/var/log/scalyr-agent-2/kubernetes_monitor.log",
]
def _get_mapped_logfile_prefix(self):
return "/docker"
def _get_extra_query_attributes(self, stream_name, process_name):
return {"$stream": stream_name}
def _verify_queried_attributes(self, att, stream_name, process_name):
"""
Here's example JSON response for k8s
"matches": [
{
"severity": 3,
"session": "log_session_5645060384390470634",
"attributes": {
"pod_namespace": "default",
"scalyr-category": "log",
"stream": "stderr",
"pod_uid": "f2d1d738-9a0c-11e9-9b04-080027029126",
"pod-template-hash": "76bcb9cf9",
"run": "ci-agent-k8s-7777-uploader",
"monitor": "agentKubernetes",
"k8s_node": "minikube",
"serverHost": "scalyr-agent-2-z5c8l",
"container_id": "6eb4215ac1589de13089419e90cdfe08c01262e6cfb821f18061a63ab4188a87",
"raw_timestamp": "2019-06-29T03:16:28.058676421Z",
"pod_name": "ci-agent-k8s-7777-uploader-76bcb9cf9-cb96t",
"container_name": "ci-agent-k8s-7777-uploader",
},
"thread": "default",
"message": "count=1000,line_stream=<stderr>,verifier_type=Kubernetes\n",
"timestamp": "1561778193736899060"
}
],
"""
if not all(
[
att.get("stream") in stream_name,
att.get("monitor") == "agentKubernetes",
process_name in att.get("pod_name"),
"ci-agent-k8s" in att.get("container_name"),
"uploader" in att.get("container_name"),
]
):
return False
return True
def _get_uploader_override_logfilename_regex(self, process_name):
"""For k8s, return a logfile regex because it too difficult to construct an exact logfile filter.
The regex clause becomes: $logfile+matches+"/docker/k8s_ci-agent-k8s-7777-uploader.*"
"""
return "{}/k8s_{}*".format(self._get_mapped_logfile_prefix(), process_name)
def verify_logs_uploaded(self):
"""
        For the k8s agent, confirmation requires verification that all uploaders were able to upload.
There are 2 separate types of containers.
1. uploader: uploads data to Scalyr (can easily support multiple but for now, just 1)
2. verifier: verifies data was uploaded by uploader
"""
def _query_scalyr_for_upload_activity(contname_suffix, stream_name):
def _func():
process_name = self._get_process_name_for_suffix(contname_suffix)
resp = requests.get(
self._make_query_url(
self._get_extra_query_attributes(stream_name, process_name),
override_serverHost=self._agent_hostname,
override_log="{}/{}.log".format(
self._get_mapped_logfile_prefix(), process_name
),
override_log_regex=self._get_uploader_override_logfilename_regex(
process_name
),
message=self._serialize_row(
{
"verifier_type": self.VERIFIER_TYPE, # pylint: disable=no-member
"count": self._lines_to_upload,
"line_stream": stream_name,
}
),
)
)
if resp.ok:
data = json.loads(resp.content)
if "matches" not in data:
print('API response doesn\'t contain "matches" attribute')
print("API response: %s" % (str(data)))
return False
matches = data["matches"]
if len(matches) == 0:
print("Found 0 matches")
return False
print("")
print("Sample response for matches[0]")
pprint(matches[0])
print("")
att = matches[0]["attributes"]
return self._verify_queried_attributes(
att, stream_name, process_name
)
print("Received non-OK (200) response")
print("Response status code: %s" % (resp.status_code))
print("Response text: %s" % (resp.text))
return False # Non-ok response
return _func
def _query_scalyr_for_metrics(metrics):
def _func():
resp = requests.get(
self._make_query_url(
{},
override_serverHost=self._agent_hostname,
override_log="/var/log/scalyr-agent-2/kubernetes_monitor.log",
override_max_count=100,
)
)
if resp.ok:
data = json.loads(resp.content)
if "matches" not in data:
print('API response doesn\'t contain "matches" attribute')
print("API response: %s" % (str(data)))
return False
matches = data["matches"]
if len(matches) == 0:
print("Found 0 matches")
return False
print("")
print("Sample response for matches[0]")
pprint(matches[0])
print("")
for expected_metric in metrics:
found_match = False
for match in matches:
if (
match.get("attributes", {}).get("metric", "")
== expected_metric
):
found_match = True
break
if not found_match:
print("Failed to find expected metric %s" % expected_metric)
return False
return True
print("Received non-OK (200) response")
print("Response status code: %s" % (resp.status_code))
print("Response text: %s" % (resp.text))
return False # Non-ok response
return _func
suffixes_to_check = [NAME_SUFFIX_UPLOADER]
for count, suffix in enumerate(suffixes_to_check):
for stream_name in self._get_uploader_stream_names():
self.poll_until_max_wait(
_query_scalyr_for_upload_activity(suffix, stream_name),
"Querying server to verify upload: container[stream]='{}[{}].".format(
self._get_process_name_for_suffix(suffix), stream_name
),
"Upload verified for {}[{}].".format(suffix, stream_name),
"Upload not verified for {}[{}].".format(suffix, stream_name),
exit_on_success=count == len(suffixes_to_check),
exit_on_fail=True,
)
metrics_to_check = (
self.EXPECTED_DOCKER_METRICS
) # TODO: if running in CRI use a different list
self.poll_until_max_wait(
_query_scalyr_for_metrics(metrics_to_check),
"Querying server to verify upload of metrics.",
"Upload verified for all metrics.",
"Upload not verified for all metrics.",
exit_on_success=True,
exit_on_fail=True,
)
class LogstashActor(DockerSmokeTestActor):
"""
Uploader writes to a common shared logfile that is bind-mounted in a shared volume (not local disk)
    Verifier reads from the common shared logfile
"""
VERIFIER_TYPE = "Logstash"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._monitored_logfile = kwargs.get("monitored_logfile")
def _get_uploader_output_streams(self):
"""Returns stream for Uploader to write log data into"""
return [open(self._monitored_logfile, "w+")]
def _get_uploader_stream_names(self):
"""Returns stream to read log data from"""
return [self._monitored_logfile]
def _get_stream_name_from_stream(self, stream):
return stream.name
def _get_expected_agent_logfiles(self):
return ["scalyr_logstash.log"]
def _get_mapped_logfile_prefix(self):
return "/logstash"
def _get_extra_query_attributes(self, stream_name, process_name):
# {'$stream': stream.name}
# no server-side parser has been defined so cannot filter on $stream
return {}
def _verify_queried_attributes(self, att, stream_name, process_name):
if not all(
[
# att.get('stream') in stream.name, # we haven't setup server-side parser so $stream is not available
# Since the input streams are locally mounted, the event origins are all the same as the agent hostname
att.get("serverHost") == self._agent_hostname,
# the following fields are added on in the logstash pipeline config
# and should appear in every event
att.get("output_attribute1") == "output_value1",
att.get("output_attribute2") == "output_value2",
att.get("output_attribute3") == "output_value3",
                # TODO: adjust if these are eventually split into "booleans"
att.get("tags") == "[tag_t1, tag_t2]",
]
):
return False
return True
def _get_uploader_override_logfilename_regex(self, process_name):
"""For logstash setup, the input is a local file mounted to the logstash container, hence the fields are
host=container_id, path=/tmp/ci-plugin-logstash-7778-uploader.log
host/path are mapped to origin/logfile
"""
return self._monitored_logfile
# Select verifier class based on containers name (prefix)
CONTAINER_PREFIX_2_VERIFIER_CLASS = {
"ci-agent-standalone": StandaloneSmokeTestActor,
"ci-agent-docker-json": DockerJsonActor,
"ci-agent-docker-api": DockerAPIActor,
"ci-agent-docker-syslog": DockerSyslogActor,
"ci-agent-k8s": K8sActor,
"ci-plugin-logstash": LogstashActor,
}
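# Example (illustrative): a process_name of "ci-agent-k8s-7777-uploader" starts with the
# "ci-agent-k8s" prefix, so the K8sActor class would be selected below.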
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"process_name",
type=str,
help="name of process running this instance of test code. Prefix should be a key in "
"CONTAINER_PREFIX_2_VERIFIER_CLASS so that the correct verifier can be chosen.",
)
parser.add_argument(
"max_wait", type=int, help="max seconds this test will run (will force-quit)"
)
# Generic param that can be used by any test as needed
parser.add_argument("--mode", type=str, help="mode switch", choices=NAME_SUFFIXES)
    # For connecting to Scalyr. Note that we need not supply SCALYR_API_KEY as the Agent gets it from its own config
# or the environment.
parser.add_argument(
"--scalyr_server",
type=str,
help="Scalyr backend server (required by Agent or Verifier containers)",
)
parser.add_argument(
"--read_api_key",
type=str,
help="read api key (required all Verifier containers)",
)
# For Standalone testing
parser.add_argument(
"--monitored_logfile",
type=str,
help="absolute path of data file to write to (must match Agent config). "
"Logstash producers also write to this, which are then picked up by the Logstash agent.",
)
parser.add_argument(
"--python_version",
type=str,
help="python version agent is running on (will be added into generated test data)",
)
# For Docker testing
parser.add_argument(
"--agent_hostname",
type=str,
help="hostname of Agent container (required by Docker/k8s Verifier containers",
)
parser.add_argument(
"--uploader_hostname",
type=str,
help="hostname of Uploader container (required by Docker/k8s Verifier containers",
)
parser.add_argument("--debug", type=str, help="turn on debugging")
args = parser.parse_args()
klass = None
for key, val in CONTAINER_PREFIX_2_VERIFIER_CLASS.items():
if args.process_name.startswith(key):
klass = CONTAINER_PREFIX_2_VERIFIER_CLASS.get(key)
break
# Display args to stdout, redacting sensitive keys
_pretty_print("Launching actor", message="Class={}".format(klass))
if not klass:
_exit(
1,
message="Bad test config: process_name must start with one of {}".format(
list(CONTAINER_PREFIX_2_VERIFIER_CLASS.keys())
),
)
args_copy = deepcopy(vars(args))
if "read_api_key" in args_copy:
args_copy["read_api_key"] = args_copy["read_api_key"][:4] + "xxxxxxxxx"
_pretty_print("smoketest.py command line args", str(args_copy))
actor = klass(**vars(args)) # type: ignore
# Optionally start upload in a separate thread. Verifiers should not upload.
uploader_thread = None
if actor.is_uploader():
_pretty_print("START UPLOAD", actor._process_name)
uploader_thread = threading.Thread(target=actor.trigger_log_upload, args=())
uploader_thread.start()
if actor.is_verifier():
_pretty_print("START VERIFIER", actor._process_name)
actor.verify_or_die()
# If verify_or_die hasn't force-killed the program, wait for uploader to finish
if uploader_thread:
uploader_thread.join()
|
|
# -*- coding: utf-8 -*-
# based on original @nephila plugin https://github.com/nephila/djangocms-blog
from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.utils import timezone
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.conf import settings
from django.contrib.auth import get_user_model
from cms.models import CMSPlugin, PlaceholderField
from djangocms_text_ckeditor.fields import HTMLField
from filer.fields.image import FilerImageField
from taggit_autosuggest.managers import TaggableManager
from taggit.models import TaggedItem
from oscar.core.utils import slugify
from oscar.core.compat import AUTH_USER_MODEL
from oscar.models.fields import AutoSlugField
from oscar.apps.catalogue.abstract_models import AbstractCategory
# TODO: move defaults to the app's settings module
ENABLE_COMMENTS = getattr(settings, 'ENABLE_COMMENTS', False)
AUTHOR_DEFAULT = getattr(settings, 'AUTHOR_DEFAULT', True)
LATEST_NEWS = getattr(settings, 'LATEST_NEWS', 3)
# This is not the best place for this method, but I have not yet found a better way to show
# annotated tags for each instance and keep it DRY :)
def get_tag_cloud(model, queryset, tags_filter=None):
if tags_filter is None:
tags_filter = set()
for item in queryset.all():
tags_filter.update(item.tags.all())
tags_filter = set([tag.id for tag in tags_filter])
tags = set(TaggedItem.objects.filter(
content_type__model=model.__name__.lower()
).values_list('tag_id', flat=True))
if tags_filter is not None:
tags = tags.intersection(tags_filter)
tag_ids = list(tags)
kwargs = TaggedItem.bulk_lookup_kwargs(queryset)
kwargs['tag_id__in'] = tag_ids
counted_tags = dict(TaggedItem.objects
.filter(**kwargs)
.values('tag')
.annotate(count=models.Count('tag'))
.values_list('tag', 'count'))
tags = TaggedItem.tag_model().objects.filter(pk__in=counted_tags.keys())
for tag in tags:
tag.count = counted_tags[tag.pk]
return sorted(tags, key=lambda x: -x.count)
class NewsManager(models.Manager):
def __init__(self, *args, **kwargs):
self.published_query = Q(publish=True) & Q(date_published__lte=timezone.now()) & \
(Q(date_published_end__isnull=True) | Q(date_published_end__gte=timezone.now()))
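        # i.e. an entry counts as published when publish=True, date_published is in the past,
        # and date_published_end is either unset or still in the future.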
super(NewsManager, self).__init__(*args, **kwargs)
def tag_cloud(self):
queryset = self.get_queryset()
return get_tag_cloud(self.model, queryset)
class PublishedNewsManager(NewsManager):
"""
    Filters out all unpublished items and items with a publication
date in the future
"""
def get_queryset(self):
return super(PublishedNewsManager, self).get_queryset().filter(self.published_query)
class News(models.Model):
"""
News
"""
author = models.ForeignKey(AUTH_USER_MODEL,
verbose_name=_('author'), null=True, blank=True,
related_name='news_author')
category = models.ForeignKey("NewsCategory",
verbose_name=_('category'),
related_name="news_category",
null=True, blank=True, default=None)
meta_description = models.TextField(verbose_name=_('news meta description'),
blank=True, default='')
meta_keywords = models.TextField(verbose_name=_('news meta keywords'),
blank=True, default='')
meta_title = models.CharField(verbose_name=_('news meta title'),
help_text=_('used in title tag and social sharing'),
max_length=255,
blank=True, default='')
title = models.CharField(_('Title'), max_length=255)
slug = AutoSlugField(_('Slug'), max_length=128, unique=True, editable=True,
populate_from='title',
help_text=_('A slug is a short name which uniquely'
' identifies the news item'))
description = HTMLField(_('Description'), blank=True, configuration='CKEDITOR_SETTINGS_NEWS')
content = PlaceholderField('news_content', related_name='news_content')
publish = models.BooleanField(_('Published'), default=False)
date_created = models.DateTimeField(_('created'), auto_now_add=True)
date_modified = models.DateTimeField(_('last modified'), auto_now=True)
date_published = models.DateTimeField(_('published since'), default=timezone.now)
date_published_end = models.DateTimeField(_('published until'), null=True, blank=True)
enable_comments = models.BooleanField(verbose_name=_('enable comments on post'),
default=ENABLE_COMMENTS)
images = models.ManyToManyField('filer.Image', through='NewsImages',
verbose_name=_("News images"),)
# Oscar links
linked_products = models.ManyToManyField(
'catalogue.Product', blank=True,
verbose_name=_("Linked products"),
help_text=_("These are products that can be shown with news post "
"or news post can be shown on the specific product's page."))
linked_categories = models.ManyToManyField(
'catalogue.Category', blank=True,
verbose_name=_("Linked product's categories"),
help_text=_("Show news for that categories "
"or display news on the category page"))
linked_classes = models.ManyToManyField(
'catalogue.ProductClass', blank=True,
verbose_name=_("Linked product's classes"),
help_text=_("Show news for that classes "
"or display news on the specific class product's pages"))
sites = models.ManyToManyField('sites.Site', verbose_name=_('Site(s)'), blank=True,
help_text=_('Select sites in which to show the post. '
'If none is set it will be '
'visible in all the configured sites.'))
tags = TaggableManager(blank=True, related_name='news_tags')
objects = NewsManager()
published = PublishedNewsManager()
class Meta:
app_label = 'oscar_news'
verbose_name = _('News')
verbose_name_plural = _('News')
ordering = ('-date_published', )
def __unicode__(self):
return self.title
@property
def is_published(self):
"""
Checks whether the news entry is *really* published by checking publishing dates too
"""
return (self.publish and
(self.date_published and self.date_published <= timezone.now()) and
(self.date_published_end is None or self.date_published_end > timezone.now())
)
def _set_default_author(self, current_user):
if not self.author_id:
if AUTHOR_DEFAULT is True:
user = current_user
else:
user = get_user_model().objects.get(username=AUTHOR_DEFAULT)
self.author = user
def get_absolute_url(self):
"""
        Inherited method, slightly customized.
"""
cache_key = 'NEWS_ENTRY_URL_%s' % self.pk
url = cache.get(cache_key)
if not url:
# temporarily use link to news detail
url = reverse(
'oscar_news:entry-detail',
kwargs={'slug': self.slug})
cache.set(cache_key, url)
return url
def get_tags(self, queryset=None):
"""
:return: the list of object's tags annotated with counters.
Tags are limited by published news.
"""
queryset = queryset or News.published.get_queryset()
return get_tag_cloud(self.__class__, queryset, set(self.tags.all()))
def get_all_tags(self):
"""
:return: List of all object's tags including unpublished
"""
return self.get_tags(News.objects.all())
def _get_next_or_previous_published(self, is_next):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = 'gt' if is_next else 'lt'
order = '' if is_next else '-'
field = 'date_published'
param = force_text(getattr(self, field))
q = Q(**{'%s__%s' % (field, op): param})
q = q | Q(**{field: param, 'pk__%s' % op: self.pk})
qs = self.__class__.published.using(self._state.db).filter(q).order_by(
'%s%s' % (order, field), '%spk' % order
)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
def get_next_published(self):
return self._get_next_or_previous_published(is_next=True)
def get_previous_published(self):
return self._get_next_or_previous_published(is_next=False)
class NewsCategory(AbstractCategory):
def get_absolute_url(self):
"""
        Inherited method, slightly customized.
"""
cache_key = 'NEWS_CATEGORY_URL_%s' % self.pk
url = cache.get(cache_key)
if not url:
# temporarily use link to category list
url = reverse(
'oscar_news:category-list',
kwargs={'category': self.slug})
cache.set(cache_key, url)
return url
    # This is the hack from http://stackoverflow.com/a/6379556;
    # if a future Django version provides field inheritance in a proper way,
    # this constructor can be removed.
def __init__(self, *args, **kwargs):
super(NewsCategory, self).__init__(*args, **kwargs)
self._meta.get_field('image').upload_to = 'news/categories'
class Meta:
app_label = 'oscar_news'
ordering = ['path']
verbose_name = _('News Category')
verbose_name_plural = _('News Categories')
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
super(NewsCategory, self).save(*args, **kwargs)
def __unicode__(self):
return self.full_name
class NewsImages(models.Model):
news = models.ForeignKey('News', verbose_name=_("News"))
image = FilerImageField(null=True, blank=True,
related_name="news_images")
class BaseNewsPlugin(CMSPlugin):
app_name = 'oscar_news'
class Meta:
abstract = True
def post_queryset(self, request=None):
entries = News._default_manager
if not request or not getattr(request, 'toolbar', False) or not request.toolbar.edit_mode:
entries = News.published
return entries.all()
@python_2_unicode_compatible
class LatestNewsPlugin(BaseNewsPlugin):
latest_posts = models.IntegerField(_('articles'), default=LATEST_NEWS,
                                       help_text=_(u'The number of latest '
u'articles to be displayed.'))
tags = TaggableManager(_('filter by tag'), blank=True,
help_text=_('Show only the blog articles tagged with chosen tags.'),
related_name='oscar_news_latest_entry')
category = models.ForeignKey("oscar_news.NewsCategory",
verbose_name=_('filter by category'),
help_text=_('Show only the blog articles tagged '
u'with chosen categories.'),
null=True, blank=True, default=None)
def __str__(self):
return force_text(_('%s latest articles by tag') % self.latest_posts)
def copy_relations(self, oldinstance):
for tag in oldinstance.tags.all():
self.tags.add(tag)
self.category = oldinstance.category
def get_posts(self, request):
posts = self.post_queryset(request)
if self.tags.exists():
posts = posts.filter(tags__in=list(self.tags.all()))
if self.category is not None:
posts = posts.filter(category=self.category)
return posts.distinct()[:self.latest_posts]
@python_2_unicode_compatible
class GenericNewsPlugin(BaseNewsPlugin):
class Meta:
abstract = False
def __str__(self):
return force_text(_('generic news plugin'))
|
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Installs an Android application, possibly in an incremental way."""
import collections
import hashlib
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
from third_party.py import gflags
from third_party.py.concurrent import futures
gflags.DEFINE_string("split_main_apk", None, "The main APK for split install")
gflags.DEFINE_multistring("split_apk", [], "Split APKs to install")
gflags.DEFINE_string("dexmanifest", None, "The .dex manifest")
gflags.DEFINE_multistring("native_lib", None, "Native libraries to install")
gflags.DEFINE_string("resource_apk", None, "The resource .apk")
gflags.DEFINE_string("apk", None, "The app .apk. If not specified, "
"do incremental deployment")
gflags.DEFINE_string("adb", None, "ADB to use")
gflags.DEFINE_string("stub_datafile", None, "The stub data file")
gflags.DEFINE_string("output_marker", None, "The output marker file")
gflags.DEFINE_multistring("extra_adb_arg", [], "Extra arguments to adb")
gflags.DEFINE_string("execroot", ".", "The exec root")
gflags.DEFINE_integer("adb_jobs", 2,
"The number of instances of adb to use in parallel to "
"update files on the device",
lower_bound=1)
gflags.DEFINE_enum("start", "no", ["no", "cold", "warm"], "Whether/how to "
"start the app after installing it. 'cold' and 'warm' will "
"both cause the app to be started, 'warm' will start it "
"with previously saved application state.")
gflags.DEFINE_boolean("start_app", False, "Deprecated, use 'start'.")
gflags.DEFINE_string("user_home_dir", None, "Path to the user's home directory")
gflags.DEFINE_string("flagfile", None,
"Path to a file to read additional flags from")
gflags.DEFINE_string("verbosity", None, "Logging verbosity")
FLAGS = gflags.FLAGS
DEVICE_DIRECTORY = "/data/local/tmp/incrementaldeployment"
class AdbError(Exception):
"""An exception class signaling an error in an adb invocation."""
def __init__(self, args, returncode, stdout, stderr):
self.args = args
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
details = "\n".join([
"adb command: %s" % args,
"return code: %s" % returncode,
"stdout: %s" % stdout,
"stderr: %s" % stderr,
])
super(AdbError, self).__init__(details)
class DeviceNotFoundError(Exception):
"""Raised when the device could not be found."""
class MultipleDevicesError(Exception):
"""Raised when > 1 device is attached and no device serial was given."""
@staticmethod
def CheckError(s):
return re.search("more than one (device and emulator|device|emulator)", s)
class DeviceUnauthorizedError(Exception):
"""Raised when the local machine is not authorized to the device."""
class TimestampException(Exception):
"""Raised when there is a problem with timestamp reading/writing."""
class Adb(object):
"""A class to handle interaction with adb."""
def __init__(self, adb_path, temp_dir, adb_jobs, user_home_dir):
self._adb_path = adb_path
self._temp_dir = temp_dir
self._user_home_dir = user_home_dir
self._file_counter = 1
self._executor = futures.ThreadPoolExecutor(max_workers=adb_jobs)
def _Exec(self, adb_args):
"""Executes the given adb command + args."""
args = [self._adb_path] + FLAGS.extra_adb_arg + adb_args
# TODO(ahumesky): Because multiple instances of adb are executed in
# parallel, these debug logging lines will get interleaved.
logging.debug("Executing: %s", " ".join(args))
# adb sometimes requires the user's home directory to access things in
# $HOME/.android (e.g. keys to authorize with the device). To avoid any
# potential problems with python picking up things in the user's home
# directory, HOME is not set in the environment around python and is instead
# passed explicitly as a flag.
env = {}
if self._user_home_dir:
env["HOME"] = self._user_home_dir
adb = subprocess.Popen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
stdout, stderr = adb.communicate()
stdout = stdout.strip()
stderr = stderr.strip()
logging.debug("adb ret: %s", adb.returncode)
logging.debug("adb out: %s", stdout)
logging.debug("adb err: %s", stderr)
# Check these first so that the more specific error gets raised instead of
# the more generic AdbError.
if "device not found" in stderr:
raise DeviceNotFoundError()
elif "device unauthorized" in stderr:
raise DeviceUnauthorizedError()
elif MultipleDevicesError.CheckError(stderr):
# The error messages are from adb's transport.c, but something adds
# "error: " to the beginning, so take it off so that we don't end up
# printing "Error: error: ..."
raise MultipleDevicesError(re.sub("^error: ", "", stderr))
if adb.returncode != 0:
raise AdbError(args, adb.returncode, stdout, stderr)
return adb.returncode, stdout, stderr, args
def _ExecParallel(self, adb_args):
return self._executor.submit(self._Exec, adb_args)
def _CreateLocalFile(self):
"""Returns a path to a temporary local file in the temp directory."""
local = os.path.join(self._temp_dir, "adbfile_%d" % self._file_counter)
self._file_counter += 1
return local
def GetInstallTime(self, package):
"""Get the installation time of a package."""
_, stdout, _, _ = self._Shell("dumpsys package %s" % package)
match = re.search("lastUpdateTime=(.*)$", stdout, re.MULTILINE)
if match:
return match.group(1)
else:
raise TimestampException(
"Package '%s' is not installed on the device. At least one "
"non-incremental 'mobile-install' must precede incremental "
"installs." % package)
def GetAbi(self):
"""Returns the ABI the device supports."""
_, stdout, _, _ = self._Shell("getprop ro.product.cpu.abi")
return stdout
def Push(self, local, remote):
"""Invoke 'adb push' in parallel."""
return self._ExecParallel(["push", local, remote])
def PushString(self, contents, remote):
"""Push a given string to a given path on the device in parallel."""
local = self._CreateLocalFile()
with file(local, "w") as f:
f.write(contents)
return self.Push(local, remote)
def Pull(self, remote):
"""Invoke 'adb pull'.
Args:
remote: The path to the remote file to pull.
Returns:
The contents of a file or None if the file didn't exist.
"""
local = self._CreateLocalFile()
try:
self._Exec(["pull", remote, local])
with file(local) as f:
return f.read()
except (AdbError, IOError):
return None
def InstallMultiple(self, apk, pkg=None):
"""Invoke 'adb install-multiple'."""
pkg_args = ["-p", pkg] if pkg else []
ret, stdout, stderr, args = self._Exec(
["install-multiple", "-r"] + pkg_args + [apk])
if "Success" not in stderr and "Success" not in stdout:
raise AdbError(args, ret, stdout, stderr)
def Install(self, apk):
"""Invoke 'adb install'."""
ret, stdout, stderr, args = self._Exec(["install", "-r", apk])
# adb install could fail with a message on stdout like this:
#
# pkg: /data/local/tmp/Gmail_dev_sharded_incremental.apk
# Failure [INSTALL_PARSE_FAILED_INCONSISTENT_CERTIFICATES]
#
# and yet it will still have a return code of 0. At least for the install
# command, it will print "Success" if it succeeded, so check for that in
# standard out instead of relying on the return code.
if "Success" not in stderr and "Success" not in stdout:
raise AdbError(args, ret, stdout, stderr)
def Delete(self, remote):
"""Delete the given file (or directory) on the device."""
self.DeleteMultiple([remote])
def DeleteMultiple(self, remote_files):
"""Delete the given files (or directories) on the device."""
files_str = " ".join(remote_files)
if files_str:
self._Shell("rm -fr %s" % files_str)
def Mkdir(self, d):
"""Invokes mkdir with the specified directory on the device."""
self._Shell("mkdir -p %s" % d)
def StopApp(self, package):
"""Force stops the app with the given package."""
self._Shell("am force-stop %s" % package)
def StopAppAndSaveState(self, package):
"""Stops the app with the given package, saving state for the next run."""
# 'am kill' will only kill processes in the background, so we must make sure
# our process is in the background first. We accomplish this by bringing up
# the app switcher.
self._Shell("input keyevent KEYCODE_APP_SWITCH")
self._Shell("am kill %s" % package)
def StartApp(self, package):
"""Starts the app with the given package."""
self._Shell("monkey -p %s -c android.intent.category.LAUNCHER 1" % package)
def _Shell(self, cmd):
"""Invoke 'adb shell'."""
return self._Exec(["shell", cmd])
ManifestEntry = collections.namedtuple(
"ManifestEntry", ["input_file", "zippath", "installpath", "sha256"])
def ParseManifest(contents):
"""Parses a dexmanifest file.
Args:
contents: the contents of the manifest file to be parsed.
Returns:
A dict of install path -> ManifestEntry.
"""
result = {}
for l in contents.split("\n"):
entry = ManifestEntry(*(l.strip().split(" ")))
result[entry.installpath] = entry
return result
def GetAppPackage(stub_datafile):
"""Returns the app package specified in a stub data file."""
with file(stub_datafile) as f:
return f.readlines()[1].strip()
def UploadDexes(adb, execroot, app_dir, temp_dir, dexmanifest, full_install):
"""Uploads dexes to the device so that the state.
Does the minimum amount of work necessary to make the state of the device
consistent with what was built.
Args:
adb: the Adb instance representing the device to install to
execroot: the execroot
app_dir: the directory things should be installed under on the device
temp_dir: a local temporary directory
dexmanifest: contents of the dex manifest
full_install: whether to do a full install
Returns:
None.
"""
# Fetch the manifest on the device
dex_dir = os.path.join(app_dir, "dex")
adb.Mkdir(dex_dir)
old_manifest = None
if not full_install:
logging.info("Fetching dex manifest from device...")
old_manifest_contents = adb.Pull("%s/manifest" % dex_dir)
if old_manifest_contents:
old_manifest = ParseManifest(old_manifest_contents)
else:
logging.info("Dex manifest not found on device")
if old_manifest is None:
# If the manifest is not found, maybe a previous installation attempt
# was interrupted. Wipe the slate clean. Do this also in case we do a full
# installation.
old_manifest = {}
adb.Delete("%s/*" % dex_dir)
new_manifest = ParseManifest(dexmanifest)
dexes_to_delete = set(old_manifest) - set(new_manifest)
# Figure out which dexes to upload: those that are present in the new manifest
# but not in the old one and those whose checksum was changed
common_dexes = set(new_manifest).intersection(old_manifest)
dexes_to_upload = set(d for d in common_dexes
if new_manifest[d].sha256 != old_manifest[d].sha256)
dexes_to_upload.update(set(new_manifest) - set(old_manifest))
if not dexes_to_delete and not dexes_to_upload:
# If we have nothing to do, don't bother removing and rewriting the manifest
logging.info("Application dexes up-to-date")
return
# Delete the manifest so that we know how to get back to a consistent state
# if we are interrupted.
adb.Delete("%s/manifest" % dex_dir)
# Tuple of (local, remote) files to push to the device.
files_to_push = []
  # Sort the dexes to be uploaded by the zip file they are in so that we only need
  # to open each zip once.
dexzips_in_upload = set(new_manifest[d].input_file for d in dexes_to_upload
if new_manifest[d].zippath != "-")
for i, dexzip_name in enumerate(dexzips_in_upload):
zip_dexes = [
d for d in dexes_to_upload if new_manifest[d].input_file == dexzip_name]
dexzip_tempdir = os.path.join(temp_dir, "dex", str(i))
with zipfile.ZipFile(os.path.join(execroot, dexzip_name)) as dexzip:
for dex in zip_dexes:
zippath = new_manifest[dex].zippath
dexzip.extract(zippath, dexzip_tempdir)
files_to_push.append(
(os.path.join(dexzip_tempdir, zippath), "%s/%s" % (dex_dir, dex)))
# Now gather all the dexes that are not within a .zip file.
dexes_to_upload = set(
d for d in dexes_to_upload if new_manifest[d].zippath == "-")
for dex in dexes_to_upload:
files_to_push.append(
(new_manifest[dex].input_file, "%s/%s" % (dex_dir, dex)))
num_files = len(dexes_to_delete) + len(files_to_push)
logging.info("Updating %d dex%s...", num_files, "es" if num_files > 1 else "")
# Delete the dexes that are not in the new manifest
adb.DeleteMultiple(os.path.join(dex_dir, dex) for dex in dexes_to_delete)
# Upload all the files.
upload_walltime_start = time.time()
fs = [adb.Push(local, remote) for local, remote in files_to_push]
done, not_done = futures.wait(fs, return_when=futures.FIRST_EXCEPTION)
upload_walltime = time.time() - upload_walltime_start
logging.debug("Dex upload walltime: %s seconds", upload_walltime)
# If there is anything in not_done, then some adb call failed and we
# can cancel the rest.
if not_done:
for f in not_done:
f.cancel()
# If any adb call resulted in an exception, re-raise it.
for f in done:
f.result()
# If no dex upload failed, upload the manifest. If any upload failed, the
# exception should have been re-raised above.
# Call result() to raise the exception if there was one.
adb.PushString(dexmanifest, "%s/manifest" % dex_dir).result()
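# Hedged illustration (not part of the original tool): the manifest diff in
# UploadDexes() boils down to three set operations over {install path: sha256}
# maps. The never-called helper below restates that logic on plain dicts; the
# paths and hash values in the example are hypothetical.
def _SketchManifestDiff(old_checksums, new_checksums):
  """Return (to_delete, to_upload) sets for two {path: sha256} dicts."""
  to_delete = set(old_checksums) - set(new_checksums)
  common = set(new_checksums).intersection(old_checksums)
  to_upload = set(p for p in common if new_checksums[p] != old_checksums[p])
  to_upload.update(set(new_checksums) - set(old_checksums))
  return to_delete, to_upload
# For example (hypothetical paths and hashes):
#   _SketchManifestDiff({"a.dex": "1", "b.dex": "2"},
#                       {"b.dex": "3", "c.dex": "4"})
#   -> (set(["a.dex"]), set(["b.dex", "c.dex"]))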
def Checksum(filename):
"""Compute the SHA-256 checksum of a file."""
h = hashlib.sha256()
with file(filename, "r") as f:
while True:
data = f.read(65536)
if not data:
break
h.update(data)
return h.hexdigest()
def UploadResources(adb, resource_apk, app_dir):
"""Uploads resources to the device.
Args:
adb: The Adb instance representing the device to install to.
resource_apk: Path to the resource apk.
app_dir: The directory things should be installed under on the device.
Returns:
None.
"""
# Compute the checksum of the new resources file
new_checksum = Checksum(resource_apk)
# Fetch the checksum of the resources file on the device, if it exists
device_checksum_file = "%s/%s" % (app_dir, "resources_checksum")
old_checksum = adb.Pull(device_checksum_file)
if old_checksum == new_checksum:
logging.info("Application resources up-to-date")
return
logging.info("Updating application resources...")
# Remove the checksum file on the device so that if the transfer is
# interrupted, we know how to get the device back to a consistent state.
adb.Delete(device_checksum_file)
adb.Push(resource_apk, "%s/%s" % (app_dir, "resources.ap_")).result()
# Write the new checksum to the device.
adb.PushString(new_checksum, device_checksum_file).result()
def ConvertNativeLibs(args):
"""Converts the --native_libs command line argument to an arch -> libs map."""
native_libs = {}
if args is not None:
for native_lib in args:
abi, path = native_lib.split(":")
if abi not in native_libs:
native_libs[abi] = set()
native_libs[abi].add(path)
return native_libs
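# Hedged illustration (not part of the original tool): ConvertNativeLibs maps
# repeated "--native_lib abi:path" values to an {abi: set_of_paths} dict. The
# self-check below uses hypothetical ABI names and library paths and is never
# called by the installer itself.
def _SketchConvertNativeLibsExample():
  converted = ConvertNativeLibs(
      ["armeabi-v7a:libs/a.so", "armeabi-v7a:libs/b.so", "x86:libs/a.so"])
  assert converted == {
      "armeabi-v7a": set(["libs/a.so", "libs/b.so"]),
      "x86": set(["libs/a.so"]),
  }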
def UploadNativeLibs(adb, native_lib_args, app_dir, full_install):
"""Uploads native libraries to the device."""
native_libs = ConvertNativeLibs(native_lib_args)
libs = set()
if native_libs:
abi = adb.GetAbi()
if abi not in native_libs:
logging.warn("No native libs for device ABI '%s'. Available ABIs: %s",
abi, ", ".join(native_libs))
else:
libs = native_libs[abi]
basename_to_path = {}
install_checksums = {}
for lib in sorted(libs):
install_checksums[os.path.basename(lib)] = Checksum(lib)
basename_to_path[os.path.basename(lib)] = lib
device_manifest = None
if not full_install:
device_manifest = adb.Pull("%s/native/native_manifest" % app_dir)
device_checksums = {}
if device_manifest is None:
# If we couldn't fetch the device manifest or if this is a non-incremental
# install, wipe the slate clean
adb.Delete("%s/native" % app_dir)
else:
# Otherwise, parse the manifest. Note that this branch is also taken if the
# manifest is empty.
for name, checksum in [
l.split(" ") for l in device_manifest.split("\n") if l]:
device_checksums[name] = checksum
libs_to_delete = set(device_checksums) - set(install_checksums)
libs_to_upload = set(install_checksums) - set(device_checksums)
common_libs = set(install_checksums).intersection(set(device_checksums))
libs_to_upload.update([l for l in common_libs
if install_checksums[l] != device_checksums[l]])
libs_to_push = [(basename_to_path[lib], "%s/native/%s" % (app_dir, lib))
for lib in libs_to_upload]
if not libs_to_delete and not libs_to_push and device_manifest is not None:
logging.info("Native libs up-to-date")
return
num_files = len(libs_to_delete) + len(libs_to_push)
logging.info("Updating %d native lib%s...",
num_files, "s" if num_files != 1 else "")
adb.Delete("%s/native/native_manifest" % app_dir)
if libs_to_delete:
adb.DeleteMultiple([
"%s/native/%s" % (app_dir, lib) for lib in libs_to_delete])
upload_walltime_start = time.time()
fs = [adb.Push(local, remote) for local, remote in libs_to_push]
done, not_done = futures.wait(fs, return_when=futures.FIRST_EXCEPTION)
upload_walltime = time.time() - upload_walltime_start
logging.debug("Native library upload walltime: %s seconds", upload_walltime)
# If there is anything in not_done, then some adb call failed and we
# can cancel the rest.
if not_done:
for f in not_done:
f.cancel()
# If any adb call resulted in an exception, re-raise it.
for f in done:
f.result()
install_manifest = [
name + " " + checksum for name, checksum in install_checksums.iteritems()]
adb.PushString("\n".join(install_manifest),
"%s/native/native_manifest" % app_dir).result()
def VerifyInstallTimestamp(adb, app_package):
"""Verifies that the app is unchanged since the last mobile-install."""
expected_timestamp = adb.Pull("%s/%s/install_timestamp" % (
DEVICE_DIRECTORY, app_package))
if not expected_timestamp:
raise TimestampException(
"Cannot verify last mobile install. At least one non-incremental "
"'mobile-install' must precede incremental installs")
actual_timestamp = adb.GetInstallTime(app_package)
if actual_timestamp != expected_timestamp:
raise TimestampException("Installed app '%s' has an unexpected timestamp. "
"Did you last install the app in a way other than "
"'mobile-install'?" % app_package)
def IncrementalInstall(adb_path, execroot, stub_datafile, output_marker,
adb_jobs, start_type, dexmanifest=None, apk=None,
native_libs=None, resource_apk=None,
split_main_apk=None, split_apks=None,
user_home_dir=None):
"""Performs an incremental install.
Args:
adb_path: Path to the adb executable.
execroot: Exec root.
stub_datafile: The stub datafile containing the app's package name.
output_marker: Path to the output marker file.
adb_jobs: The number of instances of adb to use in parallel.
start_type: A string describing whether/how to start the app after
installing it. Can be 'no', 'cold', or 'warm'.
dexmanifest: Path to the .dex manifest file.
apk: Path to the .apk file. May be None to perform an incremental install.
native_libs: Native libraries to install.
resource_apk: Path to the apk containing the app's resources.
split_main_apk: the split main .apk if split installation is desired.
split_apks: the list of split .apks to be installed.
user_home_dir: Path to the user's home directory.
"""
temp_dir = tempfile.mkdtemp()
try:
adb = Adb(adb_path, temp_dir, adb_jobs, user_home_dir)
app_package = GetAppPackage(os.path.join(execroot, stub_datafile))
app_dir = os.path.join(DEVICE_DIRECTORY, app_package)
if split_main_apk:
adb.InstallMultiple(os.path.join(execroot, split_main_apk))
for split_apk in split_apks:
# TODO(build-team): This always reinstalls everything, which defeats the
# purpose of this whole system.
adb.InstallMultiple(os.path.join(execroot, split_apk), app_package)
else:
if not apk:
VerifyInstallTimestamp(adb, app_package)
with file(os.path.join(execroot, dexmanifest)) as f:
dexmanifest = f.read()
UploadDexes(adb, execroot, app_dir, temp_dir, dexmanifest, bool(apk))
# TODO(ahumesky): UploadDexes waits for all the dexes to be uploaded, and
# then UploadResources is called. We could instead enqueue everything
# onto the threadpool so that uploading resources happens sooner.
UploadResources(adb, os.path.join(execroot, resource_apk), app_dir)
UploadNativeLibs(adb, native_libs, app_dir, bool(apk))
if apk:
apk_path = os.path.join(execroot, apk)
adb.Install(apk_path)
future = adb.PushString(
adb.GetInstallTime(app_package),
"%s/%s/install_timestamp" % (DEVICE_DIRECTORY, app_package))
future.result()
else:
if start_type == "warm":
adb.StopAppAndSaveState(app_package)
else:
adb.StopApp(app_package)
if start_type in ["cold", "warm"]:
logging.info("Starting application %s", app_package)
adb.StartApp(app_package)
with file(output_marker, "w") as _:
pass
except DeviceNotFoundError:
sys.exit("Error: Device not found")
except DeviceUnauthorizedError:
sys.exit("Error: Device unauthorized. Please check the confirmation "
"dialog on your device.")
except MultipleDevicesError as e:
sys.exit(
"Error: " + e.message + "\nTry specifying a device serial with " +
"\"blaze mobile-install --adb_arg=-s --adb_arg=$ANDROID_SERIAL\"")
except TimestampException as e:
sys.exit("Error:\n%s" % e.message)
except AdbError as e:
sys.exit("Error:\n%s" % e.message)
finally:
shutil.rmtree(temp_dir, True)
def main():
if FLAGS.verbosity == "1":
level = logging.DEBUG
fmt = "%(levelname)-5s %(asctime)s %(module)s:%(lineno)3d] %(message)s"
else:
level = logging.INFO
fmt = "%(message)s"
logging.basicConfig(stream=sys.stdout, level=level, format=fmt)
start_type = FLAGS.start
if FLAGS.start_app and start_type == "no":
start_type = "cold"
IncrementalInstall(
adb_path=FLAGS.adb,
adb_jobs=FLAGS.adb_jobs,
execroot=FLAGS.execroot,
stub_datafile=FLAGS.stub_datafile,
output_marker=FLAGS.output_marker,
start_type=start_type,
native_libs=FLAGS.native_lib,
split_main_apk=FLAGS.split_main_apk,
split_apks=FLAGS.split_apk,
dexmanifest=FLAGS.dexmanifest,
apk=FLAGS.apk,
resource_apk=FLAGS.resource_apk,
user_home_dir=FLAGS.user_home_dir)
if __name__ == "__main__":
FLAGS(sys.argv)
# process any additional flags in --flagfile
if FLAGS.flagfile:
with open(FLAGS.flagfile) as flagsfile:
FLAGS.Reset()
FLAGS(sys.argv + [line.strip() for line in flagsfile.readlines()])
main()
|
|
import argparse
import codecs
import logging
import pickle
import xml.etree.ElementTree as ET
import os
import sys
import pprint
import itertools
from nltk.corpus import stopwords
import nltk
import numpy as np
import progressbar as pb
import time
from pycorenlp import StanfordCoreNLP
from sklearn import metrics, svm, feature_selection
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from config.seedev_types import ds_pair_types, all_entity_groups, all_entity_types, pair_types
from config import config
from text.corpus import Corpus
from text.document import Document
from text.sentence import Sentence
pp = pprint.PrettyPrinter(indent=4)
text_clf = Pipeline([('vect', CountVectorizer(analyzer='char_wb', ngram_range=(7,20), min_df=0.2, max_df=0.5)),
#('vect', CountVectorizer(analyzer='word', ngram_range=(1,5), stop_words="english", min_df=0.1)),
# ('tfidf', TfidfTransformer(use_idf=True, norm="l2")),
#('tfidf', TfidfVectorizer(analyzer='char_wb', ngram_range=(6,20))),
#('clf', SGDClassifier(loss='hinge', penalty='l1', alpha=0.01, n_iter=5, random_state=42)),
#('clf', SGDClassifier())
#('clf', svm.SVC(kernel='rbf', C=10, verbose=True, tol=1e-5))
#('clf', RandomForestClassifier(n_estimators=10))
#('feature_selection', feature_selection.SelectFromModel(LinearSVC(penalty="l1"))),
('clf', MultinomialNB(alpha=0.1, fit_prior=False))
#('clf', DummyClassifier(strategy="constant", constant=True))
])
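# Hedged illustration (not part of the original corpus code): the pipeline
# above is a plain scikit-learn text classifier, so it can be exercised on a
# couple of toy sentences. The strings and labels below are hypothetical and
# only show the fit/predict flow; min_df/max_df are left at their defaults so
# the tiny toy vocabulary is not filtered out.
def _sketch_sentence_classifier_demo():
    toy_clf = Pipeline([
        ('vect', CountVectorizer(analyzer='char_wb', ngram_range=(2, 4))),
        ('clf', MultinomialNB(alpha=0.1, fit_prior=False)),
    ])
    sentences = ["protein A regulates gene B",
                 "the weather was nice today",
                 "gene C is regulated by protein D",
                 "we went for a walk"]
    labels = [True, False, True, False]
    toy_clf.fit(sentences, labels)
    return toy_clf.predict(["protein X regulates gene Y"])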
class SeeDevCorpus(Corpus):
"""
Corpus for the BioNLP SeeDev task
self.path is the base directory of the files of this corpus.
"""
def __init__(self, corpusdir, **kwargs):
super(SeeDevCorpus, self).__init__(corpusdir, **kwargs)
self.subtypes = []
self.train_sentences = [] # sentences used for training the sentence classifier
self.type_sentences = {} # sentences classified as true for each type
def load_corpus(self, corenlpserver, process=True):
# self.path is the base directory of the files of this corpus
trainfiles = [self.path + '/' + f for f in os.listdir(self.path) if f.endswith('.txt')]
total = len(trainfiles)
widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.AdaptiveETA(), ' ', pb.Timer()]
pbar = pb.ProgressBar(widgets=widgets, maxval=total, redirect_stdout=True).start()
time_per_abs = []
for current, f in enumerate(trainfiles):
#logging.debug('%s:%s/%s', f, current + 1, total)
print '{}:{}/{}'.format(f, current + 1, total)
did = f.split(".")[0].split("/")[-1]
t = time.time()
with codecs.open(f, 'r', 'utf-8') as txt:
doctext = txt.read()
doctext = doctext.replace("\n", " ")
newdoc = Document(doctext, process=False, did=did)
newdoc.sentence_tokenize("biomedical")
if process:
newdoc.process_document(corenlpserver, "biomedical")
self.documents[newdoc.did] = newdoc
abs_time = time.time() - t
time_per_abs.append(abs_time)
#logging.info("%s sentences, %ss processing time" % (len(newdoc.sentences), abs_time))
pbar.update(current+1)
pbar.finish()
abs_avg = sum(time_per_abs)*1.0/len(time_per_abs)
logging.info("average time per abstract: %ss" % abs_avg)
def load_annotations(self, ann_dir, etype, pairtype="all"):
self.clear_annotations("all")
for rtype in all_entity_types:
self.clear_annotations(rtype)
annfiles = [ann_dir + '/' + f for f in os.listdir(ann_dir) if f.endswith('.a1')]
total = len(annfiles)
time_per_abs = []
originalid_to_eid = {}
for current, f in enumerate(annfiles):
logging.debug('%s:%s/%s', f, current + 1, total)
did = f.split(".")[0].split("/")[-1]
with codecs.open(f, 'r', 'utf-8') as txt:
for line in txt:
# print line
tid, ann, etext = line.strip().split("\t")
if ";" in ann:
# print "multiple offsets:", ann
# ann = ann.split(";")[0] # ignore the second part for now
ann_elements = ann.split(" ")
entity_type, dstart, dend = ann_elements[0], int(ann_elements[1]), int(ann_elements[-1])
dexclude = [e.split(";") for e in ann_elements[2:-1]]
dexclude = [(int(dex[0]), int(dex[1])) for dex in dexclude]
# print ann, dstart, dend
else:
entity_type, dstart, dend = ann.split(" ")
dexclude = None
dstart, dend = int(dstart), int(dend)
# load all entities
#if etype == "all" or (etype != "all" and etype == type_match[entity_type]):
sentence = self.documents[did].find_sentence_containing(dstart, dend, chemdner=False)
if sentence is not None:
# dstart and dend are relative to the document, so subtract the sentence offset
start = dstart - sentence.offset
end = dend - sentence.offset
if dexclude is not None:
exclude = [(dex[0] - sentence.offset, dex[1] - sentence.offset) for dex in dexclude]
else:
exclude = None
eid = sentence.tag_entity(start, end, entity_type, text=etext, original_id=tid, exclude=exclude)
if eid is None:
print "no eid!", sentence.sid, start, end, exclude, etext, sentence.text
continue
# sys.exit()
originalid_to_eid[did + "." + tid] = eid
else:
print "{}: could not find sentence for this span: {}-{}|{}".format(did, dstart, dend, etext.encode("utf-8"))
print
#sys.exit()
self.load_relations(ann_dir, originalid_to_eid)
def load_relations(self, ann_dir, originalid_to_eid):
print "loading relations..."
relations_stats = {}
annfiles = [ann_dir + '/' + f for f in os.listdir(ann_dir) if f.endswith('.a2')]
total = len(annfiles)
time_per_abs = []
default_stopwords = set(nltk.corpus.stopwords.words('english'))
custom_stopwords = set(('-', '.', ',', '-lrb-', '-rrb-', 'et', 'al', ';', ':', '/'))
all_stopwords = default_stopwords | custom_stopwords
unique_relations = set()
reltype_texts = {}
for current, f in enumerate(annfiles):
logging.debug('%s:%s/%s', f, current + 1, total)
did = f.split(".")[0].split("/")[-1]
with codecs.open(f, 'r', 'utf-8') as txt:
for line in txt:
eid, ann = line.strip().split("\t")
rtype, sourceid, targetid = ann.split(" ")
if rtype not in relations_stats:
relations_stats[rtype] = {"count": 0}
relations_stats[rtype]["count"] += 1
sourceid = did + "." + sourceid.split(":")[-1]
targetid = did + "." + targetid.split(":")[-1]
if sourceid not in originalid_to_eid or targetid not in originalid_to_eid:
print "{}: entity not found: {}=>{}".format(did, sourceid, targetid)
# print sorted([e.split(".")[-1] for e in originalid_to_eid if e.startswith(did)])
print "skipped relation {}".format(rtype)
continue
sourceid, targetid = originalid_to_eid[sourceid], originalid_to_eid[targetid]
sid1 = '.'.join(sourceid.split(".")[:-1])
sid2 = '.'.join(targetid.split(".")[:-1])
sn1 = int(sid1.split("s")[-1])
sn2 = int(sid2.split("s")[-1])
# if abs(sn2 - sn1) > 3:
# print "relation {} between entities on distant sentences: {}=>{}".format(rtype, sourceid, targetid)
# continue
sentence1 = self.documents[did].get_sentence(sid1)
sentence2 = self.documents[did].get_sentence(sid2)
if sentence1 is None:
print "sentence not found:", did, sid1, sourceid, targetid, len(self.documents[did].sentences)
continue
else:
entity1 = sentence1.entities.get_entity(sourceid)
entity2 = sentence2.entities.get_entity(targetid)
entity1.targets.append((targetid, rtype))
if entity1.type + "_source" not in relations_stats[rtype]:
relations_stats[rtype][entity1.type + "_source"] = 0
relations_stats[rtype][entity1.type + "_source"] += 1
if entity2.type + "_target" not in relations_stats[rtype]:
relations_stats[rtype][entity2.type + "_target"] = 0
relations_stats[rtype][entity2.type + "_target"] += 1
entity1_text = entity1.text.encode("utf-8")
entity2_text = entity2.text.encode("utf-8")
sentence_text = []
entity1_orders = [t.order for t in entity1.tokens]
entity2_orders = [t.order for t in entity2.tokens]
entity_orders = entity1_orders + entity2_orders
entity_orders.sort()
# print entity_orders
for t in sentence1.tokens:
if t.order in entity1_orders and (len(sentence_text) == 0 or sentence_text[-1] != "ARG1"):
#sentence_text.append("ARG1-" + entity1.type)
continue
elif t.order in entity2_orders and (len(sentence_text) == 0 or sentence_text[-1] != "ARG2"):
#sentence_text.append("ARG2-" + entity1.type)
continue
elif "goldstandard" in t.tags and (len(sentence_text) == 0 or sentence_text[-1] != "ENTITY"):
#sentence_text.append("ENTITY")
continue
elif t.text.lower() not in all_stopwords and not t.text.istitle() and t.text.isalpha():
sentence_text.append(t.text)
if rtype not in reltype_texts:
reltype_texts[rtype] = []
reltype_texts[rtype].append(sentence_text)
#print " ".join(sentence_text)
#print
rel_text = "{}#{}\t{}\t{}#{}".format(entity1.type, entity1_text, rtype, entity2.type, entity2_text)
unique_relations.add(rel_text)
# if rel_text not in unique_relations:
# unique_relations[rel_text] = set()
# print
# print "{}-{}={}>{}-{}".format(entity1.type, entity1_text, rtype, entity2.type, entity2_text)
# sentence1_text = sentence1.text.encode("utf-8")
# sentence1_text = sentence1_text.replace(entity1_text, "|{}|".format(entity1_text))
# sentence1_text = sentence1_text.replace(entity2_text, "|{}|".format(entity2_text))
# print sentence1_text
#if sid1 != sid2:
# sentence2_text = sentence1.text.encode("utf-8").replace(entity2_text, "|{}|".format(entity2_text))
# print sentence2_text
# print
# print "{}: {}=>{}".format(etype, entity1.text.encode("utf-8"), targetid)
with codecs.open("seedev_relation.txt", 'w', 'utf-8') as relfile:
for r in unique_relations:
relfile.write(r.decode("utf-8") + '\n')
#pp.pprint(relations_stats)
alltokens = []
for rtype in reltype_texts:
alltokens += list(itertools.chain(*reltype_texts[rtype]))
alldist = nltk.FreqDist(alltokens)
allmc = alldist.most_common(150)
allmc = set([x[0] for x in allmc])
for rtype in reltype_texts:
fdist1 = nltk.FreqDist(list(itertools.chain(*reltype_texts[rtype])))
mc = fdist1.most_common(150)
mc = set([x[0] for x in mc])
int_words = mc - allmc
with codecs.open("seedev_int_words_{}.txt".format(rtype), 'w', 'utf-8') as relfile:
print
print rtype
for i in int_words:
relfile.write(i + '\n')
print i
#print rtype, len(int_words), int_words
#print
def get_features(self, pairtype):
f = []
labels = []
sids = []
for sentence in self.get_sentences("goldstandard"):
hasrel = False
hassource = False
hastarget = False
sids.append(sentence.sid)
for e in sentence.entities.elist["goldstandard"]:
if e.type in pair_types[pairtype]["source_types"]:
hassource = True
if e.type in pair_types[pairtype]["target_types"]:
hastarget = True
if any([target[1] == pairtype for target in e.targets]):
# print pairtype, sentence.text
hasrel = True
break
if not hassource or not hastarget:
continue
tokens_text = [t.text for t in sentence.tokens]
stokens = []
for it, t in enumerate(sentence.tokens):
#print tokens_text[:it], tokens_text[it:]
if "-LRB-" in tokens_text[:it] and "-RRB-" in tokens_text[it:] and "-RRB-" not in tokens_text[:it] and "-LRB-" not in tokens_text[it:]:
#if "(" in t.text or ")" in t.text:
# print "skipped between ()", t.text
continue
elif t.lemma.isdigit():
# print "digit:", t.lemma, t.text
continue
elif t.text == "-LRB-" or t.text == "-RRB-":
continue
elif "goldstandard" in t.tags and (len(stokens) == 0 or stokens[-1] != t.tags["goldstandard_subtype"]):
stokens.append(t.tags["goldstandard_subtype"])
#elif not t.text.isalpha():
# print "not alpha:", t.text
# continue
else:
stokens.append(t.pos + "-" + t.lemma)
f.append(" ".join(stokens))
if hasrel:
labels.append(True)
else:
labels.append(False)
return f, labels, sids
def train_sentence_classifier(self, pairtype):
self.text_clf = Pipeline([('vect', CountVectorizer(analyzer='char_wb', ngram_range=(7,20), min_df=0.2, max_df=0.5)),
#('vect', CountVectorizer(analyzer='word', ngram_range=(1,5), stop_words="english", min_df=0.1)),
# ('tfidf', TfidfTransformer(use_idf=True, norm="l2")),
#('tfidf', TfidfVectorizer(analyzer='char_wb', ngram_range=(6,20))),
#('clf', SGDClassifier(loss='hinge', penalty='l1', alpha=0.01, n_iter=5, random_state=42)),
#('clf', SGDClassifier())
#('clf', svm.SVC(kernel='rbf', C=10, verbose=True, tol=1e-5))
#('clf', RandomForestClassifier(n_estimators=10))
#('feature_selection', feature_selection.SelectFromModel(LinearSVC(penalty="l1"))),
('clf', MultinomialNB(alpha=0.1, fit_prior=False))
#('clf', DummyClassifier(strategy="constant", constant=True))
])
f, labels, sids = self.get_features(pairtype)
half_point = int(len(f)*0.5)
self.train_sentences = sids[:half_point]
"""ch2 = SelectKBest(chi2, k=20)
X_train = text_clf.named_steps["vect"].fit_transform(f[:half_point])
X_test = text_clf.named_steps["vect"].transform(f[half_point:])
X_train = ch2.fit_transform(X_train, labels[:half_point])
X_test = ch2.transform(X_test)
feature_names = text_clf.named_steps["vect"].get_feature_names()
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
# print feature_names"""
# train
text_clf = self.text_clf.fit(f[:half_point], labels[:half_point])
#save model
if not os.path.exists("models/kernel_models/" + pairtype + "_sentence_classifier/"):
os.makedirs("models/kernel_models/" + pairtype + "_sentence_classifier/")
logging.info("Training complete, saving to {}/{}/{}.pkl".format("models/kernel_models/",
pairtype + "_sentence_classifier/", pairtype))
joblib.dump(text_clf, "{}/{}/{}.pkl".format("models/kernel_models/",
pairtype + "_sentence_classifier/", pairtype))
# evaluate
pred = text_clf.predict(f[half_point:])
# print len(pred), sum(pred)
self.type_sentences[pairtype] = []
for ip, p in enumerate(pred):
if p:
self.type_sentences[pairtype].append(sids[half_point + ip])
res = metrics.confusion_matrix(labels[half_point:], pred)
return res[1][1], res[0][1], res[1][0]
def test_sentence_classifier(self, pairtype):
text_clf = joblib.load("{}/{}/{}.pkl".format("models/kernel_models/",
pairtype + "_sentence_classifier/", pairtype))
f, labels, sids = self.get_features(pairtype)
pred = text_clf.predict(f)
self.type_sentences[pairtype] = []
for ip, p in enumerate(pred):
if p:
self.type_sentences[pairtype].append(sids[ip])
# print self.type_sentences.keys()
res = metrics.confusion_matrix(labels, pred)
return res[1][1], res[0][1], res[1][0]
def add_more_sentences(self, corpuspath):
"""
Load sentences with relations from another corpus
:param corpuspath: corpus path
:return:
"""
nsentences = 0
for did in self.documents:
nsentences += len(self.documents[did].sentences)
print "base corpus has {} sentences".format(nsentences)
corpus2 = pickle.load(open(corpuspath, 'rb'))
nsentences = 0
for did in corpus2.documents:
if did in self.documents:
print "repeated did:", did
else:
self.documents[did] = corpus2.documents[did]
nsentences += len(corpus2.documents[did].sentences)
#for sentence in corpus2.documents[did].sentences:
#if any([len(e.targets)> 1 for e in sentence.entities.elist["goldstandard"]]):
# print "found sentence with relations:", sentence.sid
#if len(sentence.entities.elist["goldstandard"]) > 1:
#self.documents[sentence.sid] = Document(sentence.text, sentences=[sentence])
print "added {} sentences".format(nsentences)
self.save("corpora/Thaliana/seedev-extended.pickle")
def convert_entities_to_goldstandard(self, basemodel="models/seedev_train_entity"):
for did in self.documents:
for sentence in self.documents[did].sentences:
sentence.entities.elist["goldstandard"] = []
for source in sentence.entities.elist:
if source.startswith(basemodel):
# logging.info("adding to goldstandard: {}, {} entities".format(source, len(sentence.entities.elist[source])))
sentence.entities.elist["goldstandard"] += sentence.entities.elist[source]
def find_ds_relations(self):
rtypes = ds_pair_types
#del rtypes["Has_Sequence_Identical_To"]
#del rtypes["Is_Functionally_Equivalent_To"]
rel_words = get_relwords(rtypes)
rtypes_count = {}
for did in self.documents:
for sentence in self.documents[did].sentences:
sentence_entities = [entity for entity in sentence.entities.elist["goldstandard"]]
sentence_words = set([t.text for t in sentence.tokens])
# logging.debug("sentence {} has {} entities ({})".format(sentence.sid, len(sentence_entities), len(sentence.entities.elist["goldstandard"])))
for rtype in rtypes:
if rtype not in rtypes_count:
rtypes_count[rtype] = [0, 0]
if len(sentence_words & rel_words[rtype]) > -1 and len(sentence_entities) < 20:
pairtypes = (pair_types[rtype]["source_types"], pair_types[rtype]["target_types"])
for pair in itertools.permutations(sentence_entities, 2):
# print pair[0].type in pairtypes[0], pair[1].type in pairtypes[1]
if pair[0].type in pairtypes[0] and pair[1].type in pairtypes[1] and pair[0].text != pair[1].text:
logging.info(u"found relation {0}: {1.text}.{1.type}=>{2.text}.{2.type} because of {3}".
format(rtype, pair[0], pair[1], str(sentence_words & rel_words[rtype])))
logging.info("context: {}".format(sentence.text.encode("utf-8")))
pair[0].targets.append((pair[1].eid, rtype))
rtypes_count[rtype][0] += 1
else:
rtypes_count[rtype][1] += 1
for rtype in rtypes_count:
print rtype, (1.0*rtypes_count[rtype][0])/(rtypes_count[rtype][0]+rtypes_count[rtype][1]), rtypes_count[rtype][0], rtypes_count[rtype][1]
def get_relwords(rtypes, basedir="seedev_int_words"):
relwords = {}
for rtype in rtypes:
relwords[rtype] = set()
with open(basedir + "_{}.txt".format(rtype), 'r') as wfile:
for l in wfile:
relwords[rtype].add(l.strip())
return relwords
def get_seedev_gold_ann_set(goldpath, entitytype, pairtype):
logging.info("loading gold standard annotations... {}".format(goldpath))
annfiles = [goldpath + '/' + f for f in os.listdir(goldpath) if f.endswith('.a1')]
gold_offsets = set()
tid_to_offsets = {}
for current, f in enumerate(annfiles):
did = f.split(".")[0].split("/")[-1]
with codecs.open(f, 'r', "utf-8") as txt:
for line in txt:
tid, ann, etext = line.strip().split("\t")
if ";" in ann:
# print "multiple offsets:", ann
# TODO: use the two parts
ann_elements = ann.split(" ")
etype, dstart, dend = ann_elements[0], int(ann_elements[1]), int(ann_elements[-1])
else:
etype, dstart, dend = ann.split(" ")
dstart, dend = int(dstart), int(dend)
if etype == entitytype or entitytype == "all":
gold_offsets.add((did, dstart, dend, etext))
tid_to_offsets[did + "." + tid] = (dstart, dend, etext)
gold_relations = set()
annfiles = [goldpath + '/' + f for f in os.listdir(goldpath) if f.endswith('.a2')]
for current, f in enumerate(annfiles):
did = f.split(".")[0].split("/")[-1]
with open(f, 'r') as txt:
for line in txt:
eid, ann = line.strip().split("\t")
ptype, sourceid, targetid = ann.split(" ")
if ptype == pairtype or pairtype == "all":
sourceid = sourceid.split(":")[-1]
targetid = targetid.split(":")[-1]
source = tid_to_offsets[did + "." + sourceid]
target = tid_to_offsets[did + "." + targetid]
gold_relations.add((did, source[:2], target[:2], u"{}={}>{}".format(source[2], ptype, target[2])))
#gold_relations.add((did, source[:2], target[:2], u"{}=>{}".format(source[2], target[2])))
return gold_offsets, gold_relations
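# Hedged illustration (not part of the original corpus code): the .a1/.a2
# loaders above consume tab-separated BioNLP-style lines. The tiny parser
# below shows the shape of a single-span .a1 line using a made-up example;
# discontinuous ";"-separated spans are handled separately by the loaders.
def _sketch_parse_a1_line(line):
    tid, ann, etext = line.strip().split("\t")
    etype, dstart, dend = ann.split(" ")
    return tid, etype, int(dstart), int(dend), etext
# e.g. _sketch_parse_a1_line(u"T1\tGene 10 14\tSEP3")
#      -> (u"T1", u"Gene", 10, 14, u"SEP3")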
|
|
import os
import types
import shlex
import sys
import codecs
import tempfile
import tkinter.filedialog as tkFileDialog
import tkinter.messagebox as tkMessageBox
import re
from tkinter import *
from tkinter.simpledialog import askstring
from idlelib.configHandler import idleConf
from codecs import BOM_UTF8
# Try setting the locale, so that we can find out
# what encoding to use
try:
import locale
locale.setlocale(locale.LC_CTYPE, "")
except (ImportError, locale.Error):
pass
# Encoding for file names
filesystemencoding = sys.getfilesystemencoding() ### currently unused
locale_encoding = 'ascii'
if sys.platform == 'win32':
# On Windows, we could use "mbcs". However, to give the user
# a portable encoding name, we need to find the code page
try:
locale_encoding = locale.getdefaultlocale()[1]
codecs.lookup(locale_encoding)
except LookupError:
pass
else:
try:
# Different things can fail here: the locale module may not be
# loaded, it may not offer nl_langinfo, or CODESET, or the
# resulting codeset may be unknown to Python. We ignore all
# these problems, falling back to ASCII
locale_encoding = locale.nl_langinfo(locale.CODESET)
if locale_encoding is None or locale_encoding == '':
# situation occurs on Mac OS X
locale_encoding = 'ascii'
codecs.lookup(locale_encoding)
except (NameError, AttributeError, LookupError):
# Try getdefaultlocale: it parses environment variables,
# which may give a clue. Unfortunately, getdefaultlocale has
# bugs that can cause ValueError.
try:
locale_encoding = locale.getdefaultlocale()[1]
if locale_encoding is None or locale_encoding == '':
# situation occurs on Mac OS X
locale_encoding = 'ascii'
codecs.lookup(locale_encoding)
except (ValueError, LookupError):
pass
locale_encoding = locale_encoding.lower()
encoding = locale_encoding ### KBK 07Sep07 This is used all over IDLE, check!
### 'encoding' is used below in encode(), check!
coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(r'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
def coding_spec(data):
"""Return the encoding declaration according to PEP 263.
When checking encoded data, only the first two lines should be passed
in to avoid a UnicodeDecodeError if the rest of the data is not unicode.
The first two lines would contain the encoding specification.
Raise a LookupError if the encoding is declared but unknown.
"""
if isinstance(data, bytes):
# This encoding might be wrong. However, the coding
# spec must be ASCII-only, so any non-ASCII characters
# around here will be ignored. Decoding to Latin-1 should
# never fail (except for memory outage)
lines = data.decode('iso-8859-1')
else:
lines = data
# consider only the first two lines
if '\n' in lines:
lst = lines.split('\n', 2)[:2]
elif '\r' in lines:
lst = lines.split('\r', 2)[:2]
else:
lst = [lines]
for line in lst:
match = coding_re.match(line)
if match is not None:
break
if not blank_re.match(line):
return None
else:
return None
name = match.group(1)
try:
codecs.lookup(name)
except LookupError:
# The standard encoding error does not indicate the encoding
raise LookupError("Unknown encoding: "+name)
return name
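# Hedged illustration (not part of IDLE itself): coding_spec() only looks at
# the first two lines of a file and understands PEP 263 declarations. The
# helper below is a small, never-called self-check with made-up source
# snippets showing the three interesting outcomes.
def _coding_spec_examples():
    # A declaration on the first line is picked up verbatim.
    assert coding_spec("# -*- coding: latin-1 -*-\nx = 1\n") == "latin-1"
    # A shebang line is skipped, so a declaration on line two still counts.
    assert coding_spec("#!/usr/bin/env python3\n# coding: utf-8\n") == "utf-8"
    # No declaration in the first two lines means "no spec".
    assert coding_spec("x = 1\ny = 2\n") is None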
class IOBinding:
def __init__(self, editwin):
self.editwin = editwin
self.text = editwin.text
self.__id_open = self.text.bind("<<open-window-from-file>>", self.open)
self.__id_save = self.text.bind("<<save-window>>", self.save)
self.__id_saveas = self.text.bind("<<save-window-as-file>>",
self.save_as)
self.__id_savecopy = self.text.bind("<<save-copy-of-window-as-file>>",
self.save_a_copy)
self.fileencoding = None
self.__id_print = self.text.bind("<<print-window>>", self.print_window)
def close(self):
# Undo command bindings
self.text.unbind("<<open-window-from-file>>", self.__id_open)
self.text.unbind("<<save-window>>", self.__id_save)
self.text.unbind("<<save-window-as-file>>",self.__id_saveas)
self.text.unbind("<<save-copy-of-window-as-file>>", self.__id_savecopy)
self.text.unbind("<<print-window>>", self.__id_print)
# Break cycles
self.editwin = None
self.text = None
self.filename_change_hook = None
def get_saved(self):
return self.editwin.get_saved()
def set_saved(self, flag):
self.editwin.set_saved(flag)
def reset_undo(self):
self.editwin.reset_undo()
filename_change_hook = None
def set_filename_change_hook(self, hook):
self.filename_change_hook = hook
filename = None
dirname = None
def set_filename(self, filename):
if filename and os.path.isdir(filename):
self.filename = None
self.dirname = filename
else:
self.filename = filename
self.dirname = None
self.set_saved(1)
if self.filename_change_hook:
self.filename_change_hook()
def open(self, event=None, editFile=None):
flist = self.editwin.flist
# Save in case parent window is closed (ie, during askopenfile()).
if flist:
if not editFile:
filename = self.askopenfile()
else:
filename=editFile
if filename:
# If editFile is valid and already open, flist.open will
# shift focus to its existing window.
# If the current window exists and is a fresh unnamed,
# unmodified editor window (not an interpreter shell),
# pass self.loadfile to flist.open so it will load the file
# in the current window (if the file is not already open)
# instead of a new window.
if (self.editwin and
not getattr(self.editwin, 'interp', None) and
not self.filename and
self.get_saved()):
flist.open(filename, self.loadfile)
else:
flist.open(filename)
else:
if self.text:
self.text.focus_set()
return "break"
# Code for use outside IDLE:
if self.get_saved():
reply = self.maybesave()
if reply == "cancel":
self.text.focus_set()
return "break"
if not editFile:
filename = self.askopenfile()
else:
filename=editFile
if filename:
self.loadfile(filename)
else:
self.text.focus_set()
return "break"
eol = r"(\r\n)|\n|\r" # \r\n (Windows), \n (UNIX), or \r (Mac)
eol_re = re.compile(eol)
eol_convention = os.linesep # default
def loadfile(self, filename):
try:
# open the file in binary mode so that we can handle
# end-of-line convention ourselves.
with open(filename, 'rb') as f:
two_lines = f.readline() + f.readline()
f.seek(0)
bytes = f.read()
except OSError as msg:
tkMessageBox.showerror("I/O Error", str(msg), master=self.text)
return False
chars, converted = self._decode(two_lines, bytes)
if chars is None:
tkMessageBox.showerror("Decoding Error",
"File %s\nFailed to Decode" % filename,
parent=self.text)
return False
# We now convert all end-of-lines to '\n's
firsteol = self.eol_re.search(chars)
if firsteol:
self.eol_convention = firsteol.group(0)
chars = self.eol_re.sub(r"\n", chars)
self.text.delete("1.0", "end")
self.set_filename(None)
self.text.insert("1.0", chars)
self.reset_undo()
self.set_filename(filename)
if converted:
# We need to save the conversion results first
# before being able to execute the code
self.set_saved(False)
self.text.mark_set("insert", "1.0")
self.text.yview("insert")
self.updaterecentfileslist(filename)
return True
def _decode(self, two_lines, bytes):
"Create a Unicode string."
chars = None
# Check presence of a UTF-8 signature first
if bytes.startswith(BOM_UTF8):
try:
chars = bytes[3:].decode("utf-8")
except UnicodeDecodeError:
# has UTF-8 signature, but fails to decode...
return None, False
else:
# Indicates that this file originally had a BOM
self.fileencoding = 'BOM'
return chars, False
# Next look for coding specification
try:
enc = coding_spec(two_lines)
except LookupError as name:
tkMessageBox.showerror(
title="Error loading the file",
message="The encoding '%s' is not known to this Python "\
"installation. The file may not display correctly" % name,
master = self.text)
enc = None
except UnicodeDecodeError:
return None, False
if enc:
try:
chars = str(bytes, enc)
self.fileencoding = enc
return chars, False
except UnicodeDecodeError:
pass
# Try ascii:
try:
chars = str(bytes, 'ascii')
self.fileencoding = None
return chars, False
except UnicodeDecodeError:
pass
# Try utf-8:
try:
chars = str(bytes, 'utf-8')
self.fileencoding = 'utf-8'
return chars, False
except UnicodeDecodeError:
pass
# Finally, ask the user, suggesting the locale's encoding as a default.
# Relying on this is deprecated; the file should declare a non-ASCII
# encoding explicitly.
try:
# Wait for the editor window to appear
self.editwin.text.update()
enc = askstring(
"Specify file encoding",
"The file's encoding is invalid for Python 3.x.\n"
"IDLE will convert it to UTF-8.\n"
"What is the current encoding of the file?",
initialvalue = locale_encoding,
parent = self.editwin.text)
if enc:
chars = str(bytes, enc)
self.fileencoding = None
return chars, True
except (UnicodeDecodeError, LookupError):
pass
return None, False # None on failure
def maybesave(self):
if self.get_saved():
return "yes"
message = "Do you want to save %s before closing?" % (
self.filename or "this untitled document")
confirm = tkMessageBox.askyesnocancel(
title="Save On Close",
message=message,
default=tkMessageBox.YES,
master=self.text)
if confirm:
reply = "yes"
self.save(None)
if not self.get_saved():
reply = "cancel"
elif confirm is None:
reply = "cancel"
else:
reply = "no"
self.text.focus_set()
return reply
def save(self, event):
if not self.filename:
self.save_as(event)
else:
if self.writefile(self.filename):
self.set_saved(True)
try:
self.editwin.store_file_breaks()
except AttributeError: # may be a PyShell
pass
self.text.focus_set()
return "break"
def save_as(self, event):
filename = self.asksavefile()
if filename:
if self.writefile(filename):
self.set_filename(filename)
self.set_saved(1)
try:
self.editwin.store_file_breaks()
except AttributeError:
pass
self.text.focus_set()
self.updaterecentfileslist(filename)
return "break"
def save_a_copy(self, event):
filename = self.asksavefile()
if filename:
self.writefile(filename)
self.text.focus_set()
self.updaterecentfileslist(filename)
return "break"
def writefile(self, filename):
self.fixlastline()
text = self.text.get("1.0", "end-1c")
if self.eol_convention != "\n":
text = text.replace("\n", self.eol_convention)
chars = self.encode(text)
try:
with open(filename, "wb") as f:
f.write(chars)
return True
except OSError as msg:
tkMessageBox.showerror("I/O Error", str(msg),
master=self.text)
return False
def encode(self, chars):
if isinstance(chars, bytes):
# This is either plain ASCII, or Tk was returning mixed-encoding
# text to us. Don't try to guess further.
return chars
# Preserve a BOM that might have been present on opening
if self.fileencoding == 'BOM':
return BOM_UTF8 + chars.encode("utf-8")
# See whether there is anything non-ASCII in it.
# If not, no need to figure out the encoding.
try:
return chars.encode('ascii')
except UnicodeError:
pass
# Check if there is an encoding declared
try:
# a string, let coding_spec slice it to the first two lines
enc = coding_spec(chars)
failed = None
except LookupError as msg:
failed = msg
enc = None
else:
if not enc:
# PEP 3120: default source encoding is UTF-8
enc = 'utf-8'
if enc:
try:
return chars.encode(enc)
except UnicodeError:
failed = "Invalid encoding '%s'" % enc
tkMessageBox.showerror(
"I/O Error",
"%s.\nSaving as UTF-8" % failed,
master = self.text)
# Fallback: save as UTF-8, with BOM - ignoring the incorrect
# declared encoding
return BOM_UTF8 + chars.encode("utf-8")
def fixlastline(self):
c = self.text.get("end-2c")
if c != '\n':
self.text.insert("end-1c", "\n")
def print_window(self, event):
confirm = tkMessageBox.askokcancel(
title="Print",
message="Print to Default Printer",
default=tkMessageBox.OK,
master=self.text)
if not confirm:
self.text.focus_set()
return "break"
tempfilename = None
saved = self.get_saved()
if saved:
filename = self.filename
# shell undo is reset after every prompt, looks saved, probably isn't
if not saved or filename is None:
(tfd, tempfilename) = tempfile.mkstemp(prefix='IDLE_tmp_')
filename = tempfilename
os.close(tfd)
if not self.writefile(tempfilename):
os.unlink(tempfilename)
return "break"
platform = os.name
printPlatform = True
if platform == 'posix': #posix platform
command = idleConf.GetOption('main','General',
'print-command-posix')
command = command + " 2>&1"
elif platform == 'nt': #win32 platform
command = idleConf.GetOption('main','General','print-command-win')
else: #no printing for this platform
printPlatform = False
if printPlatform: #we can try to print for this platform
command = command % shlex.quote(filename)
pipe = os.popen(command, "r")
# things can get ugly on NT if there is no printer available.
output = pipe.read().strip()
status = pipe.close()
if status:
output = "Printing failed (exit status 0x%x)\n" % \
status + output
if output:
output = "Printing command: %s\n" % repr(command) + output
tkMessageBox.showerror("Print status", output, master=self.text)
else: #no printing for this platform
message = "Printing is not enabled for this platform: %s" % platform
tkMessageBox.showinfo("Print status", message, master=self.text)
if tempfilename:
os.unlink(tempfilename)
return "break"
opendialog = None
savedialog = None
filetypes = [
("Python files", "*.py *.pyw", "TEXT"),
("Text files", "*.txt", "TEXT"),
("All files", "*"),
]
defaultextension = '.py' if sys.platform == 'darwin' else ''
def askopenfile(self):
dir, base = self.defaultfilename("open")
if not self.opendialog:
self.opendialog = tkFileDialog.Open(master=self.text,
filetypes=self.filetypes)
filename = self.opendialog.show(initialdir=dir, initialfile=base)
return filename
def defaultfilename(self, mode="open"):
if self.filename:
return os.path.split(self.filename)
elif self.dirname:
return self.dirname, ""
else:
try:
pwd = os.getcwd()
except os.error:
pwd = ""
return pwd, ""
def asksavefile(self):
dir, base = self.defaultfilename("save")
if not self.savedialog:
self.savedialog = tkFileDialog.SaveAs(
master=self.text,
filetypes=self.filetypes,
defaultextension=self.defaultextension)
filename = self.savedialog.show(initialdir=dir, initialfile=base)
return filename
def updaterecentfileslist(self,filename):
"Update recent file list on all editor windows"
if self.editwin.flist:
self.editwin.update_recent_files_list(filename)
def test():
root = Tk()
class MyEditWin:
def __init__(self, text):
self.text = text
self.flist = None
self.text.bind("<Control-o>", self.open)
self.text.bind("<Control-s>", self.save)
self.text.bind("<Alt-s>", self.save_as)
self.text.bind("<Alt-z>", self.save_a_copy)
def get_saved(self): return 0
def set_saved(self, flag): pass
def reset_undo(self): pass
def open(self, event):
self.text.event_generate("<<open-window-from-file>>")
def save(self, event):
self.text.event_generate("<<save-window>>")
def save_as(self, event):
self.text.event_generate("<<save-window-as-file>>")
def save_a_copy(self, event):
self.text.event_generate("<<save-copy-of-window-as-file>>")
text = Text(root)
text.pack()
text.focus_set()
editwin = MyEditWin(text)
io = IOBinding(editwin)
root.mainloop()
if __name__ == "__main__":
test()
|
|
"""
Support for functionality to keep track of the sun.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/sun/
"""
import logging
from datetime import timedelta
import homeassistant.util as util
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import (
track_point_in_utc_time, track_utc_time_change)
from homeassistant.util import dt as dt_util
from homeassistant.util import location as location_util
REQUIREMENTS = ['astral==0.9']
DOMAIN = "sun"
ENTITY_ID = "sun.sun"
CONF_ELEVATION = 'elevation'
STATE_ABOVE_HORIZON = "above_horizon"
STATE_BELOW_HORIZON = "below_horizon"
STATE_ATTR_NEXT_RISING = "next_rising"
STATE_ATTR_NEXT_SETTING = "next_setting"
STATE_ATTR_ELEVATION = "elevation"
_LOGGER = logging.getLogger(__name__)
def is_on(hass, entity_id=None):
"""Test if the sun is currently up based on the statemachine."""
entity_id = entity_id or ENTITY_ID
return hass.states.is_state(entity_id, STATE_ABOVE_HORIZON)
def next_setting(hass, entity_id=None):
"""Local datetime object of the next sun setting."""
utc_next = next_setting_utc(hass, entity_id)
return dt_util.as_local(utc_next) if utc_next else None
def next_setting_utc(hass, entity_id=None):
"""UTC datetime object of the next sun setting."""
entity_id = entity_id or ENTITY_ID
state = hass.states.get(entity_id)
try:
return dt_util.str_to_datetime(
state.attributes[STATE_ATTR_NEXT_SETTING])
except (AttributeError, KeyError):
# AttributeError if state is None
# KeyError if STATE_ATTR_NEXT_SETTING does not exist
return None
def next_rising(hass, entity_id=None):
"""Local datetime object of the next sun rising."""
utc_next = next_rising_utc(hass, entity_id)
return dt_util.as_local(utc_next) if utc_next else None
def next_rising_utc(hass, entity_id=None):
"""UTC datetime object of the next sun rising."""
entity_id = entity_id or ENTITY_ID
state = hass.states.get(entity_id)
try:
return dt_util.str_to_datetime(
state.attributes[STATE_ATTR_NEXT_RISING])
except (AttributeError, KeyError):
# AttributeError if state is None
# KeyError if STATE_ATTR_NEXT_RISING does not exist
return None
def setup(hass, config):
"""Track the state of the sun in HA."""
if None in (hass.config.latitude, hass.config.longitude):
_LOGGER.error("Latitude or longitude not set in Home Assistant config")
return False
latitude = util.convert(hass.config.latitude, float)
longitude = util.convert(hass.config.longitude, float)
errors = []
if latitude is None:
errors.append('Latitude needs to be a decimal value')
elif not -90 <= latitude <= 90:
errors.append('Latitude needs to be -90 .. 90')
if longitude is None:
errors.append('Longitude needs to be a decimal value')
elif not -180 <= longitude <= 180:
errors.append('Longitude needs to be -180 .. 180')
if errors:
_LOGGER.error('Invalid configuration received: %s', ", ".join(errors))
return False
platform_config = config.get(DOMAIN, {})
elevation = platform_config.get(CONF_ELEVATION)
if elevation is None:
elevation = location_util.elevation(latitude, longitude)
from astral import Location
location = Location(('', '', latitude, longitude, hass.config.time_zone,
elevation))
sun = Sun(hass, location)
sun.point_in_time_listener(dt_util.utcnow())
return True
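# Hedged illustration (not part of Home Assistant): the setup() validation
# above boils down to a chained range check per coordinate. The helper and
# sample values below are hypothetical and only restate that check in
# isolation.
def _coordinates_valid(latitude, longitude):
    """Return True when both coordinates fall inside their valid ranges."""
    return -90 <= latitude <= 90 and -180 <= longitude <= 180
# e.g. _coordinates_valid(52.37, 4.89) -> True
#      _coordinates_valid(123.0, 4.89) -> False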
class Sun(Entity):
"""Representation of the Sun."""
entity_id = ENTITY_ID
def __init__(self, hass, location):
"""Initialize the Sun."""
self.hass = hass
self.location = location
self._state = self.next_rising = self.next_setting = None
track_utc_time_change(hass, self.timer_update, second=30)
@property
def name(self):
"""Return the name."""
return "Sun"
@property
def state(self):
"""Return the state of the sun."""
if self.next_rising > self.next_setting:
return STATE_ABOVE_HORIZON
return STATE_BELOW_HORIZON
@property
def state_attributes(self):
"""Return the state attributes of the sun."""
return {
STATE_ATTR_NEXT_RISING:
dt_util.datetime_to_str(self.next_rising),
STATE_ATTR_NEXT_SETTING:
dt_util.datetime_to_str(self.next_setting),
STATE_ATTR_ELEVATION: round(self.solar_elevation, 2)
}
@property
def next_change(self):
"""Datetime when the next change to the state is."""
return min(self.next_rising, self.next_setting)
@property
def solar_elevation(self):
"""Angle the sun is above the horizon."""
from astral import Astral
return Astral().solar_elevation(
dt_util.utcnow(),
self.location.latitude,
self.location.longitude)
def update_as_of(self, utc_point_in_time):
"""Calculate sun state at a point in UTC time."""
mod = -1
while True:
next_rising_dt = self.location.sunrise(
utc_point_in_time + timedelta(days=mod), local=False)
if next_rising_dt > utc_point_in_time:
break
mod += 1
mod = -1
while True:
next_setting_dt = (self.location.sunset(
utc_point_in_time + timedelta(days=mod), local=False))
if next_setting_dt > utc_point_in_time:
break
mod += 1
self.next_rising = next_rising_dt
self.next_setting = next_setting_dt
def point_in_time_listener(self, now):
"""Called when the state of the sun has changed."""
self.update_as_of(now)
self.update_ha_state()
# Schedule next update at next_change+1 second so sun state has changed
track_point_in_utc_time(
self.hass, self.point_in_time_listener,
self.next_change + timedelta(seconds=1))
def timer_update(self, time):
"""Needed to update solar elevation."""
self.update_ha_state()
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for interacting with a depth camera device."""
from typing import Optional, Callable, Tuple
import numpy as np
from pyreach import calibration as cal
from pyreach import core
from pyreach import depth_camera
from pyreach.mock import calibration_mock as cal_mock
class DepthFrameMock(depth_camera.DepthFrame):
"""A single depth camera frame taken at a specific time.
Attributes:
time: The time the depth camera image was taken in seconds.
sequence: The sequence number of the depth frame.
device_type: The JSON device type associated with the camera.
device_name: The JSON device name associated with the camera.
depth_image: A (DX,DY) array of uint16's containing the depth data.
color_image: A (DX,DY,3) array of uint8's containing the color image.
calibration: The calibration when the image is captured.
"""
def __init__(self, time: float, sequence: int, device_type: str,
device_name: str, depth_image: np.ndarray,
color_image: np.ndarray,
calibration: Optional[cal.Calibration]) -> None:
"""Initialize a MockColorFrame."""
assert len(depth_image.shape) == 2
assert depth_image.dtype == np.uint16
assert len(color_image.shape) == 3
assert color_image.dtype == np.uint8
assert depth_image.shape == color_image.shape[:2]
self._time: float = time
self._sequence = sequence
self._device_type: str = device_type
self._device_name: str = device_name
self._depth_image: np.ndarray = depth_image
self._color_image: np.ndarray = color_image
self._calibration: Optional[cal.Calibration] = calibration
@property
def time(self) -> float:
"""Return timestamp of the DepthFrame."""
return self._time
@property
def sequence(self) -> int:
"""Sequence number of the DepthFrame."""
return self._sequence
@property
def device_type(self) -> str:
"""Return the reach device type."""
return self._device_type
@property
def device_name(self) -> str:
"""Return the Reach device name."""
return self._device_name
@property
def color_data(self) -> np.ndarray:
"""Return the color image as a (DX,DY,3)."""
return self._color_image
@property
def depth_data(self) -> np.ndarray:
"""Return the color image as a (DX,DY)."""
return self._depth_image
@property
def calibration(self) -> Optional[cal.Calibration]:
"""Return the Calibration for for the ColorFrame."""
return self._calibration
def pose(self) -> Optional[core.Pose]:
"""Return the pose of the camera when the image is taken."""
raise NotImplementedError
def get_point_normal(
self, x: int,
y: int) -> Optional[Tuple[np.ndarray, np.ndarray, np.ndarray]]:
"""Return hit point, surface normal and transform of a pixel.
Casts a ray from the camera center into the point cloud and finds the 3D
position of the hit point. A small region around the hit point is then used
to estimate the surface normal. The third return value is the transformation
matrix from the unit z-vector to the (hit point, surface normal) pair.
Args:
x: x index of the pixel.
y: y index of the pixel.
Returns:
tuple (position, surface normal, transform)
"""
raise NotImplementedError
class DepthCameraMock(depth_camera.DepthCamera):
"""Interface for a depth camera."""
def start_streaming(self, request_period: float = 1.0) -> None:
"""Start depth camera streaming.
Args:
request_period: The optional period between depth camera image requests. If
not specified, it defaults to a period of 1.0 seconds.
"""
pass
def stop_streaming(self) -> None:
"""Stop depth camera streaming."""
raise NotImplementedError
def enable_tagged_request(self) -> None:
"""Enable tagged depth camera image requests."""
raise NotImplementedError
def disable_tagged_request(self) -> None:
"""Disable tagged requests."""
raise NotImplementedError
def image(self) -> Optional[depth_camera.DepthFrame]:
"""Get the latest depth camera frame if it is available."""
depth_frame: depth_camera.DepthFrame = DepthFrameMock(
1.0,
0,
"device_type",
"device_name",
np.zeros((3, 5), dtype=np.uint16), # Depth image
np.zeros((3, 5, 3), dtype=np.uint8), # Color image
cal_mock.CalibrationMock("device_type", "device_name",
"depth_camera_link_name"))
return depth_frame
def fetch_image(self,
timeout: float = 15.0) -> Optional[depth_camera.DepthFrame]:
"""Fetch a new image or possibly times out.
Args:
timeout: The number of seconds to wait before timing out. This
defaults to 15 seconds if not specified.
Returns:
Returns the latest image.
"""
return self.image()
def async_fetch_image(self,
callback: Optional[Callable[[depth_camera.DepthFrame],
None]] = None,
error_callback: Optional[Callable[[core.PyReachStatus],
None]] = None,
timeout: float = 30) -> None:
"""Fetch a new image asynchronously.
The callback function will be invoked when a new image is available.
Args:
callback: callback called when an image arrives. If the camera fails to
load an image, callback will not be called.
error_callback: optional callback called if there is an error.
timeout: timeout for the process, defaults to 30 seconds.
"""
raise NotImplementedError
def add_update_callback(
self,
callback: Callable[[depth_camera.DepthFrame], bool],
finished_callback: Optional[Callable[[],
None]] = None) -> Callable[[], None]:
"""Add a callback function to be invoked when a new frame is available.
Args:
callback: function to be invoked when a new frame is available. Return
False to continue receiving new images, or True to stop further
updates.
finished_callback: Optional callback, called when the callback is stopped
or if the camera is closed.
Returns:
A function that when called stops the callback.
"""
raise NotImplementedError
@property
def pose(self) -> Optional[core.Pose]:
"""Return the latest pose of the camera."""
raise NotImplementedError
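# Hedged illustration (not part of the pyreach mocks): DepthFrameMock's
# constructor asserts that the depth image is uint16, the color image is
# uint8, and that their (DX, DY) extents agree. The never-called helper below
# builds one consistent frame with made-up 3x5 image data, mirroring what
# DepthCameraMock.image() returns.
def _sketch_consistent_depth_frame() -> DepthFrameMock:
  """Return a DepthFrameMock whose depth and color shapes agree."""
  depth = np.zeros((3, 5), dtype=np.uint16)     # (DX, DY) depth samples
  color = np.zeros((3, 5, 3), dtype=np.uint8)   # matching (DX, DY, 3) colors
  calibration = cal_mock.CalibrationMock(
      "device_type", "device_name", "depth_camera_link_name")
  return DepthFrameMock(1.0, 0, "device_type", "device_name", depth, color,
                        calibration)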
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
"""Common utility functions for sql tool."""
import json
import sys
import time
from googlecloudapis.apitools.base import py as apitools_base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core.util import console_io
class OperationError(exceptions.ToolException):
pass
def GetCertRefFromName(
sql_client, sql_messages, resources, instance_ref, common_name):
"""Get a cert reference for a particular instance, given its common name.
Args:
sql_client: apitools.BaseApiClient, A working client for the sql version to
be used.
sql_messages: module, The module that defines the messages for the sql
version to be used.
resources: resources.Registry, The registry that can create resource refs
for the sql version to be used.
instance_ref: resources.Resource, The instance whose ssl cert is being
fetched.
common_name: str, The common name of the ssl cert to be fetched.
Returns:
resources.Resource, A ref for the ssl cert being fetched. Or None if it
could not be found.
"""
cert = GetCertFromName(sql_client, sql_messages, instance_ref, common_name)
if not cert:
return None
return resources.Create(
collection='sql.sslCerts',
project=instance_ref.project,
instance=instance_ref.instance,
sha1Fingerprint=cert.sha1Fingerprint)
def GetCertFromName(
sql_client, sql_messages, instance_ref, common_name):
"""Get a cert for a particular instance, given its common name.
In versions of the SQL API up to at least v1beta3, the last parameter of the
URL is the sha1fingerprint, which is not something writeable or readable by
humans. Instead, the CLI will ask for the common name. To allow this, we first
query all the ssl certs for the instance, and iterate through them to find the
one with the correct common name.
Args:
sql_client: apitools.BaseApiClient, A working client for the sql version to
be used.
sql_messages: module, The module that defines the messages for the sql
version to be used.
instance_ref: resources.Resource, The instance whose ssl cert is being
fetched.
common_name: str, The common name of the ssl cert to be fetched.
Returns:
resources.Resource, A ref for the ssl cert being fetched. Or None if it
could not be found.
"""
certs = sql_client.sslCerts.List(
sql_messages.SqlSslCertsListRequest(
project=instance_ref.project,
instance=instance_ref.instance))
for cert in certs.items:
if cert.commonName == common_name:
return cert
return None
def WaitForOperation(sql_client, operation_ref, message):
"""Wait for a Cloud SQL operation to complete.
Args:
sql_client: apitools.BaseApiClient, The client used to make requests.
operation_ref: resources.Resource, A reference for the operation to poll.
message: str, The string to print while polling.
Returns:
True if the operation succeeded without error.
Raises:
OperationError: If the operation has an error code.
"""
with console_io.ProgressTracker(message, autotick=False) as pt:
while True:
op = sql_client.operations.Get(operation_ref.Request())
if op.error:
raise OperationError(op.error[0].code)
pt.Tick()
if op.state == 'DONE':
return True
if op.state == 'UNKNOWN':
return False
      # TODO(user): Ask the Cloud SQL people for the best retry schedule.
time.sleep(2)
def GetErrorMessage(error):
content_obj = json.loads(error.content)
return content_obj.get('error', {}).get('message', '')
def ReraiseHttpException(foo):
def Func(*args, **kwargs):
try:
return foo(*args, **kwargs)
except apitools_base.HttpError as error:
msg = GetErrorMessage(error)
unused_type, unused_value, traceback = sys.exc_info()
raise exceptions.HttpException, msg, traceback
return Func
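# Hedged usage sketch: ReraiseHttpException is intended to be applied as a
# decorator so apitools HTTP errors surface as calliope HttpExceptions with a
# readable message. The wrapped function below is hypothetical.
#
#   @ReraiseHttpException
#   def GetInstance(sql_client, instance_ref):
#     return sql_client.instances.Get(instance_ref.Request())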
def _ConstructSettingsFromArgs(sql_messages, args):
"""Constructs instance settings from the command line arguments.
Args:
sql_messages: module, The messages module that should be used.
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
A settings object representing the instance settings.
Raises:
    ToolException: An error other than http error occurred while executing the
command.
"""
settings = sql_messages.Settings(
tier=args.tier,
pricingPlan=args.pricing_plan,
replicationType=args.replication,
activationPolicy=args.activation_policy)
# these args are only present for the patch command
no_assign_ip = getattr(args, 'no_assign_ip', False)
no_require_ssl = getattr(args, 'no_require_ssl', False)
clear_authorized_networks = getattr(args, 'clear_authorized_networks', False)
clear_gae_apps = getattr(args, 'clear_gae_apps', False)
if args.authorized_gae_apps:
settings.authorizedGaeApplications = args.authorized_gae_apps
elif clear_gae_apps:
settings.authorizedGaeApplications = []
if any([args.assign_ip, args.require_ssl, args.authorized_networks,
no_assign_ip, no_require_ssl, clear_authorized_networks]):
settings.ipConfiguration = sql_messages.IpConfiguration()
if args.assign_ip:
settings.ipConfiguration.enabled = True
elif no_assign_ip:
settings.ipConfiguration.enabled = False
if args.authorized_networks:
settings.ipConfiguration.authorizedNetworks = args.authorized_networks
if clear_authorized_networks:
      # For patch requests, this field needs to be explicitly marked as cleared.
settings.ipConfiguration.authorizedNetworks = []
if args.require_ssl:
settings.ipConfiguration.requireSsl = True
if no_require_ssl:
settings.ipConfiguration.requireSsl = False
if any([args.follow_gae_app, args.gce_zone]):
settings.locationPreference = sql_messages.LocationPreference(
followGaeApplication=args.follow_gae_app,
zone=args.gce_zone)
enable_database_replication = getattr(
args, 'enable_database_replication', False)
no_enable_database_replication = getattr(
args, 'no_enable_database_replication', False)
if enable_database_replication:
settings.databaseReplicationEnabled = True
if no_enable_database_replication:
settings.databaseReplicationEnabled = False
return settings
def _SetDatabaseFlags(sql_messages, settings, args):
if args.database_flags:
settings.databaseFlags = []
for (name, value) in args.database_flags.items():
settings.databaseFlags.append(sql_messages.DatabaseFlags(
name=name,
value=value))
elif getattr(args, 'clear_database_flags', False):
settings.databaseFlags = []
def _SetBackupConfiguration(sql_messages, settings, args, original):
"""Sets the backup configuration for the instance."""
# these args are only present for the patch command
no_backup = getattr(args, 'no_backup', False)
no_enable_bin_log = getattr(args, 'no_enable_bin_log', False)
if original and (
any([args.backup_start_time, args.enable_bin_log,
no_backup, no_enable_bin_log])):
if original.settings.backupConfiguration:
backup_config = original.settings.backupConfiguration[0]
else:
backup_config = sql_messages.BackupConfiguration(
startTime='00:00',
          enabled=False)
elif not any([args.backup_start_time, args.enable_bin_log,
no_backup, no_enable_bin_log]):
return
if not original:
backup_config = sql_messages.BackupConfiguration(
startTime='00:00',
enabled=False)
if args.backup_start_time:
backup_config.startTime = args.backup_start_time
backup_config.enabled = True
if no_backup:
if args.backup_start_time or args.enable_bin_log:
raise exceptions.ToolException(
('Argument --no-backup not allowed with'
' --backup-start-time or --enable_bin_log'))
backup_config.enabled = False
if args.enable_bin_log:
backup_config.binaryLogEnabled = True
if no_enable_bin_log:
backup_config.binaryLogEnabled = False
settings.backupConfiguration = [backup_config]
def ConstructInstanceFromArgs(sql_messages, args, original=None):
"""Construct a Cloud SQL instance from command line args.
Args:
sql_messages: module, The messages module that should be used.
args: argparse.Namespace, The CLI arg namespace.
original: sql_messages.DatabaseInstance, The original instance, if some of
it might be used to fill fields in the new one.
Returns:
sql_messages.DatabaseInstance, The constructed (and possibly partial)
database instance.
Raises:
    ToolException: An error other than http error occurred while executing the
command.
"""
settings = _ConstructSettingsFromArgs(sql_messages, args)
_SetBackupConfiguration(sql_messages, settings, args, original)
_SetDatabaseFlags(sql_messages, settings, args)
# these flags are only present for the create command
region = getattr(args, 'region', None)
database_version = getattr(args, 'database_version', None)
instance_resource = sql_messages.DatabaseInstance(
region=region,
databaseVersion=database_version,
masterInstanceName=getattr(args, 'master_instance_name', None),
settings=settings)
return instance_resource
def ValidateInstanceName(instance_name):
if ':' in instance_name:
possible_project = instance_name[:instance_name.rindex(':')]
possible_instance = instance_name[instance_name.rindex(':')+1:]
raise exceptions.ToolException("""\
Instance names cannot contain the ':' character. If you meant to indicate the
project for [{instance}], use only '{instance}' for the argument, and either add
'--project {project}' to the command line or first run
$ gcloud config set project {project}
""".format(project=possible_project, instance=possible_instance))
|
|
#
#
# Copyright (C) 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Support for mocking the opcode processor"""
import re
from ganeti import constants
from ganeti import mcpu
class LogRecordingCallback(mcpu.OpExecCbBase):
"""Helper class for log output recording.
"""
def __init__(self, processor):
super(LogRecordingCallback, self).__init__()
self.processor = processor
def Feedback(self, *args):
assert len(args) < 3
if len(args) == 1:
log_type = constants.ELOG_MESSAGE
log_msg = args[0]
else:
(log_type, log_msg) = args
self.processor.log_entries.append((log_type, log_msg))
def SubmitManyJobs(self, jobs):
results = []
for idx, _ in enumerate(jobs):
results.append((True, idx))
return results
class ProcessorMock(mcpu.Processor):
"""Mocked opcode processor for tests.
  This class actually performs much more than a mock, as it drives the
  execution of LUs. But it also provides access to the log output of the LU
  and to the result of the execution.
See L{ExecOpCodeAndRecordOutput} for the main method of this class.
"""
def __init__(self, context, wconfd):
super(ProcessorMock, self).__init__(context, 1, True)
self.log_entries = []
self._lu_test_func = None
self.wconfd = wconfd
def ExecOpCodeAndRecordOutput(self, op):
"""Executes the given opcode and records the output for further inspection.
@param op: the opcode to execute.
@return: see L{mcpu.Processor.ExecOpCode}
"""
return self.ExecOpCode(op, LogRecordingCallback(self))
def _ExecLU(self, lu):
# pylint: disable=W0212
if not self._lu_test_func:
return super(ProcessorMock, self)._ExecLU(lu)
else:
      # required by a lot of LUs, and usually passed in Exec
lu._feedback_fn = self.Log
return self._lu_test_func(lu)
def _CheckLUResult(self, op, result):
# pylint: disable=W0212
if not self._lu_test_func:
return super(ProcessorMock, self)._CheckLUResult(op, result)
else:
pass
def RunWithLockedLU(self, op, func):
"""Takes the given opcode, creates a LU and runs func with it.
@param op: the opcode to get the LU for.
@param func: the function to run with the created and locked LU.
@return: the result of func.
"""
self._lu_test_func = func
try:
return self.ExecOpCodeAndRecordOutput(op)
finally:
self._lu_test_func = None
def GetLogEntries(self):
"""Return the list of recorded log entries.
@rtype: list of (string, string) tuples
@return: the list of recorded log entries
"""
return self.log_entries
def GetLogMessages(self):
"""Return the list of recorded log messages.
@rtype: list of string
@return: the list of recorded log messages
"""
return [msg for _, msg in self.log_entries]
def GetLogEntriesString(self):
"""Return a string with all log entries separated by a newline.
"""
return "\n".join("%s: %s" % (log_type, msg)
for log_type, msg in self.GetLogEntries())
def GetLogMessagesString(self):
"""Return a string with all log messages separated by a newline.
"""
return "\n".join("%s" % msg for _, msg in self.GetLogEntries())
def assertLogContainsEntry(self, expected_type, expected_msg):
"""Asserts that the log contains the exact given entry.
@type expected_type: string
@param expected_type: the expected type
@type expected_msg: string
@param expected_msg: the expected message
"""
for log_type, msg in self.log_entries:
if log_type == expected_type and msg == expected_msg:
return
raise AssertionError(
"Could not find '%s' (type '%s') in LU log messages. Log is:\n%s" %
(expected_msg, expected_type, self.GetLogEntriesString()))
def assertLogContainsMessage(self, expected_msg):
"""Asserts that the log contains the exact given message.
@type expected_msg: string
@param expected_msg: the expected message
"""
for msg in self.GetLogMessages():
if msg == expected_msg:
return
raise AssertionError(
"Could not find '%s' in LU log messages. Log is:\n%s" %
(expected_msg, self.GetLogMessagesString()))
def assertLogContainsRegex(self, expected_regex):
"""Asserts that the log contains a message which matches the regex.
@type expected_regex: string
@param expected_regex: regular expression to match messages with.
"""
for msg in self.GetLogMessages():
if re.search(expected_regex, msg) is not None:
return
raise AssertionError(
"Could not find '%s' in LU log messages. Log is:\n%s" %
(expected_regex, self.GetLogMessagesString())
)
def assertLogContainsInLine(self, expected):
"""Asserts that the log contains a message which contains a string.
@type expected: string
@param expected: string to search in messages.
"""
self.assertLogContainsRegex(re.escape(expected))
def assertLogDoesNotContainRegex(self, expected_regex):
"""Asserts that the log does not contain a message which matches the regex.
@type expected_regex: string
@param expected_regex: regular expression to match messages with.
"""
for msg in self.GetLogMessages():
if re.search(expected_regex, msg) is not None:
raise AssertionError(
"Found '%s' in LU log messages. Log is:\n%s" %
(expected_regex, self.GetLogMessagesString())
)
def assertLogIsEmpty(self):
"""Asserts that the log does not contain any message.
"""
if len(self.GetLogMessages()) > 0:
raise AssertionError("Log is not empty. Log is:\n%s" %
self.GetLogMessagesString())
def ClearLogMessages(self):
"""Clears all recorded log messages.
    This is useful if you use L{RunWithLockedLU} and want to test multiple calls
on it.
"""
self.log_entries = []
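# Hedged usage sketch (not part of the original module): a typical unit test
# executes an opcode through the mock and asserts on the recorded log output.
# `context`, `wconfd` and `op` are assumed test fixtures.
#
#   proc = ProcessorMock(context, wconfd)
#   result = proc.ExecOpCodeAndRecordOutput(op)
#   proc.assertLogContainsRegex(r"starting instance .*")
#   proc.ClearLogMessages()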
|
|
import sys
import time
from multiprocessing import Process, Queue
import yaml
import numpy as np
import zmq
import pycuda.driver as drv
#sys.path.append('./lib')
from tools import (save_weights, load_weights,
save_momentums, load_momentums)
from train_funcs import (unpack_configs, adjust_learning_rate,
get_val_error_loss, get_rand3d, train_model_wrap,
proc_configs)
def train_net(config):
# UNPACK CONFIGS
(flag_para_load, train_filenames, val_filenames,
train_labels, val_labels, img_mean) = unpack_configs(config)
# pycuda set up
drv.init()
dev = drv.Device(int(config['gpu'][-1]))
ctx = dev.make_context()
if flag_para_load:
# zmq set up
sock = zmq.Context().socket(zmq.PAIR)
sock.connect('tcp://localhost:{0}'.format(config['sock_data']))
load_send_queue = config['queue_t2l']
load_recv_queue = config['queue_l2t']
else:
load_send_queue = None
load_recv_queue = None
import theano.sandbox.cuda
theano.sandbox.cuda.use(config['gpu'])
import theano
theano.config.on_unused_input = 'warn'
from layers import DropoutLayer
from alex_net import AlexNet, compile_models
#from vgg_net import VggNet, compile_models
import theano.misc.pycuda_init
import theano.misc.pycuda_utils
## BUILD NETWORK ##
model = AlexNet(config)
#model = VggNet(config)
layers = model.layers
batch_size = model.batch_size
## COMPILE FUNCTIONS ##
(train_model, validate_model, train_error, learning_rate,
shared_x, shared_y, rand_arr, vels) = compile_models(model, config)
######################### TRAIN MODEL ################################
print '... training'
if flag_para_load:
# pass ipc handle and related information
gpuarray_batch = theano.misc.pycuda_utils.to_gpuarray(
shared_x.container.value)
h = drv.mem_get_ipc_handle(gpuarray_batch.ptr)
sock.send_pyobj((gpuarray_batch.shape, gpuarray_batch.dtype, h))
load_send_queue.put(img_mean)
print '... para load started'
n_train_batches = len(train_filenames)
minibatch_range = range(n_train_batches)
# Start Training Loop
ferror_rate = open("data/error_rate.txt",'w')
print '... start 0 epoch'
epoch = 0
step_idx = 0
val_record = []
while epoch < config['n_epochs']:
epoch = epoch + 1
if config['shuffle']:
np.random.shuffle(minibatch_range)
if config['resume_train'] and epoch == 1:
load_epoch = config['load_epoch']
load_weights(layers, config['weights_dir'], load_epoch)
lr_to_load = np.load(
config['weights_dir'] + 'lr_' + str(load_epoch) + '.npy')
val_record = list(
np.load(config['weights_dir'] + 'val_record.npy'))
learning_rate.set_value(lr_to_load)
load_momentums(vels, config['weights_dir'], load_epoch)
epoch = load_epoch + 1
if flag_para_load:
# send the initial message to load data, before each epoch
load_send_queue.put(str(train_filenames[minibatch_range[0]]))
load_send_queue.put(get_rand3d())
# clear the sync before 1st calc
load_send_queue.put('calc_finished')
count = 0
for minibatch_index in minibatch_range:
num_iter = (epoch - 1) * n_train_batches + count
count = count + 1
if count == 1:
s = time.time()
if count == 20:
e = time.time()
print "time per 20 iter:", (e - s)
cost_ij = train_model_wrap(train_model, shared_x,
shared_y, rand_arr, img_mean,
count, minibatch_index,
minibatch_range, batch_size,
train_filenames, train_labels,
flag_para_load,
config['batch_crop_mirror'],
send_queue=load_send_queue,
recv_queue=load_recv_queue)
if num_iter % config['print_freq'] == 0:
print 'training @ iter = ', num_iter
print 'training cost:', cost_ij
if config['print_train_error']:
print 'training error rate:', train_error()
# ferror_rate.write('training error rate:', train_error())
if flag_para_load and (count < len(minibatch_range)):
load_send_queue.put('calc_finished')
############### Test on Validation Set ##################
DropoutLayer.SetDropoutOff()
this_validation_error, this_validation_loss = get_val_error_loss(
rand_arr, shared_x, shared_y,
val_filenames, val_labels,
flag_para_load, img_mean,
batch_size, validate_model,
send_queue=load_send_queue, recv_queue=load_recv_queue)
print('epoch %i: validation loss %f ' %
(epoch, this_validation_loss))
print('epoch %i: validation error %f %%' %
(epoch, this_validation_error * 100.))
val_record.append([this_validation_error, this_validation_loss])
np.save(config['weights_dir'] + 'val_record.npy', val_record)
DropoutLayer.SetDropoutOn()
############################################
# Adapt Learning Rate
step_idx = adjust_learning_rate(config, epoch, step_idx,
val_record, learning_rate)
# Save weights
if epoch % config['snapshot_freq'] == 0:
save_weights(layers, config['weights_dir'], epoch)
np.save(config['weights_dir'] + 'lr_' + str(epoch) + '.npy',
learning_rate.get_value())
save_momentums(vels, config['weights_dir'], epoch)
print('Optimization complete.')
if __name__ == '__main__':
with open('config.yaml', 'r') as f:
config = yaml.load(f)
with open('spec_1gpu.yaml', 'r') as f:
config = dict(config.items() + yaml.load(f).items())
config = proc_configs(config)
if config['para_load']:
from proc_load import fun_load
config['queue_l2t'] = Queue(1)
config['queue_t2l'] = Queue(1)
train_proc = Process(target=train_net, args=(config,))
load_proc = Process(
target=fun_load, args=(config, config['sock_data']))
train_proc.start()
load_proc.start()
train_proc.join()
load_proc.join()
else:
train_proc = Process(target=train_net, args=(config,))
train_proc.start()
train_proc.join()
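# Hedged sketch of the loader-side counterpart of the para_load handshake set
# up in train_net() above; the real implementation lives in proc_load.fun_load
# (not shown here), so the exact calls are assumptions:
#
#   shape, dtype, h = sock.recv_pyobj()        # sent by train_net over zmq
#   ipc_handle = drv.IPCMemoryHandle(h)        # map the shared GPU buffer
#   img_mean = recv_queue.get()                # queue_t2l, loader side
#   # ...load a batch, write it into the shared buffer, then signal the trainer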
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tool for automatically creating .nmf files from .nexe/.pexe executables.
As well as creating the nmf file, this tool can also find and stage
any shared library dependencies that the executables might have.
"""
import errno
import json
import optparse
import os
import re
import shutil
import struct
import subprocess
import sys
import getos
import quote
if sys.version_info < (2, 6, 0):
sys.stderr.write("python 2.6 or later is required run this script\n")
sys.exit(1)
NeededMatcher = re.compile('^ *NEEDED *([^ ]+)\n$')
FormatMatcher = re.compile('^(.+):\\s*file format (.+)\n$')
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
OBJDUMP_ARCH_MAP = {
# Names returned by Linux's objdump:
'elf64-x86-64': 'x86-64',
'elf32-i386': 'x86-32',
'elf32-little': 'arm',
'elf32-littlearm': 'arm',
# Names returned by x86_64-nacl-objdump:
'elf64-nacl': 'x86-64',
'elf32-nacl': 'x86-32',
}
ARCH_LOCATION = {
'x86-32': 'lib32',
'x86-64': 'lib64',
'arm': 'lib',
}
# These constants are used within nmf files.
RUNNABLE_LD = 'runnable-ld.so' # Name of the dynamic loader
MAIN_NEXE = 'main.nexe' # Name of entry point for execution
PROGRAM_KEY = 'program' # Key of the program section in an nmf file
URL_KEY = 'url' # Key of the url field for a particular file in an nmf file
FILES_KEY = 'files' # Key of the files section in an nmf file
PNACL_OPTLEVEL_KEY = 'optlevel' # key for PNaCl optimization level
PORTABLE_KEY = 'portable' # key for portable section of manifest
TRANSLATE_KEY = 'pnacl-translate' # key for translatable objects
# The proper name of the dynamic linker, as kept in the IRT. This is
# excluded from the nmf file by convention.
LD_NACL_MAP = {
'x86-32': 'ld-nacl-x86-32.so.1',
'x86-64': 'ld-nacl-x86-64.so.1',
'arm': None,
}
def DebugPrint(message):
if DebugPrint.debug_mode:
sys.stderr.write('%s\n' % message)
DebugPrint.debug_mode = False # Set to True to enable extra debug prints
def MakeDir(dirname):
"""Just like os.makedirs but doesn't generate errors when dirname
already exists.
"""
if os.path.isdir(dirname):
return
Trace("mkdir: %s" % dirname)
try:
os.makedirs(dirname)
except OSError as exception_info:
if exception_info.errno != errno.EEXIST:
raise
class Error(Exception):
'''Local Error class for this file.'''
pass
def ParseElfHeader(path):
"""Determine properties of a nexe by parsing elf header.
Return tuple of architecture and boolean signalling whether
the executable is dynamic (has INTERP header) or static.
"""
# From elf.h:
# typedef struct
# {
# unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */
# Elf64_Half e_type; /* Object file type */
# Elf64_Half e_machine; /* Architecture */
# ...
# } Elf32_Ehdr;
elf_header_format = '16s2H'
elf_header_size = struct.calcsize(elf_header_format)
with open(path, 'rb') as f:
header = f.read(elf_header_size)
try:
header = struct.unpack(elf_header_format, header)
except struct.error:
raise Error("error parsing elf header: %s" % path)
e_ident, _, e_machine = header[:3]
elf_magic = '\x7fELF'
if e_ident[:4] != elf_magic:
raise Error('Not a valid NaCl executable: %s' % path)
e_machine_mapping = {
3 : 'x86-32',
40 : 'arm',
62 : 'x86-64'
}
if e_machine not in e_machine_mapping:
raise Error('Unknown machine type: %s' % e_machine)
# Set arch based on the machine type in the elf header
arch = e_machine_mapping[e_machine]
# Now read the full header in either 64bit or 32bit mode
dynamic = IsDynamicElf(path, arch == 'x86-64')
return arch, dynamic
def IsDynamicElf(path, is64bit):
"""Examine an elf file to determine if it is dynamically
linked or not.
This is determined by searching the program headers for
a header of type PT_INTERP.
"""
if is64bit:
elf_header_format = '16s2HI3QI3H'
else:
elf_header_format = '16s2HI3II3H'
elf_header_size = struct.calcsize(elf_header_format)
with open(path, 'rb') as f:
header = f.read(elf_header_size)
header = struct.unpack(elf_header_format, header)
p_header_offset = header[5]
p_header_entry_size = header[9]
num_p_header = header[10]
f.seek(p_header_offset)
p_headers = f.read(p_header_entry_size*num_p_header)
# Read the first word of each Phdr to find out its type.
#
# typedef struct
# {
# Elf32_Word p_type; /* Segment type */
# ...
# } Elf32_Phdr;
elf_phdr_format = 'I'
PT_INTERP = 3
while p_headers:
p_header = p_headers[:p_header_entry_size]
p_headers = p_headers[p_header_entry_size:]
phdr_type = struct.unpack(elf_phdr_format, p_header[:4])[0]
if phdr_type == PT_INTERP:
return True
return False
class ArchFile(object):
  '''Simple structure containing information about a file and its architecture.
Attributes:
name: Name of this file
path: Full path to this file on the build system
arch: Architecture of this file (e.g., x86-32)
url: Relative path to file in the staged web directory.
Used for specifying the "url" attribute in the nmf file.'''
def __init__(self, name, path, url, arch=None):
self.name = name
self.path = path
self.url = url
self.arch = arch
if not arch:
self.arch = ParseElfHeader(path)[0]
def __repr__(self):
return '<ArchFile %s>' % self.path
def __str__(self):
'''Return the file path when invoked with the str() function'''
return self.path
class NmfUtils(object):
'''Helper class for creating and managing nmf files
Attributes:
manifest: A JSON-structured dict containing the nmf structure
needed: A dict with key=filename and value=ArchFile (see GetNeeded)
'''
def __init__(self, main_files=None, objdump=None,
lib_path=None, extra_files=None, lib_prefix=None,
remap=None, pnacl_optlevel=None):
'''Constructor
Args:
main_files: List of main entry program files. These will be named
files->main.nexe for dynamic nexes, and program for static nexes
objdump: path to x86_64-nacl-objdump tool (or Linux equivalent)
lib_path: List of paths to library directories
extra_files: List of extra files to include in the nmf
lib_prefix: A list of path components to prepend to the library paths,
both for staging the libraries and for inclusion into the nmf file.
Examples: ['..'], ['lib_dir']
remap: Remaps the library name in the manifest.
pnacl_optlevel: Optimization level for PNaCl translation.
'''
self.objdump = objdump
self.main_files = main_files or []
self.extra_files = extra_files or []
self.lib_path = lib_path or []
self.manifest = None
self.needed = {}
self.lib_prefix = lib_prefix or []
self.remap = remap or {}
self.pnacl = main_files and main_files[0].endswith('pexe')
self.pnacl_optlevel = pnacl_optlevel
for filename in self.main_files:
if not os.path.exists(filename):
raise Error('Input file not found: %s' % filename)
if not os.path.isfile(filename):
raise Error('Input is not a file: %s' % filename)
def GleanFromObjdump(self, files, arch):
'''Get architecture and dependency information for given files
Args:
files: A list of files to examine.
[ '/path/to/my.nexe',
'/path/to/lib64/libmy.so',
'/path/to/mydata.so',
'/path/to/my.data' ]
      arch: The architecture we are looking for, or None to accept any
architecture.
Returns: A tuple with the following members:
input_info: A dict with key=filename and value=ArchFile of input files.
Includes the input files as well, with arch filled in if absent.
Example: { '/path/to/my.nexe': ArchFile(my.nexe),
'/path/to/libfoo.so': ArchFile(libfoo.so) }
needed: A set of strings formatted as "arch/name". Example:
set(['x86-32/libc.so', 'x86-64/libgcc.so'])
'''
if not self.objdump:
self.objdump = FindObjdumpExecutable()
if not self.objdump:
raise Error('No objdump executable found (see --help for more info)')
full_paths = set()
for filename in files:
if os.path.exists(filename):
full_paths.add(filename)
else:
for path in self.FindLibsInPath(filename):
full_paths.add(path)
cmd = [self.objdump, '-p'] + list(full_paths)
DebugPrint('GleanFromObjdump[%s](%s)' % (arch, cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=-1)
input_info = {}
found_basenames = set()
needed = set()
output, err_output = proc.communicate()
if proc.returncode:
raise Error('%s\nStdError=%s\nobjdump failed with error code: %d' %
(output, err_output, proc.returncode))
for line in output.splitlines(True):
# Objdump should display the architecture first and then the dependencies
# second for each file in the list.
matched = FormatMatcher.match(line)
if matched:
filename = matched.group(1)
file_arch = OBJDUMP_ARCH_MAP[matched.group(2)]
if arch and file_arch != arch:
continue
name = os.path.basename(filename)
found_basenames.add(name)
input_info[filename] = ArchFile(
arch=file_arch,
name=name,
path=filename,
url='/'.join(self.lib_prefix + [ARCH_LOCATION[file_arch], name]))
matched = NeededMatcher.match(line)
if matched:
match = '/'.join([file_arch, matched.group(1)])
needed.add(match)
Trace("NEEDED: %s" % match)
for filename in files:
if os.path.basename(filename) not in found_basenames:
raise Error('Library not found [%s]: %s' % (arch, filename))
return input_info, needed
def FindLibsInPath(self, name):
'''Finds the set of libraries matching |name| within lib_path
Args:
name: name of library to find
Returns:
A list of system paths that match the given name within the lib_path'''
files = []
for dirname in self.lib_path:
filename = os.path.join(dirname, name)
if os.path.exists(filename):
files.append(filename)
if not files:
raise Error('cannot find library %s' % name)
return files
def GetNeeded(self):
'''Collect the list of dependencies for the main_files
Returns:
A dict with key=filename and value=ArchFile of input files.
Includes the input files as well, with arch filled in if absent.
Example: { '/path/to/my.nexe': ArchFile(my.nexe),
'/path/to/libfoo.so': ArchFile(libfoo.so) }'''
if self.needed:
return self.needed
DebugPrint('GetNeeded(%s)' % self.main_files)
dynamic = any(ParseElfHeader(f)[1] for f in self.main_files)
if dynamic:
examined = set()
all_files, unexamined = self.GleanFromObjdump(self.main_files, None)
for arch_file in all_files.itervalues():
arch_file.url = arch_file.path
if unexamined:
unexamined.add('/'.join([arch_file.arch, RUNNABLE_LD]))
while unexamined:
files_to_examine = {}
# Take all the currently unexamined files and group them
# by architecture.
for arch_name in unexamined:
arch, name = arch_name.split('/')
files_to_examine.setdefault(arch, []).append(name)
# Call GleanFromObjdump() for each architecture.
needed = set()
for arch, files in files_to_examine.iteritems():
new_files, new_needed = self.GleanFromObjdump(files, arch)
all_files.update(new_files)
needed |= new_needed
examined |= unexamined
unexamined = needed - examined
# With the runnable-ld.so scheme we have today, the proper name of
# the dynamic linker should be excluded from the list of files.
ldso = [LD_NACL_MAP[arch] for arch in set(OBJDUMP_ARCH_MAP.values())]
for name, arch_file in all_files.items():
if arch_file.name in ldso:
del all_files[name]
self.needed = all_files
else:
for filename in self.main_files:
url = os.path.split(filename)[1]
archfile = ArchFile(name=os.path.basename(filename),
path=filename, url=url)
self.needed[filename] = archfile
return self.needed
def StageDependencies(self, destination_dir):
'''Copies over the dependencies into a given destination directory
Each library will be put into a subdirectory that corresponds to the arch.
Args:
destination_dir: The destination directory for staging the dependencies
'''
nexe_root = os.path.dirname(os.path.abspath(self.main_files[0]))
nexe_root = os.path.normcase(nexe_root)
needed = self.GetNeeded()
for arch_file in needed.itervalues():
urldest = arch_file.url
source = arch_file.path
# for .nexe and .so files specified on the command line stage
# them in paths relative to the .nexe (with the .nexe always
# being staged at the root).
if source in self.main_files:
absdest = os.path.normcase(os.path.abspath(urldest))
if absdest.startswith(nexe_root):
urldest = os.path.relpath(urldest, nexe_root)
destination = os.path.join(destination_dir, urldest)
if (os.path.normcase(os.path.abspath(source)) ==
os.path.normcase(os.path.abspath(destination))):
continue
# make sure target dir exists
MakeDir(os.path.dirname(destination))
Trace('copy: %s -> %s' % (source, destination))
shutil.copy2(source, destination)
def _GeneratePNaClManifest(self):
manifest = {}
manifest[PROGRAM_KEY] = {}
manifest[PROGRAM_KEY][PORTABLE_KEY] = {}
translate_dict = {
"url": os.path.basename(self.main_files[0]),
}
if self.pnacl_optlevel is not None:
translate_dict[PNACL_OPTLEVEL_KEY] = self.pnacl_optlevel
manifest[PROGRAM_KEY][PORTABLE_KEY][TRANSLATE_KEY] = translate_dict
self.manifest = manifest
def _GenerateManifest(self):
'''Create a JSON formatted dict containing the files
NaCl will map url requests based on architecture. The startup NEXE
can always be found under the top key PROGRAM. Additional files are under
the FILES key further mapped by file name. In the case of 'runnable' the
    PROGRAM key is populated with urls pointing to runnable-ld.so, which acts
    as the startup nexe. The application itself is then placed under the
    FILES key mapped as 'main.nexe' instead of the original name so that the
    loader can find it. '''
manifest = { FILES_KEY: {}, PROGRAM_KEY: {} }
needed = self.GetNeeded()
runnable = any(n.endswith(RUNNABLE_LD) for n in needed)
extra_files_kv = [(key, ArchFile(name=key,
arch=arch,
path=url,
url=url))
for key, arch, url in self.extra_files]
nexe_root = os.path.dirname(os.path.abspath(self.main_files[0]))
for need, archinfo in needed.items() + extra_files_kv:
urlinfo = { URL_KEY: archinfo.url }
name = archinfo.name
# If starting with runnable-ld.so, make that the main executable.
if runnable:
if need.endswith(RUNNABLE_LD):
manifest[PROGRAM_KEY][archinfo.arch] = urlinfo
continue
if need in self.main_files:
# Ensure that the .nexe and .so names are relative to the root
# of where the .nexe lives.
if os.path.abspath(urlinfo[URL_KEY]).startswith(nexe_root):
urlinfo[URL_KEY] = os.path.relpath(urlinfo[URL_KEY], nexe_root)
if need.endswith(".nexe"):
# Place it under program if we aren't using the runnable-ld.so.
if not runnable:
manifest[PROGRAM_KEY][archinfo.arch] = urlinfo
continue
        # Otherwise, treat it like another file named main.nexe.
name = MAIN_NEXE
name = self.remap.get(name, name)
fileinfo = manifest[FILES_KEY].get(name, {})
fileinfo[archinfo.arch] = urlinfo
manifest[FILES_KEY][name] = fileinfo
self.manifest = manifest
def GetManifest(self):
'''Returns a JSON-formatted dict containing the NaCl dependencies'''
if not self.manifest:
if self.pnacl:
self._GeneratePNaClManifest()
else:
self._GenerateManifest()
return self.manifest
def GetJson(self):
'''Returns the Manifest as a JSON-formatted string'''
pretty_string = json.dumps(self.GetManifest(), indent=2)
# json.dumps sometimes returns trailing whitespace and does not put
# a newline at the end. This code fixes these problems.
pretty_lines = pretty_string.split('\n')
return '\n'.join([line.rstrip() for line in pretty_lines]) + '\n'
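# Hedged usage sketch (not part of the original file): programmatic use of
# NmfUtils outside of main(); the .nexe paths and output locations below are
# placeholders.
#
#   nmf = NmfUtils(main_files=['hello_x86_64.nexe', 'hello_x86_32.nexe'],
#                  lib_path=GetDefaultLibPath('Release'))
#   with open('hello.nmf', 'w') as f:
#     f.write(nmf.GetJson())
#   nmf.StageDependencies('out/web')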
def Trace(msg):
if Trace.verbose:
sys.stderr.write(str(msg) + '\n')
Trace.verbose = False
def ParseExtraFiles(encoded_list, err):
"""Parse the extra-files list and return a canonicalized list of
[key, arch, url] triples. The |encoded_list| should be a list of
strings of the form 'key:url' or 'key:arch:url', where an omitted
'arch' is taken to mean 'portable'.
All entries in |encoded_list| are checked for syntax errors before
returning. Error messages are written to |err| (typically
sys.stderr) so that the user has actionable feedback for fixing all
errors, rather than one at a time. If there are any errors, None is
returned instead of a list, since an empty list is a valid return
value.
"""
seen_error = False
canonicalized = []
for ix in range(len(encoded_list)):
kv = encoded_list[ix]
unquoted = quote.unquote(kv, ':')
if len(unquoted) == 3:
if unquoted[1] != ':':
err.write('Syntax error for key:value tuple ' +
'for --extra-files argument: ' + kv + '\n')
seen_error = True
else:
canonicalized.append([unquoted[0], 'portable', unquoted[2]])
elif len(unquoted) == 5:
if unquoted[1] != ':' or unquoted[3] != ':':
err.write('Syntax error for key:arch:url tuple ' +
'for --extra-files argument: ' +
kv + '\n')
seen_error = True
else:
canonicalized.append([unquoted[0], unquoted[2], unquoted[4]])
else:
err.write('Bad key:arch:url tuple for --extra-files: ' + kv + '\n')
if seen_error:
return None
return canonicalized
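# Hedged illustration (not part of the original file) of the encodings that
# ParseExtraFiles accepts; the file names below are placeholders.
#
#   ParseExtraFiles(['logo.png:images/logo.png',
#                    'libfoo.so:x86-64:lib64/libfoo.so'], sys.stderr)
#   # -> [['logo.png', 'portable', 'images/logo.png'],
#   #     ['libfoo.so', 'x86-64', 'lib64/libfoo.so']]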
def GetSDKRoot():
"""Determine current NACL_SDK_ROOT, either via the environment variable
itself, or by attempting to derive it from the location of this script.
"""
sdk_root = os.environ.get('NACL_SDK_ROOT')
if not sdk_root:
sdk_root = os.path.dirname(SCRIPT_DIR)
if not os.path.exists(os.path.join(sdk_root, 'toolchain')):
return None
return sdk_root
def FindObjdumpExecutable():
"""Derive path to objdump executable to use for determining shared
object dependencies.
"""
sdk_root = GetSDKRoot()
if not sdk_root:
return None
osname = getos.GetPlatform()
toolchain = os.path.join(sdk_root, 'toolchain', '%s_x86_glibc' % osname)
objdump = os.path.join(toolchain, 'bin', 'x86_64-nacl-objdump')
if osname == 'win':
objdump += '.exe'
if not os.path.exists(objdump):
sys.stderr.write('WARNING: failed to find objdump in default '
'location: %s' % objdump)
return None
return objdump
def GetDefaultLibPath(config):
"""Derive default library path to use when searching for shared
  objects. This currently includes the toolchain library folders
as well as the top level SDK lib folder and the naclports lib
folder. We include both 32-bit and 64-bit library paths.
"""
assert(config in ('Debug', 'Release'))
sdk_root = GetSDKRoot()
if not sdk_root:
    # TODO(sbc): output a warning here? We would also need to suppress
# the warning when run from the chromium build.
return []
osname = getos.GetPlatform()
libpath = [
# Core toolchain libraries
'toolchain/%s_x86_glibc/x86_64-nacl/lib' % osname,
'toolchain/%s_x86_glibc/x86_64-nacl/lib32' % osname,
# naclports installed libraries
'toolchain/%s_x86_glibc/x86_64-nacl/usr/lib' % osname,
'toolchain/%s_x86_glibc/i686-nacl/usr/lib' % osname,
# SDK bundle libraries
'lib/glibc_x86_32/%s' % config,
'lib/glibc_x86_64/%s' % config,
# naclports bundle libraries
'ports/lib/glibc_x86_32/%s' % config,
'ports/lib/glibc_x86_64/%s' % config,
]
libpath = [os.path.normpath(p) for p in libpath]
libpath = [os.path.join(sdk_root, p) for p in libpath]
return libpath
def main(argv):
parser = optparse.OptionParser(
usage='Usage: %prog [options] nexe [extra_libs...]', description=__doc__)
parser.add_option('-o', '--output', dest='output',
help='Write manifest file to FILE (default is stdout)',
metavar='FILE')
parser.add_option('-D', '--objdump', dest='objdump',
help='Override the default "objdump" tool used to find '
'shared object dependencies',
metavar='TOOL')
parser.add_option('--no-default-libpath', action='store_true',
help="Don't include the SDK default library paths")
parser.add_option('--debug-libs', action='store_true',
help='Use debug library paths when constructing default '
'library path.')
parser.add_option('-L', '--library-path', dest='lib_path',
action='append', default=[],
help='Add DIRECTORY to library search path',
metavar='DIRECTORY')
parser.add_option('-P', '--path-prefix', dest='path_prefix', default='',
help='A path to prepend to shared libraries in the .nmf',
metavar='DIRECTORY')
parser.add_option('-s', '--stage-dependencies', dest='stage_dependencies',
help='Destination directory for staging libraries',
metavar='DIRECTORY')
parser.add_option('-t', '--toolchain', help='Legacy option, do not use')
parser.add_option('-n', '--name', dest='name',
help='Rename FOO as BAR',
action='append', default=[], metavar='FOO,BAR')
parser.add_option('-x', '--extra-files',
help=('Add extra key:file tuple to the "files"' +
' section of the .nmf'),
action='append', default=[], metavar='FILE')
parser.add_option('-O', '--pnacl-optlevel',
help='Set the optimization level to N in PNaCl manifests',
metavar='N')
parser.add_option('-v', '--verbose',
help='Verbose output', action='store_true')
parser.add_option('-d', '--debug-mode',
help='Debug mode', action='store_true')
# To enable bash completion for this command first install optcomplete
# and then add this line to your .bashrc:
# complete -F _optcomplete create_nmf.py
try:
import optcomplete
optcomplete.autocomplete(parser)
except ImportError:
pass
options, args = parser.parse_args(argv)
if options.verbose:
Trace.verbose = True
if options.debug_mode:
DebugPrint.debug_mode = True
if options.toolchain is not None:
sys.stderr.write('warning: option -t/--toolchain is deprecated.\n')
if len(args) < 1:
parser.error('No nexe files specified. See --help for more info')
canonicalized = ParseExtraFiles(options.extra_files, sys.stderr)
if canonicalized is None:
parser.error('Bad --extra-files (-x) argument syntax')
remap = {}
for ren in options.name:
parts = ren.split(',')
if len(parts) != 2:
parser.error('Expecting --name=<orig_arch.so>,<new_name.so>')
remap[parts[0]] = parts[1]
if options.path_prefix:
path_prefix = options.path_prefix.split('/')
else:
path_prefix = []
for libpath in options.lib_path:
if not os.path.exists(libpath):
sys.stderr.write('Specified library path does not exist: %s\n' % libpath)
elif not os.path.isdir(libpath):
sys.stderr.write('Specified library is not a directory: %s\n' % libpath)
if not options.no_default_libpath:
# Add default libraries paths to the end of the search path.
config = options.debug_libs and 'Debug' or 'Release'
options.lib_path += GetDefaultLibPath(config)
pnacl_optlevel = None
if options.pnacl_optlevel is not None:
pnacl_optlevel = int(options.pnacl_optlevel)
if pnacl_optlevel < 0 or pnacl_optlevel > 3:
sys.stderr.write(
'warning: PNaCl optlevel %d is unsupported (< 0 or > 3)\n' %
pnacl_optlevel)
nmf = NmfUtils(objdump=options.objdump,
main_files=args,
lib_path=options.lib_path,
extra_files=canonicalized,
lib_prefix=path_prefix,
remap=remap,
pnacl_optlevel=pnacl_optlevel)
nmf.GetManifest()
if not options.output:
sys.stdout.write(nmf.GetJson())
else:
with open(options.output, 'w') as output:
output.write(nmf.GetJson())
if options.stage_dependencies and not nmf.pnacl:
Trace('Staging dependencies...')
nmf.StageDependencies(options.stage_dependencies)
return 0
if __name__ == '__main__':
try:
rtn = main(sys.argv[1:])
except Error, e:
sys.stderr.write('%s: %s\n' % (os.path.basename(__file__), e))
rtn = 1
except KeyboardInterrupt:
sys.stderr.write('%s: interrupted\n' % os.path.basename(__file__))
rtn = 1
sys.exit(rtn)
|
|
#############################################################################
##
## Copyright (C) 2012 Riverbank Computing Limited.
## Copyright (C) 2006 Thorsten Marek.
## All right reserved.
##
## This file is part of PyQt.
##
## You may use this file under the terms of the GPL v2 or the revised BSD
## license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of the Riverbank Computing Limited nor the names
## of its contributors may be used to endorse or promote products
## derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
#############################################################################
import sys
import re
from PyQt4.uic.Compiler.indenter import write_code
from PyQt4.uic.Compiler.misc import Literal, moduleMember
if sys.hexversion >= 0x03000000:
from PyQt4.uic.port_v3.proxy_base import ProxyBase
from PyQt4.uic.port_v3.as_string import as_string
else:
from PyQt4.uic.port_v2.proxy_base import ProxyBase
from PyQt4.uic.port_v2.as_string import as_string
i18n_strings = []
i18n_context = ""
def i18n_print(string):
i18n_strings.append(string)
def i18n_void_func(name):
def _printer(self, *args):
i18n_print("%s.%s(%s)" % (self, name, ", ".join(map(as_string, args))))
return _printer
def i18n_func(name):
def _printer(self, rname, *args):
i18n_print("%s = %s.%s(%s)" % (rname, self, name, ", ".join(map(as_string, args))))
return Literal(rname)
return _printer
def strict_getattr(module, clsname):
cls = getattr(module, clsname)
if issubclass(cls, LiteralProxyClass):
raise AttributeError(cls)
else:
return cls
class i18n_string(object):
def __init__(self, string, disambig):
self.string = string
self.disambig = disambig
def __str__(self):
if self.disambig is None:
disambig = "None"
else:
disambig = as_string(self.disambig, encode=False)
return '_translate("%s", %s, %s)' % (i18n_context, as_string(self.string, encode=False), disambig)
# Classes with this flag will be handled as literal values. If functions are
# called on these classes, the literal value changes.
# Example:
# the code
# >>> QSize(9,10).expandedTo(...)
# will print just that code.
AS_ARGUMENT = 2
# ATTENTION: currently, classes can either be literal or normal. If a class
# should need both kinds of behaviour, the code has to be changed.
class ProxyClassMember(object):
def __init__(self, proxy, function_name, flags):
self.proxy = proxy
self.function_name = function_name
self.flags = flags
def __str__(self):
return "%s.%s" % (self.proxy, self.function_name)
def __call__(self, *args):
if self.function_name == 'setProperty':
str_args = (as_string(args[0], encode=False), as_string(args[1]))
else:
str_args = map(as_string, args)
func_call = "%s.%s(%s)" % (self.proxy,
self.function_name,
", ".join(str_args))
if self.flags & AS_ARGUMENT:
self.proxy._uic_name = func_call
return self.proxy
else:
needs_translation = False
for arg in args:
if isinstance(arg, i18n_string):
needs_translation = True
if needs_translation:
i18n_print(func_call)
else:
write_code(func_call)
class ProxyClass(ProxyBase):
flags = 0
def __init__(self, objectname, is_attribute, args=(), noInstantiation=False):
if objectname:
if is_attribute:
objectname = "self." + objectname
self._uic_name = objectname
else:
self._uic_name = "Unnamed"
if not noInstantiation:
funcall = "%s(%s)" % \
(moduleMember(self.module, self.__class__.__name__),
", ".join(map(str, args)))
if objectname:
funcall = "%s = %s" % (objectname, funcall)
write_code(funcall)
def __str__(self):
return self._uic_name
def __getattribute__(self, attribute):
try:
return object.__getattribute__(self, attribute)
except AttributeError:
return ProxyClassMember(self, attribute, self.flags)
class LiteralProxyClass(ProxyClass):
"""LiteralObject(*args) -> new literal class
a literal class can be used as argument in a function call
>>> class Foo(LiteralProxyClass): pass
>>> str(Foo(1,2,3)) == "Foo(1,2,3)"
"""
flags = AS_ARGUMENT
def __init__(self, *args):
self._uic_name = "%s(%s)" % \
(moduleMember(self.module, self.__class__.__name__),
", ".join(map(as_string, args)))
class ProxyNamespace(ProxyBase):
pass
# These are all the Qt classes used by pyuic4 in their namespaces. If a class
# is missing, the compiler will fail, normally with an AttributeError.
#
# For adding new classes:
# - utility classes used as literal values do not need to be listed
# because they are created on the fly as subclasses of LiteralProxyClass
# - classes which are *not* QWidgets inherit from ProxyClass and they
# have to be listed explicitly in the correct namespace. These classes
# are created via a ProxyQObjectCreator
# - new QWidget-derived classes have to inherit from qtproxies.QWidget
# If the widget does not need any special methods, it can be listed
# in _qwidgets
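# Hedged illustration of the rules above (not part of the original file): a
# hypothetical non-QWidget class would be listed inside its namespace, e.g.
#
#   class QtCore(ProxyNamespace):
#       class QEasingCurve(ProxyClass): pass
#
# while a new widget class would subclass qtproxies.QWidget, or simply be
# added to _qwidgets below if it needs no special methods.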
class QtCore(ProxyNamespace):
class Qt(ProxyNamespace):
pass
## connectSlotsByName and connect have to be handled as class methods,
## otherwise they would be created as LiteralProxyClasses and never be
## printed
class QMetaObject(ProxyClass):
def connectSlotsByName(cls, *args):
ProxyClassMember(cls, "connectSlotsByName", 0)(*args)
connectSlotsByName = classmethod(connectSlotsByName)
class QObject(ProxyClass):
def metaObject(self):
class _FakeMetaObject(object):
def className(*args):
return self.__class__.__name__
return _FakeMetaObject()
def objectName(self):
return self._uic_name.split(".")[-1]
def connect(cls, *args):
# Handle slots that have names corresponding to Python keywords.
slot_name = str(args[-1])
if slot_name.endswith('.raise'):
args = list(args[:-1])
args.append(Literal(slot_name + '_'))
ProxyClassMember(cls, "connect", 0)(*args)
connect = classmethod(connect)
# These sub-class QWidget but aren't themselves sub-classed.
_qwidgets = ("QCalendarWidget", "QDialogButtonBox", "QDockWidget", "QGroupBox",
"QLineEdit", "QMainWindow", "QMenuBar", "QProgressBar", "QStatusBar",
"QToolBar", "QWizardPage")
class QtGui(ProxyNamespace):
class QApplication(QtCore.QObject):
def translate(uiname, text, disambig, encoding):
return i18n_string(text or "", disambig)
translate = staticmethod(translate)
class QIcon(ProxyClass):
class fromTheme(ProxyClass): pass
class QConicalGradient(ProxyClass): pass
class QLinearGradient(ProxyClass): pass
class QRadialGradient(ProxyClass): pass
class QBrush(ProxyClass): pass
class QPainter(ProxyClass): pass
class QPalette(ProxyClass): pass
class QFont(ProxyClass): pass
class QSpacerItem(ProxyClass): pass
class QSizePolicy(ProxyClass): pass
## QActions inherit from QObject for the metaobject stuff
## and the hierarchy has to be correct since we have a
## isinstance(x, QtGui.QLayout) call in the ui parser
class QAction(QtCore.QObject): pass
class QActionGroup(QtCore.QObject): pass
class QButtonGroup(QtCore.QObject): pass
class QLayout(QtCore.QObject): pass
class QGridLayout(QLayout): pass
class QBoxLayout(QLayout): pass
class QHBoxLayout(QBoxLayout): pass
class QVBoxLayout(QBoxLayout): pass
class QFormLayout(QLayout): pass
class QWidget(QtCore.QObject):
def font(self):
return Literal("%s.font()" % self)
def minimumSizeHint(self):
return Literal("%s.minimumSizeHint()" % self)
def sizePolicy(self):
sp = LiteralProxyClass()
sp._uic_name = "%s.sizePolicy()" % self
return sp
class QDialog(QWidget): pass
class QAbstractPrintDialog(QDialog): pass
class QColorDialog(QDialog): pass
class QFileDialog(QDialog): pass
class QFontDialog(QDialog): pass
class QInputDialog(QDialog): pass
class QMessageBox(QDialog): pass
class QPageSetupDialog(QDialog): pass
class QWizard(QDialog): pass
class QAbstractSlider(QWidget): pass
class QDial(QAbstractSlider): pass
class QScrollBar(QAbstractSlider): pass
class QSlider(QAbstractSlider): pass
class QMenu(QWidget):
def menuAction(self):
return Literal("%s.menuAction()" % self)
class QTabWidget(QWidget):
def addTab(self, *args):
text = args[-1]
if isinstance(text, i18n_string):
i18n_print("%s.setTabText(%s.indexOf(%s), %s)" % \
(self._uic_name, self._uic_name, args[0], text))
args = args[:-1] + ("", )
ProxyClassMember(self, "addTab", 0)(*args)
def indexOf(self, page):
return Literal("%s.indexOf(%s)" % (self, page))
class QComboBox(QWidget): pass
class QFontComboBox(QComboBox): pass
class QAbstractSpinBox(QWidget): pass
class QDoubleSpinBox(QAbstractSpinBox): pass
class QSpinBox(QAbstractSpinBox): pass
class QDateTimeEdit(QAbstractSpinBox): pass
class QDateEdit(QDateTimeEdit): pass
class QTimeEdit(QDateTimeEdit): pass
class QFrame(QWidget): pass
class QLabel(QFrame): pass
class QLCDNumber(QFrame): pass
class QSplitter(QFrame): pass
class QStackedWidget(QFrame): pass
class QToolBox(QFrame):
def addItem(self, *args):
text = args[-1]
if isinstance(text, i18n_string):
i18n_print("%s.setItemText(%s.indexOf(%s), %s)" % \
(self._uic_name, self._uic_name, args[0], text))
args = args[:-1] + ("", )
ProxyClassMember(self, "addItem", 0)(*args)
def indexOf(self, page):
return Literal("%s.indexOf(%s)" % (self, page))
def layout(self):
return QtGui.QLayout("%s.layout()" % self,
False, (), noInstantiation=True)
class QAbstractScrollArea(QFrame):
def viewport(self):
return QtGui.QWidget("%s.viewport()" % self, False, (),
noInstantiation=True)
class QGraphicsView(QAbstractScrollArea): pass
class QMdiArea(QAbstractScrollArea): pass
class QPlainTextEdit(QAbstractScrollArea): pass
class QScrollArea(QAbstractScrollArea): pass
class QTextEdit(QAbstractScrollArea): pass
class QTextBrowser(QTextEdit): pass
class QAbstractItemView(QAbstractScrollArea): pass
class QColumnView(QAbstractItemView): pass
class QHeaderView(QAbstractItemView): pass
class QListView(QAbstractItemView): pass
class QTableView(QAbstractItemView):
def horizontalHeader(self):
return QtGui.QHeaderView("%s.horizontalHeader()" % self,
False, (), noInstantiation=True)
def verticalHeader(self):
return QtGui.QHeaderView("%s.verticalHeader()" % self,
False, (), noInstantiation=True)
class QTreeView(QAbstractItemView):
def header(self):
return QtGui.QHeaderView("%s.header()" % self,
False, (), noInstantiation=True)
class QListWidgetItem(ProxyClass): pass
class QListWidget(QListView):
setSortingEnabled = i18n_void_func("setSortingEnabled")
isSortingEnabled = i18n_func("isSortingEnabled")
item = i18n_func("item")
class QTableWidgetItem(ProxyClass): pass
class QTableWidget(QTableView):
setSortingEnabled = i18n_void_func("setSortingEnabled")
isSortingEnabled = i18n_func("isSortingEnabled")
item = i18n_func("item")
horizontalHeaderItem = i18n_func("horizontalHeaderItem")
verticalHeaderItem = i18n_func("verticalHeaderItem")
class QTreeWidgetItem(ProxyClass):
def child(self, index):
return QtGui.QTreeWidgetItem("%s.child(%i)" % (self, index),
False, (), noInstantiation=True)
class QTreeWidget(QTreeView):
setSortingEnabled = i18n_void_func("setSortingEnabled")
isSortingEnabled = i18n_func("isSortingEnabled")
def headerItem(self):
return QtGui.QWidget("%s.headerItem()" % self, False, (),
noInstantiation=True)
def topLevelItem(self, index):
return QtGui.QTreeWidgetItem("%s.topLevelItem(%i)" % (self, index),
False, (), noInstantiation=True)
class QAbstractButton(QWidget): pass
class QCheckBox(QAbstractButton): pass
class QRadioButton(QAbstractButton): pass
class QToolButton(QAbstractButton): pass
class QPushButton(QAbstractButton): pass
class QCommandLinkButton(QPushButton): pass
# Add all remaining classes.
for _class in _qwidgets:
if _class not in locals():
locals()[_class] = type(_class, (QWidget, ), {})
|
|
import unittest
from numpy import pi, Infinity, exp, sqrt
from abelfunctions.complex_path import (
ComplexPathPrimitive,
ComplexPath,
ComplexLine,
ComplexArc,
ComplexRay,
)
class TestConstruction(unittest.TestCase):
def test_line(self):
gamma = ComplexLine(0, 1)
self.assertEqual(gamma.x0, 0)
self.assertEqual(gamma.x1, 1)
def test_arc(self):
gamma = ComplexArc(1, 0, pi, -pi)
self.assertEqual(gamma.R, 1)
self.assertEqual(gamma.w, 0)
self.assertEqual(gamma.theta, pi)
self.assertEqual(gamma.dtheta, -pi)
def test_ray(self):
gamma = ComplexRay(1)
self.assertEqual(gamma.x0, 1)
def test_composite(self):
gamma1 = ComplexLine(-1, 0)
gamma2 = ComplexLine(0, 1.j)
gamma = gamma1 + gamma2
self.assertEqual(gamma.segments, [gamma1, gamma2])
def test_indexing(self):
gamma0 = ComplexLine(-1, 0)
gamma1 = ComplexLine(0, 1.j)
gamma2 = ComplexArc(1, 0, pi/2, -pi/2)
gamma = gamma0 + gamma1 + gamma2
self.assertEqual(gamma.segments, [gamma0, gamma1, gamma2])
self.assertEqual(gamma[0], gamma0)
self.assertEqual(gamma[1], gamma1)
self.assertEqual(gamma[2], gamma2)
def test_iteration(self):
gamma0 = ComplexLine(-1, 0)
gamma1 = ComplexLine(0, 1.j)
gamma2 = ComplexArc(1, 0, pi/2, -pi/2)
gamma = gamma0 + gamma1 + gamma2
index = 0
for segment in gamma:
if index == 0:
self.assertEqual(segment, gamma0)
elif index == 1:
self.assertEqual(segment, gamma1)
elif index == 2:
self.assertEqual(segment, gamma2)
index += 1
def test_iteration_reverse(self):
gamma0 = ComplexLine(-1, 0)
gamma1 = ComplexLine(0, 1.j)
gamma2 = ComplexArc(1, 0, pi/2, -pi/2)
gamma = gamma0 + gamma1 + gamma2
index = 0
for segment in gamma[::-1]:
if index == 0:
self.assertEqual(segment, gamma2)
elif index == 1:
self.assertEqual(segment, gamma1)
elif index == 2:
self.assertEqual(segment, gamma0)
index += 1
def test_equality(self):
gamma0 = ComplexLine(-1, 0)
gamma1 = ComplexLine(-1, 0)
self.assertEqual(gamma0, gamma1)
gamma0 = ComplexArc(1, 0, 0, pi)
gamma1 = ComplexArc(1, 0, 0, pi)
self.assertEqual(gamma0, gamma1)
gamma0 = ComplexRay(-1)
gamma1 = ComplexRay(-1)
self.assertEqual(gamma0, gamma1)
class TestReverse(unittest.TestCase):
def test_single_line(self):
gamma = ComplexLine(-1,2)
gamma_rev = gamma.reverse()
self.assertAlmostEqual(gamma(0.0), gamma_rev(1.0))
self.assertAlmostEqual(gamma(0.1), gamma_rev(0.9))
self.assertAlmostEqual(gamma(0.25), gamma_rev(0.75))
self.assertAlmostEqual(gamma(0.50), gamma_rev(0.50))
self.assertAlmostEqual(gamma(0.75), gamma_rev(0.25))
self.assertAlmostEqual(gamma(1.0), gamma_rev(0.0))
def test_single_arc(self):
gamma = ComplexArc(1,0,pi/5,3*pi/5)
gamma_rev = gamma.reverse()
self.assertAlmostEqual(gamma(0.0), gamma_rev(1.0))
self.assertAlmostEqual(gamma(0.1), gamma_rev(0.9))
self.assertAlmostEqual(gamma(0.25), gamma_rev(0.75))
self.assertAlmostEqual(gamma(0.50), gamma_rev(0.50))
self.assertAlmostEqual(gamma(0.75), gamma_rev(0.25))
self.assertAlmostEqual(gamma(1.0), gamma_rev(0.0))
def test_composite(self):
x1 = (sqrt(2)+sqrt(2)*1.j)/2
gamma = ComplexLine(0,x1) + ComplexArc(1,0,pi/4,3*pi/5)
gamma_rev = gamma.reverse()
self.assertAlmostEqual(gamma(0.0), gamma_rev(1.0))
self.assertAlmostEqual(gamma(0.1), gamma_rev(0.9))
self.assertAlmostEqual(gamma(0.25), gamma_rev(0.75))
self.assertAlmostEqual(gamma(0.50), gamma_rev(0.50))
self.assertAlmostEqual(gamma(0.75), gamma_rev(0.25))
self.assertAlmostEqual(gamma(1.0), gamma_rev(0.0))
class TestMismatchError(unittest.TestCase):
def test_lines(self):
gamma1 = ComplexLine(-1, 0)
gamma2 = ComplexLine(42, 100)
with self.assertRaises(ValueError):
gamma1 + gamma2
class TestEvaluation(unittest.TestCase):
def test_line(self):
# using AlmostEqual for floating point error
gamma = ComplexLine(0, 1)
self.assertAlmostEqual(gamma(0), 0)
self.assertAlmostEqual(gamma(0.5), 0.5)
self.assertAlmostEqual(gamma(0.75), 0.75)
self.assertAlmostEqual(gamma(1), 1)
gamma = ComplexLine(-1.j, 1.j)
self.assertAlmostEqual(gamma(0), -1.j)
self.assertAlmostEqual(gamma(0.5), 0)
self.assertAlmostEqual(gamma(0.75), 0.5j)
self.assertAlmostEqual(gamma(1), 1.j)
def test_arc(self):
# arc from theta=0 to theta=pi/2 on the unit circle
gamma = ComplexArc(1, 0, 0, pi/2)
self.assertAlmostEqual(gamma(0), 1)
self.assertAlmostEqual(gamma(0.5), exp(1.j*pi/4))
self.assertAlmostEqual(gamma(0.75), exp(1.j*3*pi/8))
self.assertAlmostEqual(gamma(1), exp(1.j*pi/2))
def test_ray(self):
# ray from x=-1 to infinity to the left
gamma = ComplexRay(-1)
self.assertAlmostEqual(gamma(0), -1)
self.assertAlmostEqual(gamma(0.5), -2)
self.assertAlmostEqual(gamma(0.75), -4)
        self.assertEqual(gamma(1), inf)
def test_composite(self):
gamma1 = ComplexLine(-1, 0)
gamma2 = ComplexLine(0, 1.j)
gamma = gamma1 + gamma2
self.assertAlmostEqual(gamma(0), -1)
self.assertAlmostEqual(gamma(0.25), -0.5)
self.assertAlmostEqual(gamma(0.5), 0)
self.assertAlmostEqual(gamma(0.75), 0.5j)
self.assertAlmostEqual(gamma(1), 1.j)
class TestEvaluationDerivative(unittest.TestCase):
def test_line_derivative(self):
# using AlmostEqual for floating point error
gamma = ComplexLine(0, 1)
self.assertAlmostEqual(gamma.derivative(0), 1)
self.assertAlmostEqual(gamma.derivative(0.5), 1)
self.assertAlmostEqual(gamma.derivative(0.75), 1)
self.assertAlmostEqual(gamma.derivative(0), 1)
gamma = ComplexLine(-1.j, 1.j)
self.assertAlmostEqual(gamma.derivative(0), 2.j)
self.assertAlmostEqual(gamma.derivative(0.5), 2.j)
self.assertAlmostEqual(gamma.derivative(0.75), 2.j)
self.assertAlmostEqual(gamma.derivative(1), 2.j)
def test_arc_derivative(self):
# arc from theta=0 to theta=pi/2 on the unit circle
gamma = ComplexArc(1, 0, 0, pi/2)
scale = 1.j*pi/2
self.assertAlmostEqual(gamma.derivative(0), scale)
self.assertAlmostEqual(gamma.derivative(0.5), scale*exp(1.j*pi/4))
self.assertAlmostEqual(gamma.derivative(0.75), scale*exp(1.j*3*pi/8))
self.assertAlmostEqual(gamma.derivative(1), scale*exp(1.j*pi/2))
def test_ray_derivative(self):
# ray from x=-1 to infinity to the left
gamma = ComplexRay(-1)
self.assertAlmostEqual(gamma.derivative(0), 1)
self.assertAlmostEqual(gamma.derivative(0.5), 4)
self.assertAlmostEqual(gamma.derivative(0.75), 16)
        self.assertAlmostEqual(gamma.derivative(1), inf)
def test_composite(self):
gamma1 = ComplexLine(-1, 0) # derivative == 1
gamma2 = ComplexLine(0, 1.j) # derivative == 1.j
gamma = gamma1 + gamma2
# derivative is defined on the half-open intervals [s_i,s_{i+1}) except
# for the last segment
self.assertAlmostEqual(gamma.derivative(0), 1)
self.assertAlmostEqual(gamma.derivative(0.25), 1)
self.assertAlmostEqual(gamma.derivative(0.49), 1)
self.assertAlmostEqual(gamma.derivative(0.5), 1.j)
self.assertAlmostEqual(gamma.derivative(0.51), 1.j)
self.assertAlmostEqual(gamma.derivative(0.75), 1.j)
self.assertAlmostEqual(gamma.derivative(1), 1.j)
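# Hedged sketch (illustrative, not part of abelfunctions): the parametrizations
# that the evaluation tests above imply, namely
#   ComplexLine(x0, x1)(s)          = x0 + (x1 - x0)*s
#   ComplexArc(R, w, theta, dtheta)(s) = R*exp(1j*(theta + dtheta*s)) + w
# The helper names below are illustrative only and reuse the numpy imports
# already at the top of this module.
if __name__ == '__main__':
    def line_point(x0, x1, s):
        # straight line from x0 to x1, parametrized over s in [0, 1]
        return x0 + (x1 - x0) * s
    def arc_point(R, w, theta, dtheta, s):
        # circular arc of radius R centred at w, swept from theta by dtheta
        return R * exp(1j * (theta + dtheta * s)) + w
    # These reproduce expected values from TestEvaluation above.
    assert abs(line_point(-1.j, 1.j, 0.75) - 0.5j) < 1e-12
    assert abs(arc_point(1, 0, 0, pi / 2, 0.5) - exp(1j * pi / 4)) < 1e-12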
|
|
"""
Implements an Exchange user object and access types. Exchange provides two different ways of granting access for a
login to a specific account. Impersonation is used mainly for service accounts that connect via EWS. Delegate is used
for ad-hoc access, e.g. access granted manually by the user.
See https://docs.microsoft.com/en-us/exchange/client-developer/exchange-web-services/impersonation-and-ews-in-exchange
"""
import abc
import logging
from threading import RLock
from oauthlib.oauth2 import OAuth2Token
from .errors import InvalidTypeError
log = logging.getLogger(__name__)
IMPERSONATION = "impersonation"
DELEGATE = "delegate"
ACCESS_TYPES = (IMPERSONATION, DELEGATE)
class BaseCredentials(metaclass=abc.ABCMeta):
"""Base for credential storage.
Establishes a method for refreshing credentials (mostly useful with OAuth, which expires tokens relatively
frequently) and provides a lock for synchronizing access to the object around refreshes.
"""
def __init__(self):
self._lock = RLock()
@property
def lock(self):
return self._lock
@abc.abstractmethod
def refresh(self, session):
"""Obtain a new set of valid credentials. This is mostly intended to support OAuth token refreshing, which can
        happen in long-running applications or those that cache access tokens and so might start with a token close to
expiration.
:param session: requests session asking for refreshed credentials
:return:
"""
def _get_hash_values(self):
return (getattr(self, k) for k in self.__dict__ if k != "_lock")
def __eq__(self, other):
return all(getattr(self, k) == getattr(other, k) for k in self.__dict__ if k != "_lock")
def __hash__(self):
return hash(tuple(self._get_hash_values()))
def __getstate__(self):
# The lock cannot be pickled
state = self.__dict__.copy()
del state["_lock"]
return state
def __setstate__(self, state):
# Restore the lock
self.__dict__.update(state)
self._lock = RLock()
class Credentials(BaseCredentials):
r"""Keeps login info the way Exchange likes it.
Usernames for authentication are of one of these forms:
* PrimarySMTPAddress
* WINDOMAIN\username
* User Principal Name (UPN)
password: Clear-text password
"""
EMAIL = "email"
DOMAIN = "domain"
UPN = "upn"
def __init__(self, username, password):
super().__init__()
if username.count("@") == 1:
self.type = self.EMAIL
elif username.count("\\") == 1:
self.type = self.DOMAIN
else:
self.type = self.UPN
self.username = username
self.password = password
def refresh(self, session):
pass
def __repr__(self):
return self.__class__.__name__ + repr((self.username, "********"))
def __str__(self):
return self.username
class OAuth2Credentials(BaseCredentials):
"""Login info for OAuth 2.0 client credentials authentication, as well as a base for other OAuth 2.0 grant types.
This is primarily useful for in-house applications accessing data from a single Microsoft account. For applications
that will access multiple tenants' data, the client credentials flow does not give the application enough
information to restrict end users' access to the appropriate account. Use OAuth2AuthorizationCodeCredentials and
the associated auth code grant type for multi-tenant applications.
"""
def __init__(self, client_id, client_secret, tenant_id=None, identity=None, access_token=None):
"""
:param client_id: ID of an authorized OAuth application, required for automatic token fetching and refreshing
:param client_secret: Secret associated with the OAuth application
:param tenant_id: Microsoft tenant ID of the account to access
:param identity: An Identity object representing the account that these credentials are connected to.
:param access_token: Previously-obtained access token, as a dict or an oauthlib.oauth2.OAuth2Token
"""
super().__init__()
self.client_id = client_id
self.client_secret = client_secret
self.tenant_id = tenant_id
self.identity = identity
self.access_token = access_token
def refresh(self, session):
# Creating a new session gets a new access token, so there's no work here to refresh the credentials. This
# implementation just makes sure we don't raise a NotImplementedError.
pass
def on_token_auto_refreshed(self, access_token):
"""Set the access_token. Called after the access token is refreshed (requests-oauthlib can automatically
refresh tokens if given an OAuth client ID and secret, so this is how our copy of the token stays up-to-date).
Applications that cache access tokens can override this to store the new token - just remember to call the
super() method.
:param access_token: New token obtained by refreshing
"""
# Ensure we don't update the object in the middle of a new session being created, which could cause a race.
if not isinstance(access_token, dict):
raise InvalidTypeError("access_token", access_token, OAuth2Token)
with self.lock:
log.debug("%s auth token for %s", "Refreshing" if self.access_token else "Setting", self.client_id)
self.access_token = access_token
def _get_hash_values(self):
# 'access_token' may be refreshed once in a while. This should not affect the hash signature.
# 'identity' is just informational and should also not affect the hash signature.
return (getattr(self, k) for k in self.__dict__ if k not in ("_lock", "identity", "access_token"))
def sig(self):
# Like hash(self), but pulls in the access token. Protocol.refresh_credentials() uses this to find out
# if the access_token needs to be refreshed.
res = []
for k in self.__dict__:
if k in ("_lock", "identity"):
continue
if k == "access_token":
res.append(self.access_token["access_token"] if self.access_token else None)
continue
res.append(getattr(self, k))
return hash(tuple(res))
def __repr__(self):
return self.__class__.__name__ + repr((self.client_id, "********"))
def __str__(self):
return self.client_id
class OAuth2AuthorizationCodeCredentials(OAuth2Credentials):
"""Login info for OAuth 2.0 authentication using the authorization code grant type. This can be used in one of
several ways:
* Given an authorization code, client ID, and client secret, fetch a token ourselves and refresh it as needed if
supplied with a refresh token.
* Given an existing access token, client ID, and client secret, use the access token until it expires and then
refresh it as needed.
* Given only an existing access token, use it until it expires. This can be used to let the calling application
refresh tokens itself by subclassing and implementing refresh().
Unlike the base (client credentials) grant, authorization code credentials don't require a Microsoft tenant ID
because each access token (and the authorization code used to get the access token) is restricted to a single
tenant.
"""
def __init__(self, authorization_code=None, access_token=None, client_id=None, client_secret=None, **kwargs):
"""
:param client_id: ID of an authorized OAuth application, required for automatic token fetching and refreshing
:param client_secret: Secret associated with the OAuth application
:param tenant_id: Microsoft tenant ID of the account to access
:param identity: An Identity object representing the account that these credentials are connected to.
:param authorization_code: Code obtained when authorizing the application to access an account. In combination
with client_id and client_secret, will be used to obtain an access token.
:param access_token: Previously-obtained access token. If a token exists and the application will handle
refreshing by itself (or opts not to handle it), this parameter alone is sufficient.
"""
super().__init__(client_id=client_id, client_secret=client_secret, **kwargs)
self.authorization_code = authorization_code
if access_token is not None and not isinstance(access_token, dict):
raise InvalidTypeError("access_token", access_token, OAuth2Token)
self.access_token = access_token
def __repr__(self):
return self.__class__.__name__ + repr(
(self.client_id, "[client_secret]", "[authorization_code]", "[access_token]")
)
def __str__(self):
client_id = self.client_id
credential = (
"[access_token]"
if self.access_token is not None
else ("[authorization_code]" if self.authorization_code is not None else None)
)
description = " ".join(filter(None, [client_id, credential]))
return description or "[underspecified credentials]"
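# Hedged usage sketch (illustrative only, not part of the module): how the
# classes above behave. The client ID, secret, and token values below are
# placeholders, not real credentials.
if __name__ == "__main__":
    plain = Credentials(username="WINDOMAIN\\myuser", password="topsecret")
    assert plain.type == Credentials.DOMAIN  # backslash form -> DOMAIN
    oauth = OAuth2Credentials(
        client_id="my-app-id",
        client_secret="my-app-secret",
        tenant_id="my-tenant-id",
    )
    with oauth.lock:  # the RLock from BaseCredentials guards refreshes
        pass
    # requests-oauthlib calls this hook after refreshing a token; the dict
    # mirrors an OAuth 2.0 token response.
    oauth.on_token_auto_refreshed({"access_token": "new-token", "token_type": "Bearer"})
    assert oauth.access_token["access_token"] == "new-token"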
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division, print_function
from datetime import datetime
from decimal import Decimal
from collections import OrderedDict
import pytz
import pytest
from poker.room.fulltiltpoker import FullTiltPokerHandHistory, _Flop
from poker.card import Card
from poker.hand import Combo
from poker.handhistory import _Player
from poker.constants import Game, Currency, Limit, GameType, Action
from . import ftp_hands
ET = pytz.timezone('US/Eastern')
@pytest.fixture
def hand_header(request):
"""Parse hand history header only defined in hand_text and returns a FullTiltPokerHandHistory instance."""
h = FullTiltPokerHandHistory(request.instance.hand_text)
h.parse_header()
return h
@pytest.fixture
def hand(request):
"""Parse handhistory defined in hand_text class attribute and returns a FullTiltPokerHandHistory instance."""
hh = FullTiltPokerHandHistory(request.instance.hand_text)
hh.parse()
return hh
@pytest.fixture(scope='module')
def flop():
return _Flop(
['[8h 4h Tc] (Total Pot: 230, 2 Players)',
'JohnyyR checks',
'FatalRevange has 15 seconds left to act',
'FatalRevange bets 120',
'JohnyyR folds',
'Uncalled bet of 120 returned to FatalRevange',
'FatalRevange mucks',
'FatalRevange wins the pot (230)'
], 0)
class TestHandWithFlopOnly:
hand_text = ftp_hands.HAND1
@pytest.mark.parametrize(('attribute', 'expected_value'), [
('game_type', GameType.TOUR),
('sb', Decimal(10)),
('bb', Decimal(20)),
('date', ET.localize(datetime(2013, 9, 22, 13, 26, 50))),
('game', Game.HOLDEM),
('limit', Limit.NL),
('ident', '33286946295'),
('tournament_ident', '255707037'),
('table_name', '179'),
('tournament_level', None),
('buyin', None),
('rake', None),
('currency', None),
])
def test_values_after_header_parsed(self, hand_header, attribute, expected_value):
assert getattr(hand_header, attribute) == expected_value
@pytest.mark.parametrize('attribute,expected_value',
[('players', [
_Player(name='Popp1987', stack=13587, seat=1, combo=None),
_Player(name='Luckytobgood', stack=10110, seat=2, combo=None),
_Player(name='FatalRevange', stack=9970, seat=3, combo=None),
_Player(name='IgaziFerfi', stack=10000, seat=4, combo=Combo('Ks9d')),
_Player(name='egis25', stack=6873, seat=5, combo=None),
_Player(name='gamblie', stack=9880, seat=6, combo=None),
_Player(name='idanuTz1', stack=10180, seat=7, combo=None),
_Player(name='PtheProphet', stack=9930, seat=8, combo=None),
_Player(name='JohnyyR', stack=9840, seat=9, combo=None),
]),
('button', _Player(name='egis25', stack=6873, seat=5, combo=None)),
('max_players', 9),
('hero', _Player(name='IgaziFerfi', stack=10000, seat=4, combo=Combo('Ks9d'))),
('preflop_actions', ('PtheProphet has 15 seconds left to act',
'PtheProphet folds',
'JohnyyR raises to 40',
'Popp1987 has 15 seconds left to act',
'Popp1987 folds',
'Luckytobgood folds',
'FatalRevange raises to 100',
'IgaziFerfi folds',
'egis25 folds',
'gamblie folds',
'idanuTz1 folds',
'JohnyyR has 15 seconds left to act',
'JohnyyR calls 60')),
('turn', None),
('river', None),
('total_pot', Decimal(230)),
('show_down', False),
('winners', ('FatalRevange',)),
('board', (Card('8h'), Card('4h'), Card('Tc'))),
('extra', dict(tournament_name='MiniFTOPS Main Event',
turn_pot=None, turn_num_players=None,
river_pot=None, river_num_players=None)),
])
def test_body(self, hand, attribute, expected_value):
assert getattr(hand, attribute) == expected_value
@pytest.mark.parametrize(('attribute', 'expected_value'), [
('actions', (('JohnyyR', Action.CHECK),
('FatalRevange', Action.THINK),
('FatalRevange', Action.BET, Decimal(120)),
('JohnyyR', Action.FOLD),
('FatalRevange', Action.RETURN, Decimal(120)),
('FatalRevange', Action.MUCK),
('FatalRevange', Action.WIN, Decimal(230)),
)
),
('cards', (Card('8h'), Card('4h'), Card('Tc'))),
('is_rainbow', False),
('is_monotone', False),
('is_triplet', False),
# TODO: http://www.pokerology.com/lessons/flop-texture/
# assert flop.is_dry
('has_pair', False),
('has_straightdraw', True),
('has_gutshot', True),
('has_flushdraw', True),
('players', ('JohnyyR', 'FatalRevange')),
('pot', Decimal(230))
])
def test_flop_attributes(self, hand, attribute, expected_value):
assert getattr(hand.flop, attribute) == expected_value
def test_flop(self, hand):
assert isinstance(hand.flop, _Flop)
class TestHandWithFlopTurnRiver:
hand_text = ftp_hands.TURBO_SNG
@pytest.mark.parametrize('attribute,expected_value',
[('game_type', GameType.SNG),
('sb', Decimal(15)),
('bb', Decimal(30)),
('date', ET.localize(datetime(2014, 6, 29, 5, 57, 1))),
('game', Game.HOLDEM),
('limit', Limit.NL),
('ident', '34374264321'),
('tournament_ident', '268569961'),
('table_name', '1'),
('tournament_level', None),
('buyin', Decimal(10)),
('rake', None),
('currency', Currency.USD),
])
def test_values_after_header_parsed(self, hand_header, attribute, expected_value):
assert getattr(hand_header, attribute) == expected_value
@pytest.mark.parametrize('attribute,expected_value',
[('players', [
_Player(name='snake 422', stack=1500, seat=1, combo=None),
_Player(name='IgaziFerfi', stack=1500, seat=2, combo=Combo('5d2h')),
_Player(name='MixaOne', stack=1500, seat=3, combo=None),
_Player(name='BokkaBlake', stack=1500, seat=4, combo=None),
_Player(name='Sajiee', stack=1500, seat=5, combo=None),
_Player(name='AzzzJJ', stack=1500, seat=6, combo=None),
]),
('button', _Player(name='AzzzJJ', stack=1500, seat=6, combo=None)),
('max_players', 6),
('hero', _Player(name='IgaziFerfi', stack=1500, seat=2, combo=Combo('5d2h'))),
('preflop_actions', ('MixaOne calls 30',
'BokkaBlake folds',
'Sajiee folds',
'AzzzJJ raises to 90',
'snake 422 folds',
'IgaziFerfi folds',
'MixaOne calls 60',)
),
('turn', None),
('turn_actions', None),
('river', None),
('river_actions', None),
('total_pot', Decimal('285')),
('show_down', False),
('winners', ('AzzzJJ',)),
('board', (Card('6s'), Card('9c'), Card('3d'))),
('extra', dict(tournament_name='$10 Sit & Go (Turbo)',
turn_pot=None, turn_num_players=None,
river_pot=None, river_num_players=None)),
])
def test_body(self, hand, attribute, expected_value):
assert getattr(hand, attribute) == expected_value
@pytest.mark.parametrize(('attribute', 'expected_value'), [
('actions', (('MixaOne', Action.BET, Decimal(30)),
('AzzzJJ', Action.RAISE, Decimal(120)),
('MixaOne', Action.FOLD),
('AzzzJJ', Action.RETURN, Decimal(90)),
('AzzzJJ', Action.MUCK),
('AzzzJJ', Action.WIN, Decimal(285)),
)
),
('cards', (Card('6s'), Card('9c'), Card('3d'))),
('is_rainbow', True),
('is_monotone', False),
('is_triplet', False),
# TODO: http://www.pokerology.com/lessons/flop-texture/
# assert flop.is_dry
('has_pair', False),
('has_straightdraw', True),
('has_gutshot', True),
('has_flushdraw', False),
('players', ('MixaOne', 'AzzzJJ')),
('pot', Decimal(285))
])
def test_flop_attributes(self, hand, attribute, expected_value):
assert getattr(hand.flop, attribute) == expected_value
def test_flop(self, hand):
assert isinstance(hand.flop, _Flop)
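# Hedged usage sketch (illustrative): the same parse flow that the fixtures
# above wrap, run directly against one of the bundled hand histories.
if __name__ == '__main__':
    hh = FullTiltPokerHandHistory(ftp_hands.HAND1)
    hh.parse()
    print(hh.ident, hh.total_pot, [player.name for player in hh.players])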
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from keystoneclient.common import cms
from oslo_config import cfg
from oslo_log import log
from oslo_log import versionutils
from oslo_serialization import jsonutils
from oslo_utils import importutils
import six
import stevedore
from keystone.common import controller
from keystone.common import dependency
from keystone.common import utils
from keystone.common import wsgi
from keystone import config
from keystone.contrib.federation import constants as federation_constants
from keystone import exception
from keystone.i18n import _, _LI, _LW
from keystone.resource import controllers as resource_controllers
LOG = log.getLogger(__name__)
CONF = cfg.CONF
# registry of authentication methods
AUTH_METHODS = {}
AUTH_PLUGINS_LOADED = False
def load_auth_method(method):
plugin_name = CONF.auth.get(method) or 'default'
try:
namespace = 'keystone.auth.%s' % method
driver_manager = stevedore.DriverManager(namespace, plugin_name,
invoke_on_load=True)
return driver_manager.driver
except RuntimeError:
LOG.debug('Failed to load the %s driver (%s) using stevedore, will '
'attempt to load using import_object instead.',
method, plugin_name)
@versionutils.deprecated(as_of=versionutils.deprecated.LIBERTY,
in_favor_of='entrypoints',
what='direct import of driver')
def _load_using_import(plugin_name):
return importutils.import_object(plugin_name)
return _load_using_import(plugin_name)
def load_auth_methods():
global AUTH_PLUGINS_LOADED
if AUTH_PLUGINS_LOADED:
# Only try and load methods a single time.
return
# config.setup_authentication should be idempotent, call it to ensure we
# have setup all the appropriate configuration options we may need.
config.setup_authentication()
for plugin in set(CONF.auth.methods):
AUTH_METHODS[plugin] = load_auth_method(plugin)
AUTH_PLUGINS_LOADED = True
def get_auth_method(method_name):
global AUTH_METHODS
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
return AUTH_METHODS[method_name]
class AuthContext(dict):
"""Retrofitting auth_context to reconcile identity attributes.
The identity attributes must not have conflicting values among the
auth plug-ins. The only exception is `expires_at`, which is set to its
earliest value.
"""
# identity attributes need to be reconciled among the auth plugins
IDENTITY_ATTRIBUTES = frozenset(['user_id', 'project_id',
'access_token_id', 'domain_id',
'expires_at'])
def __setitem__(self, key, val):
if key in self.IDENTITY_ATTRIBUTES and key in self:
existing_val = self[key]
if key == 'expires_at':
# special treatment for 'expires_at', we are going to take
# the earliest expiration instead.
if existing_val != val:
LOG.info(_LI('"expires_at" has conflicting values '
'%(existing)s and %(new)s. Will use the '
'earliest value.'),
{'existing': existing_val, 'new': val})
if existing_val is None or val is None:
val = existing_val or val
else:
val = min(existing_val, val)
elif existing_val != val:
msg = _('Unable to reconcile identity attribute %(attribute)s '
'as it has conflicting values %(new)s and %(old)s') % (
{'attribute': key,
'new': val,
'old': existing_val})
raise exception.Unauthorized(msg)
return super(AuthContext, self).__setitem__(key, val)
@dependency.requires('resource_api', 'trust_api')
class AuthInfo(object):
"""Encapsulation of "auth" request."""
@staticmethod
def create(context, auth=None):
auth_info = AuthInfo(context, auth=auth)
auth_info._validate_and_normalize_auth_data()
return auth_info
def __init__(self, context, auth=None):
self.context = context
self.auth = auth
self._scope_data = (None, None, None, None)
# self._scope_data is (domain_id, project_id, trust_ref, unscoped)
# project scope: (None, project_id, None, None)
# domain scope: (domain_id, None, None, None)
# trust scope: (None, None, trust_ref, None)
# unscoped: (None, None, None, 'unscoped')
def _assert_project_is_enabled(self, project_ref):
# ensure the project is enabled
try:
self.resource_api.assert_project_enabled(
project_id=project_ref['id'],
project=project_ref)
except AssertionError as e:
LOG.warning(six.text_type(e))
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
def _assert_domain_is_enabled(self, domain_ref):
try:
self.resource_api.assert_domain_enabled(
domain_id=domain_ref['id'],
domain=domain_ref)
except AssertionError as e:
LOG.warning(six.text_type(e))
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
def _lookup_domain(self, domain_info):
domain_id = domain_info.get('id')
domain_name = domain_info.get('name')
domain_ref = None
if not domain_id and not domain_name:
raise exception.ValidationError(attribute='id or name',
target='domain')
try:
if domain_name:
domain_ref = self.resource_api.get_domain_by_name(
domain_name)
else:
domain_ref = self.resource_api.get_domain(domain_id)
except exception.DomainNotFound as e:
LOG.exception(six.text_type(e))
raise exception.Unauthorized(e)
self._assert_domain_is_enabled(domain_ref)
return domain_ref
def _lookup_project(self, project_info):
project_id = project_info.get('id')
project_name = project_info.get('name')
project_ref = None
if not project_id and not project_name:
raise exception.ValidationError(attribute='id or name',
target='project')
try:
if project_name:
if 'domain' not in project_info:
raise exception.ValidationError(attribute='domain',
target='project')
domain_ref = self._lookup_domain(project_info['domain'])
project_ref = self.resource_api.get_project_by_name(
project_name, domain_ref['id'])
else:
project_ref = self.resource_api.get_project(project_id)
# NOTE(morganfainberg): The _lookup_domain method will raise
# exception.Unauthorized if the domain isn't found or is
# disabled.
self._lookup_domain({'id': project_ref['domain_id']})
except exception.ProjectNotFound as e:
raise exception.Unauthorized(e)
self._assert_project_is_enabled(project_ref)
return project_ref
def _lookup_trust(self, trust_info):
trust_id = trust_info.get('id')
if not trust_id:
raise exception.ValidationError(attribute='trust_id',
target='trust')
trust = self.trust_api.get_trust(trust_id)
return trust
def _validate_and_normalize_scope_data(self):
"""Validate and normalize scope data."""
if 'scope' not in self.auth:
return
if sum(['project' in self.auth['scope'],
'domain' in self.auth['scope'],
'unscoped' in self.auth['scope'],
'OS-TRUST:trust' in self.auth['scope']]) != 1:
raise exception.ValidationError(
attribute='project, domain, OS-TRUST:trust or unscoped',
target='scope')
if 'unscoped' in self.auth['scope']:
self._scope_data = (None, None, None, 'unscoped')
return
if 'project' in self.auth['scope']:
project_ref = self._lookup_project(self.auth['scope']['project'])
self._scope_data = (None, project_ref['id'], None, None)
elif 'domain' in self.auth['scope']:
domain_ref = self._lookup_domain(self.auth['scope']['domain'])
self._scope_data = (domain_ref['id'], None, None, None)
elif 'OS-TRUST:trust' in self.auth['scope']:
if not CONF.trust.enabled:
raise exception.Forbidden('Trusts are disabled.')
trust_ref = self._lookup_trust(
self.auth['scope']['OS-TRUST:trust'])
# TODO(ayoung): when trusts support domains, fill in domain data
if trust_ref.get('project_id') is not None:
project_ref = self._lookup_project(
{'id': trust_ref['project_id']})
self._scope_data = (None, project_ref['id'], trust_ref, None)
else:
self._scope_data = (None, None, trust_ref, None)
def _validate_auth_methods(self):
if 'identity' not in self.auth:
raise exception.ValidationError(attribute='identity',
target='auth')
# make sure auth methods are provided
if 'methods' not in self.auth['identity']:
raise exception.ValidationError(attribute='methods',
target='identity')
# make sure all the method data/payload are provided
for method_name in self.get_method_names():
if method_name not in self.auth['identity']:
raise exception.ValidationError(attribute=method_name,
target='identity')
# make sure auth method is supported
for method_name in self.get_method_names():
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
def _validate_and_normalize_auth_data(self):
"""Make sure "auth" is valid."""
# make sure "auth" exist
if not self.auth:
raise exception.ValidationError(attribute='auth',
target='request body')
self._validate_auth_methods()
self._validate_and_normalize_scope_data()
def get_method_names(self):
"""Returns the identity method names.
:returns: list of auth method names
"""
# Sanitizes methods received in request's body
# Filters out duplicates, while keeping elements' order.
method_names = []
for method in self.auth['identity']['methods']:
if method not in method_names:
method_names.append(method)
return method_names
def get_method_data(self, method):
"""Get the auth method payload.
:returns: auth method payload
"""
if method not in self.auth['identity']['methods']:
raise exception.ValidationError(attribute=method,
target='identity')
return self.auth['identity'][method]
def get_scope(self):
"""Get scope information.
Verify and return the scoping information.
:returns: (domain_id, project_id, trust_ref, unscoped).
                  If scoped to a project, (None, project_id, None, None)
                  will be returned.
                  If scoped to a domain, (domain_id, None, None, None)
                  will be returned.
                  If scoped to a trust, (None, project_id, trust_ref, None)
                  will be returned, where the project_id comes from the
                  trust definition.
If unscoped, (None, None, None, 'unscoped') will be
returned.
"""
return self._scope_data
def set_scope(self, domain_id=None, project_id=None, trust=None,
unscoped=None):
"""Set scope information."""
if domain_id and project_id:
msg = _('Scoping to both domain and project is not allowed')
raise ValueError(msg)
if domain_id and trust:
msg = _('Scoping to both domain and trust is not allowed')
raise ValueError(msg)
if project_id and trust:
msg = _('Scoping to both project and trust is not allowed')
raise ValueError(msg)
self._scope_data = (domain_id, project_id, trust, unscoped)
@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
'resource_api', 'token_provider_api', 'trust_api')
class Auth(controller.V3Controller):
# Note(atiwari): From V3 auth controller code we are
    # calling protection() wrappers, so we need to set up
# the member_name and collection_name attributes of
# auth controller code.
# In the absence of these attributes, default 'entity'
# string will be used to represent the target which is
# generic. Policy can be defined using 'entity' but it
# would not reflect the exact entity that is in context.
# We are defining collection_name = 'tokens' and
# member_name = 'token' to facilitate policy decisions.
collection_name = 'tokens'
member_name = 'token'
def __init__(self, *args, **kw):
super(Auth, self).__init__(*args, **kw)
config.setup_authentication()
def authenticate_for_token(self, context, auth=None):
"""Authenticate user and issue a token."""
include_catalog = 'nocatalog' not in context['query_string']
try:
auth_info = AuthInfo.create(context, auth=auth)
auth_context = AuthContext(extras={},
method_names=[],
bind={})
self.authenticate(context, auth_info, auth_context)
if auth_context.get('access_token_id'):
auth_info.set_scope(None, auth_context['project_id'], None)
self._check_and_set_default_scoping(auth_info, auth_context)
(domain_id, project_id, trust, unscoped) = auth_info.get_scope()
method_names = auth_info.get_method_names()
method_names += auth_context.get('method_names', [])
# make sure the list is unique
method_names = list(set(method_names))
expires_at = auth_context.get('expires_at')
# NOTE(morganfainberg): define this here so it is clear what the
# argument is during the issue_v3_token provider call.
metadata_ref = None
token_audit_id = auth_context.get('audit_id')
(token_id, token_data) = self.token_provider_api.issue_v3_token(
auth_context['user_id'], method_names, expires_at, project_id,
domain_id, auth_context, trust, metadata_ref, include_catalog,
parent_audit_id=token_audit_id)
# NOTE(wanghong): We consume a trust use only when we are using
# trusts and have successfully issued a token.
if trust:
self.trust_api.consume_use(trust['id'])
return render_token_data_response(token_id, token_data,
created=True)
except exception.TrustNotFound as e:
raise exception.Unauthorized(e)
def _check_and_set_default_scoping(self, auth_info, auth_context):
(domain_id, project_id, trust, unscoped) = auth_info.get_scope()
if trust:
project_id = trust['project_id']
if domain_id or project_id or trust:
# scope is specified
return
# Skip scoping when unscoped federated token is being issued
if federation_constants.IDENTITY_PROVIDER in auth_context:
return
# Do not scope if request is for explicitly unscoped token
if unscoped is not None:
return
# fill in default_project_id if it is available
try:
user_ref = self.identity_api.get_user(auth_context['user_id'])
except exception.UserNotFound as e:
LOG.exception(six.text_type(e))
raise exception.Unauthorized(e)
default_project_id = user_ref.get('default_project_id')
if not default_project_id:
            # The user has no default project, so issue an unscoped token.
return
# make sure user's default project is legit before scoping to it
try:
default_project_ref = self.resource_api.get_project(
default_project_id)
default_project_domain_ref = self.resource_api.get_domain(
default_project_ref['domain_id'])
if (default_project_ref.get('enabled', True) and
default_project_domain_ref.get('enabled', True)):
if self.assignment_api.get_roles_for_user_and_project(
user_ref['id'], default_project_id):
auth_info.set_scope(project_id=default_project_id)
else:
msg = _LW("User %(user_id)s doesn't have access to"
" default project %(project_id)s. The token"
" will be unscoped rather than scoped to the"
" project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
else:
msg = _LW("User %(user_id)s's default project %(project_id)s"
" is disabled. The token will be unscoped rather"
" than scoped to the project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
except (exception.ProjectNotFound, exception.DomainNotFound):
# default project or default project domain doesn't exist,
# will issue unscoped token instead
msg = _LW("User %(user_id)s's default project %(project_id)s not"
" found. The token will be unscoped rather than"
" scoped to the project.")
LOG.warning(msg, {'user_id': user_ref['id'],
'project_id': default_project_id})
def authenticate(self, context, auth_info, auth_context):
"""Authenticate user."""
# The 'external' method allows any 'REMOTE_USER' based authentication
# In some cases the server can set REMOTE_USER as '' instead of
# dropping it, so this must be filtered out
if context['environment'].get('REMOTE_USER'):
try:
external = get_auth_method('external')
external.authenticate(context, auth_info, auth_context)
except exception.AuthMethodNotSupported:
                # This will happen if there is no 'external' plugin registered
# and the container is performing authentication.
# The 'kerberos' and 'saml' methods will be used this way.
# In those cases, it is correct to not register an
# 'external' plugin; if there is both an 'external' and a
# 'kerberos' plugin, it would run the check on identity twice.
LOG.debug("No 'external' plugin is registered.")
except exception.Unauthorized:
# If external fails then continue and attempt to determine
# user identity using remaining auth methods
LOG.debug("Authorization failed for 'external' auth method.")
# need to aggregate the results in case two or more methods
# are specified
auth_response = {'methods': []}
for method_name in auth_info.get_method_names():
method = get_auth_method(method_name)
resp = method.authenticate(context,
auth_info.get_method_data(method_name),
auth_context)
if resp:
auth_response['methods'].append(method_name)
auth_response[method_name] = resp
if auth_response["methods"]:
# authentication continuation required
raise exception.AdditionalAuthRequired(auth_response)
if 'user_id' not in auth_context:
msg = _('User not found')
raise exception.Unauthorized(msg)
@controller.protected()
def check_token(self, context):
token_id = context.get('subject_token_id')
token_data = self.token_provider_api.validate_v3_token(
token_id)
# NOTE(morganfainberg): The code in
# ``keystone.common.wsgi.render_response`` will remove the content
# body.
return render_token_data_response(token_id, token_data)
@controller.protected()
def revoke_token(self, context):
token_id = context.get('subject_token_id')
return self.token_provider_api.revoke_token(token_id)
@controller.protected()
def validate_token(self, context):
token_id = context.get('subject_token_id')
include_catalog = 'nocatalog' not in context['query_string']
token_data = self.token_provider_api.validate_v3_token(
token_id)
if not include_catalog and 'catalog' in token_data['token']:
del token_data['token']['catalog']
return render_token_data_response(token_id, token_data)
@controller.protected()
def revocation_list(self, context, auth=None):
if not CONF.token.revoke_by_id:
raise exception.Gone()
tokens = self.token_provider_api.list_revoked_tokens()
for t in tokens:
expires = t['expires']
if not (expires and isinstance(expires, six.text_type)):
t['expires'] = utils.isotime(expires)
data = {'revoked': tokens}
json_data = jsonutils.dumps(data)
signed_text = cms.cms_sign_text(json_data,
CONF.signing.certfile,
CONF.signing.keyfile)
return {'signed': signed_text}
def _combine_lists_uniquely(self, a, b):
# it's most likely that only one of these will be filled so avoid
# the combination if possible.
if a and b:
return {x['id']: x for x in a + b}.values()
else:
return a or b
@controller.protected()
def get_auth_projects(self, context):
auth_context = self.get_auth_context(context)
user_id = auth_context.get('user_id')
user_refs = []
if user_id:
try:
user_refs = self.assignment_api.list_projects_for_user(user_id)
except exception.UserNotFound:
# federated users have an id but they don't link to anything
pass
group_ids = auth_context.get('group_ids')
grp_refs = []
if group_ids:
grp_refs = self.assignment_api.list_projects_for_groups(group_ids)
refs = self._combine_lists_uniquely(user_refs, grp_refs)
return resource_controllers.ProjectV3.wrap_collection(context, refs)
@controller.protected()
def get_auth_domains(self, context):
auth_context = self.get_auth_context(context)
user_id = auth_context.get('user_id')
user_refs = []
if user_id:
try:
user_refs = self.assignment_api.list_domains_for_user(user_id)
except exception.UserNotFound:
# federated users have an id but they don't link to anything
pass
group_ids = auth_context.get('group_ids')
grp_refs = []
if group_ids:
grp_refs = self.assignment_api.list_domains_for_groups(group_ids)
refs = self._combine_lists_uniquely(user_refs, grp_refs)
return resource_controllers.DomainV3.wrap_collection(context, refs)
@controller.protected()
def get_auth_catalog(self, context):
auth_context = self.get_auth_context(context)
user_id = auth_context.get('user_id')
project_id = auth_context.get('project_id')
if not project_id:
raise exception.Forbidden(
_('A project-scoped token is required to produce a service '
'catalog.'))
# The V3Controller base methods mostly assume that you're returning
# either a collection or a single element from a collection, neither of
# which apply to the catalog. Because this is a special case, this
# re-implements a tiny bit of work done by the base controller (such as
# self-referential link building) to avoid overriding or refactoring
# several private methods.
return {
'catalog': self.catalog_api.get_v3_catalog(user_id, project_id),
'links': {'self': self.base_url(context, path='auth/catalog')}
}
# FIXME(gyee): not sure if it belongs here or keystone.common. Park it here
# for now.
def render_token_data_response(token_id, token_data, created=False):
"""Render token data HTTP response.
Stash token ID into the X-Subject-Token header.
"""
headers = [('X-Subject-Token', token_id)]
if created:
status = (201, 'Created')
else:
status = (200, 'OK')
return wsgi.render_response(body=token_data,
status=status, headers=headers)
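# Hedged sketch (illustrative only, not part of keystone): how AuthContext
# reconciles identity attributes reported by multiple auth plugins. The
# 'expires_at' attribute keeps the earliest value; any other conflicting
# identity attribute raises Unauthorized, as implemented in __setitem__ above.
if __name__ == '__main__':
    ctx = AuthContext()
    ctx['expires_at'] = '2015-06-02T00:00:00Z'
    ctx['expires_at'] = '2015-06-01T00:00:00Z'  # earlier expiration wins
    assert ctx['expires_at'] == '2015-06-01T00:00:00Z'
    ctx['user_id'] = 'user-a'
    try:
        ctx['user_id'] = 'user-b'  # conflicting identity attribute
    except exception.Unauthorized:
        print('conflicting user_id rejected, as expected')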
|
|
from __future__ import annotations
from typing import Iterator, List
from parsing.ast import Symbol, Nonterm, Token
from parsing.errors import UnexpectedToken
from parsing.grammar import (
Production,
SymbolSpec,
TokenSpec,
EndOfInput,
ShiftAction,
ReduceAction,
)
from parsing.lrparser import Lr
class GssPathStep:
pass
class Gssn(GssPathStep):
"""Graph-structured stack node."""
def __init__(
self, below: Gssn | None, value: Symbol | None, nextState: int
) -> None:
self._edges: list[Gsse] = []
if below is not None and value is not None:
Gsse(below, self, value)
self.nextState = nextState
def __repr__(self) -> str:
return "[%d]" % self.nextState
@property
def edge(self) -> Gsse:
assert len(self._edges) == 1
return self._edges[0]
def edges(self) -> Iterator[Gsse]:
for edge in self._edges:
yield edge
def nodes(self) -> Iterator[Gssn]:
for edge in self._edges:
yield edge.node
# Iterate over all paths of length pathLen. Path length is measured as the
# number of edges in the path, so a path of length 0 still consists of a
# single node.
#
# Each path is encoded as a list that alternates between nodes and edges,
# where the first and last elements are always nodes.
#
# <e>-grammars can cause cycles, which requires that we avoid infinite
# recursion.
def paths(
self, pathLen: int | None = None
) -> Iterator[tuple[GssPathStep, ...]]:
assert pathLen is None or isinstance(pathLen, int) and pathLen >= 0
for path in self._pathsRecurse(pathLen, []):
yield path
def _pathsRecurse(
self, pathLen: int | None, path: list[GssPathStep]
) -> Iterator[tuple[GssPathStep, ...]]:
path.insert(0, self)
if pathLen is None and len(self._edges) == 0:
yield tuple(path[:])
elif pathLen is not None and len(path) - 1 == pathLen * 2:
yield tuple(path[:])
else:
for edge in self.edges():
# Avoid infinite recursion due to <e>-production cycles.
if len(path) < 3 or edge != path[1]:
path.insert(0, edge)
for x in edge.node._pathsRecurse(pathLen, path):
yield x
path.pop(0)
path.pop(0)
class Gsse(GssPathStep):
"""Graph-structured stack edge."""
def __init__(self, below: Gssn, above: Gssn, value: Symbol) -> None:
self.node = below
above._edges.append(self)
self.value = value
def __repr__(self) -> str:
return "{%r}" % self.value
def __eq__(self, other: object) -> bool:
if not isinstance(other, Gsse):
return NotImplemented
else:
return self.node == other.node and self.value == other.value
#
# End graph-structured stack (GSS) classes.
# ========================================================================
class Glr(Lr):
"""
GLR parser. The Glr class uses a Spec instance in order to parse input
that is fed to it via the token() method, and terminated via the eoi()
method."""
def reset(self) -> None:
self._start = None
# Initialize with a stack that is in the start state.
self._gss: List[Gssn] = []
top = Gssn(None, None, 0)
self._gss.append(top)
def token(self, token: Token) -> None:
"""
Feed a token to the parser."""
if self.verbose:
print("%s" % ("-" * 80))
print("INPUT: %r" % token)
tokenSpec = self._spec.sym_spec(token)
self._act(token, tokenSpec) # type: ignore
if len(self._gss) == 0:
raise UnexpectedToken("Unexpected token: %r" % token)
def eoi(self) -> None:
"""
Signal end-of-input to the parser."""
token = EndOfInput()
self.token(token)
# Gather the start symbols from the stacks.
self._start = []
for top in self._gss:
for path in top.paths():
assert len(path) == 5
if self.verbose:
print(" --> accept %r" % path)
edge = path[1]
assert isinstance(edge, Gsse)
assert isinstance(edge.value, Nonterm)
assert (
self._spec.sym_spec(edge.value) == self._start_sym
)
self._start.append(edge.value)
if len(self._start) == 0:
raise UnexpectedToken("Unexpected end of input")
if self.verbose:
print("Start: %r" % self._start)
print("%s" % ("-" * 80))
def _act(self, sym: Token, symSpec: TokenSpec) -> None:
self._reductions(sym, symSpec)
self._shifts(sym, symSpec)
def _reductions(self, sym: Token, symSpec: TokenSpec) -> None:
# epsilons is a dictionary that maps production-->[tops]. The purpose
# is to avoid repeating the same epsilon production on a particular
# stack top. Ordinary productions do not require this care because we
# can notice when a path has already been used for a production.
epsilons = {}
if self.verbose:
nReduces = 0
# Enqueue work.
workQ: list[tuple[tuple[GssPathStep, ...], Production]] = []
i = 0
while i < len(self._gss):
top = self._gss[i]
if symSpec not in self._action[top.nextState]:
# Unexpected token for this stack.
self._gss.pop(i)
else:
for action in self._action[top.nextState][symSpec]:
if type(action) == ReduceAction:
if len(action.production.rhs) == 0:
if action.production not in epsilons:
assert (
len([path for path in top.paths(0)]) == 1
)
path = [p for p in top.paths(0)][0]
epsilons[action.production] = [top]
workQ.append((path, action.production))
if self.verbose:
print(
" --> enqueue(a) %r"
% action.production
)
print(" %r" % path)
elif top not in epsilons[action.production]:
assert (
len([path for path in top.paths(0)]) == 1
)
path = [p for p in top.paths(0)][0]
epsilons[action.production].append(top)
workQ.append((path, action.production))
if self.verbose:
print(
" --> enqueue(b) %r"
% action.production
)
print(" %r" % path)
else:
# Iterate over all reduction paths through stack
# and enqueue them.
for path in top.paths(len(action.production.rhs)):
workQ.append((path, action.production))
if self.verbose:
print(
" --> enqueue(c) %r"
% action.production
)
print(" %r" % path)
i += 1
# Process the work queue.
while len(workQ) > 0:
(path, production) = workQ.pop(0)
if self.verbose:
print(" --> reduce %r" % production)
print(" %r" % path)
nReduces += 1
self._glr_reduce(workQ, epsilons, path, production, symSpec)
if self.verbose:
if nReduces > 0:
self._printStack()
def _glr_reduce(
self,
workQ: list[tuple[tuple[GssPathStep, ...], Production]],
epsilons: dict[Production, list[Gssn]],
path: tuple[GssPathStep, ...],
production: Production,
symSpec: SymbolSpec,
) -> None:
assert len(path[1::2]) == len(production.rhs)
# Build the list of RHS semantic values to pass to the reduction
# action.
rhs = [edge.value for edge in path[1::2]] # type: ignore
# Call the user reduction method.
r = self._production(production, rhs)
below = path[0]
assert isinstance(below, Gssn)
done = False
for top in self._gss:
if top.nextState == self._goto[below.nextState][production.lhs]:
# top is compatible with the reduction result we want to add to
# the set of stack tops.
for edge in top.edges():
if edge.node == below:
nonterm = edge.value
assert isinstance(nonterm, Nonterm)
# There is already a below<--top link, so merge
# competing interpretations.
if self.verbose:
print(" --> merge %r <--> %r" % (nonterm, r))
value = production.lhs.nontermType.merge(nonterm, r)
if self.verbose:
if value == edge.value:
print(
" %s"
% ("-" * len("%r" % nonterm))
)
else:
print(
" %s %s"
% (
(" " * len("%r" % nonterm)),
"-" * len("%r" % r),
)
)
edge.value = value
done = True
break
if not done:
# Create a new below<--top link.
edge = Gsse(below, top, r)
if self.verbose:
print(" --> shift(b) %r" % top)
# Enqueue reduction paths that were created as a result of
# the new link.
self._enqueueLimitedReductions(
workQ, epsilons, edge, symSpec
)
done = True
break
if not done:
# There is no compatible stack top, so create a new one.
top = Gssn(below, r, self._goto[below.nextState][production.lhs])
self._gss.append(top)
if self.verbose:
print(
" --> shift(c) %r"
% self._goto[below.nextState][production.lhs]
)
self._enqueueLimitedReductions(workQ, epsilons, top.edge, symSpec)
# Enqueue paths that incorporate edge.
def _enqueueLimitedReductions(
self,
workQ: list[tuple[tuple[GssPathStep, ...], Production]],
epsilons: dict[Production, list[Gssn]],
edge: Gsse,
symSpec: SymbolSpec,
) -> None:
gotos = self._goto
for top in self._gss:
if symSpec in self._action[top.nextState]:
for action in self._action[top.nextState][symSpec]:
if type(action) == ReduceAction:
if len(action.production.rhs) == 0:
if (
gotos[top.nextState][action.production.lhs]
== top.nextState
):
# Do nothing, since enqueueing a reduction
# would result in performing the same reduction
# twice.
pass
elif action.production not in epsilons:
p = (top,)
epsilons[action.production] = [top]
workQ.append((p, action.production))
if self.verbose:
print(
" --> enqueue(d) %r"
% action.production
)
print(" %r" % p)
elif top not in epsilons[action.production]:
path = (top,)
epsilons[action.production].append(top)
workQ.append((path, action.production))
if self.verbose:
print(
" --> enqueue(e) %r"
% action.production
)
print(" %r" % path)
else:
# Iterate over all reduction paths through stack
# and enqueue them if they incorporate edge.
for rp in top.paths(len(action.production.rhs)):
if edge in rp[1::2]:
workQ.append((rp, action.production))
if self.verbose:
print(
" --> enqueue(f) %r"
% action.production
)
print(" %r" % rp)
def _shifts(self, sym: Token, symSpec: TokenSpec) -> None:
prevGss = self._gss
self._gss = []
if self.verbose:
nShifts = 0
for topA in prevGss:
if symSpec in self._action[topA.nextState]:
for action in self._action[topA.nextState][symSpec]:
if type(action) == ShiftAction:
merged = False
for topB in self._gss:
if topB.nextState == topA.nextState:
Gsse(topA, topB, sym)
merged = True
break
if not merged:
top = Gssn(topA, sym, action.nextState)
self._gss.append(top)
if self.verbose:
print(" --> shift(a) %d" % action.nextState)
nShifts += 1
if self.verbose:
if nShifts > 0:
self._printStack()
def _printStack(self) -> None:
i = 0
for top in self._gss:
for path in top.paths():
if i == 0:
print("STK 0:", end=" ")
else:
print(" %d:" % i, end=" ")
for elm in path:
print("%r" % elm, end=" ")
print()
i += 1
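# Hedged sketch (illustrative, not part of the parser): the path encoding used
# by Gssn.paths() above. Real stacks carry parsing.ast.Symbol values on their
# edges; a plain string stands in for one here.
if __name__ == "__main__":
    bottom = Gssn(None, None, 0)     # start state; no incoming edge
    top = Gssn(bottom, "sym", 1)     # creates the edge bottom <-- top
    for path in top.paths():
        # A path alternates node, edge, node, ... and starts/ends on nodes.
        assert isinstance(path[0], Gssn) and isinstance(path[-1], Gssn)
        print(path)                  # e.g. ([0], {'sym'}, [1])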
|
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration test for end-to-end usage of the DFA API."""
__author__ = '[email protected] (Joseph DiLallo)'
import os
import pickle
import shutil
import StringIO
import sys
import tempfile
import unittest
from xml.etree import ElementTree
sys.path.insert(0, os.path.join('..', '..', '..'))
from adspygoogle.common.Errors import ValidationError
from adspygoogle.dfa import DEFAULT_API_VERSION
from adspygoogle.dfa import LIB_SIG
from adspygoogle.dfa.DfaClient import DfaClient
import mock
from oauth2client.client import OAuth2Credentials
# Location of a cached WSDL to generate a service proxy from.
WSDL_FILE_LOCATION = os.path.join('data', 'placement_service.wsdl')
# Location of a cached login WSDL to generate a service proxy from.
LOGIN_WSDL_FILE_LOCATION = os.path.join('data', 'login_service.wsdl')
# Location of the cached expected SOAP request XML, with some Python string
# formatting operations inside for better flexibility.
REQUEST_FILE_LOCATION = os.path.join('data', 'integration_test_request.xml')
AUTH_REQUEST_FILE_LOCATION = os.path.join('data', 'integration_test_auth_request.xml')
# Location of the cached expected SOAP response XML.
RESPONSE_FILE_LOCATION = os.path.join('data', 'integration_test_response.xml')
# Location of the cached expected authenticate SOAP response XML.
AUTH_RESPONSE_FILE_LOCATION = os.path.join('data', 'integration_test_auth_response.xml')
# Location of the cached expected token expired SOAP response XML.
EXPIRED_RESPONSE_FILE_LOCATION = os.path.join('data', 'integration_test_expired_response.xml')
# Deserialized value of the result stored in the cached SOAP response.
EXPECTED_RESULT = (
{'name': 'Publisher Paid Regular', 'id': '1'},
{'name': 'Publisher Paid Interstitial', 'id': '2'},
{'name': 'Agency Paid Regular', 'id': '3'},
{'name': 'Agency Paid Interstitial', 'id': '4'},
{'name': 'Mobile Display', 'id': '7'},
{'name': 'In-Stream Video', 'id': '6'},
{'name': 'Publisher Paid In-Stream', 'id': '8'}
)
# Values used in our test code.
ACCESS_TOKEN = 'a1b2c3d4e5'
CLIENT_ID = 'id1234id'
CLIENT_SECRET = 'shhh,itsasecret'
REFRESH_TOKEN = '1/not_a_refresh_token'
OAUTH_URI = 'uri'
USER_AGENT = 'Integration Test'
USER_NAME = 'dfa_user'
TOKEN = 'dfa_token'
EXPIRED_TOKEN = 'expired_token'
class DfaIntegrationTest(unittest.TestCase):
"""Tests end-to-end usage of the DFA library."""
def testWithPassedInCredential(self):
"""Tests the entire workflow of making a request against DFA.
Uses a credential passed in to the constructor. Starts with no DFA token and
has the library generate one.
Since this library is tightly integrated with SOAPpy, this test mocks out
the HTTP level rather than the SOAPpy proxy level.
"""
oauth2_credential = OAuth2Credentials(
ACCESS_TOKEN, CLIENT_ID, CLIENT_SECRET, REFRESH_TOKEN, None,
OAUTH_URI, USER_AGENT)
headers = {
'userAgent': USER_AGENT,
'Username': USER_NAME,
'oauth2credentials': oauth2_credential,
'appName': USER_AGENT
}
config = {
'compress': False
}
client = DfaClient(headers=headers, config=config)
placement_service = self._CreatePlacementService(client)
page = self._MakeSoapRequest_NoDfaToken(placement_service)
self.assertEquals(EXPECTED_RESULT, page)
def testWithCachedRefreshToken(self):
"""Tests the entire workflow of making a request against DFA.
Uses a cached refresh token to generate a credential. Starts with no DFA
token and has the library generate one.
Since this library is tightly integrated with SOAPpy, this test mocks out
the HTTP level rather than the SOAPpy proxy level.
"""
directory = self._CreateConfigPickles()
try:
with mock.patch(
'oauth2client.client.OAuth2Credentials.refresh') as mock_refresh:
client = DfaClient(path=directory)
self.assertEquals(CLIENT_ID, client.oauth2credentials.client_id)
self.assertEquals(CLIENT_SECRET, client.oauth2credentials.client_secret)
self.assertEquals(REFRESH_TOKEN, client.oauth2credentials.refresh_token)
def SetAccessToken(unused_http):
client.oauth2credentials.access_token = ACCESS_TOKEN
mock_refresh.side_effect = SetAccessToken
placement_service = self._CreatePlacementService(client)
page = self._MakeSoapRequest_NoDfaToken(placement_service)
self.assertEquals(EXPECTED_RESULT, page)
client.oauth2credentials.refresh.assert_called_once_with(mock.ANY)
finally:
shutil.rmtree(directory)
def testExpiredDfaToken(self):
"""Tests regenerating the DFA token once it has expired.
Uses a credential passed in to the constructor. Starts with an expired DFA
token and has the library regenerate it.
Since this library is tightly integrated with SOAPpy, this test mocks out
the HTTP level rather than the SOAPpy proxy level.
"""
oauth2_credential = OAuth2Credentials(
ACCESS_TOKEN, CLIENT_ID, CLIENT_SECRET, REFRESH_TOKEN, None,
OAUTH_URI, USER_AGENT)
headers = {
'userAgent': USER_AGENT,
'Username': USER_NAME,
'oauth2credentials': oauth2_credential,
'AuthToken': EXPIRED_TOKEN,
'appName': USER_AGENT
}
config = {
'compress': False
}
client = DfaClient(headers=headers, config=config)
placement_service = self._CreatePlacementService(client)
placement_service._config['compress'] = False
page = self._MakeSoapRequest_ExpiredToken(placement_service)
self.assertEquals(EXPECTED_RESULT, page)
def testWithInvalidApplicationName(self):
"""Tests instantiation of a DFA client with an invalid application name.
Uses a credential passed in to the constructor. Starts with no DFA token and
has the library generate one.
Since this library is tightly integrated with SOAPpy, this test mocks out
the HTTP level rather than the SOAPpy proxy level.
"""
oauth2_credential = OAuth2Credentials(
ACCESS_TOKEN, CLIENT_ID, CLIENT_SECRET, REFRESH_TOKEN, None,
OAUTH_URI, USER_AGENT)
headers = {
'userAgent': USER_AGENT,
'Username': USER_NAME,
'oauth2credentials': oauth2_credential,
'app_name': ' ',
}
config = {
'compress': False
}
self.assertRaises(ValidationError, DfaClient, headers, config)
def _CreateConfigPickles(self):
"""Creates configuration pickles for testing use of cached values.
Returns:
string The directory the pickles were stored in.
"""
directory = tempfile.mkdtemp()
auth_credentials = {
'clientId': CLIENT_ID,
'clientSecret': CLIENT_SECRET,
'refreshToken': REFRESH_TOKEN,
'Username': USER_NAME,
'appName': USER_AGENT
}
config = {
'compress': False,
}
with open(os.path.join(directory, DfaClient.auth_pkl_name),
'w') as handle:
pickle.dump(auth_credentials, handle)
with open(os.path.join(directory, DfaClient.config_pkl_name),
'w') as handle:
pickle.dump(config, handle)
return directory
def _CreatePlacementService(self, client):
"""Creates a SOAP service proxy for the DFA placement service.
All of the network interactions are mocked out.
Args:
client: adspygoogle.adwords.DfaClient.DfaClient A client ready to make
requests against the DFA API.
Returns:
adspygoogle.adwords.GenericDfaService.GenericDfaService A service proxy
for the placement service.
"""
wsdl_data = open(WSDL_FILE_LOCATION).read() % {'version': DEFAULT_API_VERSION}
with mock.patch('urllib.urlopen') as mock_urlopen:
mock_urlopen.return_value = StringIO.StringIO(wsdl_data)
return client.GetPlacementService()
def _MakeSoapRequest_NoDfaToken(self, placement_service):
"""Makes a "getPlacementTypes" request against the DFA placement service.
All of the network interactions are mocked out. This method also makes an
"authenticate" call against the DFA API.
Args:
placement_service: adspygoogle.adwords.GenericDfaService.GenericDfaService
A service proxy for the DFA placement service.
Returns:
tuple The result set from the getPlacementTypes operation.
"""
request_values = {
'appName': USER_AGENT,
'username': USER_NAME,
'token': TOKEN,
'libSig': LIB_SIG,
'version': DEFAULT_API_VERSION
}
expected_request = self._GetSoapXml(REQUEST_FILE_LOCATION, request_values)
expected_auth_request = self._GetSoapXml(AUTH_REQUEST_FILE_LOCATION,
request_values)
response_values = {
'version': DEFAULT_API_VERSION,
'token': TOKEN
}
response_xml = self._GetSoapXml(RESPONSE_FILE_LOCATION, response_values)
auth_response_xml = self._GetSoapXml(AUTH_RESPONSE_FILE_LOCATION,
response_values)
http_headers = mock.MagicMock()
http_headers.get.return_value = None
expected_response = (200, 'OK', http_headers)
with mock.patch('httplib.HTTPS') as mock_https:
https_instance = mock_https.return_value
https_instance.getreply.return_value = expected_response
soap_responses = [StringIO.StringIO(response_xml),
StringIO.StringIO(auth_response_xml)]
https_instance.getfile.side_effect = lambda *x: soap_responses.pop()
login_wsdl_data = open(LOGIN_WSDL_FILE_LOCATION).read() % {'version': DEFAULT_API_VERSION}
with mock.patch('urllib.urlopen') as mock_urlopen:
mock_urlopen.return_value = StringIO.StringIO(login_wsdl_data)
page = placement_service.GetPlacementTypes()
# Ensure that the SOAP request matches the expected output.
self.assertEqual(_RequestMatcher(expected_auth_request, True),
https_instance.send.call_args_list[0])
self.assertEqual(_RequestMatcher(expected_request, False),
https_instance.send.call_args_list[1])
# Ensure that we set the OAuth2 HTTP header.
self.assertTrue(mock.call('Authorization', 'Bearer %s' % ACCESS_TOKEN) in
https_instance.putheader.call_args_list)
return page
def _MakeSoapRequest_ExpiredToken(self, placement_service):
"""Makes a "getPlacementTypes" request against the DFA placement service.
All of the network interactions are mocked out. This method returns an
expired token error to the SOAP call and tests that the library will refresh
the token and retry the request.
Args:
placement_service: adspygoogle.adwords.GenericDfaService.GenericDfaService
A service proxy for the DFA placement service.
Returns:
tuple The result set from the getPlacementTypes operation.
"""
request_values = {
'appName': USER_AGENT,
'username': USER_NAME,
'token': TOKEN,
'libSig': LIB_SIG,
'version': DEFAULT_API_VERSION
}
expected_request = self._GetSoapXml(REQUEST_FILE_LOCATION, request_values)
expected_auth_request = self._GetSoapXml(AUTH_REQUEST_FILE_LOCATION,
request_values)
request_values['token'] = EXPIRED_TOKEN
expected_failed_request = self._GetSoapXml(REQUEST_FILE_LOCATION,
request_values)
response_values = {
'version': DEFAULT_API_VERSION,
'token': TOKEN
}
response_xml = self._GetSoapXml(RESPONSE_FILE_LOCATION, response_values)
expired_token_response_xml = self._GetSoapXml(
EXPIRED_RESPONSE_FILE_LOCATION, response_values)
auth_response_xml = self._GetSoapXml(AUTH_RESPONSE_FILE_LOCATION,
response_values)
http_headers = mock.MagicMock()
http_headers.get.return_value = None
expected_response = (200, 'OK', http_headers)
with mock.patch('httplib.HTTPS') as mock_https:
https_instance = mock_https.return_value
https_instance.getreply.return_value = expected_response
soap_responses = [StringIO.StringIO(response_xml),
StringIO.StringIO(auth_response_xml),
StringIO.StringIO(expired_token_response_xml)]
https_instance.getfile.side_effect = lambda *x: soap_responses.pop()
login_wsdl_data = open(LOGIN_WSDL_FILE_LOCATION).read() % {'version': DEFAULT_API_VERSION}
with mock.patch('urllib.urlopen') as mock_urlopen:
mock_urlopen.return_value = StringIO.StringIO(login_wsdl_data)
page = placement_service.GetPlacementTypes()
# Ensure that the SOAP request matches the expected output.
self.assertEqual(_RequestMatcher(expected_failed_request, False),
https_instance.send.call_args_list[0])
self.assertEqual(_RequestMatcher(expected_auth_request, True),
https_instance.send.call_args_list[1])
self.assertEqual(_RequestMatcher(expected_request, False),
https_instance.send.call_args_list[2])
# Ensure that we set the OAuth2 HTTP header.
self.assertTrue(mock.call('Authorization', 'Bearer %s' % ACCESS_TOKEN) in
https_instance.putheader.call_args_list)
return page
def _GetSoapXml(self, file_location, template_values):
raw_soap = open(file_location).read()
return raw_soap % template_values
class _RequestMatcher(object):
"""Ensures that a SOAP request is equivalent to the expected request.
For a definition of what we mean by equivalence, see the __eq__ function.
"""
# The SOAP environment namespace used for extracting the SOAP header.
SOAP_ENV_NS = 'http://schemas.xmlsoap.org/soap/envelope/'
WSSE_NS = ('http://docs.oasis-open.org/wss/2004/01/'
'oasis-200401-wss-wssecurity-secext-1.0.xsd')
def __init__(self, expected_xml, login_request):
"""Initializes a _RequestMatcher.
Args:
expected_xml: string The XML of the expected SOAP request.
login_request: bool Whether this is a login service request.
"""
self.expected_xml = expected_xml
self.login_request = login_request
def __str__(self):
return self.expected_xml
def __repr__(self):
return '_RequestMatcher(%s)' % str(self)
def __eq__(self, request_call):
"""Tests that the given SOAP request is equivalent to the expected request.
In our context, equivalent means:
1) With the exception of the SOAP header and its descendants, all XML is
exactly identical, including the ordering of elements. AdWords enforces
that all elements are in a proper order.
2) The SOAP headers contain the same number of children, these children have
identical tags, and all grandchildren of the two SOAP headers are
identical, but the order of the children and grandchildren does not have
to be identical.
Args:
request_call: mock.call The method call made by the library.
Returns:
boolean: Whether the given SOAP request XML is equivalent to the expected
SOAP request.
"""
request_args, request_kargs = request_call
if len(request_args) != 1 or request_kargs:
return False
actual_xml = request_args[0]
actual_tree = ElementTree.fromstring(actual_xml)
expected_tree = ElementTree.fromstring(self.expected_xml)
actual_request_header = actual_tree.find('{%s}Header' % self.SOAP_ENV_NS)
expected_request_header = expected_tree.find('{%s}Header' %
self.SOAP_ENV_NS)
actual_tree.remove(actual_request_header)
expected_tree.remove(expected_request_header)
return (self._CompareSoapHeaders(actual_request_header,
expected_request_header) and
self._CompareRequestMinusHeader(actual_tree, expected_tree))
def _CompareRequestMinusHeader(self, actual_tree, expected_tree):
"""Compares two XML trees for equivalence.
By equivalence, we check that the string representations are identical. This
enforces that the order of elements is always the same.
Args:
actual_tree: xml.etree.ElementTree The tree of the actual request with the
SOAP header node removed.
expected_tree: xml.etree.ElementTree The tree of the expected request with
the SOAP header node removed.
Returns:
boolean Whether the trees were equivalent.
"""
return (ElementTree.tostring(actual_tree) ==
ElementTree.tostring(expected_tree))
def _CompareSoapHeaders(
self, actual_header_node, expected_header_node):
"""Compares two SOAP headers for equivalence.
By equivalence, we check that the two SOAP headers contain the same amount
of children, that the RequestHeader children are identical, and that the
WSSE headers contain identical grandchildren along with identical child
tags. We do not enforce that the children, grandchildren, or
great-grandchildren are in the same order.
Args:
actual_header_node: xml.etree.ElementTree The node of the actual SOAP
header.
expected_header_node: xml.etree.ElementTree The node of the expected SOAP
header.
Returns:
boolean Whether the headers were equivalent.
"""
same_length = len(actual_header_node) == len(expected_header_node)
actual_request_header = actual_header_node.find('./RequestHeader')
expected_request_header = expected_header_node.find('./RequestHeader')
actual_wsse_header = actual_header_node.find(
'./{%s}Security' % self.WSSE_NS)
expected_wsse_header = expected_header_node.find(
'./{%s}Security' % self.WSSE_NS)
identical_request_headers = (ElementTree.tostring(actual_request_header) ==
ElementTree.tostring(expected_request_header))
if actual_wsse_header is not None and expected_wsse_header is not None:
equivalent_wsse_headers = (
set([ElementTree.tostring(grandchild) for grandchild in
actual_wsse_header.findall('./*/*')]) ==
set([ElementTree.tostring(grandchild) for grandchild in
expected_wsse_header.findall('./*/*')]) and
actual_wsse_header.find('./*').tag ==
expected_wsse_header.find('./*').tag)
else:
equivalent_wsse_headers = False
return (same_length and identical_request_headers and
(equivalent_wsse_headers or self.login_request))
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import absolute_import, division, print_function
import itertools
import logging
import six
from .second import Second
class BaseVideoUnit(object):
"""
    Base unit of video analysis (e.g. a frame or a feature point).
    Wraps a `source` object and lazily exposes its time (as a `Second`)
    and its global number; both can also be set explicitly.
"""
__source = None
__time = None
__global_number = None
__logger = logging.getLogger(__name__)
def __init__(self, kwargs_items=None, **kwargs):
"""
        :param kwargs_items: optional iterable of (attribute, value) pairs
        :param kwargs: attributes as keyword arguments; ignored when
            `kwargs_items` is given
"""
if kwargs_items:
self._stored_attr_seq(kwargs_items)
else:
self._stored_attr_dict(kwargs)
def _stored_attr_dict(self, kwargs):
"""
:param kwargs:
:return:
"""
kwargs_items = six.iteritems(kwargs)
return self._stored_attr_seq(kwargs_items)
def _stored_attr_seq(self, kwargs_items):
"""
:param kwargs_items:
:return:
"""
for attr, value in kwargs_items:
# self.__logger.info("attr, value = %s %s", attr, value)
setattr(self, attr, value)
@property
def time(self):
"""
:return:
"""
if self.__time is None:
if self.source and self.source.time:
self.__time = Second(self.source.time)
return self.__time
@time.setter
def time(self, value):
"""
:param value:
:return:
"""
self.__time = value
@property
def hms(self):
"""
:return:
"""
if self.time:
return self.time.hms()
return '00:00:00'
@property
def minsec(self):
"""
:return:
"""
if self.time:
return self.time.minsec()
return 0.0
@property
def second(self):
"""
:return:
"""
if self.time:
return self.time
return 0.0
@property
def minute(self):
"""
:return:
"""
if self.time:
return self.time.minute()
return 0.0
@property
def global_number(self):
"""
:return:
"""
if self.__global_number is None:
if self.source:
self.__global_number = self.source.global_number
return self.__global_number
@global_number.setter
def global_number(self, value):
"""
:param value:
:return:
"""
self.__global_number = value
@property
def number(self):
"""
:return:
"""
return self.global_number
@number.setter
def number(self, value):
"""
:param value:
:return:
"""
self.global_number = value
@property
def source(self):
"""
:return:
"""
return self.__source
@source.setter
def source(self, value):
"""
:param value:
:return:
"""
self.__source = value
@classmethod
def source_sequence(cls, sequence):
"""
:param sequence:
:return:
"""
for unit in sequence:
yield unit.source
def copy(self, **kwargs):
"""
        :param kwargs: attribute overrides applied on top of the current attributes
        :return: a new instance of the same class with the merged attributes
"""
old_attr_seq = six.iteritems(vars(self))
kwargs_seq = six.iteritems(kwargs)
new_attr_seq = itertools.chain(old_attr_seq, kwargs_seq)
obj = type(self)(kwargs_items=new_attr_seq)
return obj
def __repr__(self):
"""
:return:
"""
repr_list = []
mro = self.__class__.mro()
class_name_list = [klass.__name__ for klass in mro]
for key, value in six.iteritems(vars(self)):
for name in class_name_list:
key = key.replace('_{}__'.format(name), '@')
repr_list += ["'{k}':{v}".format(k=key, v=value)]
repr_str = ','.join(repr_list)
return "{%s}" % repr_str
def __str__(self):
"""
:return:
"""
class_name = self.__class__.__name__
return "{class_name} {number} {hms} {time}".format(
class_name=class_name,
number=self.number,
hms=self.hms,
time=self.time,
)
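def _base_video_unit_example():
    """
    Illustrative usage sketch, not part of the original module: construct a
    unit from plain keyword attributes and copy it with an override. It assumes
    `Second` accepts a numeric value, as in the `time` property above.
    """
    unit = BaseVideoUnit(global_number=42, time=Second(1.5))
    updated_unit = unit.copy(global_number=43)
    return unit, updated_unit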
|
|
import os
import sys
import threading
import time
import traceback
import datetime
import pandas as pd
import sqlite3 as sqlite
from sqlite3 import Error
from modules.ExchangeApi import ApiError
# Bot libs
import modules.Configuration as Config
from modules.Data import truncate
try:
import numpy
use_numpy = True
except ImportError as ex:
ex.message = ex.message if ex.message else str(ex)
print("WARN: Module Numpy not found, using manual percentile method instead. "
"It is recommended to install Numpy. Error: {0}".format(ex.message))
use_numpy = False
# Improvements
# [ ] Provide something that takes into account dust offers. (The golden cross works well on BTC, not slower markets)
# [ ] RE: above. Weighted rate.
# [ ] Add docstring to everything
# [ ] Unit tests
# NOTES
# * A possible solution for the dust problem is take the top 10 offers and if the offer amount is less than X% of the
# total available, ignore it as dust.
class MarketDataException(Exception):
pass
class MarketAnalysis(object):
def __init__(self, config, api):
self.currencies_to_analyse = config.get_currencies_list('analyseCurrencies', 'MarketAnalysis')
self.update_interval = int(config.get('MarketAnalysis', 'analyseUpdateInterval', 10, 1, 3600))
self.api = api
self.lending_style = int(config.get('MarketAnalysis', 'lendingStyle', 75, 1, 99))
self.recorded_levels = 10
self.modules_dir = os.path.dirname(os.path.realpath(__file__))
self.top_dir = os.path.dirname(self.modules_dir)
self.db_dir = os.path.join(self.top_dir, 'market_data')
self.recorded_levels = int(config.get('MarketAnalysis', 'recorded_levels', 3, 1, 100))
self.data_tolerance = float(config.get('MarketAnalysis', 'data_tolerance', 15, 10, 90))
self.ma_debug_log = config.getboolean('MarketAnalysis', 'ma_debug_log')
self.MACD_long_win_seconds = int(config.get('MarketAnalysis', 'MACD_long_win_seconds',
60 * 30 * 1 * 1,
60 * 1 * 1 * 1,
60 * 60 * 24 * 7))
self.percentile_seconds = int(config.get('MarketAnalysis', 'percentile_seconds',
60 * 60 * 24 * 1,
60 * 60 * 1 * 1,
60 * 60 * 24 * 14))
if self.MACD_long_win_seconds > self.percentile_seconds:
keep_sec = self.MACD_long_win_seconds
else:
keep_sec = self.percentile_seconds
self.keep_history_seconds = int(config.get('MarketAnalysis', 'keep_history_seconds',
int(keep_sec * 1.1),
int(keep_sec * 1.1),
60 * 60 * 24 * 14))
self.MACD_short_win_seconds = int(config.get('MarketAnalysis', 'MACD_short_win_seconds',
int(self.MACD_long_win_seconds / 12),
1,
self.MACD_long_win_seconds / 2))
self.daily_min_multiplier = float(config.get('Daily_min', 'multiplier', 1.05, 1))
self.delete_thread_sleep = float(config.get('MarketAnalysis', 'delete_thread_sleep',
self.keep_history_seconds / 2,
60,
60 * 60 * 2))
self.exchange = config.get_exchange()
if len(self.currencies_to_analyse) != 0:
for currency in self.currencies_to_analyse:
try:
self.api.return_loan_orders(currency, 5)
except Exception as cur_ex:
raise Exception("ERROR: You entered an incorrect currency: '{0}' to analyse the market of, please "
"check your settings. Error message: {1}".format(currency, cur_ex))
time.sleep(2)
def run(self):
"""
Main entry point to start recording data. This starts all the other threads.
"""
for cur in self.currencies_to_analyse:
db_con = self.create_connection(cur)
self.create_rate_table(db_con, self.recorded_levels)
db_con.close()
self.run_threads()
self.run_del_threads()
def run_threads(self):
"""
Start threads for each currency we want to record. (should be configurable later)
"""
for _ in ['thread1']:
for cur in self.currencies_to_analyse:
thread = threading.Thread(target=self.update_market_thread, args=(cur,))
                thread.daemon = True
thread.start()
def run_del_threads(self):
"""
Start thread to start the DB cleaning threads.
"""
for _ in ['thread1']:
for cur in self.currencies_to_analyse:
del_thread = threading.Thread(target=self.delete_old_data_thread, args=(cur, self.keep_history_seconds))
del_thread.daemon = False
del_thread.start()
def delete_old_data_thread(self, cur, seconds):
"""
Thread to clean the DB.
"""
while True:
try:
db_con = self.create_connection(cur)
self.delete_old_data(db_con, seconds)
except Exception as ex:
ex.message = ex.message if ex.message else str(ex)
print("Error in MarketAnalysis: {0}".format(ex.message))
traceback.print_exc()
time.sleep(self.delete_thread_sleep)
@staticmethod
def print_traceback(ex, log_message):
ex.message = ex.message if ex.message else str(ex)
print("{0}: {1}".format(log_message, ex.message))
traceback.print_exc()
@staticmethod
def print_exception_error(ex, log_message, debug=False):
ex.message = ex.message if ex.message else str(ex)
print("{0}: {1}".format(log_message, ex.message))
if debug:
import traceback
ex_type, value, tb = sys.exc_info()
print("DEBUG: Class:{0} Args:{1}".format(ex.__class__, ex.args))
print("DEBUG: Type:{0} Value:{1} LineNo:{2}".format(ex_type, value, tb.tb_lineno))
traceback.print_exc()
def update_market_thread(self, cur, levels=None):
"""
        This is where the main work is done for recording the market data. The loop never exits; it continuously
        polls the exchange for the current loan offers in the book.
        :param cur: The currency (database) to record data for
        :param levels: The depth of offered rates to store
"""
if levels is None:
levels = self.recorded_levels
db_con = self.create_connection(cur)
while True:
try:
raw_data = self.api.return_loan_orders(cur, levels)['offers']
            except ApiError as ex:
                if '429' in str(ex):
                    if self.ma_debug_log:
                        print("Caught ERR_RATE_LIMIT, sleeping capture and increasing request delay. Current"
                              " {0}ms".format(self.api.req_period))
                    time.sleep(130)
                # The request failed, so there is no fresh raw_data; skip this cycle.
                continue
            except Exception as ex:
                if self.ma_debug_log:
                    self.print_traceback(ex, "Error in returning data from exchange")
                else:
                    print("Error in returning data from exchange, ignoring")
                # The request failed, so there is no fresh raw_data; skip this cycle.
                continue
market_data = []
for i in xrange(levels):
try:
market_data.append(str(raw_data[i]['rate']))
market_data.append(str(raw_data[i]['amount']))
except IndexError:
market_data.append("5")
market_data.append("0.1")
market_data.append('0') # Percentile field not being filled yet.
self.insert_into_db(db_con, market_data)
time.sleep(5)
def insert_into_db(self, db_con, market_data, levels=None):
if levels is None:
levels = self.recorded_levels
insert_sql = "INSERT INTO loans ("
for level in xrange(levels):
insert_sql += "rate{0}, amnt{0}, ".format(level)
insert_sql += "percentile) VALUES ({0});".format(','.join(market_data)) # percentile = 0
with db_con:
try:
db_con.execute(insert_sql)
except Exception as ex:
self.print_traceback(ex, "Error inserting market data into DB")
def delete_old_data(self, db_con, seconds):
"""
        Delete old data from the database.
        :param db_con: Connection to the database to remove data from
        :param seconds: The maximum age, in seconds, of data to keep
"""
del_time = int(time.time()) - seconds
with db_con:
query = "DELETE FROM loans WHERE unixtime < {0};".format(del_time)
cursor = db_con.cursor()
cursor.execute(query)
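    # Illustrative example of the generated SQL (assuming seconds=3600 and a
    # current unix time of 1500000000):
    #   DELETE FROM loans WHERE unixtime < 1499996400;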
@staticmethod
def get_day_difference(date_time): # Will be a number of seconds since epoch
"""
Get the difference in days between the supplied date_time and now.
:param date_time: A python date time object
:return: The number of days that have elapsed since date_time
"""
date1 = datetime.datetime.fromtimestamp(float(date_time))
now = datetime.datetime.now()
diff_days = (now - date1).days
return diff_days
def get_rate_list(self, cur, seconds):
"""
        Query the database (cur) for rates recorded between the supplied number of seconds ago and now.
        :param cur: The currency (database) to get rates for, or an already-open sqlite3 connection
        :param seconds: The number of seconds between the oldest order returned and now.
        :return: A pandas DataFrame with named columns ('time', 'rate0', 'rate1', ...), or an empty list when there
            is no data or the currency is not being analysed
"""
# Request more data from the DB than we need to allow for skipped seconds
request_seconds = int(seconds * 1.1)
full_list = Config.get_all_currencies()
if isinstance(cur, sqlite.Connection):
db_con = cur
else:
if cur not in full_list:
raise ValueError("{0} is not a valid currency, must be one of {1}".format(cur, full_list))
if cur not in self.currencies_to_analyse:
return []
db_con = self.create_connection(cur)
price_levels = ['rate0']
rates = self.get_rates_from_db(db_con, from_date=time.time() - request_seconds, price_levels=price_levels)
if len(rates) == 0:
return []
df = pd.DataFrame(rates)
columns = ['time']
columns.extend(price_levels)
try:
df.columns = columns
except:
if self.ma_debug_log:
print("DEBUG:get_rate_list: cols: {0} rates:{1} db:{2}".format(columns, rates, db_con))
raise
# convert unixtimes to datetimes so we can resample
df.time = pd.to_datetime(df.time, unit='s')
# If we don't have enough data return df, otherwise the resample will fill out all values with the same data.
# Missing data tolerance allows for a percentage to be ignored and filled in by resampling.
if len(df) < seconds * (self.data_tolerance / 100):
return df
# Resample into 1 second intervals, average if we get two in the same second and fill any empty spaces with the
# previous value
df = df.resample('1s', on='time').mean().ffill()
return df
def get_analysis_seconds(self, method):
"""
        Gets the correct number of seconds of data to use for analysis, depending on the method being used.
"""
if method == 'percentile':
return self.percentile_seconds
elif method == 'MACD':
return self.MACD_long_win_seconds
def get_rate_suggestion(self, cur, rates=None, method='percentile'):
"""
        Return the suggested rate from analysed data. This is the main method for retrieving data from this module.
        Currently this only supports returning a single value, the suggested rate. However, this will be expanded to
        suggest a lower and a higher rate for spreads.
        :param cur: The currency (database) to get a suggested rate for
:param rates: This is used for unit testing only. It allows you to populate the data used for the suggestion.
:param method: The method by which you want to calculate the suggestion.
:return: A float with the suggested rate for the currency.
"""
error_msg = "WARN: Exception found when analysing markets, if this happens for more than a couple minutes " +\
"please create a Github issue so we can fix it. Otherwise, you can ignore it. Error"
try:
rates = self.get_rate_list(cur, self.get_analysis_seconds(method)) if rates is None else rates
if not isinstance(rates, pd.DataFrame):
raise ValueError("Rates must be a Pandas DataFrame")
if len(rates) == 0:
print("Rate list not populated")
if self.ma_debug_log:
print("DEBUG:get_analysis_seconds: cur: {0} method:{1} rates:{2}".format(cur, method, rates))
return 0
if method == 'percentile':
return self.get_percentile(rates.rate0.values.tolist(), self.lending_style)
if method == 'MACD':
macd_rate = truncate(self.get_MACD_rate(cur, rates), 6)
if self.ma_debug_log:
print("Cur:{0}, MACD:{1:.6f}, Perc:{2:.6f}, Best:{3:.6f}"
.format(cur, macd_rate, self.get_percentile(rates.rate0.values.tolist(), self.lending_style),
rates.rate0.iloc[-1]))
return macd_rate
except MarketDataException:
if method != 'percentile':
print("Caught exception during {0} analysis, using percentile for now".format(method))
return self.get_percentile(rates.rate0.values.tolist(), self.lending_style)
else:
raise
except Exception as ex:
self.print_exception_error(ex, error_msg, debug=self.ma_debug_log)
return 0
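    # Illustrative usage sketch (not in the original code): with an analysis
    # object built from the bot's config and api, callers would typically do
    #     suggested_rate = analysis.get_rate_suggestion('BTC', method='percentile')
    # where 'BTC' stands in for any currency listed in analyseCurrencies.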
@staticmethod
def percentile(N, percent, key=lambda x: x):
"""
http://stackoverflow.com/questions/2374640/how-do-i-calculate-percentiles-with-python-numpy/2753343#2753343
Find the percentile of a list of values.
:parameter N: A list of values. Note N MUST BE already sorted.
:parameter percent: A float value from 0.0 to 1.0.
:parameter key: Optional key function to compute value from each element of N.
:return: Percentile of the values
"""
import math
if not N:
return None
k = (len(N) - 1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(N[int(k)])
d0 = key(N[int(f)]) * (c - k)
d1 = key(N[int(c)]) * (k - f)
return d0 + d1
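    # Worked example (illustrative): percentile([1, 2, 3, 4], 0.75) gives
    # k = 3 * 0.75 = 2.25, f = 2, c = 3, so the result interpolates between
    # N[2] and N[3]: 3 * (3 - 2.25) + 4 * (2.25 - 2) = 2.25 + 1.0 = 3.25,
    # matching numpy.percentile([1, 2, 3, 4], 75).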
def get_percentile(self, rates, lending_style, use_numpy=use_numpy):
"""
Take a list of rates no matter what method is being used, simple list, no pandas / numpy array
"""
if use_numpy:
result = numpy.percentile(rates, int(lending_style))
else:
result = self.percentile(sorted(rates), lending_style / 100.0)
result = truncate(result, 6)
return result
def get_MACD_rate(self, cur, rates_df):
"""
        "Golden cross" is a bit of a misnomer, but the idea is to compare the short-term moving average with the
        long-term moving average. If the short-term average is above the long-term average then the market is moving
        in a bullish manner and it's a good time to lend, so return the short-term moving average (or the latest
        rate, if that is higher), scaled with the multiplier.
        :param cur: The currency (database) the rates belong to, used for logging
        :param rates_df: A pandas DataFrame with times and rates
        The short window, long window and multiplier are taken from the MACD_short_win_seconds,
        MACD_long_win_seconds and Daily_min multiplier configuration values rather than passed as parameters.
        :return: A float of the suggested, calculated rate
"""
if len(rates_df) < self.get_analysis_seconds('MACD') * (self.data_tolerance / 100):
print("{0} : Need more data for analysis, still collecting. I have {1}/{2} records"
.format(cur, len(rates_df), int(self.get_analysis_seconds('MACD') * (self.data_tolerance / 100))))
raise MarketDataException
short_rate = rates_df.rate0.tail(self.MACD_short_win_seconds).mean()
long_rate = rates_df.rate0.tail(self.MACD_long_win_seconds).mean()
if self.ma_debug_log:
sys.stdout.write("Short higher: ") if short_rate > long_rate else sys.stdout.write("Long higher: ")
if short_rate > long_rate:
if rates_df.rate0.iloc[-1] < short_rate:
return short_rate * self.daily_min_multiplier
else:
return rates_df.rate0.iloc[-1] * self.daily_min_multiplier
else:
return long_rate * self.daily_min_multiplier
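    # Worked example (illustrative): with short_rate=0.0012, long_rate=0.0010,
    # a latest rate of 0.0011 and daily_min_multiplier=1.05, the short average
    # is higher and also above the latest rate, so the suggestion is
    # 0.0012 * 1.05 = 0.00126.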
def create_connection(self, cur, db_path=None, db_type='sqlite3'):
"""
Create a connection to the sqlite DB. This will create a new file if one doesn't exist. We can use :memory:
here for db_path if we don't want to store the data on disk
:param cur: The currency (database) in the DB
:param db_path: DB directory
:return: Connection object or None
"""
if db_path is None:
prefix = Config.get_exchange()
db_path = os.path.join(self.db_dir, '{0}-{1}.db'.format(prefix, cur))
try:
con = sqlite.connect(db_path)
return con
except Error as ex:
print(ex.message)
def create_rate_table(self, db_con, levels):
"""
Create a new table to hold rate data.
        :param db_con: Connection to the database; there's a separate database (with a loans table) per currency.
        :param levels: The depth of offered rates to store
"""
with db_con:
cursor = db_con.cursor()
create_table_sql = "CREATE TABLE IF NOT EXISTS loans (id INTEGER PRIMARY KEY AUTOINCREMENT," + \
"unixtime integer(4) not null default (strftime('%s','now')),"
for level in xrange(levels):
create_table_sql += "rate{0} FLOAT, ".format(level)
create_table_sql += "amnt{0} FLOAT, ".format(level)
create_table_sql += "percentile FLOAT);"
cursor.execute("PRAGMA journal_mode=wal")
cursor.execute(create_table_sql)
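    # Illustrative example of the generated SQL (assuming levels=1; the actual
    # statement is built as a single line):
    #   CREATE TABLE IF NOT EXISTS loans (id INTEGER PRIMARY KEY AUTOINCREMENT,
    #       unixtime integer(4) not null default (strftime('%s','now')),
    #       rate0 FLOAT, amnt0 FLOAT, percentile FLOAT);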
def get_rates_from_db(self, db_con, from_date=None, price_levels=['rate0']):
"""
        Query the DB for all rates for a particular currency.
        :param db_con: Connection to the database of the currency you want the rates for
        :param from_date: The earliest data you want, specified in unix time (seconds since epoch)
        :param price_levels: The price levels to return; we record multiple levels in the DB, the best offer being rate0
"""
with db_con:
cursor = db_con.cursor()
query = "SELECT unixtime, {0} FROM loans ".format(",".join(price_levels))
if from_date is not None:
query += "WHERE unixtime > {0}".format(from_date)
query += ";"
cursor.execute(query)
return cursor.fetchall()
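# Minimal runnable sketch (not part of the original bot, and only meaningful
# inside the bot's source tree where the modules.* imports resolve): the manual
# percentile helper and numpy.percentile agree on a plain, sorted rate list.
if __name__ == "__main__":
    sample_rates = sorted([0.0001, 0.00011, 0.00012, 0.00015, 0.0002])
    manual = MarketAnalysis.percentile(sample_rates, 75 / 100.0)
    print("manual 75th percentile: {0:.6f}".format(manual))
    if use_numpy:
        print("numpy  75th percentile: {0:.6f}".format(
            numpy.percentile(sample_rates, 75)))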
|
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
# import htmlentitydefs
import re
import string
import sys
import mimetools
import StringIO
from elementtree import ElementTree
class recollector:
def __init__(self):
self.res = {}
self.regs = {}
def add(self, name, reg, mods=None):
self.regs[name] = reg % self.regs
# print "%s = %s" % (name, self.regs[name])
if mods:
            self.res[name] = re.compile(self.regs[name], mods)  # check that it is valid
        else:
            self.res[name] = re.compile(self.regs[name])  # check that it is valid
collector = recollector()
a = collector.add
a("TextSE", "[^<]+")
a("UntilHyphen", "[^-]*-")
a("Until2Hyphens", "%(UntilHyphen)s(?:[^-]%(UntilHyphen)s)*-")
a("CommentCE", "%(Until2Hyphens)s>?")
a("UntilRSBs", "[^\\]]*](?:[^\\]]+])*]+")
a("CDATA_CE", "%(UntilRSBs)s(?:[^\\]>]%(UntilRSBs)s)*>")
a("S", "[ \\n\\t\\r]+")
a("NameStrt", "[A-Za-z_:]|[^\\x00-\\x7F]")
a("NameChar", "[A-Za-z0-9_:.-]|[^\\x00-\\x7F]")
a("Name", "(?:%(NameStrt)s)(?:%(NameChar)s)*")
a("QuoteSE", "\"[^\"]*\"|'[^']*'")
a("DT_IdentSE", "%(S)s%(Name)s(?:%(S)s(?:%(Name)s|%(QuoteSE)s))*")
# http://bugs.activestate.com/show_bug.cgi?id=28765
# a("MarkupDeclCE" , "(?:[^\\]\"'><]+|%(QuoteSE)s)*>" )
a("MarkupDeclCE", "(?:[^\\]\"'> \\n\\t\\r<]+|%(QuoteSE)s)*>")
a("S1", "[\\n\\r\\t ]")
a("UntilQMs", "[^?]*\\?+")
a("PI_Tail", "\\?>|%(S1)s%(UntilQMs)s(?:[^>?]%(UntilQMs)s)*>")
a("DT_ItemSE",
"<(?:!(?:--%(Until2Hyphens)s>|[^-]%(MarkupDeclCE)s)|\\?%(Name)s(?:%(PI_Tail)s))|%%%(Name)s;|%(S)s"
)
a("DocTypeCE",
"%(DT_IdentSE)s(?:%(S)s)?(?:\\[(?:%(DT_ItemSE)s)*](?:%(S)s)?)?>?")
a("DeclCE",
"--(?:%(CommentCE)s)?|\\[CDATA\\[(?:%(CDATA_CE)s)?|DOCTYPE(?:%(DocTypeCE)s)?")
a("PI_CE", "%(Name)s(?:%(PI_Tail)s)?")
a("EndTagCE", "(?P<endtag>%(Name)s)(?:%(S)s)?>?")
a("AttValSE", "\"[^<\"]*\"|'[^<']*'")
a("ElemTagCE",
"(?P<tag>%(Name)s)(?P<attrs>(?:%(S)s%(Name)s(?:%(S)s)?=(?:%(S)s)?(?:%(AttValSE)s))*)(?:%(S)s)?/?>?")
a("MarkupSPE",
"<(?:!(?:%(DeclCE)s)?|\\?(?:%(PI_CE)s)?|/(?:%(EndTagCE)s)?|(?:%(ElemTagCE)s)?)")
a("XML_SPE", "%(TextSE)s|%(MarkupSPE)s")
a("XML_MARKUP_ONLY_SPE", "%(MarkupSPE)s")
a("DOCTYPE",
r'<!DOCTYPE\s+(?P<type>\S+)\s+(?P<ident>PUBLIC|SYSTEM)\s+(?P<data1>%(QuoteSE)s)\s*(?P<data2>%(QuoteSE)s)?\s*(?:\[|>)', re.S)
a("attrfinderRE",
"(?:[\n \t]*)(%(Name)s)(?:%(S)s)?=(?:%(S)s)?(%(AttValSE)s)", re.S | re.U)
attrfinder = collector.res["attrfinderRE"]
is_not_ascii = re.compile(eval(r'u"[\u0080-\uffff]"')).search
def parseiter(data, markuponly=0):
if markuponly:
reg = "XML_MARKUP_ONLY_SPE"
else:
reg = "XML_SPE"
regex = collector.res[reg]
return regex.finditer(data)
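def _parseiter_example(data='<p class="x">hi</p>'):
    # Illustrative sketch, not part of the original service: each match from
    # parseiter() is either raw text or a markup token whose groupdict()
    # carries "tag"/"attrs" (start tags) or "endtag" (end tags).
    return [(m.group(0), m.groupdict().get("tag"), m.groupdict().get("endtag"))
            for m in parseiter(data)]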
def strip_quotes(str):
if not str:
return None
if str[0] in ["'", '"']:
return str[1:-1]
return str
# XXX this should really be done via DTD/Schema, but that would be a major
# pain. For general purposes, this will work fine and be faster
# these tags are defined to NOT ALLOW end tags at all in html. They never
# have children and never have end tags
# defined in dtd as ELEMENT NAME - O EMPTY
html_no_close_tags = set([
"basefont", "br", "area", "link", "img", "param", "hr", "input",
"col", "frame", "isindex", "base", "meta"
])
# defined in dtd as ELEMENT NAME - O *
html_optional_close_tags = set([
"p", "dt", "dd", "li", "option", "thead", "tfoot", "colgroup",
"col", "tr", "th", "td"
])
html_block_tags = set([
"p", "h1", "h2", "h3", "h4", "h5", "h6", "ul", "ol", "pre", "dl", "div", "noscript",
"blockquote", "form", "hr", "table", "fieldset", "address"
])
# these are optional end tag and cannot contain other block tags defined above
html_cannot_contain_block_tags = set([
"p", "dt"
])
html_close_tag_unnecessary = html_no_close_tags.union(html_optional_close_tags)
class HTMLTreeBuilder(ElementTree.TreeBuilder):
def __init__(self, encoding="iso-8859-1"):
ElementTree.TreeBuilder.__init__(self)
self.encoding = encoding
self.nodes = []
self.nodemap = {} # {child_elem: parent_elem, ... }
self._rootnodes = []
self.current = None
def start(self, tag, attrs, loc_start, loc_end):
if not tag:
return
# print loc
if tag == "meta":
# look for encoding directives
http_equiv = content = None
for k, v in attrs:
if k == "http-equiv":
http_equiv = string.lower(v)
elif k == "content":
content = v
if http_equiv == "content-type" and content:
# use mimetools to parse the http header
header = mimetools.Message(
StringIO.StringIO("%s: %s\n\n" % (http_equiv, content))
)
encoding = header.getparam("charset")
if encoding:
self.encoding = encoding
l_tag = tag.lower()
if self._elem:
p_tag = self._elem[-1].tag.lower()
# if the parent and child are the same tag, then close the
# parent if it uses optional close tags
if l_tag in html_optional_close_tags and p_tag == l_tag:
self.end(tag)
# special case table tags that should be autoclosed only when
# hitting a new table row
elif p_tag in ("td", "th") and l_tag == "tr":
self.end_tag(p_tag)
# if the parent and child are block tags, close the parent
elif p_tag in html_cannot_contain_block_tags and l_tag in html_block_tags:
self.end_tag(p_tag)
attrib = {}
for attr in attrs:
attrib[attr[0]] = strip_quotes(attr[1])
ElementTree.TreeBuilder.start(self, tag, attrib)
el = self._elem[-1]
self.current = el
el.ns = None
el.localName = el.tag
el.start = loc_start
el.end = None
self.nodes.append(el)
if len(self._elem) > 1:
self.nodemap[el] = self._elem[-2]
else:
self.nodemap[el] = None
if l_tag in html_no_close_tags:
self.end_tag(tag, loc_end)
def end(self, tag, loc=None):
if not self._elem:
return None
l_tag = tag
l_lasttag = lasttag = self._elem[-1].tag
if l_tag:
l_tag = l_tag.lower()
if l_lasttag:
l_lasttag = lasttag.lower()
while (l_tag != l_lasttag
and l_lasttag in html_optional_close_tags
and len(self._elem) > 1
and self._last.start[2] < self._elem[-1].start[2]):
self.end_tag(lasttag)
if self._elem:
lasttag = self._elem[-1].tag
l_lasttag = lasttag.lower()
else:
self.current = self._last
return self._last
# protect against a previous close of this tag
if l_tag in html_close_tag_unnecessary and l_tag != self._elem[-1].tag.lower():
return None
return self.end_tag(tag, loc)
def end_tag(self, tag, loc=None):
if not tag:
return None
self._flush()
# find this tag:
tags = [e.localName for e in self._elem]
if tag not in tags:
# invalid end tag?
return None
last = self._elem.pop()
while last.tag != tag:
last = self._elem.pop()
self._last = last
if not self._elem:
self._rootnodes.append(self._last)
if loc:
self._last.end = loc
self._tail = 1
self.current = self._last
return self._last
def data(self, data):
if isinstance(data, type('')) and is_not_ascii(data):
# convert to unicode, but only if necessary
data = unicode(data, self.encoding, "ignore")
ElementTree.TreeBuilder.data(self, data)
def close(self):
if self._elem:
return self._elem[0]
return self._last
class Parser:
def __init__(self, builder=None):
if not builder:
builder = ElementTree.TreeBuilder()
self._builder = builder
self.doctype = None
self.publicId = None
self.systemId = None
self.locator = {}
self._lastloc = None
self.data = None
def parse_doctype(self, data):
m = collector.res["DOCTYPE"].match(data)
if m is None:
return
result = m.groupdict()
self.doctype = result
self.publicId = None
if result['ident'] == "PUBLIC":
self.publicId = strip_quotes(result['data1'])
self.systemId = strip_quotes(result['data2'])
else:
self.systemId = strip_quotes(result['data1'])
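    # Illustrative example (not in the original code): for
    #   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
    #             "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
    # parse_doctype sets publicId to the quoted PUBLIC identifier and systemId
    # to the DTD URL, with the surrounding quotes stripped.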
def getLocation(self, loc):
pos = 0
last_lines = 0
if self._lastloc:
pos = self._lastloc
last_lines = self.locator[pos][0]
lines = last_lines + self.data.count("\n", pos, loc)
col = 0
if lines > last_lines:
col = loc - self.data.rfind("\n", pos, loc) - 1
elif pos in self.locator:
col = loc - pos + self.locator[pos][1]
self.locator[loc] = [lines, col]
self._lastloc = loc
return (lines + 1, col, loc)
def feed(self, data, markuponly=0):
no_close_tag = []
opt_close_tag = []
self.data = data
for matchObj in parseiter(data, markuponly):
x = matchObj.group(0)
m = matchObj.groupdict()
if x.startswith("<!"):
continue
# XXX
if x.startswith("<!DOCTYPE"):
self.parse_doctype(x)
elif x.startswith("<?"):
# processing tag
continue
elif x.startswith("</"):
self._builder.end(m[
"endtag"], self.getLocation(matchObj.end(0)))
elif x.startswith("<"):
# get the tag and attrs
attrs = []
if "attrs" in m and m["attrs"] is not None:
attrs = attrfinder.findall(m["attrs"])
start = self.getLocation(matchObj.start(0))
end = self.getLocation(matchObj.end(0))
self._builder.start(m["tag"], attrs, start, end)
if x.endswith("/>"):
self._builder.end(m["tag"], end)
else:
self._builder.data(x)
def close(self):
return self._builder.close()
try:
import sgmlop
ReParser = Parser
class SgmlopParser(ReParser):
def __init__(self, builder=None):
ReParser.__init__(self, builder)
self.__parser = sgmlop.XMLParser()
self.__parser.register(self)
def finish_starttag(self, tag, attrib, loc_start, loc_end):
# builder expects a list of tuples
attrs = list(attrib.items())
self._builder.start(tag, attrs, self.getLocation(
loc_start), self.getLocation(loc_end))
def finish_endtag(self, tag, loc):
self._builder.end(tag, self.getLocation(loc))
def handle_data(self, data):
self._builder.data(data)
def handle_special(self, data, token_type=None):
# here's where we figure out if we've got a doctype
if (token_type == 0x105 or # from sgmlop.c
data and data.startswith("DOCTYPE")):
# we get everything inside <!...>
self.parse_doctype("<!%s>" % data)
def feed(self, data, markuponly=0):
self.data = data
return self.__parser.feed(data)
def close(self):
if self.__parser:
self.__parser.close()
self.__parser = None
return ReParser.close(self)
Parser = SgmlopParser
except:
pass
def HTML(data, ParserClass=Parser):
p = ParserClass(HTMLTreeBuilder())
p.feed(data)
return p.close()
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
import time
# read the file and parse it to get a time.
f = open(sys.argv[1])
data = f.read()
f.close()
t1 = time.time()
tree = HTML(data, ReParser)
t2 = time.time()
print "RE parsing took %s" % (t2-t1)
t1 = time.time()
tree = HTML(data, SgmlopParser)
t2 = time.time()
print "sgmlop parsing took %s" % (t2-t1)
sys.exit(0)
data = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<head> <title>my title</title> </head>
<body>
<p>blah blah...
<img src="somefile.jpg" alt="blah">
</img>
</p>
</body>
</html>"""
tree = HTML(data)
print ElementTree.tostring(tree)
sys.exit(0)
data = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
"""
tree = HTML(data)
print ElementTree.tostring(tree)
sys.exit(0)
data = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<HTML lang="en">
<BODY>
<p>
<img>
<p>
<br>
</p>
<hr>
<p>"""
# <br>
# <dl>
# <li>
# <li>
# <li>
# </dl>
# <p>
# <hr>
#</p>
#</BODY>
#</HTML>
#"""
data = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<!-- Copyright (c) 2000-2006 ActiveState Software Inc. -->
<!-- See the file LICENSE.txt for licensing information. -->
<html>
<head>
<link rel="stylesheet" type="text/css" href="aspn.css">
<script language="JavaScript" src="displayToc.js"></script>
<script language="JavaScript" src="tocParas.js"></script>
<script language="JavaScript" src="tocTab.js"></script>
<link rel="icon" href="favicon.ico" type="image/x-icon"/>
<link rel="shortcut icon" href="favicon.ico" type="image/x-icon"/>
<title>XML Catalogs</title>
</head>
<body>
<table>
<tr>
<td>
<h1><a name="xml_catalogs_top">XML Catalogs</a></h1>
<p>Komodo can add <a href=komodo-doc-editor.html#XML_AutoComplete">XML
autocompletion</a> support for any XML dialect with a DTD or RelaxNG Schema.
This is done by mapping external identifier entries to local copies of the DTD
or RelaxNG Schema for that document type using <a target="_blank"
href="http://www.oasis-open.org/committees/entity/spec.html">XML
Catalogs</a>.</p>
<p><script>writelinks('xml_catalogs_top');</script> </p>
<h2><a name="using_xml_catalogs">Using an Existing XML Catalog</a></h2>
<p>Some toolkits bundle DTDs or RelaxNG Schemas with their own XML
catalogs. As long as the relative path from the catalog to the .dtd or
.rng file is preserved on the local filesystem, you can add support for
the dialect by specifying the catalog file in Preferences under <a
href="komodo-doc-prefs.html#xml_catalogs">SGML/XML Catalogs</a>.</p>
<p><script>writelinks('using_xml_catalogs');</script> </p>
<h2><a name="creating_xml_catalogs">Creating an XML Catalog</a></h2>
<p>If the DTD or RelaxNG Schema for the dialect does not have a catalog
file, you can create one by mapping the external identifiers and URI
references in the document's namespace declaration to a local filesystem
URI. For example, the <a target="_blank"
href="http://www.xspf.org/specs/">
<acronym title="XML Shareable Playlist Format">XSPF</acronym></a>
playlist format uses the following namespace declaration:</p>
<pre>
<playlist version="1" xmlns="http://xspf.org/ns/0/">
</pre>
<p>A simple catalog for this XML dialect would look like this:</p>
<pre>
<?xml version='1.0'?>
<catalog xmlns="urn:oasis:names:tc:entity:xmlns:xml:catalog"
prefer="public">
<uri name="http://xspf.org/ns/0/" uri="xspf-draft8.rng"/>
</catalog>
</pre>
<p>If your documents use the DOCTYPE declaration, you can add support
for that in the catalog by using the public and system identifier. For
example, <a target="_blank" href="http://www.mozilla.org/projects/xul/">
<acronym title="XML User Interface Language">XUL</acronym></a> uses
DOCTYPE declarations like this one:</p>
<pre>
<!DOCTYPE overlay PUBLIC "-//MOZILLA//DTD XUL V1.0//EN"
"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
</pre>
<p>Komodo's catalog for XUL uses <code>publicId</code> and
<code>systemId</code> in addition to <code>uri</code> for the
mapping.</p>
<pre>
<?xml version='1.0'?>
<catalog xmlns="urn:oasis:names:tc:entity:xmlns:xml:catalog" prefer="public">
<public publicId="-//MOZILLA//DTD XUL V1.0//EN"
uri="xul.dtd"/>
<system systemId="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul"
uri="xul.dtd"/>
<uri name="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul"
uri="xul.dtd"/>
</catalog>
</pre>
<p><script>writelinks('creating_xml_catalogs');</script> </p>
<h2><a name="xml_catalog_resources">XML Catalog Resources</a></h2>
<p>The XML Catalog specification can be found at:</p>
<ul>
<li><a target="_blank"
href="http://www.oasis-open.org/committees/entity/spec.html">
http://www.oasis-open.org/committees/entity/spec.html</a></li>
</ul>
<p>Examples of XML catalog files can be found in the Komodo installation
under:</p>
<ul>
<li><em><komodo-install-directory>\lib\support\catalogs</em>
(Windows)</li>
<li><em>/Applications/Komodo.app/Contents/SharedSupport/catalogs/ (OS
X)</em></li>
<li><em><komodo-install-directory>/lib/support/catalogs</em>
(Linux)</li>
</ul>
<p><script>writelinks('xml_catalog_resources');</script> </p>
<!-- Footer Start -->
<hr>
</td>
</tr>
</table>
</body>
</html>
"""
tree = HTML(data)
# print ElementTree.tostring(tree)
data = """<html>
<HEAD>
<?php print $javascript->link('calendar') ?>
<?php $othAuth->init($othAuth->data);?>
<!--[if lte IE 6]-->
<?php echo $html->css{'hack'};?>
<!--[endif]-->
<script type="text/javascript">
function fadeTableRow(rowid, opts) {
if (!spts) {
opts = {};
}
}
</script>
</head>
<body>"""
tree = HTML(data)
# print ElementTree.tostring(tree)
data = """<%= error_messages_for 'product' %>
<!--[form:product]-->
<p><label for="product_title">Title</label><br/>
<%= text_field 'product', 'title' %></p>
<p><label for="product_description">Description</label><br/>
<%= text_area 'product', 'description' %></p>
<p><label for="product_image_url">Image url</label><br/>
<%= text_field 'product', 'image_url' %></p>
<p><label for="product_price">Price</label><br/>
<%= text_field 'product', 'price' %></p>
<p><label for="product_date_available">Date available</label><br/>
<%= datetime_select 'product', 'date_available' %></p>
<!--[eoform:product]-->
"""
tree = HTML(data)
print ElementTree.tostring(tree)
p = Parser(HTMLTreeBuilder())
p.feed(data)
p.close()
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mining RPCs
- getmininginfo
- getblocktemplate proposal mode
- submitblock"""
import copy
from decimal import Decimal
from test_framework.blocktools import (
create_coinbase,
TIME_GENESIS_BLOCK,
)
from test_framework.messages import (
CBlock,
CBlockHeader,
BLOCK_HEADER_SIZE
)
from test_framework.mininode import (
P2PDataStore,
)
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes_bi,
)
from test_framework.script import CScriptNum
def assert_template(node, block, expect, rehash=True):
if rehash:
block.hashMerkleRoot = block.calc_merkle_root()
rsp = node.getblocktemplate(template_request={'data': block.serialize().hex(), 'mode': 'proposal', 'rules': ['segwit']})
assert_equal(rsp, expect)
class MiningTest(GuldenTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def mine_chain(self):
self.log.info('Create some old blocks')
for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 200 * 600, 600):
self.nodes[0].setmocktime(t)
self.nodes[0].generate(1)
mining_info = self.nodes[0].getmininginfo()
assert_equal(mining_info['blocks'], 200)
assert_equal(mining_info['currentblocktx'], 0)
assert_equal(mining_info['currentblockweight'], 4000)
self.restart_node(0)
connect_nodes_bi(self.nodes, 0, 1)
def run_test(self):
self.mine_chain()
node = self.nodes[0]
def assert_submitblock(block, result_str_1, result_str_2=None):
block.solve()
result_str_2 = result_str_2 or 'duplicate-invalid'
assert_equal(result_str_1, node.submitblock(hexdata=block.serialize().hex()))
assert_equal(result_str_2, node.submitblock(hexdata=block.serialize().hex()))
self.log.info('getmininginfo')
mining_info = node.getmininginfo()
assert_equal(mining_info['blocks'], 200)
assert_equal(mining_info['chain'], 'regtest')
assert 'currentblocktx' not in mining_info
assert 'currentblockweight' not in mining_info
assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334'))
assert_equal(mining_info['pooledtx'], 0)
# Mine a block to leave initial block download
node.generatetoaddress(1, node.get_deterministic_priv_key().address)
tmpl = node.getblocktemplate({'rules': ['segwit']})
self.log.info("getblocktemplate: Test capability advertised")
assert 'proposal' in tmpl['capabilities']
assert 'coinbasetxn' not in tmpl
next_height = int(tmpl["height"])
coinbase_tx = create_coinbase(height=next_height)
# sequence numbers must not be max for nLockTime to have effect
coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
coinbase_tx.rehash()
# round-trip the encoded bip34 block height commitment
assert_equal(CScriptNum.decode(coinbase_tx.vin[0].scriptSig), next_height)
# round-trip negative and multi-byte CScriptNums to catch python regression
assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(1500))), 1500)
assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(-1500))), -1500)
assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(-1))), -1)
block = CBlock()
block.nVersion = tmpl["version"]
block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
block.nTime = tmpl["curtime"]
block.nBits = int(tmpl["bits"], 16)
block.nNonce = 0
block.vtx = [coinbase_tx]
self.log.info("getblocktemplate: segwit rule must be set")
assert_raises_rpc_error(-8, "getblocktemplate must be called with the segwit rule set", node.getblocktemplate)
self.log.info("getblocktemplate: Test valid block")
assert_template(node, block, None)
self.log.info("submitblock: Test block decode failure")
assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, block.serialize()[:-15].hex())
self.log.info("getblocktemplate: Test bad input hash for coinbase transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].vin[0].prevout.hash += 1
bad_block.vtx[0].rehash()
assert_template(node, bad_block, 'bad-cb-missing')
self.log.info("submitblock: Test invalid coinbase transaction")
assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, bad_block.serialize().hex())
self.log.info("getblocktemplate: Test truncated final transaction")
assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': block.serialize()[:-1].hex(), 'mode': 'proposal', 'rules': ['segwit']})
self.log.info("getblocktemplate: Test duplicate transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx.append(bad_block.vtx[0])
assert_template(node, bad_block, 'bad-txns-duplicate')
assert_submitblock(bad_block, 'bad-txns-duplicate', 'bad-txns-duplicate')
self.log.info("getblocktemplate: Test invalid transaction")
bad_block = copy.deepcopy(block)
bad_tx = copy.deepcopy(bad_block.vtx[0])
bad_tx.vin[0].prevout.hash = 255
bad_tx.rehash()
bad_block.vtx.append(bad_tx)
assert_template(node, bad_block, 'bad-txns-inputs-missingorspent')
assert_submitblock(bad_block, 'bad-txns-inputs-missingorspent')
self.log.info("getblocktemplate: Test nonfinal transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].nLockTime = 2 ** 32 - 1
bad_block.vtx[0].rehash()
assert_template(node, bad_block, 'bad-txns-nonfinal')
assert_submitblock(bad_block, 'bad-txns-nonfinal')
self.log.info("getblocktemplate: Test bad tx count")
# The tx count is immediately after the block header
bad_block_sn = bytearray(block.serialize())
assert_equal(bad_block_sn[BLOCK_HEADER_SIZE], 1)
bad_block_sn[BLOCK_HEADER_SIZE] += 1
assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': bad_block_sn.hex(), 'mode': 'proposal', 'rules': ['segwit']})
self.log.info("getblocktemplate: Test bad bits")
bad_block = copy.deepcopy(block)
bad_block.nBits = 469762303 # impossible in the real world
assert_template(node, bad_block, 'bad-diffbits')
self.log.info("getblocktemplate: Test bad merkle root")
bad_block = copy.deepcopy(block)
bad_block.hashMerkleRoot += 1
assert_template(node, bad_block, 'bad-txnmrklroot', False)
assert_submitblock(bad_block, 'bad-txnmrklroot', 'bad-txnmrklroot')
self.log.info("getblocktemplate: Test bad timestamps")
bad_block = copy.deepcopy(block)
bad_block.nTime = 2 ** 31 - 1
assert_template(node, bad_block, 'time-too-new')
assert_submitblock(bad_block, 'time-too-new', 'time-too-new')
bad_block.nTime = 0
assert_template(node, bad_block, 'time-too-old')
assert_submitblock(bad_block, 'time-too-old', 'time-too-old')
self.log.info("getblocktemplate: Test not best block")
bad_block = copy.deepcopy(block)
bad_block.hashPrevBlock = 123
assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
assert_submitblock(bad_block, 'prev-blk-not-found', 'prev-blk-not-found')
self.log.info('submitheader tests')
assert_raises_rpc_error(-22, 'Block header decode failed', lambda: node.submitheader(hexdata='xx' * BLOCK_HEADER_SIZE))
assert_raises_rpc_error(-22, 'Block header decode failed', lambda: node.submitheader(hexdata='ff' * (BLOCK_HEADER_SIZE-2)))
assert_raises_rpc_error(-25, 'Must submit previous header', lambda: node.submitheader(hexdata=super(CBlock, bad_block).serialize().hex()))
block.nTime += 1
block.solve()
def chain_tip(b_hash, *, status='headers-only', branchlen=1):
return {'hash': b_hash, 'height': 202, 'branchlen': branchlen, 'status': status}
assert chain_tip(block.hash) not in node.getchaintips()
node.submitheader(hexdata=block.serialize().hex())
assert chain_tip(block.hash) in node.getchaintips()
node.submitheader(hexdata=CBlockHeader(block).serialize().hex()) # Noop
assert chain_tip(block.hash) in node.getchaintips()
bad_block_root = copy.deepcopy(block)
bad_block_root.hashMerkleRoot += 2
bad_block_root.solve()
assert chain_tip(bad_block_root.hash) not in node.getchaintips()
node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex())
assert chain_tip(bad_block_root.hash) in node.getchaintips()
# Should still reject invalid blocks, even if we have the header:
assert_equal(node.submitblock(hexdata=bad_block_root.serialize().hex()), 'bad-txnmrklroot')
assert_equal(node.submitblock(hexdata=bad_block_root.serialize().hex()), 'bad-txnmrklroot')
assert chain_tip(bad_block_root.hash) in node.getchaintips()
# We know the header for this invalid block, so should just return early without error:
node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex())
assert chain_tip(bad_block_root.hash) in node.getchaintips()
bad_block_lock = copy.deepcopy(block)
bad_block_lock.vtx[0].nLockTime = 2**32 - 1
bad_block_lock.vtx[0].rehash()
bad_block_lock.hashMerkleRoot = bad_block_lock.calc_merkle_root()
bad_block_lock.solve()
assert_equal(node.submitblock(hexdata=bad_block_lock.serialize().hex()), 'bad-txns-nonfinal')
assert_equal(node.submitblock(hexdata=bad_block_lock.serialize().hex()), 'duplicate-invalid')
# Build a "good" block on top of the submitted bad block
bad_block2 = copy.deepcopy(block)
bad_block2.hashPrevBlock = bad_block_lock.sha256
bad_block2.solve()
assert_raises_rpc_error(-25, 'bad-prevblk', lambda: node.submitheader(hexdata=CBlockHeader(bad_block2).serialize().hex()))
# Should reject invalid header right away
bad_block_time = copy.deepcopy(block)
bad_block_time.nTime = 1
bad_block_time.solve()
assert_raises_rpc_error(-25, 'time-too-old', lambda: node.submitheader(hexdata=CBlockHeader(bad_block_time).serialize().hex()))
# Should ask for the block from a p2p node, if they announce the header as well:
node.add_p2p_connection(P2PDataStore())
node.p2p.wait_for_getheaders(timeout=5) # Drop the first getheaders
node.p2p.send_blocks_and_test(blocks=[block], node=node)
# Must be active now:
assert chain_tip(block.hash, status='active', branchlen=0) in node.getchaintips()
# Building a few blocks should give the same results
node.generatetoaddress(10, node.get_deterministic_priv_key().address)
assert_raises_rpc_error(-25, 'time-too-old', lambda: node.submitheader(hexdata=CBlockHeader(bad_block_time).serialize().hex()))
assert_raises_rpc_error(-25, 'bad-prevblk', lambda: node.submitheader(hexdata=CBlockHeader(bad_block2).serialize().hex()))
node.submitheader(hexdata=CBlockHeader(block).serialize().hex())
node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex())
assert_equal(node.submitblock(hexdata=block.serialize().hex()), 'duplicate') # valid
if __name__ == '__main__':
MiningTest().main()
|
|
"""
MOF sbus.
"""
import networkx as nx
import numpy as np
from scipy.spatial import distance
def add_distance_matrix(graph):
carts = []
    if float('.'.join(nx.__version__.split('.')[:2])) >= 2.0:
for j, data in sorted(list(graph.nodes(data=True))):
carts.append(data['cartesian_coordinates'])
else:
for j, data in sorted(graph.nodes_iter(data=True)):
carts.append(data['cartesian_coordinates'])
carts = np.array(carts)
graph.distance_matrix = distance.cdist(carts, carts)
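def _add_distance_matrix_example():
    # Illustrative sketch, not part of the original module: attach a pairwise
    # distance matrix to a tiny two-node graph. The off-diagonal entries of
    # the resulting 2x2 matrix equal 1.0 (the separation along x).
    example = nx.Graph(name='example')
    example.add_node(1, cartesian_coordinates=np.array([0.0, 0.0, 0.0]))
    example.add_node(2, cartesian_coordinates=np.array([1.0, 0.0, 0.0]))
    add_distance_matrix(example)
    return example.distance_matrix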
InorganicCluster = {
'Cu':{'Cu Paddlewheel': nx.Graph(name='Cu Paddlewheel') # taken from doi: 10.1126/science.283.5405.1148
},
'Zn':{'Zn4O': nx.Graph(name='Zn4O'), # taken from doi:
'Zn Paddlewheel': nx.Graph(name='Zn Paddlewheel'), # taken from doi:
'Kuratowski': nx.Graph(name='Kuratowski')},
'Zr':{'Zr_UiO': nx.Graph(name='Zr_UiO') # taken from doi:
},
'Cr':{'Cr_tri': nx.Graph(name='Cr_tri') # taken from doi:
},
'V':{'V_pillar': nx.Graph(name='V_pillar') # taken from doi:
},
'Al':{'Al_pillar': nx.Graph(name='Al_pillar') # taken from doi:
}
}
OrganicCluster = {
'N':{'Thymine': nx.Graph(name='Thymine'),
'Adenine': nx.Graph(name='Adenine'),
'CarboxylateImidazolate': nx.Graph(name='CarboxylateImidazolate')},
'C':{'Benzene-2C': nx.Graph(name='Benzene-2C'),
'Biphenyl-2C': nx.Graph(name='Biphenyl-2C'),
'Triphenyl-2C': nx.Graph(name='Triphenyl-2C')
}
}
# add entry
InorganicCluster['Cu']['Cu Paddlewheel'].add_nodes_from([
(1, {'element':'O',
'special_flag': 'O1_Cu_pdw',
'cartesian_coordinates':np.array([1.755, -0.181, -1.376])
}
),
(2, {'element':'O',
'special_flag': 'O2_Cu_pdw',
'cartesian_coordinates':np.array([-1.755, 0.181, -1.376])
}
),
(3, {'element':'O',
'special_flag': 'O1_Cu_pdw',
'cartesian_coordinates':np.array([-0.181, 1.755, 1.376])
}
),
(4, {'element':'O',
'special_flag':'O2_Cu_pdw',
'cartesian_coordinates':np.array([0.181, -1.755, 1.376])
}
),
(5, {'element':'O',
'special_flag':'O1_Cu_pdw',
'cartesian_coordinates':np.array([-1.755, 0.181, 1.376])
}
),
(6, {'element':'O',
'special_flag':'O2_Cu_pdw',
'cartesian_coordinates':np.array([1.755, -0.181, 1.376])
}
),
(7, {'element':'O',
'special_flag':'O1_Cu_pdw',
'cartesian_coordinates':np.array([0.181, -1.755, -1.376])
}
),
(8, {'element':'O',
'special_flag':'O2_Cu_pdw',
'cartesian_coordinates':np.array([-0.181, 1.755, -1.376])
}
),
(9, {'element':'Cu',
'special_flag':'Cu_pdw',
'cartesian_coordinates':np.array([0.929, 0.929, 0.000])
}
),
(10, {'element':'Cu',
'special_flag':'Cu_pdw',
'cartesian_coordinates':np.array([-0.929, -0.929, 0.000])
}
),
(11, {'element':'C',
'special_flag':'C_Cu_pdw',
'cartesian_coordinates':np.array([1.233, -1.233, -1.810])
}
),
(12, {'element':'C',
'special_flag':'C_Cu_pdw',
'cartesian_coordinates':np.array([-1.233, 1.233, -1.810])
}
),
(13, {'element':'C',
'special_flag':'C_Cu_pdw',
'cartesian_coordinates':np.array([-1.233, 1.233, 1.810])
}
),
(14, {'element':'C',
'special_flag':'C_Cu_pdw',
'cartesian_coordinates':np.array([1.233, -1.233, 1.810])
}
)
])
InorganicCluster['Zn']['Zn Paddlewheel'].add_nodes_from([
(1, {'element':'O',
'special_flag': 'O1_Zn_pdw',
'cartesian_coordinates':np.array([-1.398, -1.339, 1.417])
}
),
(2, {'element':'O',
'special_flag': 'O2_Zn_pdw',
'cartesian_coordinates':np.array([-1.398, 0.853, -1.417])
}
),
(3, {'element':'O',
'special_flag': 'O1_Zn_pdw',
'cartesian_coordinates':np.array([-1.398, 0.853, 1.417])
}
),
(4, {'element':'O',
'special_flag':'O2_Zn_pdw',
'cartesian_coordinates':np.array([-1.398, -1.339, -1.417])
}
),
(5, {'element':'O',
'special_flag':'O1_Zn_pdw',
'cartesian_coordinates':np.array([1.398, -1.339, -1.417])
}
),
(6, {'element':'O',
'special_flag':'O2_Zn_pdw',
'cartesian_coordinates':np.array([1.398, 0.853, 1.417])
}
),
(7, {'element':'O',
'special_flag':'O1_Zn_pdw',
'cartesian_coordinates':np.array([1.398, 0.853, -1.417])
}
),
(8, {'element':'O',
'special_flag':'O2_Zn_pdw',
'cartesian_coordinates':np.array([1.398, -1.339, 1.417])
}
),
(9, {'element':'Zn',
'special_flag':'Zn_pdw',
'cartesian_coordinates':np.array([0.000, -1.717, 0.000])
}
),
(10, {'element':'Zn',
'special_flag':'Zn_pdw',
'cartesian_coordinates':np.array([0.000, 1.230, 0.000])
}
),
(11, {'element':'C',
'special_flag':'C_Zn_pdw',
'cartesian_coordinates':np.array([-1.761, -0.243, 1.837])
}
),
(12, {'element':'C',
'special_flag':'C_Zn_pdw',
'cartesian_coordinates':np.array([-1.761, -0.243, -1.837])
}
),
(13, {'element':'C',
'special_flag':'C_Zn_pdw',
'cartesian_coordinates':np.array([1.761, -0.243, 1.837])
}
),
(14, {'element':'C',
'special_flag':'C_Zn_pdw',
'cartesian_coordinates':np.array([1.761, -0.243, -1.837])
}
)
])
InorganicCluster['Zn']['Zn4O'].add_nodes_from([
(1, {'element':'Zn',
'special_flag':'Zn4O',
'cartesian_coordinates':np.array([-1.063000,-1.063000,-1.174000])
}
),
(2, {'element':'Zn',
'special_flag':'Zn4O',
'cartesian_coordinates':np.array([-1.062000,1.179000,1.067000])
}
),
(3, {'element':'Zn',
'special_flag':'Zn4O',
'cartesian_coordinates':np.array([1.179000,-1.063000,1.067000])
}
),
(4, {'element':'Zn',
'special_flag':'Zn4O',
'cartesian_coordinates':np.array([1.179000,1.178000,-1.175000])
}
),
(5, {'element':'O',
'special_flag':'O_z_Zn4O',
'cartesian_coordinates':np.array([0.058000,0.058000,-0.054000])
}
),
(6, {'element':'O',
'special_flag':'O_c_Zn4O',
'cartesian_coordinates':np.array([-2.939000,-0.765000,-0.876000])
}
),
(7, {'element':'O',
'special_flag':'O_c_Zn4O',
'cartesian_coordinates':np.array([-0.764000,0.883000,2.943000])
}
),
(8, {'element':'O',
'special_flag':'O_c_Zn4O',
'cartesian_coordinates':np.array([0.881000,-2.938000,0.770000])
}
),
(9, {'element':'O',
'special_flag':'O_c_Zn4O',
'cartesian_coordinates':np.array([-2.938000,0.883000,0.770000])
}
),
(10, {'element':'O',
'special_flag':'O_c_Zn4O',
'cartesian_coordinates':np.array([-0.767000,-2.938000,-0.876000])
}
),
(11, {'element':'O',
'special_flag':'O_c_Zn4O',
'cartesian_coordinates':np.array([0.882000,-0.764000,2.943000])
}
),
(12, {'element':'O',
'special_flag':'O_c_Zn4O',
'cartesian_coordinates':np.array([3.055000,-0.766000,0.769000])
}
),
(13, {'element':'O',
'special_flag':'O_c_Zn4O',
'cartesian_coordinates':np.array([0.881000,0.880000,-3.051000])
}
),
(14, {'element':'O',
'special_flag':'O_c_Zn4O',
'cartesian_coordinates':np.array([3.055000,0.880000,-0.878000])
}
),
(15, {'element':'O',
'special_flag':'O_c_Zn4O',
'cartesian_coordinates':np.array([-0.766000,-0.766000,-3.050000])
}
),
(16, {'element':'O',
'special_flag':'O_c_Zn4O',
'cartesian_coordinates':np.array([-0.764000,3.055000,0.769000])
}
),
(17, {'element':'O',
'special_flag':'O_c_Zn4O',
'cartesian_coordinates':np.array([0.882000,3.054000,-0.879000])
}
),
(18, {'element':'C',
'special_flag':'C_Zn4O',
'cartesian_coordinates':np.array([3.541000,0.057000,-0.055000])
}
),
(19, {'element':'C',
'special_flag':'C_Zn4O',
'cartesian_coordinates':np.array([0.059000,3.541000,-0.055000])
}
),
(20, {'element':'C',
'special_flag':'C_Zn4O',
'cartesian_coordinates':np.array([0.057000,0.057000,-3.550000])
}
),
(21, {'element':'C',
'special_flag':'C_Zn4O',
'cartesian_coordinates':np.array([-3.438000,0.059000,-0.053000])
}
),
(22, {'element':'C',
'special_flag':'C_Zn4O',
'cartesian_coordinates':np.array([0.057000,-3.438000,-0.053000])
}
),
(23, {'element':'C',
'special_flag':'C_Zn4O',
'cartesian_coordinates':np.array([0.058000,0.058000,3.429000])
}
)
])
InorganicCluster['Zn']['Kuratowski'].add_nodes_from([
(1, {'element':'Zn',
'special_flag':'Zn_tet',
'cartesian_coordinates':np.array([2.079000,2.079000,-2.079000])
}
),
(2, {'element':'Cl',
'special_flag':'Cl_kuratowski',
'cartesian_coordinates':np.array([3.295000,3.295000,-3.295000])
}
),
(3, {'element':'Zn',
'special_flag':'Zn_tet',
'cartesian_coordinates':np.array([-2.079000,2.079000,2.079000])
}
),
(4, {'element':'Cl',
'special_flag':'Cl_kuratowski',
'cartesian_coordinates':np.array([-3.295000,3.295000,3.295000])
}
),
(5, {'element':'Zn',
'special_flag':'Zn_tet',
'cartesian_coordinates':np.array([2.079000,-2.079000,2.079000])
}
),
(6, {'element':'Cl',
'special_flag':'Cl_kuratowski',
'cartesian_coordinates':np.array([3.295000,-3.295000,3.295000])
}
),
(7, {'element':'Zn',
'special_flag':'Zn_tet',
'cartesian_coordinates':np.array([-2.079000,-2.079000,-2.079000])
}
),
(8, {'element':'Cl',
'special_flag':'Cl_kuratowski',
'cartesian_coordinates':np.array([-3.295000,-3.295000,-3.295000])
}
),
(9, {'element':'Zn',
'special_flag':'Zn_oct',
'cartesian_coordinates':np.array([-0.000000,-0.000000,-0.000000])
}
),
(10, {'element':'N',
'special_flag':'N_tet',
'cartesian_coordinates':np.array([2.946000,0.770000,-0.770000])
}
),
(11, {'element':'C',
'special_flag':'C_kuratowski',
'cartesian_coordinates':np.array([4.261000,-0.493000,0.493000])
}
),
(12, {'element':'N',
'special_flag':'N_tet',
'cartesian_coordinates':np.array([-0.770000,2.946000,0.770000])
}
),
(13, {'element':'C',
'special_flag':'C_kuratowski',
'cartesian_coordinates':np.array([0.493000,4.261000,-0.493000])
}
),
(14, {'element':'N',
'special_flag':'N_tet',
'cartesian_coordinates':np.array([0.770000,-0.770000,2.946000])
}
),
(15, {'element':'C',
'special_flag':'C_kuratowski',
'cartesian_coordinates':np.array([-0.493000,0.493000,4.261000])
}
),
(16, {'element':'N',
'special_flag':'N_tet',
'cartesian_coordinates':np.array([0.770000,2.946000,-0.770000])
}
),
(17, {'element':'C',
'special_flag':'C_kuratowski',
'cartesian_coordinates':np.array([-0.493000,4.261000,0.493000])
}
),
(18, {'element':'N',
'special_flag':'N_tet',
'cartesian_coordinates':np.array([2.946000,-0.770000,0.770000])
}
),
(19, {'element':'C',
'special_flag':'C_kuratowski',
'cartesian_coordinates':np.array([4.261000,0.493000,-0.493000])
}
),
(20, {'element':'N',
'special_flag':'N_tet',
'cartesian_coordinates':np.array([-0.770000,0.770000,2.946000])
}
),
(21, {'element':'C',
'special_flag':'C_kuratowski',
'cartesian_coordinates':np.array([0.493000,-0.493000,4.261000])
}
),
(22, {'element':'N',
'special_flag':'N_tet',
'cartesian_coordinates':np.array([-0.770000,-2.946000,-0.770000])
}
),
(23, {'element':'C',
'special_flag':'C_kuratowski',
'cartesian_coordinates':np.array([0.493000,-4.261000,0.493000])
}
),
(24, {'element':'N',
'special_flag':'N_tet',
'cartesian_coordinates':np.array([0.770000,0.770000,-2.946000])
}
),
(25, {'element':'C',
'special_flag':'C_kuratowski',
'cartesian_coordinates':np.array([-0.493000,-0.493000,-4.261000])
}
),
(26, {'element':'N',
'special_flag':'N_tet',
'cartesian_coordinates':np.array([0.770000,-2.946000,0.770000])
}
),
(27, {'element':'C',
'special_flag':'C_kuratowski',
'cartesian_coordinates':np.array([-0.493000,-4.261000,-0.493000])
}
),
(28, {'element':'N',
'special_flag':'N_tet',
'cartesian_coordinates':np.array([-0.770000,-0.770000,-2.946000])
}
),
(29, {'element':'C',
'special_flag':'C_kuratowski',
'cartesian_coordinates':np.array([0.493000,0.493000,-4.261000])
}
),
(30, {'element':'N',
'special_flag':'N_tet',
'cartesian_coordinates':np.array([-2.946000,0.770000,0.770000])
}
),
(31, {'element':'C',
'special_flag':'C_kuratowski',
'cartesian_coordinates':np.array([-4.261000,-0.493000,-0.493000])
}
),
(32, {'element':'N',
'special_flag':'N_tet',
'cartesian_coordinates':np.array([-2.946000,-0.770000,-0.770000])
}
),
(33, {'element':'C',
'special_flag':'C_kuratowski',
'cartesian_coordinates':np.array([-4.261000,0.493000,0.493000])
}
),
(34, {'element':'N',
'special_flag':'N_oct',
'cartesian_coordinates':np.array([2.211000,-0.000000,-0.000000])
}
),
(35, {'element':'N',
'special_flag':'N_oct',
'cartesian_coordinates':np.array([-0.000000,2.211000,-0.000000])
}
),
(36, {'element':'N',
'special_flag':'N_oct',
'cartesian_coordinates':np.array([-0.000000,-0.000000,2.211000])
}
),
(37, {'element':'N',
'special_flag':'N_oct',
'cartesian_coordinates':np.array([-0.000000,-2.211000,-0.000000])
}
),
(38, {'element':'N',
'special_flag':'N_oct',
'cartesian_coordinates':np.array([-0.000000,-0.000000,-2.211000])
}
),
(39, {'element':'N',
'special_flag':'N_oct',
'cartesian_coordinates':np.array([-2.211000,-0.000000,-0.000000])
}
)
])
InorganicCluster['Zr']['Zr_UiO'].add_nodes_from([
(1, {'element':'Zr',
'special_flag':'Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,-2.521000,0.000000])
}
),
(2, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([1.973000,-3.568000,0.000000])
}
),
(3, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-1.973000,-3.568000,0.000000])
}
),
(4, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,-2.012000,-3.529000])
}
),
(5, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,-2.012000,3.529000])
}
),
(6, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,-3.568000,-1.973000])
}
),
(7, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,-3.568000,1.973000])
}
),
(8, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-3.529000,-2.012000,0.000000])
}
),
(9, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([3.529000,-2.012000,0.000000])
}
),
(10, {'element':'O',
'special_flag':'O_h_Zr_UiO',
'cartesian_coordinates':np.array([1.161000,-1.200000,-1.161000])
}
),
(11, {'element':'O',
'special_flag':'O_h_Zr_UiO',
'cartesian_coordinates':np.array([-1.161000,-1.200000,1.161000])
}
),
(12, {'element':'O',
'special_flag':'O_z_Zr_UiO',
'cartesian_coordinates':np.array([1.161000,-1.200000,1.161000])
}
),
(13, {'element':'O',
'special_flag':'O_z_Zr_UiO',
'cartesian_coordinates':np.array([-1.161000,-1.200000,-1.161000])
}
),
(14, {'element':'C',
'special_flag':'C_Zr_UiO',
'cartesian_coordinates':np.array([-3.180000,-3.219000,0.000000])
}
),
(15, {'element':'C',
'special_flag':'C_Zr_UiO',
'cartesian_coordinates':np.array([3.180000,-3.219000,0.000000])
}
),
(16, {'element':'C',
'special_flag':'C_Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,-3.219000,3.180000])
}
),
(17, {'element':'C',
'special_flag':'C_Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,-3.219000,-3.180000])
}
),
(18, {'element':'Zr',
'special_flag':'Zr_UiO',
'cartesian_coordinates':np.array([2.482000,-0.039000,0.000000])
}
),
(19, {'element':'Zr',
'special_flag':'Zr_UiO',
'cartesian_coordinates':np.array([-2.482000,-0.039000,0.000000])
}
),
(20, {'element':'Zr',
'special_flag':'Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,2.443000,0.000000])
}
),
(21, {'element':'Zr',
'special_flag':'Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,-0.039000,2.482000])
}
),
(22, {'element':'Zr',
'special_flag':'Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,-0.039000,-2.482000])
}
),
(23, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([3.529000,-0.039000,1.973000])
}
),
(24, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-3.529000,-0.039000,1.973000])
}
),
(25, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-3.529000,-0.039000,-1.973000])
}
),
(26, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([3.529000,-0.039000,-1.973000])
}
),
(27, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([1.973000,3.490000,0.000000])
}
),
(28, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-1.973000,3.490000,0.000000])
}
),
(29, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,1.934000,3.529000])
}
),
(30, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,1.934000,-3.529000])
}
),
(31, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,3.490000,-1.973000])
}
),
(32, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,3.490000,1.973000])
}
),
(33, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([3.529000,1.934000,0.000000])
}
),
(34, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-3.529000,1.934000,0.000000])
}
),
(35, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([1.973000,-0.039000,-3.529000])
}
),
(36, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([1.973000,-0.039000,3.529000])
}
),
(37, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-1.973000,-0.039000,3.529000])
}
),
(38, {'element':'O',
'special_flag':'O_c_Zr_UiO',
'cartesian_coordinates':np.array([-1.973000,-0.039000,-3.529000])
}
),
(39, {'element':'O',
'special_flag':'O_h_Zr_UiO',
'cartesian_coordinates':np.array([-1.161000,1.122000,-1.161000])
}
),
(40, {'element':'O',
'special_flag':'O_h_Zr_UiO',
'cartesian_coordinates':np.array([1.161000,1.122000,1.161000])
}
),
(41, {'element':'O',
'special_flag':'O_z_Zr_UiO',
'cartesian_coordinates':np.array([-1.161000,1.122000,1.161000])
}
),
(42, {'element':'O',
'special_flag':'O_z_Zr_UiO',
'cartesian_coordinates':np.array([1.161000,1.122000,-1.161000])
}
),
(43, {'element':'C',
'special_flag':'C_Zr_UiO',
'cartesian_coordinates':np.array([3.180000,-0.039000,-3.180000])
}
),
(44, {'element':'C',
'special_flag':'C_Zr_UiO',
'cartesian_coordinates':np.array([-3.180000,-0.039000,-3.180000])
}
),
(45, {'element':'C',
'special_flag':'C_Zr_UiO',
'cartesian_coordinates':np.array([-3.180000,-0.039000,3.180000])
}
),
(46, {'element':'C',
'special_flag':'C_Zr_UiO',
'cartesian_coordinates':np.array([3.180000,-0.039000,3.180000])
}
),
(47, {'element':'C',
'special_flag':'C_Zr_UiO',
'cartesian_coordinates':np.array([-3.180000,3.141000,0.000000])
}
),
(48, {'element':'C',
'special_flag':'C_Zr_UiO',
'cartesian_coordinates':np.array([3.180000,3.141000,0.000000])
}
),
(49, {'element':'C',
'special_flag':'C_Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,3.141000,-3.180000])
}
),
(50, {'element':'C',
'special_flag':'C_Zr_UiO',
'cartesian_coordinates':np.array([-0.000000,3.141000,3.180000])
}
),
(51, {'element':'H',
'special_flag':'H_o_Zr_UiO',
'cartesian_coordinates':np.array([1.881000,1.801000,1.666000])
}
),
(52, {'element':'H',
'special_flag':'H_o_Zr_UiO',
'cartesian_coordinates':np.array([-1.832000,-1.884000,1.722000])
}
),
(53, {'element':'H',
'special_flag':'H_o_Zr_UiO',
'cartesian_coordinates':np.array([-1.838000,1.795000,-1.728000])
}
),
(54, {'element':'H',
'special_flag':'H_o_Zr_UiO',
'cartesian_coordinates':np.array([1.871000,-1.866000,-1.695000])
}
)
])
InorganicCluster['Cr']['Cr_tri'].add_nodes_from([
(1, {'element':'O',
'special_flag':'O',
'cartesian_coordinates':np.array([2.267000,-1.345000,1.482000])
}
),
(2, {'element':'O',
'special_flag':'O',
'cartesian_coordinates':np.array([-0.321000,-2.272000,1.374000])
}
),
(3, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([-1.353000,-2.006000,2.059000])
}
),
(4, {'element':'O',
'special_flag':'O',
'cartesian_coordinates':np.array([-2.299000,-1.290000,1.482000])
}
),
(5, {'element':'O',
'special_flag':'O',
'cartesian_coordinates':np.array([-1.808000,1.414000,1.374000])
}
),
(6, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([-1.061000,2.175000,2.059000])
}
),
(7, {'element':'O',
'special_flag':'O',
'cartesian_coordinates':np.array([0.032000,2.636000,1.482000])
}
),
(8, {'element':'O',
'special_flag':'O',
'cartesian_coordinates':np.array([2.128000,0.859000,1.374000])
}
),
(9, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([2.414000,-0.169000,2.059000])
}
),
(10, {'element':'O',
'special_flag':'O',
'cartesian_coordinates':np.array([2.267000,-1.345000,-1.477000])
}
),
(11, {'element':'O',
'special_flag':'O',
'cartesian_coordinates':np.array([-0.321000,-2.272000,-1.369000])
}
),
(12, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([-1.353000,-2.006000,-2.054000])
}
),
(13, {'element':'O',
'special_flag':'O',
'cartesian_coordinates':np.array([-2.299000,-1.290000,-1.477000])
}
),
(14, {'element':'O',
'special_flag':'O',
'cartesian_coordinates':np.array([-1.808000,1.414000,-1.369000])
}
),
(15, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([-1.061000,2.175000,-2.054000])
}
),
(16, {'element':'O',
'special_flag':'O',
'cartesian_coordinates':np.array([0.032000,2.636000,-1.477000])
}
),
(17, {'element':'O',
'special_flag':'O',
'cartesian_coordinates':np.array([2.128000,0.859000,-1.369000])
}
),
(18, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([2.414000,-0.169000,-2.054000])
}
),
(19, {'element':'Cr',
'special_flag':'Cr_tri',
'cartesian_coordinates':np.array([0.918000,-1.740000,0.002000])
}
),
(20, {'element':'Cr',
'special_flag':'Cr_tri',
'cartesian_coordinates':np.array([-1.966000,0.075000,0.002000])
}
),
(21, {'element':'Cr',
'special_flag':'Cr_tri',
'cartesian_coordinates':np.array([1.048000,1.665000,0.002000])
}
),
(22, {'element':'O',
'special_flag':'O_z_Cr_tri',
'cartesian_coordinates':np.array([0.000000,0.000000,0.002000])
}
)
])
InorganicCluster['V']['V_pillar'].add_nodes_from([
(1, {'element':'O',
'special_flag':'O_c_V_pillar',
'cartesian_coordinates':np.array([-3.335000,1.411000,1.192000])
}
),
(2, {'element':'O',
'special_flag':'O_c_V_pillar',
'cartesian_coordinates':np.array([-1.088000,-1.401000,1.345000])
}
),
(3, {'element':'O',
'special_flag':'O_c_V_pillar',
'cartesian_coordinates':np.array([0.073000,-1.411000,-1.136000])
}
),
(4, {'element':'C',
'special_flag':'C_V_pillar',
'cartesian_coordinates':np.array([-2.221000,-1.831000,1.655000])
}
),
(5, {'element':'O',
'special_flag':'O_c_V_pillar',
'cartesian_coordinates':np.array([-1.088000,1.401000,1.345000])
}
),
(6, {'element':'O',
'special_flag':'O_c_V_pillar',
'cartesian_coordinates':np.array([0.073000,1.411000,-1.136000])
}
),
(7, {'element':'C',
'special_flag':'C_V_pillar',
'cartesian_coordinates':np.array([-2.221000,1.831000,1.655000])
}
),
(8, {'element':'O',
'special_flag':'O_c_V_pillar',
'cartesian_coordinates':np.array([-3.335000,-1.411000,1.192000])
}
),
(9, {'element':'O',
'special_flag':'O_z_V_pillar',
'cartesian_coordinates':np.array([-2.201000,0.000000,-0.786000])
}
),
(10, {'element':'V',
'special_flag':'V_pillar',
'cartesian_coordinates':np.array([-0.327000,0.000000,0.179000])
}
),
(11, {'element':'O',
'special_flag':'O_c_V_pillar',
'cartesian_coordinates':np.array([2.321000,1.401000,-1.289000])
}
),
(12, {'element':'C',
'special_flag':'C_V_pillar',
'cartesian_coordinates':np.array([1.187000,1.831000,-1.599000])
}
),
(13, {'element':'O',
'special_flag':'O_c_V_pillar',
'cartesian_coordinates':np.array([2.321000,-1.401000,-1.289000])
}
),
(14, {'element':'C',
'special_flag':'C_V_pillar',
'cartesian_coordinates':np.array([1.187000,-1.831000,-1.599000])
}
),
(15, {'element':'V',
'special_flag':'V_pillar',
'cartesian_coordinates':np.array([3.082000,0.000000,-0.123000])
}
),
(16, {'element':'O',
'special_flag':'O_z_V_pillar',
'cartesian_coordinates':np.array([1.208000,0.000000,0.842000])
}
)
])
InorganicCluster['Al']['Al_pillar'].add_nodes_from([
(1, {'element':'O',
'special_flag':'O_c_Al_pillar',
'cartesian_coordinates':np.array([-1.215000,1.107000,-0.732000])
}
),
(2, {'element':'O',
'special_flag':'O_c_Al_pillar',
'cartesian_coordinates':np.array([1.383000,-1.106000,-0.464000])
}
),
(3, {'element':'O',
'special_flag':'O_c_Al_pillar',
'cartesian_coordinates':np.array([1.383000,1.107000,-0.464000])
}
),
(4, {'element':'O',
'special_flag':'O_c_Al_pillar',
'cartesian_coordinates':np.array([-1.215000,-1.106000,-0.732000])
}
),
(5, {'element':'Al',
'special_flag':'Al_pillar',
'cartesian_coordinates':np.array([-0.102000,-1.657000,0.608000])
}
),
(6, {'element':'O',
'special_flag':'O_z_Al_pillar',
'cartesian_coordinates':np.array([-0.102000,0.000000,1.473000])
}
),
(7, {'element':'C',
'special_flag':'C_Al_pillar',
'cartesian_coordinates':np.array([2.005000,0.000000,-0.744000])
}
),
(8, {'element':'C',
'special_flag':'C_Al_pillar',
'cartesian_coordinates':np.array([-1.849000,0.000000,-0.976000])
}
),
(9, {'element':'H',
'special_flag':'H_Al_pillar',
'cartesian_coordinates':np.array([-0.121000,-0.071000,2.580000])
}
)#,
#(10, {'element':'Al',
# 'special_flag':'Al_pillar',
# 'cartesian_coordinates':np.array([-0.102000,1.658000,0.608000])
# }
# )
])
OrganicCluster['N']['Adenine'].add_nodes_from([
(1, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([-0.108000,-0.237000,0.527000])
}
),
(2, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([0.853000,-2.150000,0.700000])
}
),
(3, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([0.550000,-0.540000,-0.675000])
}
),
(4, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([-0.074000,1.419000,-1.600000])
}
),
(5, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([-0.796000,0.992000,0.603000])
}
),
(6, {'element':'H',
'special_flag':'Hd',
'cartesian_coordinates':np.array([-1.914000,2.348000,1.629000])
}
),
(7, {'element':'H',
'special_flag':'H',
'cartesian_coordinates':np.array([-1.599000,0.804000,2.476000])
}
),
(8, {'element':'H',
'special_flag':'H',
'cartesian_coordinates':np.array([1.193000,-3.098000,1.104000])
}
),
(9, {'element':'H',
'special_flag':'H',
'cartesian_coordinates':np.array([-0.080000,2.127000,-2.431000])
}
),
(10, {'element':'N',
'special_flag':'N',
'cartesian_coordinates':np.array([0.121000,-1.283000,1.403000])
}
),
(11, {'element':'N',
'special_flag':'N',
'cartesian_coordinates':np.array([1.133000,-1.761000,-0.560000])
}
),
(12, {'element':'N',
'special_flag':'N',
'cartesian_coordinates':np.array([0.617000,0.283000,-1.751000])
}
),
(13, {'element':'N',
'special_flag':'Na',
'cartesian_coordinates':np.array([-0.763000,1.773000,-0.514000])
}
),
(14, {'element':'N',
'special_flag':'Nd',
'cartesian_coordinates':np.array([-1.424000,1.447000,1.691000])
}
)
])
OrganicCluster['N']['Thymine'].add_nodes_from([
(1, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([13.966000,16.972000,12.145000])
}
),
(2, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([12.549000,18.380000,13.950000])
}
),
(3, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([11.714000,19.119000,14.888000])
}
),
(4, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([13.016000,17.103000,14.220000])
}
),
(5, {'element':'N',
'special_flag':'Ndw',
'cartesian_coordinates':np.array([13.714000,16.442000,13.316000])
}
),
(6, {'element':'O',
'special_flag':'Oa2',
'cartesian_coordinates':np.array([14.542000,16.323000,11.289000])
}
),
(7, {'element':'O',
'special_flag':'Oaw',
'cartesian_coordinates':np.array([12.755000,16.528000,15.269000])
}
),
(8, {'element':'H',
'special_flag':'H',
'cartesian_coordinates':np.array([10.864000,18.500000,15.184000])
}
),
(9, {'element':'H',
'special_flag':'Hdw',
'cartesian_coordinates':np.array([14.003000,15.581000,13.493000])
}
),
(10, {'element':'C',
'special_flag':'C',
'cartesian_coordinates':np.array([12.877000,18.890000,12.738000])
}
),
(11, {'element':'N',
'special_flag':'Nd2',
'cartesian_coordinates':np.array([13.557000,18.186000,11.867000])
}
),
(12, {'element':'H',
'special_flag':'H',
'cartesian_coordinates':np.array([12.293000,19.381000,15.776000])
}
),
(13, {'element':'H',
'special_flag':'H',
'cartesian_coordinates':np.array([11.316000,20.039000,14.453000])
}
),
(14, {'element':'H',
'special_flag':'H',
'cartesian_coordinates':np.array([12.585000,19.801000,12.470000])
}
),
(15, {'element':'H',
'special_flag':'Hd2',
'cartesian_coordinates':np.array([13.727000,18.544000,11.021000])
}
)
])
OrganicCluster['N']['CarboxylateImidazolate'].add_nodes_from([
(1, {'element':'C',
'special_flag':'C13',
'cartesian_coordinates':np.array([-0.325000,-0.797000,0.755000])
}
),
(2, {'element':'N',
'special_flag':'N20',
'cartesian_coordinates':np.array([-0.712000,0.499000,0.760000])
}
),
(3, {'element':'C',
'special_flag':'N20',
'cartesian_coordinates':np.array([-0.133000,1.108000,-0.263000])
}
),
(4, {'element':'C',
'special_flag':'C13',
'cartesian_coordinates':np.array([0.616000,0.148000,-0.885000])
}
),
(5, {'element':'N',
'special_flag':'N20',
'cartesian_coordinates':np.array([0.512000,-1.071000,-0.265000])
}
),
(6, {'element':'H',
'special_flag':'8H13',
'cartesian_coordinates':np.array([1.218000,0.325000,-1.764000])
}
),
(7, {'element':'H',
'special_flag':'H',
'cartesian_coordinates':np.array([-0.314000,2.158000,-0.439000])
}
),
(8, {'element':'C',
'special_flag':'C1',
'cartesian_coordinates':np.array([-0.843000,-1.760000,1.840000])
}
),
(9, {'element':'O',
'special_flag':'O2',
'cartesian_coordinates':np.array([-0.453000,-3.062000,1.835000])
}
),
(10, {'element':'O',
'special_flag':'O3',
'cartesian_coordinates':np.array([-1.690000,-1.307000,2.803000])
}
)
])
# Note: the special_flags for the organic linkers below are designed to be compatible
# with the Dubbeldam force field; changing these values will break the force-field
# assignment whenever the Dubbeldam FF is requested.
OrganicCluster['C']['Benzene-2C'].add_nodes_from([
(1, {'element':'C',
'special_flag':'Cb',
'cartesian_coordinates':np.array([-0.000000,-0.000000,-1.401000])
}
),
(2, {'element':'C',
'special_flag':'Cb',
'cartesian_coordinates':np.array([-0.000000,-0.000000,1.399000])
}
),
(3, {'element':'C',
'special_flag':'Cc',
'cartesian_coordinates':np.array([-0.858000,0.858000,-0.700000])
}
),
(4, {'element':'H',
'special_flag':'Ha',
'cartesian_coordinates':np.array([-1.519000,1.519000,-1.239000])
}
),
(5, {'element':'C',
'special_flag':'Cc',
'cartesian_coordinates':np.array([-0.857000,0.857000,0.700000])
}
),
(6, {'element':'H',
'special_flag':'Ha',
'cartesian_coordinates':np.array([-1.519000,1.519000,1.241000])
}
),
(7, {'element':'C',
'special_flag':'Cc',
'cartesian_coordinates':np.array([0.858000,-0.858000,-0.700000])
}
),
(8, {'element':'H',
'special_flag':'Ha',
'cartesian_coordinates':np.array([1.519000,-1.519000,-1.239000])
}
),
(9, {'element':'C',
'special_flag':'Cc',
'cartesian_coordinates':np.array([0.857000,-0.857000,0.700000])
}
),
(10, {'element':'H',
'special_flag':'Ha',
'cartesian_coordinates':np.array([1.519000,-1.519000,1.241000])
}
)
])
OrganicCluster['C']['Biphenyl-2C'].add_nodes_from([
(1, {'element':'C',
'special_flag':'Cb',
'cartesian_coordinates':np.array([0.000000,0.000000,-3.571000])
}
),
(2, {'element':'C',
'special_flag':'Ce',
'cartesian_coordinates':np.array([0.000000,0.000000,-0.771000])
}
),
(3, {'element':'C',
'special_flag':'Cb',
'cartesian_coordinates':np.array([0.000000,0.000000,3.569000])
}
),
(4, {'element':'C',
'special_flag':'Ce',
'cartesian_coordinates':np.array([0.000000,0.000000,0.771000])
}
),
(5, {'element':'H',
'special_flag':'Hb',
'cartesian_coordinates':np.array([1.519000,-1.519000,0.928000])
}
),
(6, {'element':'C',
'special_flag':'Cd',
'cartesian_coordinates':np.array([0.858000,-0.858000,1.469000])
}
),
(7, {'element':'C',
'special_flag':'Cc',
'cartesian_coordinates':np.array([-0.858000,0.858000,-2.870000])
}
),
(8, {'element':'H',
'special_flag':'Ha',
'cartesian_coordinates':np.array([-1.519000,1.519000,-3.409000])
}
),
(9, {'element':'C',
'special_flag':'Cd',
'cartesian_coordinates':np.array([-0.857000,0.857000,-1.470000])
}
),
(10, {'element':'H',
'special_flag':'Hb',
'cartesian_coordinates':np.array([-1.519000,1.519000,-0.929000])
}
),
(11, {'element':'H',
'special_flag':'Ha',
'cartesian_coordinates':np.array([-1.519000,1.519000,3.412000])
}
),
(12, {'element':'C',
'special_flag':'Cc',
'cartesian_coordinates':np.array([-0.858000,0.858000,2.872000])
}
),
(13, {'element':'H',
'special_flag':'Ha',
'cartesian_coordinates':np.array([1.519000,-1.519000,3.412000])
}
),
(14, {'element':'C',
'special_flag':'Cc',
'cartesian_coordinates':np.array([0.858000,-0.858000,2.872000])
}
),
(15, {'element':'H',
'special_flag':'Hb',
'cartesian_coordinates':np.array([-1.519000,1.519000,0.928000])
}
),
(16, {'element':'C',
'special_flag':'Cd',
'cartesian_coordinates':np.array([-0.858000,0.858000,1.469000])
}
),
(17, {'element':'C',
'special_flag':'Cc',
'cartesian_coordinates':np.array([0.858000,-0.858000,-2.870000])
}
),
(18, {'element':'H',
'special_flag':'Ha',
'cartesian_coordinates':np.array([1.519000,-1.519000,-3.409000])
}
),
(19, {'element':'C',
'special_flag':'Cd',
'cartesian_coordinates':np.array([0.857000,-0.857000,-1.470000])
}
),
(20, {'element':'H',
'special_flag':'Hb',
'cartesian_coordinates':np.array([1.519000,-1.519000,-0.929000])
}
)
])
OrganicCluster['C']['Triphenyl-2C'].add_nodes_from([
(1, {'element':'C',
'special_flag':'Cb',
'cartesian_coordinates':np.array([-0.000000,-0.000000,-5.741000])
}
),
(2, {'element':'C',
'special_flag':'Ce',
'cartesian_coordinates':np.array([-0.000000,-0.000000,-2.941000])
}
),
(3, {'element':'C',
'special_flag':'Cf',
'cartesian_coordinates':np.array([-0.000000,-0.000000,1.399000])
}
),
(4, {'element':'C',
'special_flag':'Cb',
'cartesian_coordinates':np.array([-0.000000,-0.000000,5.741000])
}
),
(5, {'element':'C',
'special_flag':'Ce',
'cartesian_coordinates':np.array([-0.000000,-0.000000,2.941000])
}
),
(6, {'element':'C',
'special_flag':'Cf',
'cartesian_coordinates':np.array([-0.000000,-0.000000,-1.399000])
}
),
(7, {'element':'H',
'special_flag':'Hc',
'cartesian_coordinates':np.array([1.519000,-1.519000,-1.242000])
}
),
(8, {'element':'C',
'special_flag':'Cg',
'cartesian_coordinates':np.array([0.858000,-0.858000,-0.701000])
}
),
(9, {'element':'C',
'special_flag':'Cc',
'cartesian_coordinates':np.array([-0.858000,0.858000,-5.040000])
}
),
(10, {'element':'H',
'special_flag':'Ha',
'cartesian_coordinates':np.array([-1.519000,1.519000,-5.579000])
}
),
(11, {'element':'C',
'special_flag':'Cd',
'cartesian_coordinates':np.array([-0.857000,0.857000,-3.640000])
}
),
(12, {'element':'H',
'special_flag':'Hb',
'cartesian_coordinates':np.array([-1.519000,1.519000,-3.099000])
}
),
(13, {'element':'H',
'special_flag':'Hc',
'cartesian_coordinates':np.array([-1.519000,1.519000,1.242000])
}
),
(14, {'element':'C',
'special_flag':'Cg',
'cartesian_coordinates':np.array([-0.858000,0.858000,0.701000])
}
),
(15, {'element':'C',
'special_flag':'Cc',
'cartesian_coordinates':np.array([0.858000,-0.858000,5.040000])
}
),
(16, {'element':'H',
'special_flag':'Ha',
'cartesian_coordinates':np.array([1.519000,-1.519000,5.579000])
}
),
(17, {'element':'C',
'special_flag':'Cd',
'cartesian_coordinates':np.array([0.857000,-0.857000,3.640000])
}
),
(18, {'element':'H',
'special_flag':'Hb',
'cartesian_coordinates':np.array([1.519000,-1.519000,3.099000])
}
),
(19, {'element':'H',
'special_flag':'Hc',
'cartesian_coordinates':np.array([1.519000,-1.519000,1.242000])
}
),
(20, {'element':'C',
'special_flag':'Cg',
'cartesian_coordinates':np.array([0.858000,-0.858000,0.701000])
}
),
(21, {'element':'C',
'special_flag':'Cc',
'cartesian_coordinates':np.array([-0.858000,0.858000,5.040000])
}
),
(22, {'element':'H',
'special_flag':'Ha',
'cartesian_coordinates':np.array([-1.519000,1.519000,5.579000])
}
),
(23, {'element':'C',
'special_flag':'Cd',
'cartesian_coordinates':np.array([-0.857000,0.857000,3.640000])
}
),
(24, {'element':'H',
'special_flag':'Hb',
'cartesian_coordinates':np.array([-1.519000,1.519000,3.099000])
}
),
(25, {'element':'H',
'special_flag':'Hc',
'cartesian_coordinates':np.array([-1.519000,1.519000,-1.242000])
}
),
(26, {'element':'C',
'special_flag':'Cg',
'cartesian_coordinates':np.array([-0.858000,0.858000,-0.701000])
}
),
(27, {'element':'C',
'special_flag':'Cc',
'cartesian_coordinates':np.array([0.858000,-0.858000,-5.040000])
}
),
(28, {'element':'H',
'special_flag':'Ha',
'cartesian_coordinates':np.array([1.519000,-1.519000,-5.579000])
}
),
(29, {'element':'C',
'special_flag':'Cd',
'cartesian_coordinates':np.array([0.857000,-0.857000,-3.640000])
}
),
(30, {'element':'H',
'special_flag':'Hb',
'cartesian_coordinates':np.array([1.519000,-1.519000,-3.099000])
}
)
])
# compute the distance matrix
add_distance_matrix(InorganicCluster['Cu']['Cu Paddlewheel'])
add_distance_matrix(InorganicCluster['Zn']['Zn Paddlewheel'])
add_distance_matrix(InorganicCluster['Zn']['Zn4O'])
add_distance_matrix(InorganicCluster['Zn']['Kuratowski'])
add_distance_matrix(InorganicCluster['Zr']['Zr_UiO'])
add_distance_matrix(InorganicCluster['Cr']['Cr_tri'])
add_distance_matrix(InorganicCluster['V']['V_pillar'])
add_distance_matrix(InorganicCluster['Al']['Al_pillar'])
add_distance_matrix(OrganicCluster['N']['Adenine'])
add_distance_matrix(OrganicCluster['N']['Thymine'])
add_distance_matrix(OrganicCluster['N']['CarboxylateImidazolate'])
add_distance_matrix(OrganicCluster['C']['Benzene-2C'])
add_distance_matrix(OrganicCluster['C']['Biphenyl-2C'])
add_distance_matrix(OrganicCluster['C']['Triphenyl-2C'])
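# A minimal usage sketch, run only when this module is executed directly; the
# cluster-matching code that consumes these templates elsewhere in the package is
# not shown.  It just illustrates how the data above is laid out.
if __name__ == '__main__':
    pdw = InorganicCluster['Cu']['Cu Paddlewheel']
    # each node carries an element, a special_flag label and cartesian coordinates
    for node, data in sorted(pdw.nodes(data=True)):
        print("%s %s %s" % (node, data['element'], data['special_flag']))
    # distance_matrix[i][j] is the Euclidean distance between the i-th and j-th
    # nodes in sorted node order, as built by add_distance_matrix() above
    print(pdw.distance_matrix.shape)
    print(pdw.distance_matrix.max())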
|
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os, types, re
from twisted.python import runtime
from twisted.internet import reactor
from buildbot.process.buildstep import BuildStep
from buildbot.process.buildstep import SUCCESS, FAILURE
from twisted.internet import error
from twisted.internet.protocol import ProcessProtocol
import pprint
class MasterShellCommand(BuildStep):
"""
Run a shell command locally - on the buildmaster. The shell command
COMMAND is specified just as for a RemoteShellCommand. Note that extra
logfiles are not supported.
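
    A sketch of typical master.cfg usage (the factory wiring, step name and the
    rsync command below are illustrative placeholders, not part of this module)::

        from buildbot.process.factory import BuildFactory
        from buildbot.steps.master import MasterShellCommand

        f = BuildFactory()
        f.addStep(MasterShellCommand(
            name="upload-docs",
            command=["rsync", "-av", "docs/", "/var/www/docs/"],
            env={'PATH': ["/usr/local/bin", "${PATH}"]}))

    Environment values given as lists are joined with os.pathsep, and ${NAME}
    references are expanded from the master's os.environ before the process is
    spawned.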
"""
name='MasterShellCommand'
description='Running'
descriptionDone='Ran'
descriptionSuffix = None
renderables = [ 'command', 'env', 'description', 'descriptionDone', 'descriptionSuffix' ]
haltOnFailure = True
flunkOnFailure = True
def __init__(self, command,
description=None, descriptionDone=None, descriptionSuffix=None,
env=None, path=None, usePTY=0, interruptSignal="KILL",
**kwargs):
BuildStep.__init__(self, **kwargs)
self.command=command
if description:
self.description = description
if isinstance(self.description, str):
self.description = [self.description]
if descriptionDone:
self.descriptionDone = descriptionDone
if isinstance(self.descriptionDone, str):
self.descriptionDone = [self.descriptionDone]
if descriptionSuffix:
self.descriptionSuffix = descriptionSuffix
if isinstance(self.descriptionSuffix, str):
self.descriptionSuffix = [self.descriptionSuffix]
self.env=env
self.path=path
self.usePTY=usePTY
self.interruptSignal = interruptSignal
class LocalPP(ProcessProtocol):
def __init__(self, step):
self.step = step
def outReceived(self, data):
self.step.stdio_log.addStdout(data)
def errReceived(self, data):
self.step.stdio_log.addStderr(data)
def processEnded(self, status_object):
if status_object.value.exitCode is not None:
self.step.stdio_log.addHeader("exit status %d\n" % status_object.value.exitCode)
if status_object.value.signal is not None:
self.step.stdio_log.addHeader("signal %s\n" % status_object.value.signal)
self.step.processEnded(status_object)
def start(self):
# render properties
command = self.command
# set up argv
if type(command) in types.StringTypes:
if runtime.platformType == 'win32':
argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
if '/c' not in argv: argv += ['/c']
argv += [command]
else:
# for posix, use /bin/sh. for other non-posix, well, doesn't
# hurt to try
argv = ['/bin/sh', '-c', command]
else:
if runtime.platformType == 'win32':
argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
if '/c' not in argv: argv += ['/c']
argv += list(command)
else:
argv = command
self.stdio_log = stdio_log = self.addLog("stdio")
if type(command) in types.StringTypes:
stdio_log.addHeader(command.strip() + "\n\n")
else:
stdio_log.addHeader(" ".join(command) + "\n\n")
stdio_log.addHeader("** RUNNING ON BUILDMASTER **\n")
stdio_log.addHeader(" in dir %s\n" % os.getcwd())
stdio_log.addHeader(" argv: %s\n" % (argv,))
self.step_status.setText(self.describe())
if self.env is None:
env = os.environ
else:
assert isinstance(self.env, dict)
env = self.env
for key, v in self.env.iteritems():
if isinstance(v, list):
# Need to do os.pathsep translation. We could either do that
# by replacing all incoming ':'s with os.pathsep, or by
# accepting lists. I like lists better.
# If it's not a string, treat it as a sequence to be
# turned in to a string.
self.env[key] = os.pathsep.join(self.env[key])
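                    # e.g. env={'PATH': ['/usr/local/bin', '${PATH}']} becomes
                    # 'PATH': '/usr/local/bin:${PATH}' here (on posix, where
                    # os.pathsep is ':'); ${PATH} itself is expanded from
                    # os.environ by the substitution pass just below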
# do substitution on variable values matching pattern: ${name}
        p = re.compile(r'\${([0-9a-zA-Z_]*)}')
def subst(match):
return os.environ.get(match.group(1), "")
newenv = {}
for key, v in env.iteritems():
if v is not None:
if not isinstance(v, basestring):
raise RuntimeError("'env' values must be strings or "
"lists; key '%s' is incorrect" % (key,))
newenv[key] = p.sub(subst, env[key])
env = newenv
stdio_log.addHeader(" env: %r\n" % (env,))
# TODO add a timeout?
self.process = reactor.spawnProcess(self.LocalPP(self), argv[0], argv,
path=self.path, usePTY=self.usePTY, env=env )
# (the LocalPP object will call processEnded for us)
def processEnded(self, status_object):
if status_object.value.signal is not None:
self.descriptionDone = ["killed (%s)" % status_object.value.signal]
self.step_status.setText(self.describe(done=True))
self.finished(FAILURE)
elif status_object.value.exitCode != 0:
self.descriptionDone = ["failed (%d)" % status_object.value.exitCode]
self.step_status.setText(self.describe(done=True))
self.finished(FAILURE)
else:
self.step_status.setText(self.describe(done=True))
self.finished(SUCCESS)
def describe(self, done=False):
desc = self.descriptionDone if done else self.description
if self.descriptionSuffix:
desc = desc[:]
desc.extend(self.descriptionSuffix)
return desc
def interrupt(self, reason):
try:
self.process.signalProcess(self.interruptSignal)
except KeyError: # Process not started yet
pass
except error.ProcessExitedAlready:
pass
BuildStep.interrupt(self, reason)
class SetProperty(BuildStep):
name='SetProperty'
description=['Setting']
descriptionDone=['Set']
renderables = [ 'value' ]
def __init__(self, property, value, **kwargs):
BuildStep.__init__(self, **kwargs)
self.property = property
self.value = value
def start(self):
properties = self.build.getProperties()
properties.setProperty(self.property, self.value, self.name, runtime=True)
self.step_status.setText(self.describe(done=True))
self.finished(SUCCESS)
class LogRenderable(BuildStep):
name='LogRenderable'
description=['Logging']
descriptionDone=['Logged']
renderables = ['content']
def __init__(self, content, **kwargs):
BuildStep.__init__(self, **kwargs)
self.content = content
def start(self):
content = pprint.pformat(self.content)
self.addCompleteLog(name='Output', text=content)
self.step_status.setText(self.describe(done=True))
self.finished(SUCCESS)
|
|
# MIT License
# Copyright (c) [2017] [Zachary A. Eckstein]
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#Created by Zoro#
#Over-commenting in order to make a map for my future self
import pygame
from pygame.locals import *
import random
import time
pygame.init()
#load sprites
head_sprite = pygame.image.load('snekHead.png')
apple_image = pygame.image.load('apple.png')
body_segment = pygame.image.load('bodysegment.png')
tail_segment = pygame.image.load('tail.png')
#color definitions because this is what people do i guess
white = (255,255,255)
black = (0,0,0)
red = (255,0,0)
green = (0,255,0)
blue = (0,0,255)
pink = (255,192,203)
yellow = (255,255,0)
orange = (255,127,80)
#default settings
display_width = 1000
display_height = 800
play_field_width = 800
play_field_height = 800
scale = 40
font_size = 35
#The Game
class Main(object):
def __init__(self):
self.done = False
self.game_over = False
self.FPS = 10
self.start = True
self.color = white
self.difficulty = 0
#change window dressing
pygame.display.set_caption('Snek')
pygame.display.set_icon(apple_image)
#shorten clock reference
clock = pygame.time.Clock()
#make screen
screen_size = (display_width, display_height)
game_display = pygame.display.set_mode(screen_size)
#construct objects
my_snake = Snake()
food = Apple()
play_pen = Gameboard()
        #TODO: rewrite these message-to-screen helper functions so they make more sense
def text_object(text, color, size_of_font = font_size):
font = pygame.font.SysFont(None, size_of_font)
text_surface = font.render(text, True, color)
return text_surface, text_surface.get_rect()
        def message(msg, color, y_displace = 0, size_of_font = font_size):
            text_surface, text_rect = text_object(msg, color, size_of_font)
            text_rect.center = (play_field_width/2), ((play_field_height/2)+y_displace)
            game_display.blit(text_surface,text_rect)
#score tracker
def your_score(score, color, size_of_font = font_size):
font = pygame.font.SysFont(None, size_of_font)
screen_number = font.render("Score: " + str(score),True,color)
game_display.blit(screen_number, [play_field_width, 0])
#Main game loop
while not self.done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.done = True
#game over loop to restart game
while self.game_over == True:
game_display.fill(white)
play_pen.draw(game_display)
your_score(my_snake.score,black)
message("Game Over", red, -100, 150)
message("You scored " + str(my_snake.score) + "!", white, -30, 80)
message("Press C to continue or ESC to Quit.", white, 10, 50)
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.done = True
self.game_over = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.done = True
self.game_over = False
if event.key == pygame.K_c:
Main()
#opening menu screen
while self.start == True:
game_display.fill(white)
play_pen.draw(game_display)
your_score(my_snake.score,black)
message("Select Your Difficulty:",self.color, -100, 100)
message("Press 1 for Easy",green, -40, 60)
message("Press 2 for Medium",yellow, 00, 60)
message("Press 3 for Hard",orange, 40, 60)
message("Press 4 for VERY HARD",red, 80, 60)
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.done = True
self.game_over = False
self.start = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_1:
self.FPS = 3
self.start = False
if event.key == pygame.K_2:
self.FPS = 10
self.start = False
if event.key == pygame.K_3:
self.FPS = 15
self.start = False
if event.key == pygame.K_4:
self.FPS = 5
self.start = False
self.difficulty = 1
if event.key == pygame.K_ESCAPE:
self.done = True
self.game_over = False
self.start = False
#draw stuff to invisible place
game_display.fill(white)
play_pen.draw(game_display)
food.draw(game_display)
my_snake.update()
#wall crash detection
if my_snake.x >= play_field_width or my_snake.x < 0 or my_snake.y < 0 or my_snake.y >= play_field_height:
self.game_over = True
else:
my_snake.draw(game_display, my_snake.body)
#self crash
for bodypart in my_snake.body[:-1]:
if bodypart == my_snake.head:
self.game_over = True
#on eat: make new food location and grow snake longer
            if my_snake.x == food.x and my_snake.y == food.y:
                my_snake.grow()
                self.FPS += self.difficulty
                food.update()
                #keep re-rolling until the new apple does not land on the snake body
                while any(bodypart[0] == food.x and bodypart[1] == food.y for bodypart in my_snake.body):
                    food.update()
#Score tracker updater
your_score(my_snake.score,black)
#update screen from invisible place
pygame.display.flip()
#FPS
clock.tick(self.FPS)
#controls
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.done = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.done = True
self.game_over = False
self.start = False
if event.key == pygame.K_LEFT:
if my_snake.vx > 0 or my_snake.check_collision("Left") is True:
pass
else:
my_snake.vx = -scale
my_snake.vy = 0
my_snake.rotate = "Left"
elif event.key == pygame.K_RIGHT:
if my_snake.vx < 0 or my_snake.check_collision("Right") is True:
pass
else:
my_snake.vx = scale
my_snake.vy = 0
my_snake.rotate = "Right"
elif event.key == pygame.K_UP:
if my_snake.vy > 0 or my_snake.check_collision("Up") is True:
pass
else:
my_snake.vx = 0
my_snake.vy = -scale
my_snake.rotate = "Up"
elif event.key == pygame.K_DOWN:
if my_snake.vy < 0 or my_snake.check_collision("Down") is True:
pass
else:
my_snake.vx = 0
my_snake.vy = scale
my_snake.rotate = "Down"
quit()
class Snake(object):
def __init__(self):
self.x = 0
self.y = play_field_height/2-scale
self.vx = scale
self.vy = 0
self.size = (scale,scale)
self.color = (green)
self.body = []
self.length = 3
self.score = 0
self.rotate = "Right"
self.rotate_position = []
def update(self):
self.x += self.vx
self.y += self.vy
#create body list
self.head = []
self.head.append(self.x)
self.head.append(self.y)
self.body.append(self.head)
#create rotation list for head and tail sprites
self.rotate_position.append(self.rotate)
#keeps snake correct length
if len(self.body) > self.length:
del self.body[0]
del self.rotate_position[0]
def grow(self):
self.length += 1
self.score += 1
def draw(self, display, bodylist):
#rotate snake head to face correct direction
if self.vx == scale:
head = pygame.transform.rotate(head_sprite, 270)
elif self.vx == -scale:
head = pygame.transform.rotate(head_sprite, 90)
elif self.vy == scale:
head = pygame.transform.rotate(head_sprite, 180)
elif self.vy == -scale:
head = pygame.transform.rotate(head_sprite, 0)
#rotate tail
if self.rotate_position[0] == 'Right':
tail = pygame.transform.rotate(tail_segment, 270)
elif self.rotate_position[0] == 'Left':
tail = pygame.transform.rotate(tail_segment, 90)
elif self.rotate_position[0] == 'Down':
tail = pygame.transform.rotate(tail_segment, 180)
elif self.rotate_position[0] == 'Up':
tail = pygame.transform.rotate(tail_segment, 0)
#draw the head
display.blit(head, self.body[-1])
#draw the body parts
for bodypart in bodylist[1:-1]:
display.blit(body_segment, bodypart)
#draw the tail
display.blit(tail, self.body[0])
    #prevents a second direction change within one frame: e.g. while moving Right, pressing Up then Left
    #before the next move would otherwise send the head (body[-1]) straight back into body[-2]
def check_collision(self, direction):
if direction == "Right":
if self.body[-1][0] + scale == self.body[-2][0]:
return True
else:
return False
if direction == "Left":
if self.body[-1][0] - scale == self.body[-2][0]:
return True
else:
return False
if direction == "Up":
if self.body[-1][1] - scale == self.body[-2][1]:
return True
else:
return False
if direction == "Down":
if self.body[-1][1] + scale == self.body[-2][1]:
return True
else:
return False
class Apple(object):
def __init__(self):
self.x = round(random.randrange(0, play_field_width-scale)/scale)*scale
self.y = round(random.randrange(0, play_field_height-scale)/scale)*scale
self.size = (scale,scale)
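        # grid snapping: with scale == 40, randrange(0, 760) might give 437;
        # 437/40 is ~10.9 (Python 3 division), round() -> 11, 11*40 = 440, so the
        # apple always lands exactly on a cell the snake head can occupy
        # (update() below re-rolls with the same arithmetic)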
def update(self):
self.x = round(random.randrange(0, play_field_width-scale)/scale)*scale
self.y = round(random.randrange(0, play_field_height-scale)/scale)*scale
def draw(self, display):
display.blit(apple_image, (self.x, self.y))
class Gameboard(object):
def __init__(self):
self.x = 0
self.y = 0
self.color = black
self.size = (play_field_width,play_field_height)
def draw(self, display):
pygame.draw.rect(display, self.color, [(self.x, self.y), self.size])
Main()
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies and contributors
# For license information, please see license.txt
import frappe
from frappe import _
import json
from frappe.model.document import Document
from frappe.desk.doctype.notification_log.notification_log import enqueue_create_notification,\
get_title, get_title_html
from frappe.desk.doctype.notification_settings.notification_settings\
import is_email_notifications_enabled_for_type, is_email_notifications_enabled
from frappe.utils import cint, get_fullname, getdate, get_link_to_form
class EnergyPointLog(Document):
def validate(self):
self.map_milestone_reference()
if self.type in ['Appreciation', 'Criticism'] and self.user == self.owner:
frappe.throw(_('You cannot give review points to yourself'))
def map_milestone_reference(self):
# link energy point to the original reference, if set by milestone
if self.reference_doctype == 'Milestone':
self.reference_doctype, self.reference_name = frappe.db.get_value('Milestone', self.reference_name,
['reference_type', 'reference_name'])
def after_insert(self):
alert_dict = get_alert_dict(self)
if alert_dict:
frappe.publish_realtime('energy_point_alert', message=alert_dict, user=self.user)
frappe.cache().hdel('energy_points', self.user)
frappe.publish_realtime('update_points', after_commit=True)
if self.type != 'Review':
reference_user = self.user if self.type == 'Auto' else self.owner
notification_doc = {
'type': 'Energy Point',
'document_type': self.reference_doctype,
'document_name': self.reference_name,
'subject': get_notification_message(self),
'from_user': reference_user,
'email_content': '<div>{}</div>'.format(self.reason) if self.reason else None
}
enqueue_create_notification(self.user, notification_doc)
def on_trash(self):
if self.type == 'Revert':
reference_log = frappe.get_doc('Energy Point Log', self.revert_of)
reference_log.reverted = 0
reference_log.save()
@frappe.whitelist()
def revert(self, reason, ignore_permissions=False):
if not ignore_permissions:
frappe.only_for('System Manager')
if self.type != 'Auto':
frappe.throw(_('This document cannot be reverted'))
if self.get('reverted'):
return
self.reverted = 1
self.save(ignore_permissions=True)
revert_log = frappe.get_doc({
'doctype': 'Energy Point Log',
'points': -(self.points),
'type': 'Revert',
'user': self.user,
'reason': reason,
'reference_doctype': self.reference_doctype,
'reference_name': self.reference_name,
'revert_of': self.name
}).insert(ignore_permissions=True)
return revert_log
def get_notification_message(doc):
owner_name = get_fullname(doc.owner)
points = doc.points
title = get_title(doc.reference_doctype, doc.reference_name)
if doc.type == 'Auto':
owner_name = frappe.bold('You')
if points == 1:
message = _('{0} gained {1} point for {2} {3}')
else:
message = _('{0} gained {1} points for {2} {3}')
message = message.format(owner_name, frappe.bold(points), doc.rule, get_title_html(title))
elif doc.type == 'Appreciation':
if points == 1:
message = _('{0} appreciated your work on {1} with {2} point')
else:
message = _('{0} appreciated your work on {1} with {2} points')
message = message.format(frappe.bold(owner_name), get_title_html(title), frappe.bold(points))
elif doc.type == 'Criticism':
if points == 1:
message = _('{0} criticized your work on {1} with {2} point')
else:
message = _('{0} criticized your work on {1} with {2} points')
message = message.format(frappe.bold(owner_name), get_title_html(title), frappe.bold(points))
elif doc.type == 'Revert':
if points == 1:
message = _('{0} reverted your point on {1}')
else:
message = _('{0} reverted your points on {1}')
message = message.format(frappe.bold(owner_name), get_title_html(title))
return message
def get_alert_dict(doc):
alert_dict = frappe._dict()
owner_name = get_fullname(doc.owner)
if doc.reference_doctype:
doc_link = get_link_to_form(doc.reference_doctype, doc.reference_name)
points = doc.points
bold_points = frappe.bold(doc.points)
if doc.type == 'Auto':
if points == 1:
message = _('You gained {0} point')
else:
message = _('You gained {0} points')
alert_dict.message = message.format(bold_points)
alert_dict.indicator = 'green'
elif doc.type == 'Appreciation':
if points == 1:
message = _('{0} appreciated your work on {1} with {2} point')
else:
message = _('{0} appreciated your work on {1} with {2} points')
alert_dict.message = message.format(
owner_name,
doc_link,
bold_points
)
alert_dict.indicator = 'green'
elif doc.type == 'Criticism':
if points == 1:
message = _('{0} criticized your work on {1} with {2} point')
else:
message = _('{0} criticized your work on {1} with {2} points')
alert_dict.message = message.format(
owner_name,
doc_link,
bold_points
)
alert_dict.indicator = 'red'
elif doc.type == 'Revert':
if points == 1:
message = _('{0} reverted your point on {1}')
else:
message = _('{0} reverted your points on {1}')
alert_dict.message = message.format(
owner_name,
doc_link,
)
alert_dict.indicator = 'red'
return alert_dict
def create_energy_points_log(ref_doctype, ref_name, doc, apply_only_once=False):
doc = frappe._dict(doc)
log_exists = check_if_log_exists(ref_doctype,
ref_name, doc.rule, None if apply_only_once else doc.user)
if log_exists:
return
new_log = frappe.new_doc('Energy Point Log')
new_log.reference_doctype = ref_doctype
new_log.reference_name = ref_name
new_log.update(doc)
new_log.insert(ignore_permissions=True)
return new_log
def check_if_log_exists(ref_doctype, ref_name, rule, user=None):
    '''Checks if an Energy Point Log already exists for this reference, rule and (optionally) user'''
    filters = frappe._dict({
        'rule': rule,
        'reference_doctype': ref_doctype,
        'reference_name': ref_name,
        'reverted': 0
    })
    if user:
        filters.user = user
    return frappe.db.exists('Energy Point Log', filters)
def create_review_points_log(user, points, reason=None, doctype=None, docname=None):
return frappe.get_doc({
'doctype': 'Energy Point Log',
'points': points,
'type': 'Review',
'user': user,
'reason': reason,
'reference_doctype': doctype,
'reference_name': docname
}).insert(ignore_permissions=True)
@frappe.whitelist()
def add_review_points(user, points):
frappe.only_for('System Manager')
create_review_points_log(user, points)
@frappe.whitelist()
def get_energy_points(user):
# points = frappe.cache().hget('energy_points', user,
# lambda: get_user_energy_and_review_points(user))
# TODO: cache properly
points = get_user_energy_and_review_points(user)
return frappe._dict(points.get(user, {}))
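# A minimal sketch of the caching approach hinted at above, restoring the
# commented-out call (assumes the 'energy_points' cache key is invalidated
# whenever an Energy Point Log is inserted or reverted):
#   points = frappe.cache().hget('energy_points', user,
#       lambda: get_user_energy_and_review_points(user))
#   return frappe._dict(points.get(user, {}))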
@frappe.whitelist()
def get_user_energy_and_review_points(user=None, from_date=None, as_dict=True):
conditions = ''
given_points_condition = ''
values = frappe._dict()
if user:
conditions = 'WHERE `user` = %(user)s'
values.user = user
if from_date:
conditions += ' WHERE' if not conditions else ' AND'
given_points_condition += "AND `creation` >= %(from_date)s"
conditions += " `creation` >= %(from_date)s OR `type`='Review'"
values.from_date = from_date
points_list = frappe.db.sql("""
SELECT
SUM(CASE WHEN `type` != 'Review' THEN `points` ELSE 0 END) AS energy_points,
SUM(CASE WHEN `type` = 'Review' THEN `points` ELSE 0 END) AS review_points,
SUM(CASE
WHEN `type`='Review' AND `points` < 0 {given_points_condition}
THEN ABS(`points`)
ELSE 0
END) as given_points,
`user`
FROM `tabEnergy Point Log`
{conditions}
GROUP BY `user`
ORDER BY `energy_points` DESC
""".format(
conditions=conditions,
given_points_condition=given_points_condition
), values=values, as_dict=1)
if not as_dict:
return points_list
dict_to_return = frappe._dict()
for d in points_list:
dict_to_return[d.pop('user')] = d
return dict_to_return
@frappe.whitelist()
def review(doc, points, to_user, reason, review_type='Appreciation'):
current_review_points = get_energy_points(frappe.session.user).review_points
doc = doc.as_dict() if hasattr(doc, 'as_dict') else frappe._dict(json.loads(doc))
points = abs(cint(points))
if current_review_points < points:
frappe.msgprint(_('You do not have enough review points'))
return
review_doc = create_energy_points_log(doc.doctype, doc.name, {
'type': review_type,
'reason': reason,
'points': points if review_type == 'Appreciation' else -points,
'user': to_user
})
# deduct review points from reviewer
create_review_points_log(
user=frappe.session.user,
points=-points,
reason=reason,
doctype=review_doc.doctype,
docname=review_doc.name
)
return review_doc
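# Illustrative usage sketch; the doctype, name and e-mail below are placeholders:
#   review(frappe.get_doc('Task', 'TASK-0001'), points=2,
#          to_user='jane@example.com', reason='Great refactor')
# This credits jane@example.com with a 2-point 'Appreciation' log and deducts
# 2 review points from the session user via create_review_points_log.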
@frappe.whitelist()
def get_reviews(doctype, docname):
return frappe.get_all('Energy Point Log', filters={
'reference_doctype': doctype,
'reference_name': docname,
'type': ['in', ('Appreciation', 'Criticism')],
}, fields=['points', 'owner', 'type', 'user', 'reason', 'creation'])
def send_weekly_summary():
send_summary('Weekly')
def send_monthly_summary():
send_summary('Monthly')
def send_summary(timespan):
from frappe.utils.user import get_enabled_system_users
from frappe.social.doctype.energy_point_settings.energy_point_settings import is_energy_point_enabled
if not is_energy_point_enabled():
return
if not is_email_notifications_enabled_for_type(frappe.session.user, 'Energy Point'):
return
from_date = frappe.utils.add_to_date(None, weeks=-1)
if timespan == 'Monthly':
from_date = frappe.utils.add_to_date(None, months=-1)
user_points = get_user_energy_and_review_points(from_date=from_date, as_dict=False)
# do not send report if no activity found
if not user_points or not user_points[0].energy_points: return
from_date = getdate(from_date)
to_date = getdate()
# select only those users that have energy point email notifications enabled
all_users = [user.email for user in get_enabled_system_users() if
is_email_notifications_enabled_for_type(user.name, 'Energy Point')]
frappe.sendmail(
subject = '{} energy points summary'.format(timespan),
recipients = all_users,
template = "energy_points_summary",
args = {
'top_performer': user_points[0],
'top_reviewer': max(user_points, key=lambda x:x['given_points']),
'standings': user_points[:10], # top 10
'footer_message': get_footer_message(timespan).format(from_date, to_date),
},
with_container = 1
)
def get_footer_message(timespan):
if timespan == 'Monthly':
return _("Stats based on last month's performance (from {0} to {1})")
else:
return _("Stats based on last week's performance (from {0} to {1})")
|
|
import calendar
import time
from operator import itemgetter
import csv
import re
import boto.s3
from boto.s3.connection import S3Connection
import provider.filesystem as fslib
"""
EJP data provider
Connects to S3, discovers, downloads, and parses files exported by EJP
"""
class EJP(object):
def __init__(self, settings=None, tmp_dir=None):
self.settings = settings
self.tmp_dir = tmp_dir
# Default tmp_dir if not specified
self.tmp_dir_default = "ejp_provider"
# Default S3 bucket name
self.bucket_name = None
if self.settings is not None:
self.bucket_name = self.settings.ejp_bucket
# S3 connection
self.s3_conn = None
# Filesystem provider
self.fs = None
# Some EJP file types we expect
self.author_default_filename = "authors.csv"
self.editor_default_filename = "editors.csv"
def connect(self):
"""
Connect to S3 using the settings
"""
s3_conn = S3Connection(self.settings.aws_access_key_id, self.settings.aws_secret_access_key)
self.s3_conn = s3_conn
return self.s3_conn
def get_bucket(self, bucket_name=None):
"""
Using the S3 connection, lookup the bucket
"""
if self.s3_conn is None:
s3_conn = self.connect()
else:
s3_conn = self.s3_conn
if bucket_name is None:
# Use the object bucket_name if not provided
bucket_name = self.bucket_name
# Lookup the bucket
bucket = s3_conn.lookup(bucket_name)
return bucket
def get_s3key(self, s3_key_name, bucket=None):
"""
Get the S3 key from the bucket
If the bucket is not provided, use the object bucket
"""
if bucket is None:
bucket = self.get_bucket()
s3key = bucket.get_key(s3_key_name)
return s3key
def parse_author_file(self, document, filename=None):
"""
Given a filename to an author file, download
or copy it using the filesystem provider,
then parse it
"""
if self.fs is None:
self.fs = self.get_fs()
# Save the document to the tmp_dir
self.fs.write_document_to_tmp_dir(document, filename)
(column_headings, author_rows) = self.parse_author_data(self.fs.document)
return (column_headings, author_rows)
def parse_author_data(self, document):
"""
Given author data - CSV with header rows - parse
it and return an object representation
"""
column_headings = None
author_rows = []
f = self.fs.open_file_from_tmp_dir(document, mode='rb')
filereader = csv.reader(f)
for row in filereader:
# For now throw out header rows
if filereader.line_num <= 3:
pass
elif filereader.line_num == 4:
# Column headers
column_headings = row
else:
author_rows.append(row)
return (column_headings, author_rows)
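# The parser above assumes the EJP export layout: rows 1-3 are preamble and are
# discarded, row 4 holds the column headings, and later rows are author data.
# A hypothetical file (column names are placeholders) therefore looks like:
#   <query name>                 row 1  (skipped)
#   <run date>                   row 2  (skipped)
#   <blank>                      row 3  (skipped)
#   ms_no,author_seq,...         row 4  -> column_headings
#   3,1,...                      row 5+ -> author_rows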
def get_authors(self, doi_id=None, corresponding=None, document=None):
"""
Get a list of authors for an article
If doi_id is None, return all authors
If corresponding is
True, return corresponding authors
False, return all but corresponding authors
None, return all authors
If document is None, find the most recent authors file
"""
authors = []
# Check for the document
if document is None:
# No document? Find it on S3, save the content to
# the tmp_dir
if self.fs is None:
self.fs = self.get_fs()
s3_key_name = self.find_latest_s3_file_name(file_type="author")
s3_key = self.get_s3key(s3_key_name)
contents = s3_key.get_contents_as_string()
self.fs.write_content_to_document(contents, self.author_default_filename)
document = self.fs.get_document
# Parse the author file
filename = self.author_default_filename
(column_headings, author_rows) = self.parse_author_file(document, filename)
if author_rows:
for a in author_rows:
add = True
# Check doi_id column value
if doi_id is not None:
if int(doi_id) != int(a[0]):
add = False
# Check corresponding column value
if corresponding and add is True:
author_type_cde = a[4]
dual_corr_author_ind = a[5]
is_corr = self.is_corresponding_author(author_type_cde, dual_corr_author_ind)
if corresponding is True:
# If not a corresponding author, drop it
if is_corr is not True:
add = False
elif corresponding is False:
# If is a corresponding author, drop it
if is_corr is True:
add = False
# Finish up, add the author if we should
if add is True:
authors.append(a)
if len(authors) <= 0:
authors = None
return (column_headings, authors)
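# Illustrative usage sketch (the doi_id value is a placeholder):
#   ejp = EJP(settings)
#   headings, authors = ejp.get_authors(doi_id=3, corresponding=True)
# This downloads the latest authors CSV from S3 (no document supplied) and keeps
# only corresponding-author rows whose first column equals 3.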
def is_corresponding_author(self, author_type_cde, dual_corr_author_ind):
"""
Logic for checking whether an author row is for
a corresponding author. Can be either "Corresponding Author"
or "dual_corr_author_ind" column is 1
"""
is_corr = None
if author_type_cde == "Corresponding Author" or dual_corr_author_ind == "1":
is_corr = True
else:
is_corr = False
return is_corr
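# For example (author type values other than "Corresponding Author" are
# illustrative):
#   ejp.is_corresponding_author("Corresponding Author", "0")  # True
#   ejp.is_corresponding_author("Contributing Author", "1")   # True, dual corresponding
#   ejp.is_corresponding_author("Contributing Author", "0")   # False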
def parse_editor_file(self, document, filename=None):
"""
Given a filename to an editor file, download
or copy it using the filesystem provider,
then parse it
"""
if self.fs is None:
self.fs = self.get_fs()
# Save the document to the tmp_dir
self.fs.write_document_to_tmp_dir(document, filename)
(column_headings, editor_rows) = self.parse_editor_data(self.fs.document)
return (column_headings, editor_rows)
def parse_editor_data(self, document):
"""
Given editor data - CSV with header rows - parse
it and return an object representation
"""
column_headings = None
editor_rows = []
f = self.fs.open_file_from_tmp_dir(document, mode='rb')
filereader = csv.reader(f)
for row in filereader:
# For now throw out header rows
if filereader.line_num <= 3:
pass
elif filereader.line_num == 4:
# Column headers
column_headings = row
else:
editor_rows.append(row)
return (column_headings, editor_rows)
def get_editors(self, doi_id=None, document=None):
"""
Get a list of editors for an article
If doi_id is None, return all editors
If document is None, find the most recent editors file
"""
editors = []
# Check for the document
if document is None:
# No document? Find it on S3, save the content to
# the tmp_dir
if self.fs is None:
self.fs = self.get_fs()
s3_key_name = self.find_latest_s3_file_name(file_type="editor")
s3_key = self.get_s3key(s3_key_name)
contents = s3_key.get_contents_as_string()
self.fs.write_content_to_document(contents, self.editor_default_filename)
document = self.fs.get_document
# Parse the file
filename = self.editor_default_filename
(column_headings, editor_rows) = self.parse_editor_file(document, filename)
if editor_rows:
for a in editor_rows:
add = True
# Check doi_id column value
if doi_id is not None:
if int(doi_id) != int(a[0]):
add = False
# Finish up, add the editor if we should
if add is True:
editors.append(a)
if len(editors) <= 0:
editors = None
return (column_headings, editors)
def find_latest_s3_file_name(self, file_type, file_list=None):
"""
Given the file_type, find the name of the S3 key for the object
that is the latest file in the S3 bucket
file_type options: author, editor
Optional: for running tests, provide a file_list without connecting to S3
"""
s3_key_name = None
# For each file_type, specify a unique file name fragment to filter on
# with regular expression search
fn_fragment = {}
fn_fragment["author"] = "ejp_query_tool_query_id_152_15a"
fn_fragment["editor"] = "ejp_query_tool_query_id_158_15b"
fn_fragment["poa_manuscript"] = "ejp_query_tool_query_id_176_POA_Manuscript"
fn_fragment["poa_author"] = "ejp_query_tool_query_id_177_POA_Author"
fn_fragment["poa_license"] = "ejp_query_tool_query_id_178_POA_License"
fn_fragment["poa_subject_area"] = "ejp_query_tool_query_id_179_POA_Subject_Area"
fn_fragment["poa_received"] = "ejp_query_tool_query_id_180_POA_Received"
fn_fragment["poa_research_organism"] = "ejp_query_tool_query_id_182_POA_Research_Organism"
fn_fragment["poa_abstract"] = "ejp_query_tool_query_id_196_POA_Abstract"
fn_fragment["poa_title"] = "ejp_query_tool_query_id_191_POA_Title"
fn_fragment["poa_keywords"] = "ejp_query_tool_query_id_226_POA_Keywords"
fn_fragment["poa_group_authors"] = "ejp_query_tool_query_id_242_POA_Group_Authors"
fn_fragment["poa_datasets"] = "ejp_query_tool_query_id_199_POA_Datasets"
fn_fragment["poa_funding"] = "ejp_query_tool_query_id_345_POA_Funding"
fn_fragment["poa_ethics"] = "ejp_query_tool_query_id_198_POA_Ethics"
if file_list is None:
file_list = self.ejp_bucket_file_list()
if file_list:
good_file_list = []
pattern = fn_fragment[file_type]
# First copy all the good file names over
for s3_file in file_list:
if re.search(pattern, s3_file["name"]) is not None:
good_file_list.append(s3_file)
# Second, sort by last_updated_timestamp
s = sorted(good_file_list, key=itemgetter('last_modified_timestamp'), reverse=True)
if len(s) > 0:
# We still have a list, take the name of the first one
s3_key_name = s[0]["name"]
return s3_key_name
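# Test-style usage sketch (file names and timestamps are made up):
#   file_list = [
#       {"name": "ejp_query_tool_query_id_152_15a_2015_01_01.csv",
#        "last_modified_timestamp": 1420070400},
#       {"name": "ejp_query_tool_query_id_152_15a_2015_06_01.csv",
#        "last_modified_timestamp": 1433116800},
#   ]
#   ejp.find_latest_s3_file_name("author", file_list)
# returns the 2015_06_01 name: entries are filtered on the "author" fragment,
# then sorted by last_modified_timestamp with the newest first.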
def ejp_bucket_file_list(self):
"""
Connect to the EJP bucket, as specified in the settings,
use boto to list all keys in the root of the bucket,
extract interesting values and collapse into JSON
so we can test it later
"""
bucket = self.get_bucket(self.settings.ejp_bucket)
# List bucket contents
(keys, folders) = self.get_keys_and_folders(bucket)
attr_list = ['name', 'last_modified']
file_list = []
for key in keys:
item_attrs = {}
for attr_name in attr_list:
raw_value = getattr(key, attr_name)
if raw_value:
string_value = str(raw_value)
item_attrs[attr_name] = string_value
try:
if item_attrs['last_modified']:
# Parse last_modified into a timestamp for easy computations
date_format = "%Y-%m-%dT%H:%M:%S.000Z"
date_str = time.strptime(item_attrs['last_modified'], date_format)
timestamp = calendar.timegm(date_str)
item_attrs['last_modified_timestamp'] = timestamp
except KeyError:
pass
# Finally, add to the file list
if len(item_attrs) > 0:
file_list.append(item_attrs)
if len(file_list) <= 0:
# Return None if no S3 keys were found
file_list = None
return file_list
def get_keys_and_folders(self, bucket, prefix=None, delimiter='/', headers=None):
# Get "keys" and "folders" from the bucket, with optional
# prefix for the "folder" of interest
# default delimiter is '/'
if bucket is None:
return None
folders = []
keys = []
bucketList = bucket.list(prefix=prefix, delimiter=delimiter, headers=headers)
for item in bucketList:
if isinstance(item, boto.s3.prefix.Prefix):
# Can loop through each prefix and search for objects
folders.append(item)
#print 'Prefix: ' + item.name
elif isinstance(item, boto.s3.key.Key):
keys.append(item)
#print 'Key: ' + item.name
return keys, folders
def get_fs(self):
"""
For running tests, return the filesystem provider
so it can be interrogated
"""
if self.fs is None:
# Create the filesystem provider
self.fs = fslib.Filesystem(self.get_tmp_dir())
return self.fs
def get_tmp_dir(self):
"""
Get the temporary file directory, but if not set
then make the directory
"""
if self.tmp_dir:
return self.tmp_dir
else:
self.tmp_dir = self.tmp_dir_default
return self.tmp_dir
def decode_cp1252(self, str):
"""
CSV files look to be in CP-1252 encoding (Western Europe)
Decoding to ASCII is normally fine, except when it gets an O umlaut, for example
In this case, values must be decoded from cp1252 in order to be added as unicode
to the final XML output.
This function helps do that in selected places, like on author surnames
"""
try:
# See if it is not safe to encode to ascii first
junk = str.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
# Wrap the decode in another exception to make sure this never fails
try:
str = str.decode('cp1252')
except:
pass
return str
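# Illustrative example under the Python 2 semantics this provider targets:
#   ejp.decode_cp1252('M\xf6ller')   # cp1252 bytes containing an o-umlaut
# returns u'M\xf6ller', while a plain ASCII value such as 'Smith' is returned
# unchanged because encoding it to ASCII does not raise.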
|
|
from abc import ABC, abstractmethod
from typing import List, Optional, Pattern, Dict
from datetime import datetime
import regex
from recognizers_text.utilities import RegExpUtility
from recognizers_text.extractor import ExtractResult
from .constants import Constants, TimeTypeConstants
from .extractors import DateTimeExtractor
from .parsers import DateTimeParser, DateTimeParseResult
from .utilities import Token, merge_all_tokens, DateTimeResolutionResult
from .base_duration import BaseDurationParser
from .base_timeperiod import BaseTimePeriodParser
from .base_time import BaseTimeParser
from .base_date import BaseDateParser
from .base_datetime import BaseDateTimeParser, MatchedTimex
from .base_dateperiod import BaseDatePeriodParser
from .base_datetimeperiod import BaseDateTimePeriodParser
class SetExtractorConfiguration(ABC):
@property
@abstractmethod
def last_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def each_prefix_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def periodic_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def each_unit_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def each_day_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def before_each_day_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def set_week_day_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def set_each_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def duration_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def time_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def date_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def date_time_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def date_period_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def time_period_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def date_time_period_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
class BaseSetExtractor(DateTimeExtractor):
@property
def extractor_type_name(self) -> str:
return Constants.SYS_DATETIME_SET
def __init__(self, config: SetExtractorConfiguration):
self.config = config
def extract(self, source: str, reference: datetime = None) -> List[ExtractResult]:
if reference is None:
reference = datetime.now()
tokens: List[Token] = list()
tokens.extend(self.match_each_unit(source))
tokens.extend(self.match_periodic(source))
tokens.extend(self.match_each_duration(source, reference))
tokens.extend(self.time_everyday(source, reference))
tokens.extend(self.match_each(self.config.date_extractor, source, reference))
tokens.extend(self.match_each(self.config.time_extractor, source, reference))
tokens.extend(self.match_each(self.config.date_time_extractor, source, reference))
tokens.extend(self.match_each(self.config.date_period_extractor, source, reference))
tokens.extend(self.match_each(self.config.time_period_extractor, source, reference))
tokens.extend(self.match_each(self.config.date_time_period_extractor, source, reference))
result = merge_all_tokens(tokens, source, self.extractor_type_name)
return result
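# Illustrative usage sketch: the configuration class name below is a
# placeholder for one of the language-specific SetExtractorConfiguration
# implementations shipped with the recognizers package.
#   extractor = BaseSetExtractor(SomeEnglishSetExtractorConfiguration())
#   results = extractor.extract("call my mom every Friday")
# Each ExtractResult covers one recurring ("set") expression found in the text.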
def match_each_unit(self, source: str) -> List[Token]:
for match in regex.finditer(self.config.each_unit_regex, source):
yield Token(match.start(), match.end())
def match_periodic(self, source: str) -> List[Token]:
for match in regex.finditer(self.config.periodic_regex, source):
yield Token(match.start(), match.end())
def match_each_duration(self, source: str, reference: datetime) -> List[Token]:
for extract_result in self.config.duration_extractor.extract(source, reference):
if regex.search(self.config.last_regex, extract_result.text):
continue
before_str = source[0:extract_result.start]
match = regex.search(self.config.each_prefix_regex, before_str)
if match:
yield Token(match.start(), extract_result.start + extract_result.length)
def time_everyday(self, source: str, reference: datetime) -> List[Token]:
for extract_result in self.config.time_extractor.extract(source, reference):
after_str = source[extract_result.start + extract_result.length:]
if not after_str and self.config.before_each_day_regex is not None:
before_str = source[0:extract_result.start]
before_match = regex.search(self.config.before_each_day_regex, before_str)
if before_match:
yield Token(before_match.start(), extract_result.start + extract_result.length)
else:
after_match = regex.search(self.config.each_day_regex, after_str)
if after_match:
yield Token(
extract_result.start,
extract_result.start + extract_result.length + len(after_match.group()))
def match_each(self, extractor: DateTimeExtractor, source: str, reference: datetime) -> List[Token]:
for match in regex.finditer(self.config.set_each_regex, source):
trimmed_source = source[0:match.start()] + source[match.end():]
for extract_result in extractor.extract(trimmed_source, reference):
if (extract_result.start <= match.start()
and extract_result.start + extract_result.length > match.start()):
yield Token(extract_result.start, extract_result.start + extract_result.length + len(match.group()))
for match in regex.finditer(self.config.set_week_day_regex, source):
trimmed_source = source[0:match.start()] + RegExpUtility.get_group(match, 'weekday') + source[match.end():]
for extract_result in extractor.extract(trimmed_source, reference):
if extract_result.start <= match.start() and RegExpUtility.get_group(match, 'weekday') in extract_result.text:
length = extract_result.length + 1
prefix = RegExpUtility.get_group(match, 'prefix')
if prefix:
length += len(prefix)
yield Token(extract_result.start, extract_result.start + length)
class SetParserConfiguration(ABC):
@property
@abstractmethod
def duration_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def duration_parser(self) -> BaseDurationParser:
raise NotImplementedError
@property
@abstractmethod
def time_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def time_parser(self) -> BaseTimeParser:
raise NotImplementedError
@property
@abstractmethod
def date_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def date_parser(self) -> BaseDateParser:
raise NotImplementedError
@property
@abstractmethod
def date_time_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def date_time_parser(self) -> BaseDateTimeParser:
raise NotImplementedError
@property
@abstractmethod
def date_period_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def date_period_parser(self) -> BaseDatePeriodParser:
raise NotImplementedError
@property
@abstractmethod
def time_period_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def time_period_parser(self) -> BaseTimePeriodParser:
raise NotImplementedError
@property
@abstractmethod
def date_time_period_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def date_time_period_parser(self) -> BaseDateTimePeriodParser:
raise NotImplementedError
@property
@abstractmethod
def unit_map(self) -> Dict[str, str]:
raise NotImplementedError
@property
@abstractmethod
def each_prefix_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def periodic_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def each_unit_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def each_day_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def set_week_day_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def set_each_regex(self) -> Pattern:
raise NotImplementedError
@abstractmethod
def get_matched_daily_timex(self, text: str) -> MatchedTimex:
raise NotImplementedError
@abstractmethod
def get_matched_unit_timex(self, text: str) -> MatchedTimex:
raise NotImplementedError
class BaseSetParser(DateTimeParser):
@property
def parser_type_name(self) -> str:
return Constants.SYS_DATETIME_SET
def __init__(self, config: SetParserConfiguration):
self.config = config
def parse(self, source: ExtractResult, reference: datetime = None) -> Optional[DateTimeParseResult]:
if reference is None:
reference = datetime.now()
result = DateTimeParseResult(source)
if source.type is self.parser_type_name:
inner_result = self.parse_each_unit(source.text)
if not inner_result.success:
inner_result = self.parse_each_duration(source.text, reference)
if not inner_result.success:
inner_result = self.parser_time_everyday(source.text, reference)
# NOTE: Please do not change the order of the following functions:
# datetimeperiod>dateperiod>timeperiod>datetime>date>time
if not inner_result.success:
inner_result = self.parse_each(self.config.date_time_period_extractor,
self.config.date_time_period_parser, source.text, reference)
if not inner_result.success:
inner_result = self.parse_each(self.config.date_period_extractor,
self.config.date_period_parser, source.text, reference)
if not inner_result.success:
inner_result = self.parse_each(self.config.time_period_extractor,
self.config.time_period_parser, source.text, reference)
if not inner_result.success:
inner_result = self.parse_each(self.config.date_time_extractor,
self.config.date_time_parser, source.text, reference)
if not inner_result.success:
inner_result = self.parse_each(self.config.date_extractor,
self.config.date_parser, source.text, reference)
if not inner_result.success:
inner_result = self.parse_each(self.config.time_extractor,
self.config.time_parser, source.text, reference)
if inner_result.success:
inner_result.future_resolution[TimeTypeConstants.SET] = inner_result.future_value
inner_result.past_resolution[TimeTypeConstants.SET] = inner_result.past_value
result.value = inner_result
result.timex_str = inner_result.timex if inner_result is not None else ''
result.resolution_str = ''
return result
def parse_each_unit(self, source: str) -> DateTimeResolutionResult:
result = DateTimeResolutionResult()
# handle "daily", "weekly"
match = regex.match(self.config.periodic_regex, source)
if match:
get_matched_daily_timex = self.config.get_matched_daily_timex(source)
if not get_matched_daily_timex.matched:
return result
result.timex = get_matched_daily_timex.timex
result.future_value = result.past_value = 'Set: ' + result.timex
result.success = True
# handle "each month"
match = regex.match(self.config.each_unit_regex, source)
if match and len(match.group()) == len(source):
source_unit = RegExpUtility.get_group(match, 'unit')
if source_unit and source_unit in self.config.unit_map:
get_matched_unit_timex = self.config.get_matched_unit_timex(source_unit)
if not get_matched_unit_timex.matched:
return result
if RegExpUtility.get_group(match, 'other'):
get_matched_unit_timex = MatchedTimex(matched=get_matched_unit_timex.matched, timex=get_matched_unit_timex.timex.replace('1', '2'))
result.timex = get_matched_unit_timex.timex
result.future_value = result.past_value = 'Set: ' + result.timex
result.success = True
return result
def parse_each_duration(self, source: str, reference: datetime) -> DateTimeResolutionResult:
result = DateTimeResolutionResult()
ers = self.config.duration_extractor.extract(source, reference)
if len(ers) != 1 or source[ers[0].start+ers[0].length:]:
return result
before_str = source[0:ers[0].start]
matches = regex.match(self.config.each_prefix_regex, before_str)
if matches:
pr = self.config.duration_parser.parse(ers[0], datetime.now())
result.timex = pr.timex_str
result.future_value = result.past_value = 'Set: ' + pr.timex_str
result.success = True
return result
def parser_time_everyday(self, source: str, reference: datetime) -> DateTimeResolutionResult:
result = DateTimeResolutionResult()
ers = self.config.time_extractor.extract(source, reference)
if len(ers) != 1:
return result
after_str = source.replace(ers[0].text, '')
matches = regex.match(self.config.each_day_regex, after_str)
if matches:
pr = self.config.time_parser.parse(ers[0], datetime.now())
result.timex = pr.timex_str
result.future_value = result.past_value = 'Set: ' + result.timex
result.success = True
return result
def parse_each(self, extractor: DateTimeExtractor, parser: DateTimeParser,
source: str, reference: datetime) -> DateTimeResolutionResult:
result = DateTimeResolutionResult()
success = False
er: List[ExtractResult] = list()
match = regex.search(self.config.set_each_regex, source)
if match:
trimmed_text = source[0:match.start()] + source[match.end():]
er = extractor.extract(trimmed_text, reference)
if len(er) == 1 and er[0].length == len(trimmed_text):
success = True
match = regex.search(self.config.set_week_day_regex, source)
if match:
trimmed_text = source[0:match.start()] + RegExpUtility.get_group(match, 'weekday') + source[match.end():]
er = extractor.extract(trimmed_text, reference)
if len(er) == 1 and er[0].length == len(trimmed_text):
success = True
if success:
pr = parser.parse(er[0])
result.timex = pr.timex_str
result.future_value = 'Set: ' + pr.timex_str
result.past_value = 'Set: ' + pr.timex_str
result.success = True
return result
|
|
#!/usr/bin/python
#
# offwaketime Summarize blocked time by kernel off-CPU stack + waker stack
# For Linux, uses BCC, eBPF.
#
# USAGE: offwaketime [-h] [-p PID | -u | -k] [-U | -K] [-f] [duration]
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 20-Jan-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
from time import sleep
import argparse
import signal
import errno
from sys import stderr
# arg validation
def positive_int(val):
try:
ival = int(val)
except ValueError:
raise argparse.ArgumentTypeError("must be an integer")
if ival < 0:
raise argparse.ArgumentTypeError("must be positive")
return ival
def positive_nonzero_int(val):
ival = positive_int(val)
if ival == 0:
raise argparse.ArgumentTypeError("must be nonzero")
return ival
def stack_id_err(stack_id):
# -EFAULT in get_stackid normally means the stack-trace is not available,
# such as getting a kernel stack trace in user-space code
return (stack_id < 0) and (stack_id != -errno.EFAULT)
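# For example, stack_id_err(-errno.EFAULT) is False (the stack is simply not
# available, e.g. a kernel stack requested for user-space code), while
# stack_id_err(-errno.ENOMEM) is True (the stack-trace map ran out of space,
# which the warning at the end of this script reports).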
# arguments
examples = """examples:
./offwaketime # trace off-CPU + waker stack time until Ctrl-C
./offwaketime 5 # trace for 5 seconds only
./offwaketime -f 5 # 5 seconds, and output in folded format
./offwaketime -m 1000 # trace only events that last more than 1000 usec
./offwaketime -M 9000 # trace only events that last less than 9000 usec
./offwaketime -p 185 # only trace threads for PID 185
./offwaketime -t 188 # only trace thread 188
./offwaketime -u # only trace user threads (no kernel)
./offwaketime -k # only trace kernel threads (no user)
./offwaketime -U # only show user space stacks (no kernel)
./offwaketime -K # only show kernel space stacks (no user)
"""
parser = argparse.ArgumentParser(
description="Summarize blocked time by kernel stack trace + waker stack",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
thread_group = parser.add_mutually_exclusive_group()
# Note: this script provides --pid and --tid flags but their arguments are
# referred to internally using kernel nomenclature: TGID and PID.
thread_group.add_argument("-p", "--pid", metavar="PID", dest="tgid",
help="trace this PID only", type=positive_int)
thread_group.add_argument("-t", "--tid", metavar="TID", dest="pid",
help="trace this TID only", type=positive_int)
thread_group.add_argument("-u", "--user-threads-only", action="store_true",
help="user threads only (no kernel threads)")
thread_group.add_argument("-k", "--kernel-threads-only", action="store_true",
help="kernel threads only (no user threads)")
stack_group = parser.add_mutually_exclusive_group()
stack_group.add_argument("-U", "--user-stacks-only", action="store_true",
help="show stacks from user space only (no kernel space stacks)")
stack_group.add_argument("-K", "--kernel-stacks-only", action="store_true",
help="show stacks from kernel space only (no user space stacks)")
parser.add_argument("-d", "--delimited", action="store_true",
help="insert delimiter between kernel/user stacks")
parser.add_argument("-f", "--folded", action="store_true",
help="output folded format")
parser.add_argument("--stack-storage-size", default=1024,
type=positive_nonzero_int,
help="the number of unique stack traces that can be stored and "
"displayed (default 1024)")
parser.add_argument("duration", nargs="?", default=99999999,
type=positive_nonzero_int,
help="duration of trace, in seconds")
parser.add_argument("-m", "--min-block-time", default=1,
type=positive_nonzero_int,
help="the amount of time in microseconds over which we " +
"store traces (default 1)")
parser.add_argument("-M", "--max-block-time", default=(1 << 64) - 1,
type=positive_nonzero_int,
help="the amount of time in microseconds under which we " +
"store traces (default U64_MAX)")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
folded = args.folded
duration = int(args.duration)
# signal handler
def signal_ignore(signal, frame):
print()
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#define MINBLOCK_US MINBLOCK_US_VALUEULL
#define MAXBLOCK_US MAXBLOCK_US_VALUEULL
struct key_t {
char waker[TASK_COMM_LEN];
char target[TASK_COMM_LEN];
int w_k_stack_id;
int w_u_stack_id;
int t_k_stack_id;
int t_u_stack_id;
u32 t_pid;
u32 t_tgid;
u32 w_pid;
u32 w_tgid;
};
BPF_HASH(counts, struct key_t);
// Key of this hash is PID of waiting Process,
// value is timestamp when it went into waiting
BPF_HASH(start, u32);
struct wokeby_t {
char name[TASK_COMM_LEN];
int k_stack_id;
int u_stack_id;
int w_pid;
int w_tgid;
};
// Key of the hash is PID of the Process to be woken, value is information
// of the Process who wakes it
BPF_HASH(wokeby, u32, struct wokeby_t);
BPF_STACK_TRACE(stack_traces, STACK_STORAGE_SIZE);
int waker(struct pt_regs *ctx, struct task_struct *p) {
// PID and TGID of the target Process to be woken
u32 pid = p->pid;
u32 tgid = p->tgid;
if (!(THREAD_FILTER)) {
return 0;
}
// Construct information about current (the waker) Process
struct wokeby_t woke = {};
bpf_get_current_comm(&woke.name, sizeof(woke.name));
woke.k_stack_id = KERNEL_STACK_GET;
woke.u_stack_id = USER_STACK_GET;
woke.w_pid = bpf_get_current_pid_tgid();
woke.w_tgid = bpf_get_current_pid_tgid() >> 32;
wokeby.update(&pid, &woke);
return 0;
}
int oncpu(struct pt_regs *ctx, struct task_struct *p) {
// PID and TGID of the previous Process (Process going into waiting)
u32 pid = p->pid;
u32 tgid = p->tgid;
u64 *tsp;
u64 ts = bpf_ktime_get_ns();
// Record timestamp for the previous Process (Process going into waiting)
if (THREAD_FILTER) {
start.update(&pid, &ts);
}
// Calculate current Process's wait time by finding the timestamp of when
// it went into waiting.
// pid and tgid are now the PID and TGID of the current (waking) Process.
pid = bpf_get_current_pid_tgid();
tgid = bpf_get_current_pid_tgid() >> 32;
tsp = start.lookup(&pid);
if (tsp == 0) {
// Missed or filtered when the Process went into waiting
return 0;
}
u64 delta = ts - *tsp;
start.delete(&pid);
delta = delta / 1000;
if ((delta < MINBLOCK_US) || (delta > MAXBLOCK_US)) {
return 0;
}
// create map key
struct key_t key = {};
struct wokeby_t *woke;
bpf_get_current_comm(&key.target, sizeof(key.target));
key.t_pid = pid;
key.t_tgid = tgid;
key.t_k_stack_id = KERNEL_STACK_GET;
key.t_u_stack_id = USER_STACK_GET;
woke = wokeby.lookup(&pid);
if (woke) {
key.w_k_stack_id = woke->k_stack_id;
key.w_u_stack_id = woke->u_stack_id;
key.w_pid = woke->w_pid;
key.w_tgid = woke->w_tgid;
__builtin_memcpy(&key.waker, woke->name, TASK_COMM_LEN);
wokeby.delete(&pid);
}
counts.increment(key, delta);
return 0;
}
"""
# set thread filter
thread_context = ""
if args.tgid is not None:
thread_context = "PID %d" % args.tgid
thread_filter = 'tgid == %d' % args.tgid
elif args.pid is not None:
thread_context = "TID %d" % args.pid
thread_filter = 'pid == %d' % args.pid
elif args.user_threads_only:
thread_context = "user threads"
thread_filter = '!(p->flags & PF_KTHREAD)'
elif args.kernel_threads_only:
thread_context = "kernel threads"
thread_filter = 'p->flags & PF_KTHREAD'
else:
thread_context = "all threads"
thread_filter = '1'
bpf_text = bpf_text.replace('THREAD_FILTER', thread_filter)
# set stack storage size
bpf_text = bpf_text.replace('STACK_STORAGE_SIZE', str(args.stack_storage_size))
bpf_text = bpf_text.replace('MINBLOCK_US_VALUE', str(args.min_block_time))
bpf_text = bpf_text.replace('MAXBLOCK_US_VALUE', str(args.max_block_time))
# handle stack args
kernel_stack_get = "stack_traces.get_stackid(ctx, 0)"
user_stack_get = "stack_traces.get_stackid(ctx, BPF_F_USER_STACK)"
stack_context = ""
if args.user_stacks_only:
stack_context = "user"
kernel_stack_get = "-1"
elif args.kernel_stacks_only:
stack_context = "kernel"
user_stack_get = "-1"
else:
stack_context = "user + kernel"
bpf_text = bpf_text.replace('USER_STACK_GET', user_stack_get)
bpf_text = bpf_text.replace('KERNEL_STACK_GET', kernel_stack_get)
if args.ebpf:
print(bpf_text)
exit()
# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="finish_task_switch", fn_name="oncpu")
b.attach_kprobe(event="try_to_wake_up", fn_name="waker")
matched = b.num_open_kprobes()
if matched == 0:
print("0 functions traced. Exiting.")
exit()
# header
if not folded:
print("Tracing blocked time (us) by %s off-CPU and waker stack" %
stack_context, end="")
if duration < 99999999:
print(" for %d secs." % duration)
else:
print("... Hit Ctrl-C to end.")
try:
sleep(duration)
except KeyboardInterrupt:
# as cleanup can take many seconds, trap Ctrl-C:
# print a newline for folded output on Ctrl-C
signal.signal(signal.SIGINT, signal_ignore)
if not folded:
print()
missing_stacks = 0
has_enomem = False
counts = b.get_table("counts")
stack_traces = b.get_table("stack_traces")
need_delimiter = args.delimited and not (args.kernel_stacks_only or
args.user_stacks_only)
for k, v in sorted(counts.items(), key=lambda counts: counts[1].value):
# handle get_stackid errors
if not args.user_stacks_only:
missing_stacks += int(stack_id_err(k.w_k_stack_id))
missing_stacks += int(stack_id_err(k.t_k_stack_id))
has_enomem = has_enomem or (k.w_k_stack_id == -errno.ENOMEM) or \
(k.t_k_stack_id == -errno.ENOMEM)
if not args.kernel_stacks_only:
missing_stacks += int(stack_id_err(k.w_u_stack_id))
missing_stacks += int(stack_id_err(k.t_u_stack_id))
has_enomem = has_enomem or (k.w_u_stack_id == -errno.ENOMEM) or \
(k.t_u_stack_id == -errno.ENOMEM)
waker_user_stack = [] if k.w_u_stack_id < 1 else \
reversed(list(stack_traces.walk(k.w_u_stack_id))[1:])
waker_kernel_stack = [] if k.w_k_stack_id < 1 else \
reversed(list(stack_traces.walk(k.w_k_stack_id))[1:])
target_user_stack = [] if k.t_u_stack_id < 1 else \
stack_traces.walk(k.t_u_stack_id)
target_kernel_stack = [] if k.t_k_stack_id < 1 else \
stack_traces.walk(k.t_k_stack_id)
if folded:
# print folded stack output
line = [k.target.decode('utf-8', 'replace')]
if not args.kernel_stacks_only:
if stack_id_err(k.t_u_stack_id):
line.append("[Missed User Stack]")
else:
line.extend([b.sym(addr, k.t_tgid).decode('utf-8', 'replace')
for addr in reversed(list(target_user_stack)[1:])])
if not args.user_stacks_only:
line.extend(["-"] if (need_delimiter and k.t_k_stack_id > 0 and k.t_u_stack_id > 0) else [])
if stack_id_err(k.t_k_stack_id):
line.append("[Missed Kernel Stack]")
else:
line.extend([b.ksym(addr).decode('utf-8', 'replace')
for addr in reversed(list(target_kernel_stack)[1:])])
line.append("--")
if not args.user_stacks_only:
if stack_id_err(k.w_k_stack_id):
line.append("[Missed Kernel Stack]")
else:
line.extend([b.ksym(addr).decode('utf-8', 'replace')
for addr in reversed(list(waker_kernel_stack))])
if not args.kernel_stacks_only:
line.extend(["-"] if (need_delimiter and k.w_u_stack_id > 0 and k.w_k_stack_id > 0) else [])
if stack_id_err(k.w_u_stack_id):
line.append("[Missed User Stack]")
else:
line.extend([b.sym(addr, k.w_tgid).decode('utf-8', 'replace')
for addr in reversed(list(waker_user_stack))])
line.append(k.waker.decode('utf-8', 'replace'))
print("%s %d" % (";".join(line), v.value))
else:
# print wakeup name then stack in reverse order
print(" %-16s %s %s" % ("waker:", k.waker.decode('utf-8', 'replace'), k.t_pid))
if not args.kernel_stacks_only:
if stack_id_err(k.w_u_stack_id):
print(" [Missed User Stack]")
else:
for addr in waker_user_stack:
print(" %s" % b.sym(addr, k.w_tgid))
if not args.user_stacks_only:
if need_delimiter and k.w_u_stack_id > 0 and k.w_k_stack_id > 0:
print(" -")
if stack_id_err(k.w_k_stack_id):
print(" [Missed Kernel Stack]")
else:
for addr in waker_kernel_stack:
print(" %s" % b.ksym(addr))
# print waker/wakee delimiter
print(" %-16s %s" % ("--", "--"))
if not args.user_stacks_only:
if stack_id_err(k.t_k_stack_id):
print(" [Missed Kernel Stack]")
else:
for addr in target_kernel_stack:
print(" %s" % b.ksym(addr))
if not args.kernel_stacks_only:
if need_delimiter and k.t_u_stack_id > 0 and k.t_k_stack_id > 0:
print(" -")
if stack_id_err(k.t_u_stack_id):
print(" [Missed User Stack]")
else:
for addr in target_user_stack:
print(" %s" % b.sym(addr, k.t_tgid))
print(" %-16s %s %s" % ("target:", k.target.decode('utf-8', 'replace'), k.w_pid))
print(" %d\n" % v.value)
if missing_stacks > 0:
enomem_str = " Consider increasing --stack-storage-size."
print("WARNING: %d stack traces lost and could not be displayed.%s" %
(missing_stacks, (enomem_str if has_enomem else "")),
file=stderr)
|
|
import os.path
import time
import sys
import platform
import queue
from collections import namedtuple
from functools import partial
from electrum.i18n import _
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
dialogs = []
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_EXPIRED
pr_icons = {
PR_UNPAID:":icons/unpaid.png",
PR_PAID:":icons/confirmed.png",
PR_EXPIRED:":icons/expired.png"
}
pr_tooltips = {
PR_UNPAID:_('Pending'),
PR_PAID:_('Paid'),
PR_EXPIRED:_('Expired')
}
expiration_values = [
(_('1 hour'), 60*60),
(_('1 day'), 24*60*60),
(_('1 week'), 7*24*60*60),
(_('Never'), None)
]
class Timer(QThread):
stopped = False
timer_signal = pyqtSignal()
def run(self):
while not self.stopped:
self.timer_signal.emit()
time.sleep(0.5)
def stop(self):
self.stopped = True
self.wait()
class EnterButton(QPushButton):
def __init__(self, text, func):
QPushButton.__init__(self, text)
self.func = func
self.clicked.connect(func)
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
class ThreadedButton(QPushButton):
def __init__(self, text, task, on_success=None, on_error=None):
QPushButton.__init__(self, text)
self.task = task
self.on_success = on_success
self.on_error = on_error
self.clicked.connect(self.run_task)
def run_task(self):
self.setEnabled(False)
self.thread = TaskThread(self)
self.thread.add(self.task, self.on_success, self.done, self.on_error)
def done(self):
self.setEnabled(True)
self.thread.stop()
class WWLabel(QLabel):
def __init__(self, text="", parent=None):
QLabel.__init__(self, text, parent)
self.setWordWrap(True)
class HelpLabel(QLabel):
def __init__(self, text, help_text):
QLabel.__init__(self, text)
self.help_text = help_text
self.app = QCoreApplication.instance()
self.font = QFont()
def mouseReleaseEvent(self, x):
QMessageBox.information(self, 'Help', self.help_text)
def enterEvent(self, event):
self.font.setUnderline(True)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
return QLabel.enterEvent(self, event)
def leaveEvent(self, event):
self.font.setUnderline(False)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
return QLabel.leaveEvent(self, event)
class HelpButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, '?')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(20)
self.clicked.connect(self.onclick)
def onclick(self):
QMessageBox.information(self, 'Help', self.help_text)
class Buttons(QHBoxLayout):
def __init__(self, *buttons):
QHBoxLayout.__init__(self)
self.addStretch(1)
for b in buttons:
self.addWidget(b)
class CloseButton(QPushButton):
def __init__(self, dialog):
QPushButton.__init__(self, _("Close"))
self.clicked.connect(dialog.close)
self.setDefault(True)
class CopyButton(QPushButton):
def __init__(self, text_getter, app):
QPushButton.__init__(self, _("Copy"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
class CopyCloseButton(QPushButton):
def __init__(self, text_getter, app, dialog):
QPushButton.__init__(self, _("Copy and Close"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
self.clicked.connect(dialog.close)
self.setDefault(True)
class OkButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("OK"))
self.clicked.connect(dialog.accept)
self.setDefault(True)
class CancelButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("Cancel"))
self.clicked.connect(dialog.reject)
class MessageBoxMixin(object):
def top_level_window_recurse(self, window=None):
window = window or self
classes = (WindowModalDialog, QMessageBox)
for n, child in enumerate(window.children()):
# Test for visibility as old closed dialogs may not be GC-ed
if isinstance(child, classes) and child.isVisible():
return self.top_level_window_recurse(child)
return window
def top_level_window(self):
return self.top_level_window_recurse()
def question(self, msg, parent=None, title=None, icon=None):
Yes, No = QMessageBox.Yes, QMessageBox.No
return self.msg_box(icon or QMessageBox.Question,
parent, title or '',
msg, buttons=Yes|No, defaultButton=No) == Yes
def show_warning(self, msg, parent=None, title=None):
return self.msg_box(QMessageBox.Warning, parent,
title or _('Warning'), msg)
def show_error(self, msg, parent=None):
return self.msg_box(QMessageBox.Warning, parent,
_('Error'), msg)
def show_critical(self, msg, parent=None, title=None):
return self.msg_box(QMessageBox.Critical, parent,
title or _('Critical Error'), msg)
def show_message(self, msg, parent=None, title=None):
return self.msg_box(QMessageBox.Information, parent,
title or _('Information'), msg)
def msg_box(self, icon, parent, title, text, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton):
parent = parent or self.top_level_window()
d = QMessageBox(icon, title, str(text), buttons, parent)
d.setWindowModality(Qt.WindowModal)
d.setDefaultButton(defaultButton)
return d.exec_()
class WindowModalDialog(QDialog, MessageBoxMixin):
'''Handy wrapper; window modal dialogs are better for our multi-window
daemon model as other wallet windows can still be accessed.'''
def __init__(self, parent, title=None):
QDialog.__init__(self, parent)
self.setWindowModality(Qt.WindowModal)
if title:
self.setWindowTitle(title)
class WaitingDialog(WindowModalDialog):
'''Shows a please wait dialog whilst running a task. It is not
necessary to maintain a reference to this dialog.'''
def __init__(self, parent, message, task, on_success=None, on_error=None):
assert parent
if isinstance(parent, MessageBoxMixin):
parent = parent.top_level_window()
WindowModalDialog.__init__(self, parent, _("Please wait"))
vbox = QVBoxLayout(self)
vbox.addWidget(QLabel(message))
self.accepted.connect(self.on_accepted)
self.show()
self.thread = TaskThread(self)
self.thread.add(task, on_success, self.accept, on_error)
def wait(self):
self.thread.wait()
def on_accepted(self):
self.thread.stop()
def line_dialog(parent, title, label, ok_label, default=None):
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(500)
l = QVBoxLayout()
dialog.setLayout(l)
l.addWidget(QLabel(label))
txt = QLineEdit()
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.text()
def text_dialog(parent, title, label, ok_label, default=None):
from .qrtextedit import ScanQRTextEdit
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(500)
l = QVBoxLayout()
dialog.setLayout(l)
l.addWidget(QLabel(label))
txt = ScanQRTextEdit()
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.toPlainText()
class ChoicesLayout(object):
def __init__(self, msg, choices, on_clicked=None, checked_index=0):
vbox = QVBoxLayout()
if len(msg) > 50:
vbox.addWidget(WWLabel(msg))
msg = ""
gb2 = QGroupBox(msg)
vbox.addWidget(gb2)
vbox2 = QVBoxLayout()
gb2.setLayout(vbox2)
self.group = group = QButtonGroup()
for i,c in enumerate(choices):
button = QRadioButton(gb2)
button.setText(c)
vbox2.addWidget(button)
group.addButton(button)
group.setId(button, i)
if i==checked_index:
button.setChecked(True)
if on_clicked:
group.buttonClicked.connect(partial(on_clicked, self))
self.vbox = vbox
def layout(self):
return self.vbox
def selected_index(self):
return self.group.checkedId()
def address_field(addresses):
hbox = QHBoxLayout()
address_e = QLineEdit()
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
else:
addresses = []
def func():
try:
i = addresses.index(str(address_e.text())) + 1
i = i % len(addresses)
address_e.setText(addresses[i])
except ValueError:
# the user might have changed address_e to an
# address not in the wallet (or to something that isn't an address)
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
button = QPushButton(_('Address'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(address_e)
return hbox, address_e
def filename_field(parent, config, defaultname, select_msg):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Format")))
gb = QGroupBox("format", parent)
b1 = QRadioButton(gb)
b1.setText(_("CSV"))
b1.setChecked(True)
b2 = QRadioButton(gb)
b2.setText(_("json"))
vbox.addWidget(b1)
vbox.addWidget(b2)
hbox = QHBoxLayout()
directory = config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, defaultname )
filename_e = QLineEdit()
filename_e.setText(path)
def func():
text = filename_e.text()
_filter = "*.csv" if text.endswith(".csv") else "*.json" if text.endswith(".json") else None
p, __ = QFileDialog.getSaveFileName(None, select_msg, text, _filter)
if p:
filename_e.setText(p)
button = QPushButton(_('File'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(filename_e)
vbox.addLayout(hbox)
def set_csv(v):
text = filename_e.text()
text = text.replace(".json",".csv") if v else text.replace(".csv",".json")
filename_e.setText(text)
b1.clicked.connect(lambda: set_csv(True))
b2.clicked.connect(lambda: set_csv(False))
return vbox, filename_e, b1
class ElectrumItemDelegate(QStyledItemDelegate):
def createEditor(self, parent, option, index):
return self.parent().createEditor(parent, option, index)
class MyTreeWidget(QTreeWidget):
def __init__(self, parent, create_menu, headers, stretch_column=None,
editable_columns=None):
QTreeWidget.__init__(self, parent)
self.parent = parent
self.config = self.parent.config
self.stretch_column = stretch_column
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(create_menu)
self.setUniformRowHeights(True)
# extend the syntax for consistency
self.addChild = self.addTopLevelItem
self.insertChild = self.insertTopLevelItem
# Control which columns are editable
self.editor = None
self.pending_update = False
if editable_columns is None:
editable_columns = [stretch_column]
self.editable_columns = editable_columns
self.setItemDelegate(ElectrumItemDelegate(self))
self.itemDoubleClicked.connect(self.on_doubleclick)
self.update_headers(headers)
self.current_filter = ""
def update_headers(self, headers):
self.setColumnCount(len(headers))
self.setHeaderLabels(headers)
self.header().setStretchLastSection(False)
for col in range(len(headers)):
sm = QHeaderView.Stretch if col == self.stretch_column else QHeaderView.ResizeToContents
self.header().setSectionResizeMode(col, sm)
def editItem(self, item, column):
if column in self.editable_columns:
self.editing_itemcol = (item, column, item.text(column))
# Calling setFlags causes on_changed events for some reason
item.setFlags(item.flags() | Qt.ItemIsEditable)
QTreeWidget.editItem(self, item, column)
item.setFlags(item.flags() & ~Qt.ItemIsEditable)
def keyPressEvent(self, event):
if event.key() in [ Qt.Key_F2, Qt.Key_Return ] and self.editor is None:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def permit_edit(self, item, column):
return (column in self.editable_columns
and self.on_permit_edit(item, column))
def on_permit_edit(self, item, column):
return True
def on_doubleclick(self, item, column):
if self.permit_edit(item, column):
self.editItem(item, column)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def createEditor(self, parent, option, index):
self.editor = QStyledItemDelegate.createEditor(self.itemDelegate(),
parent, option, index)
self.editor.editingFinished.connect(self.editing_finished)
return self.editor
def editing_finished(self):
# Long-time QT bug - pressing Enter to finish editing signals
# editingFinished twice. If the item changed the sequence is
# Enter key: editingFinished, on_change, editingFinished
# Mouse: on_change, editingFinished
# This mess is the cleanest way to ensure we make the
# on_edited callback with the updated item
if self.editor:
(item, column, prior_text) = self.editing_itemcol
if self.editor.text() == prior_text:
self.editor = None # Unchanged - ignore any 2nd call
elif item.text(column) == prior_text:
pass # Buggy first call on Enter key, item not yet updated
else:
# What we want - the updated item
self.on_edited(*self.editing_itemcol)
self.editor = None
# Now do any pending updates
if self.editor is None and self.pending_update:
self.pending_update = False
self.on_update()
def on_edited(self, item, column, prior):
'''Called only when the text actually changes'''
key = item.data(0, Qt.UserRole)
text = item.text(column)
self.parent.wallet.set_label(key, text)
self.parent.history_list.update_labels()
self.parent.update_completions()
def update(self):
# Defer updates if editing
if self.editor:
self.pending_update = True
else:
self.setUpdatesEnabled(False)
self.on_update()
self.setUpdatesEnabled(True)
if self.current_filter:
self.filter(self.current_filter)
def on_update(self):
pass
def get_leaves(self, root):
child_count = root.childCount()
if child_count == 0:
yield root
for i in range(child_count):
item = root.child(i)
for x in self.get_leaves(item):
yield x
def filter(self, p):
columns = self.__class__.filter_columns
p = p.lower()
self.current_filter = p
for item in self.get_leaves(self.invisibleRootItem()):
item.setHidden(all([item.text(column).lower().find(p) == -1
for column in columns]))
class ButtonsWidget(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.buttons = []
def resizeButtons(self):
frameWidth = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
x = self.rect().right() - frameWidth
y = self.rect().bottom() - frameWidth
for button in self.buttons:
sz = button.sizeHint()
x -= sz.width()
button.move(x, y - sz.height())
def addButton(self, icon_name, on_click, tooltip):
button = QToolButton(self)
button.setIcon(QIcon(icon_name))
button.setStyleSheet("QToolButton { border: none; hover {border: 1px} pressed {border: 1px} padding: 0px; }")
button.setVisible(True)
button.setToolTip(tooltip)
button.clicked.connect(on_click)
self.buttons.append(button)
return button
def addCopyButton(self, app):
self.app = app
self.addButton(":icons/copy.png", self.on_copy, _("Copy to clipboard"))
def on_copy(self):
self.app.clipboard().setText(self.text())
QToolTip.showText(QCursor.pos(), _("Text copied to clipboard"), self)
class ButtonsLineEdit(QLineEdit, ButtonsWidget):
def __init__(self, text=None):
QLineEdit.__init__(self, text)
self.buttons = []
def resizeEvent(self, e):
o = QLineEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class ButtonsTextEdit(QPlainTextEdit, ButtonsWidget):
def __init__(self, text=None):
QPlainTextEdit.__init__(self, text)
self.setText = self.setPlainText
self.text = self.toPlainText
self.buttons = []
def resizeEvent(self, e):
o = QPlainTextEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class TaskThread(QThread):
'''Thread that runs background tasks. Callbacks are guaranteed
to happen in the context of its parent.'''
Task = namedtuple("Task", "task cb_success cb_done cb_error")
doneSig = pyqtSignal(object, object, object)
def __init__(self, parent, on_error=None):
super(TaskThread, self).__init__(parent)
self.on_error = on_error
self.tasks = queue.Queue()
self.doneSig.connect(self.on_done)
self.start()
def add(self, task, on_success=None, on_done=None, on_error=None):
on_error = on_error or self.on_error
self.tasks.put(TaskThread.Task(task, on_success, on_done, on_error))
def run(self):
while True:
task = self.tasks.get()
if not task:
break
try:
result = task.task()
self.doneSig.emit(result, task.cb_done, task.cb_success)
except BaseException:
self.doneSig.emit(sys.exc_info(), task.cb_done, task.cb_error)
def on_done(self, result, cb_done, cb):
# This runs in the parent's thread.
if cb_done:
cb_done()
if cb:
cb(result)
def stop(self):
self.tasks.put(None)
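# A minimal usage sketch, not from the original source; `parent_widget`,
# `expensive_call` and `show_error` are hypothetical names:
#
#   thread = TaskThread(parent_widget)
#   thread.add(lambda: expensive_call(),
#              on_success=lambda result: print(result),
#              on_done=lambda: print("finished"),
#              on_error=lambda exc_info: show_error(exc_info[1]))
#   ...
#   thread.stop()  # queues the None sentinel so run() exits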
class ColorSchemeItem:
def __init__(self, fg_color, bg_color):
self.colors = (fg_color, bg_color)
def _get_color(self, background):
return self.colors[(int(background) + int(ColorScheme.dark_scheme)) % 2]
def as_stylesheet(self, background=False):
css_prefix = "background-" if background else ""
color = self._get_color(background)
return "QWidget {{ {}color:{}; }}".format(css_prefix, color)
def as_color(self, background=False):
color = self._get_color(background)
return QColor(color)
class ColorScheme:
dark_scheme = False
GREEN = ColorSchemeItem("#117c11", "#8af296")
RED = ColorSchemeItem("#7c1111", "#f18c8c")
BLUE = ColorSchemeItem("#123b7c", "#8cb3f2")
DEFAULT = ColorSchemeItem("black", "white")
@staticmethod
def has_dark_background(widget):
brightness = sum(widget.palette().color(QPalette.Background).getRgb()[0:3])
return brightness < (255*3/2)
@staticmethod
def update_from_widget(widget):
if ColorScheme.has_dark_background(widget):
ColorScheme.dark_scheme = True
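# A minimal usage sketch (illustrative only; `some_widget` is a hypothetical
# QWidget): adapt the scheme to the palette once, then style widgets from it.
#
#   ColorScheme.update_from_widget(some_widget)
#   some_widget.setStyleSheet(ColorScheme.RED.as_stylesheet())
#   brush = ColorScheme.GREEN.as_color(background=True)  # returns a QColor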
if __name__ == "__main__":
app = QApplication([])
t = WaitingDialog(None, 'testing ...', lambda: [time.sleep(1)], lambda x: QMessageBox.information(None, 'done', "done"))
t.start()
app.exec_()
|
|
# -*- coding: utf-8 -*-
"""
pygments.formatters.html
~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for HTML output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import sys
import os.path
from pygments.formatter import Formatter
from pygments.token import Token, Text, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
StringIO, string_types, iteritems
try:
import ctags
except ImportError:
ctags = None
__all__ = ['HtmlFormatter']
_escape_html_table = {
ord('&'): u'&amp;',
ord('<'): u'&lt;',
ord('>'): u'&gt;',
ord('"'): u'&quot;',
ord("'"): u'&#39;',
}
def escape_html(text, table=_escape_html_table):
"""Escape &, <, > as well as single and double quotes for HTML."""
return text.translate(table)
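# Illustrative example of the escaping above (doctest-style, not part of the
# original module):
#   >>> escape_html(u'<b>"A & B"</b>')
#   u'&lt;b&gt;&quot;A &amp; B&quot;&lt;/b&gt;'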
def _get_ttype_class(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = '-' + ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
CSSFILE_TEMPLATE = '''\
td.linenos { background-color: #f0f0f0; padding-right: 10px; }
span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; }
pre { line-height: 125%%; }
%(styledefs)s
'''
DOC_HEADER = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<style type="text/css">
''' + CSSFILE_TEMPLATE + '''
</style>
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_HEADER_EXTERNALCSS = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<link rel="stylesheet" href="%(cssfile)s" type="text/css">
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_FOOTER = '''\
</body>
</html>
'''
class HtmlFormatter(Formatter):
r"""
Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
option.
If the `linenos` option is set to ``"table"``, the ``<pre>`` is
additionally wrapped inside a ``<table>`` which has one row and two
cells: one containing the line numbers and one containing the code.
Example:
.. sourcecode:: html
<div class="highlight" >
<table><tr>
<td class="linenos" title="click to toggle"
onclick="with (this.firstChild.style)
{ display = (display == '') ? 'none' : '' }">
<pre>1
2</pre>
</td>
<td class="code">
<pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
<span class="Ke">pass</span>
</pre>
</td>
</tr></table></div>
(whitespace added to improve clarity).
Wrapping can be disabled using the `nowrap` option.
A list of lines can be specified using the `hl_lines` option to make these
lines highlighted (as of Pygments 0.11).
With the `full` option, a complete HTML 4 document is output, including
the style definitions inside a ``<style>`` tag, or in a separate file if
the `cssfile` option is given.
When `tagsfile` is set to the path of a ctags index file, it is used to
generate hyperlinks from names to their definition. You must enable
`anchorlines` and run ctags with the `-n` option for this to work. The
`python-ctags` module from PyPI must be installed to use this feature;
otherwise a `RuntimeError` will be raised.
The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
containing CSS rules for the CSS classes used by the formatter. The
argument `arg` can be used to specify additional CSS selectors that
are prepended to the classes. A call `fmter.get_style_defs('td .code')`
would result in the following CSS classes:
.. sourcecode:: css
td .code .kw { font-weight: bold; color: #00FF00 }
td .code .cm { color: #999999 }
...
If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
`get_style_defs()` method to request multiple prefixes for the tokens:
.. sourcecode:: python
formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
The output would then look like this:
.. sourcecode:: css
div.syntax pre .kw,
pre.syntax .kw { font-weight: bold; color: #00FF00 }
div.syntax pre .cm,
pre.syntax .cm { color: #999999 }
...
Additional options accepted:
`nowrap`
If set to ``True``, don't wrap the tokens at all, not even inside a ``<pre>``
tag. This disables most other options (default: ``False``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``). This option has no effect if the `cssfile`
and `noclobber_cssfile` option are given and the file specified in
`cssfile` exists.
`noclasses`
If set to true, token ``<span>`` tags will not use CSS classes, but
inline styles. This is not recommended for larger pieces of code since
it increases output size by quite a bit (default: ``False``).
`classprefix`
Since the token types use relatively short class names, they may clash
with some of your own class names. In this case you can use the
`classprefix` option to give a string to prepend to all Pygments-generated
CSS class names for token types.
Note that this option also affects the output of `get_style_defs()`.
`cssclass`
CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
If you set this option, the default selector for `get_style_defs()`
will be this class.
.. versionadded:: 0.9
If you select the ``'table'`` line numbers, the wrapping table will
have a CSS class of this string plus ``'table'``; the default is
accordingly ``'highlighttable'``.
`cssstyles`
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
`prestyles`
Inline CSS styles for the ``<pre>`` tag (default: ``''``).
.. versionadded:: 0.11
`cssfile`
If the `full` option is true and this option is given, it must be the
name of an external file. If the filename does not include an absolute
path, the file's path will be assumed to be relative to the main output
file's path, if the latter can be found. The stylesheet is then written
to this file instead of the HTML file.
.. versionadded:: 0.6
`noclobber_cssfile`
If `cssfile` is given and the specified file exists, the css file will
not be overwritten. This allows the use of the `full` option in
combination with a user specified css file. Default is ``False``.
.. versionadded:: 1.1
`linenos`
If set to ``'table'``, output line numbers as a table with two cells,
one containing the line numbers, the other the whole code. This is
copy-and-paste-friendly, but may cause alignment problems with some
browsers or fonts. If set to ``'inline'``, the line numbers will be
integrated in the ``<pre>`` tag that contains the code (that setting
is *new in Pygments 0.8*).
For compatibility with Pygments 0.7 and earlier, every true value
except ``'inline'`` means the same as ``'table'`` (in particular, that
means also ``True``).
The default value is ``False``, which means no line numbers at all.
**Note:** with the default ("table") line number mechanism, the line
numbers and code can have different line heights in Internet Explorer
unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
CSS property (you get the default line spacing with ``line-height:
125%``).
`hl_lines`
Specify a list of lines to be highlighted.
.. versionadded:: 0.11
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`linenospecial`
If set to a number n > 0, every nth line number is given the CSS
class ``"special"`` (default: ``0``).
`nobackground`
If set to ``True``, the formatter won't output the background color
for the wrapping element (this automatically defaults to ``False``
when there is no wrapping element [eg: no argument for the
`get_style_defs` method given]) (default: ``False``).
.. versionadded:: 0.6
`lineseparator`
This string is output between lines of code. It defaults to ``"\n"``,
which is enough to break a line inside ``<pre>`` tags, but you can
e.g. set it to ``"<br>"`` to get HTML line breaks.
.. versionadded:: 0.7
`lineanchors`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in an anchor tag with a ``name`` of ``foo-linenumber``.
This allows easy linking to certain lines.
.. versionadded:: 0.9
`linespans`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in a span tag with an ``id`` of ``foo-linenumber``.
This allows easy access to lines via javascript.
.. versionadded:: 1.6
`anchorlinenos`
If set to `True`, will wrap line numbers in <a> tags. Used in
combination with `linenos` and `lineanchors`.
`tagsfile`
If set to the path of a ctags file, wrap names in anchor tags that
link to their definitions. `lineanchors` should be used, and the
tags file should specify line numbers (see the `-n` option to ctags).
.. versionadded:: 1.6
`tagurlformat`
A string formatting pattern used to generate links to ctags definitions.
Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
Defaults to an empty string, resulting in just `#prefix-number` links.
.. versionadded:: 1.6
**Subclassing the HTML formatter**
.. versionadded:: 0.7
The HTML formatter is now built in a way that allows easy subclassing, thus
customizing the output HTML code. The `format()` method calls
`self._format_lines()` which returns a generator that yields tuples of ``(1,
line)``, where the ``1`` indicates that the ``line`` is a line of the
formatted source code.
If the `nowrap` option is set, the generator is iterated over and the
resulting HTML is output.
Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
other generators. These may add some HTML code to the one generated by
`_format_lines()`, either by modifying the lines generated by the latter,
then yielding them again with ``(1, line)``, and/or by yielding other HTML
code before or after the lines, with ``(0, html)``. The distinction between
source lines and other code makes it possible to wrap the generator multiple
times.
The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.
A custom `HtmlFormatter` subclass could look like this:
.. sourcecode:: python
class CodeHtmlFormatter(HtmlFormatter):
def wrap(self, source, outfile):
return self._wrap_code(source)
def _wrap_code(self, source):
yield 0, '<code>'
for i, t in source:
if i == 1:
# it's a line of formatted code
t += '<br>'
yield i, t
yield 0, '</code>'
This results in wrapping the formatted lines with a ``<code>`` tag, where the
source lines are broken using ``<br>`` tags.
After calling `wrap()`, the `format()` method also adds the "line numbers"
and/or "full document" wrappers if the respective options are set. Then, all
HTML yielded by the wrapped generator is output.
"""
name = 'HTML'
aliases = ['html']
filenames = ['*.html', '*.htm']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.title = self._decodeifneeded(self.title)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.noclasses = get_bool_opt(options, 'noclasses', False)
self.classprefix = options.get('classprefix', '')
self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))
if self.tagsfile:
if not ctags:
raise RuntimeError('The "ctags" package must be installed '
'to be able to use the "tagsfile" feature.')
self._ctags = ctags.CTags(self.tagsfile)
linenos = options.get('linenos', False)
if linenos == 'inline':
self.linenos = 2
elif linenos:
# compatibility with <= 0.7
self.linenos = 1
else:
self.linenos = 0
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.lineseparator = options.get('lineseparator', '\n')
self.lineanchors = options.get('lineanchors', '')
self.linespans = options.get('linespans', '')
self.anchorlinenos = options.get('anchorlinenos', False)
self.hl_lines = set()
for lineno in get_list_opt(options, 'hl_lines', []):
try:
self.hl_lines.add(int(lineno))
except ValueError:
pass
self._create_stylesheet()
def _get_css_class(self, ttype):
"""Return the css class of this token type prefixed with
the classprefix option."""
ttypeclass = _get_ttype_class(ttype)
if ttypeclass:
return self.classprefix + ttypeclass
return ''
def _create_stylesheet(self):
t2c = self.ttype2class = {Token: ''}
c2s = self.class2style = {}
for ttype, ndef in self.style:
name = self._get_css_class(ttype)
style = ''
if ndef['color']:
style += 'color: #%s; ' % ndef['color']
if ndef['bold']:
style += 'font-weight: bold; '
if ndef['italic']:
style += 'font-style: italic; '
if ndef['underline']:
style += 'text-decoration: underline; '
if ndef['bgcolor']:
style += 'background-color: #%s; ' % ndef['bgcolor']
if ndef['border']:
style += 'border: 1px solid #%s; ' % ndef['border']
if style:
t2c[ttype] = name
# save len(ttype) to enable ordering the styles by
# hierarchy (necessary for CSS cascading rules!)
c2s[name] = (style[:-2], ttype, len(ttype))
def get_style_defs(self, arg=None):
"""
Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes.
"""
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
if isinstance(arg, string_types):
args = [arg]
else:
args = list(arg)
def prefix(cls):
if cls:
cls = '.' + cls
tmp = []
for arg in args:
tmp.append((arg and arg + ' ' or '') + cls)
return ', '.join(tmp)
styles = [(level, ttype, cls, style)
for cls, (style, ttype, level) in iteritems(self.class2style)
if cls and style]
styles.sort()
lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
for (level, ttype, cls, style) in styles]
if arg and not self.nobackground and \
self.style.background_color is not None:
text_style = ''
if Text in self.ttype2class:
text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
lines.insert(0, '%s { background: %s;%s }' %
(prefix(''), self.style.background_color, text_style))
if self.style.highlight_color is not None:
lines.insert(0, '%s.hll { background-color: %s }' %
(prefix(''), self.style.highlight_color))
return '\n'.join(lines)
def _decodeifneeded(self, value):
if isinstance(value, bytes):
if self.encoding:
return value.decode(self.encoding)
return value.decode()
return value
def _wrap_full(self, inner, outfile):
if self.cssfile:
if os.path.isabs(self.cssfile):
# it's an absolute filename
cssfilename = self.cssfile
else:
try:
filename = outfile.name
if not filename or filename[0] == '<':
# pseudo files, e.g. name == '<fdopen>'
raise AttributeError
cssfilename = os.path.join(os.path.dirname(filename),
self.cssfile)
except AttributeError:
print('Note: Cannot determine output file name, ' \
'using current directory as base for the CSS file name',
file=sys.stderr)
cssfilename = self.cssfile
# write CSS file only if noclobber_cssfile isn't given as an option.
try:
if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
cf = open(cssfilename, "w")
cf.write(CSSFILE_TEMPLATE %
{'styledefs': self.get_style_defs('body')})
cf.close()
except IOError as err:
err.strerror = 'Error writing CSS file: ' + err.strerror
raise
yield 0, (DOC_HEADER_EXTERNALCSS %
dict(title = self.title,
cssfile = self.cssfile,
encoding = self.encoding))
else:
yield 0, (DOC_HEADER %
dict(title = self.title,
styledefs = self.get_style_defs('body'),
encoding = self.encoding))
for t, line in inner:
yield t, line
yield 0, DOC_FOOTER
def _wrap_tablelinenos(self, inner):
dummyoutfile = StringIO()
lncount = 0
for t, line in inner:
if t:
lncount += 1
dummyoutfile.write(line)
fl = self.linenostart
mw = len(str(lncount + fl - 1))
sp = self.linenospecial
st = self.linenostep
la = self.lineanchors
aln = self.anchorlinenos
nocls = self.noclasses
if sp:
lines = []
for i in range(fl, fl+lncount):
if i % st == 0:
if i % sp == 0:
if aln:
lines.append('<a href="#%s-%d" class="special">%*d</a>' %
(la, i, mw, i))
else:
lines.append('<span class="special">%*d</span>' % (mw, i))
else:
if aln:
lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
else:
lines.append('%*d' % (mw, i))
else:
lines.append('')
ls = '\n'.join(lines)
else:
lines = []
for i in range(fl, fl+lncount):
if i % st == 0:
if aln:
lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
else:
lines.append('%*d' % (mw, i))
else:
lines.append('')
ls = '\n'.join(lines)
# in case you wonder about the seemingly redundant <div> here: since the
# content in the other cell also is wrapped in a div, some browsers in
# some configurations seem to mess up the formatting...
if nocls:
yield 0, ('<table class="%stable">' % self.cssclass +
'<tr><td><div class="linenodiv" '
'style="background-color: #f0f0f0; padding-right: 10px">'
'<pre style="line-height: 125%">' +
ls + '</pre></div></td><td class="code">')
else:
yield 0, ('<table class="%stable">' % self.cssclass +
'<tr><td class="linenos"><div class="linenodiv"><pre>' +
ls + '</pre></div></td><td class="code">')
yield 0, dummyoutfile.getvalue()
yield 0, '</td></tr></table>'
def _wrap_inlinelinenos(self, inner):
# need a list of lines since we need the width of a single number :(
lines = list(inner)
sp = self.linenospecial
st = self.linenostep
num = self.linenostart
mw = len(str(len(lines) + num - 1))
if self.noclasses:
if sp:
for t, line in lines:
if num%sp == 0:
style = 'background-color: #ffffc0; padding: 0 5px 0 5px'
else:
style = 'background-color: #f0f0f0; padding: 0 5px 0 5px'
yield 1, '<span style="%s">%*s </span>' % (
style, mw, (num%st and ' ' or num)) + line
num += 1
else:
for t, line in lines:
yield 1, ('<span style="background-color: #f0f0f0; '
'padding: 0 5px 0 5px">%*s </span>' % (
mw, (num%st and ' ' or num)) + line)
num += 1
elif sp:
for t, line in lines:
yield 1, '<span class="lineno%s">%*s </span>' % (
num%sp == 0 and ' special' or '', mw,
(num%st and ' ' or num)) + line
num += 1
else:
for t, line in lines:
yield 1, '<span class="lineno">%*s </span>' % (
mw, (num%st and ' ' or num)) + line
num += 1
def _wrap_lineanchors(self, inner):
s = self.lineanchors
i = self.linenostart - 1 # subtract 1 since we have to increment i
# *before* yielding
for t, line in inner:
if t:
i += 1
yield 1, '<a name="%s-%d"></a>' % (s, i) + line
else:
yield 0, line
def _wrap_linespans(self, inner):
s = self.linespans
i = self.linenostart - 1
for t, line in inner:
if t:
i += 1
yield 1, '<span id="%s-%d">%s</span>' % (s, i, line)
else:
yield 0, line
def _wrap_div(self, inner):
style = []
if (self.noclasses and not self.nobackground and
self.style.background_color is not None):
style.append('background: %s' % (self.style.background_color,))
if self.cssstyles:
style.append(self.cssstyles)
style = '; '.join(style)
yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass)
+ (style and (' style="%s"' % style)) + '>')
for tup in inner:
yield tup
yield 0, '</div>\n'
def _wrap_pre(self, inner):
style = []
if self.prestyles:
style.append(self.prestyles)
if self.noclasses:
style.append('line-height: 125%')
style = '; '.join(style)
yield 0, ('<pre' + (style and ' style="%s"' % style) + '>')
for tup in inner:
yield tup
yield 0, '</pre>'
def _format_lines(self, tokensource):
"""
Just format the tokens, without any wrapping tags.
Yield individual lines.
"""
nocls = self.noclasses
lsep = self.lineseparator
# for <span style=""> lookup only
getcls = self.ttype2class.get
c2s = self.class2style
escape_table = _escape_html_table
tagsfile = self.tagsfile
lspan = ''
line = ''
for ttype, value in tokensource:
if nocls:
cclass = getcls(ttype)
while cclass is None:
ttype = ttype.parent
cclass = getcls(ttype)
cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
else:
cls = self._get_css_class(ttype)
cspan = cls and '<span class="%s">' % cls or ''
parts = value.translate(escape_table).split('\n')
if tagsfile and ttype in Token.Name:
filename, linenumber = self._lookup_ctag(value)
if linenumber:
base, filename = os.path.split(filename)
if base:
base += '/'
filename, extension = os.path.splitext(filename)
url = self.tagurlformat % {'path': base, 'fname': filename,
'fext': extension}
parts[0] = "<a href=\"%s#%s-%d\">%s" % \
(url, self.lineanchors, linenumber, parts[0])
parts[-1] = parts[-1] + "</a>"
# for all but the last line
for part in parts[:-1]:
if line:
if lspan != cspan:
line += (lspan and '</span>') + cspan + part + \
(cspan and '</span>') + lsep
else: # both are the same
line += part + (lspan and '</span>') + lsep
yield 1, line
line = ''
elif part:
yield 1, cspan + part + (cspan and '</span>') + lsep
else:
yield 1, lsep
# for the last line
if line and parts[-1]:
if lspan != cspan:
line += (lspan and '</span>') + cspan + parts[-1]
lspan = cspan
else:
line += parts[-1]
elif parts[-1]:
line = cspan + parts[-1]
lspan = cspan
# else we neither have to open a new span nor set lspan
if line:
yield 1, line + (lspan and '</span>') + lsep
def _lookup_ctag(self, token):
entry = ctags.TagEntry()
if self._ctags.find(entry, token, 0):
return entry['file'], entry['lineNumber']
else:
return None, None
def _highlight_lines(self, tokensource):
"""
Highlight the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
"""
hls = self.hl_lines
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
style = (' style="background-color: %s"' %
(self.style.highlight_color,))
yield 1, '<span%s>%s</span>' % (style, value)
else:
yield 1, '<span class="hll">%s</span>' % value
else:
yield 1, value
def wrap(self, source, outfile):
"""
Wrap the ``source``, which is a generator yielding
individual lines, in custom generators. See docstring
for `format`. Can be overridden.
"""
return self._wrap_div(self._wrap_pre(source))
def format_unencoded(self, tokensource, outfile):
"""
The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
is part of the original tokensource being highlighted, if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators.
"""
source = self._format_lines(tokensource)
if self.hl_lines:
source = self._highlight_lines(source)
if not self.nowrap:
if self.linenos == 2:
source = self._wrap_inlinelinenos(source)
if self.lineanchors:
source = self._wrap_lineanchors(source)
if self.linespans:
source = self._wrap_linespans(source)
source = self.wrap(source, outfile)
if self.linenos == 1:
source = self._wrap_tablelinenos(source)
if self.full:
source = self._wrap_full(source, outfile)
for t, piece in source:
outfile.write(piece)
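# A hedged usage sketch via the standard Pygments entry points (not part of
# this module):
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   formatter = HtmlFormatter(linenos='table', cssclass='source')
#   html = highlight('print("hello")', PythonLexer(), formatter)
#   css = formatter.get_style_defs('.source')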
|
|
import os.path
import shutil
import sys
import tarfile
import tempfile
import traceback
import urllib2
import webbrowser
try:
import simplejson as json
except ImportError:
import json
import pprint
import StringIO
from ConfigParser import SafeConfigParser
from argparse import ArgumentParser
from subprocess import call, check_call, STDOUT
import fnmatch
from ordereddict import OrderedDict
from setuptools import find_packages
from pkg_resources import WorkingSet, Requirement, resource_stream
from openmdao.main.factorymanager import get_available_types, plugin_groups
from openmdao.util.fileutil import build_directory, find_files, \
get_ancestor_dir, find_module
from openmdao.util.dep import PythonSourceTreeAnalyser
from openmdao.util.dumpdistmeta import get_metadata
from openmdao.util.git import download_github_tar
from openmdao.util.view_docs import view_docs
# from sphinx.setup_command import BuildDoc
import sphinx
def _load_templates():
''' Reads templates from files in the plugin_templates directory.
conf.py:
This is the template for the file that Sphinx uses to configure itself.
It's intended to match the conf.py for the OpenMDAO docs, so if those
change, this may need to be updated.
index.rst
Template for the top level file in the Sphinx docs for the plugin.
usage.rst
Template for the file where the user may add specific usage documentation
for the plugin.
setup.py
Template for the file that packages and installs the plugin using
setuptools.
MANIFEST.in
Template for the file that tells setuptools/distutils what extra data
files to include in the distribution for the plugin.
README.txt
Template for the README.txt file.
setup.cfg
Template for the setup configuration file, where all of the user
supplied metadata is located. This file may be hand edited by the
plugin developer.
'''
# There are a number of string templates that are used to produce various
# files within the plugin distribution. These templates are stored in the
# templates dict, with the key being the name of the file that the
# template corresponds to.
templates = {}
for item in ['index.rst', 'usage.rst', 'MANIFEST.in',
'README.txt', 'setup.cfg']:
infile = resource_stream(__name__,
os.path.join('plugin_templates', item))
templates[item] = infile.read()
infile.close()
infile = resource_stream(__name__,
os.path.join('plugin_templates', 'setup_py_template'))
templates['setup.py'] = infile.read()
infile.close()
infile = resource_stream(__name__,
os.path.join('plugin_templates', 'conf_py_template'))
templates['conf.py'] = infile.read()
infile.close()
# This dict contains string templates corresponding to skeleton python
# source files for each of the recognized plugin types.
# TODO: These should be updated to reflect best practices because most
# plugin developers will start with these when they create new plugins.
class_templates = {}
for item in ['openmdao.component', 'openmdao.driver', 'openmdao.variable',
'openmdao.surrogatemodel']:
infile = resource_stream(__name__,
os.path.join('plugin_templates', item))
class_templates[item] = infile.read()
infile.close()
infile = resource_stream(__name__,
os.path.join('plugin_templates', 'test_template'))
test_template = infile.read()
infile.close()
return templates, class_templates, test_template
def _get_srcdocs(destdir, name, srcdir='src'):
""" Return RST for source docs. """
startdir = os.getcwd()
srcdir = os.path.join(destdir, srcdir)
if os.path.exists(srcdir):
os.chdir(srcdir)
try:
srcmods = _get_src_modules('.',
dirpred=lambda d: not d.startswith('_') and d not in ['docs'])
finally:
os.chdir(startdir)
else:
srcmods = ["%s.%s" % (name, name)]
contents = [
"""
.. _%s_src_label:
====================
Source Documentation
====================
""" % name
]
for mod in sorted(srcmods):
pkgfile = '%s.py' % mod
pkg, dot, name = mod.rpartition('.')
pyfile = '%s.py' % name
underline = '-' * len(pyfile)
contents.append("""
.. index:: %s
.. _%s:
%s
%s
.. automodule:: %s
:members:
:undoc-members:
:show-inheritance:
""" % (pyfile, pkgfile, pyfile, underline, mod))
return ''.join(contents)
def _get_pkgdocs(cfg):
"""Return a string in reST format that contains the metadata
for the package.
cfg: ConfigParser
ConfigParser object used to read the setup.cfg file.
"""
lines = ['\n',
'================\n',
'Package Metadata\n',
'================\n',
'\n']
metadata = {}
if cfg.has_section('metadata'):
metadata.update(dict([item for item in cfg.items('metadata')]))
if cfg.has_section('openmdao'):
metadata.update(dict([item for item in cfg.items('openmdao')]))
tuplist = list(metadata.items())
tuplist.sort()
for key, value in tuplist:
if value.strip():
if '\n' in value:
lines.append("- **%s**:: \n\n" % key)
for v in [vv.strip() for vv in value.split('\n')]:
if v:
lines.append(" %s\n" % v)
lines.append('\n')
elif value != 'UNKNOWN':
lines.append("- **%s:** %s\n\n" % (key, value))
return ''.join(lines)
def _get_setup_options(distdir, metadata, srcdir='src'):
""" Return dictionary of setup options. """
# a set of names of variables that are supposed to be lists
lists = set([
'keywords',
'install_requires',
'packages',
'classifiers',
])
# mapping of new metadata names to old ones
mapping = {
'name': 'name',
'version': 'version',
'keywords': 'keywords',
'summary': 'description',
'description': 'long_description',
'home-page': 'url',
'download-url': 'download_url',
'author': 'author',
'author-email': 'author_email',
'maintainer': 'maintainer',
'maintainer-email': 'maintainer_email',
'license': 'license',
'classifier': 'classifiers',
'requires-dist': 'install_requires',
'entry_points': 'entry_points',
# 'py_modules': 'py_modules',
'packages': 'packages',
}
# populate the package data with sphinx docs
# we have to list all of the files because setuptools doesn't
# handle nested directories very well
pkgdir = os.path.abspath(os.path.join(distdir, srcdir, metadata['name']))
plen = len(pkgdir) + 1
sphinxdir = os.path.join(pkgdir, 'sphinx_build', 'html')
testdir = os.path.join(pkgdir, 'test')
pkglist = list(find_files(sphinxdir))
pkglist.extend(list(find_files(testdir, exclude="*.py[co]")))
pkglist = [p[plen:] for p in pkglist]
setup_options = {
# 'packages': [metadata['name']],
'package_data': {
metadata['name']: pkglist # [
# 'sphinx_build/html/*.*',
# 'sphinx_build/html/_modules/*',
# 'sphinx_build/html/_sources/*',
# 'sphinx_build/html/_static/*',
# ]
},
'package_dir': {'': srcdir},
'zip_safe': False,
'include_package_data': True,
}
for key, val in metadata.items():
if key in mapping:
if isinstance(val, basestring):
if mapping[key] in lists:
val = [p.strip() for p in val.split('\n') if p.strip()]
else:
val = val.strip()
setup_options[mapping[key]] = val
return setup_options
def _pretty(obj):
""" Return pretty-printed `obj`. """
sio = StringIO.StringIO()
pprint.pprint(obj, sio)
return sio.getvalue()
def _get_py_files(distdir, pred=None, dirpred=None):
if pred is None:
def pred(fname):
parts = fname.split(os.sep)
if parts[-1] in ['setup.py', '__init__.py'] or 'test' in parts:
return False
return fname.endswith('.py')
return list(find_files(distdir, match=pred, dirmatch=dirpred))
def _get_src_modules(topdir, pred=None, dirpred=None):
topdir = os.path.abspath(os.path.expandvars(os.path.expanduser(topdir)))
pyfiles = _get_py_files(topdir, pred, dirpred)
noexts = [os.path.splitext(f)[0] for f in pyfiles]
rel = [f[len(topdir) + 1:] for f in noexts]
return ['.'.join(f.split(os.sep)) for f in rel]
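# Illustrative mapping performed by _get_src_modules (paths hypothetical):
#   <topdir>/mypkg/core/solver.py  ->  'mypkg.core.solver'
# setup.py, __init__.py and anything under a 'test' directory are skipped by
# the default predicate in _get_py_files.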
def _get_dirs(start):
dirs = []
for root, dirlist, filelist in os.walk(start):
newdlist = []
for d in dirlist:
if d.startswith('.') or d.endswith('.egg-info') or \
d in ['docs', 'build', 'dists', 'sphinx_build']:
continue
newdlist.append(d)
dirlist[:] = newdlist
dirs.extend([os.path.join(root[len(start) + 1:], d) for d in dirlist])
return dirs
def _get_template_options(distdir, cfg, **kwargs):
""" Return dictionary of options for template substitution. """
if cfg.has_section('metadata'):
metadata = dict([item for item in cfg.items('metadata')])
else:
metadata = {}
if cfg.has_section('openmdao'):
openmdao_metadata = dict([item for item in cfg.items('openmdao')])
else:
openmdao_metadata = {}
if 'static_path' not in openmdao_metadata:
openmdao_metadata['static_path'] = ''
if 'packages' in kwargs:
metadata['packages'] = kwargs['packages']
else:
metadata['packages'] = [metadata['name']]
setup_options = _get_setup_options(distdir, metadata,
srcdir=kwargs.get('srcdir', 'src'))
template_options = {
'copyright': '',
'summary': '',
'setup_options': _pretty(setup_options),
'add_to_sys_path': _get_dirs(distdir),
}
template_options.update(setup_options)
template_options.update(openmdao_metadata)
template_options.update(kwargs)
name = template_options['name']
version = template_options['version']
template_options.setdefault('release', version)
template_options.setdefault('title_marker',
'=' * (len(name) + len(' Documentation')))
return template_options
def plugin_quickstart(parser, options, args=None):
"""A command-line script (plugin quickstart) points to this. It generates a
directory structure for an openmdao plugin package along with Sphinx docs.
usage: plugin quickstart <dist_name> [-v <version>] [-d <dest_dir>] [-g <plugin_group>] [-c class_name]
"""
if args:
print_sub_help(parser, 'quickstart')
return -1
name = options.dist_name
if options.classname:
classname = options.classname
else:
classname = "%s%s" % ((name.upper())[0], name[1:])
version = options.version
options.dest = os.path.abspath(os.path.expandvars(os.path.expanduser(options.dest)))
if not options.group.startswith('openmdao.'):
options.group = 'openmdao.' + options.group
templates, class_templates, test_template = _load_templates()
startdir = os.getcwd()
try:
os.chdir(options.dest)
if os.path.exists(name):
raise OSError("Can't create directory '%s' because it already"
" exists." % os.path.join(options.dest, name))
cfg = SafeConfigParser(dict_type=OrderedDict)
stream = StringIO.StringIO(templates['setup.cfg'] % {'name': name,
'version': version})
cfg.readfp(stream, 'setup.cfg')
cfgcontents = StringIO.StringIO()
cfg.write(cfgcontents)
template_options = \
_get_template_options(os.path.join(options.dest, name),
cfg, classname=classname)
template_options['srcmod'] = name
dirstruct = {
name: {
'setup.py': templates['setup.py'] % template_options,
'setup.cfg': cfgcontents.getvalue(),
'MANIFEST.in': templates['MANIFEST.in'] % template_options,
'README.txt': templates['README.txt'] % template_options,
'src': {
name: {
'__init__.py': '', # 'from %s import %s\n' % (name,classname),
'%s.py' % name: class_templates[options.group] % template_options,
'test': {
'test_%s.py' % name: test_template % template_options,
'__init__.py': """ """
},
},
},
'docs': {
'conf.py': templates['conf.py'] % template_options,
'index.rst': templates['index.rst'] % template_options,
'srcdocs.rst': _get_srcdocs(options.dest, name),
'pkgdocs.rst': _get_pkgdocs(cfg),
'usage.rst': templates['usage.rst'] % template_options,
'_static': {},
},
},
}
build_directory(dirstruct)
finally:
os.chdir(startdir)
return 0
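# Hedged command-line sketch of the quickstart flow above (names are
# illustrative):
#   plugin quickstart myplugin -v 0.2 -g component -d ~/plugins
# creates ~/plugins/myplugin/ containing setup.py, setup.cfg, MANIFEST.in,
# README.txt, src/myplugin/ (with a test/ package) and docs/.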
def _verify_dist_dir(dpath):
"""Try to make sure that the directory we've been pointed to actually
contains a distribution.
"""
if not os.path.isdir(dpath):
raise IOError("directory '%s' does not exist" % dpath)
expected = ['docs', 'setup.py', 'setup.cfg', 'MANIFEST.in',
os.path.join('docs', 'conf.py'),
os.path.join('docs', 'index.rst'),
os.path.join('docs', 'srcdocs.rst')]
for fname in expected:
if not os.path.exists(os.path.join(dpath, fname)):
raise IOError("directory '%s' does not contain '%s'" %
(dpath, fname))
_EXCL_SET = set(['test', 'docs', 'sphinx_build', '_downloads'])
def _exclude_funct(path):
return len(_EXCL_SET.intersection(path.split(os.sep))) > 0
#
# FIXME: this still needs some work, but for testing purposes it's ok for now
#
def find_all_plugins(searchdir):
"""Return a dict containing lists of each plugin type found, keyed by
plugin group name, e.g., openmdao.component, openmdao.variable, etc.
"""
dct = {}
psta = PythonSourceTreeAnalyser(searchdir, exclude=_exclude_funct)
for key, lst in plugin_groups.items():
epset = set(psta.find_inheritors(lst[0]))
if epset:
dct[key] = epset
return dct
def _get_entry_points(startdir):
""" Return formatted list of entry points. """
plugins = find_all_plugins(startdir)
entrypoints = StringIO.StringIO()
for key, val in plugins.items():
epts = []
for v in val:
if v.startswith('openmdao.'):
continue
mod, cname = v.rsplit('.', 1)
epts.append('%s.%s=%s:%s' % (mod, cname, mod, cname))
if epts:
entrypoints.write("\n[%s]\n" % key)
for ept in epts:
entrypoints.write("%s\n" % ept)
return entrypoints.getvalue()
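# Illustrative shape of the string returned above (names hypothetical):
#   [openmdao.component]
#   mypkg.mycomp.MyComp=mypkg.mycomp:MyComp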
def plugin_makedist(parser, options, args=None, capture=None, srcdir='src'):
"""A command-line script (plugin makedist) points to this. It creates a
source distribution containing Sphinx documentation for the specified
distribution directory. If no directory is specified, the current directory
is assumed.
usage: plugin makedist [dist_dir_path]
"""
if args:
print_sub_help(parser, 'makedist')
return -1
dist_dir = os.path.abspath(os.path.expandvars(os.path.expanduser(options.dist_dir_path)))
_verify_dist_dir(dist_dir)
startdir = os.getcwd()
os.chdir(dist_dir)
templates, class_templates, test_template = _load_templates()
try:
plugin_build_docs(parser, options)
cfg = SafeConfigParser(dict_type=OrderedDict)
cfg.readfp(open('setup.cfg', 'r'), 'setup.cfg')
print "collecting entry point information..."
cfg.set('metadata', 'entry_points', _get_entry_points(srcdir))
template_options = _get_template_options(options.dist_dir_path, cfg,
packages=find_packages(srcdir))
dirstruct = {
'setup.py': templates['setup.py'] % template_options,
}
name = cfg.get('metadata', 'name')
version = cfg.get('metadata', 'version')
if sys.platform == 'win32': # pragma no cover
disttar = "%s-%s.zip" % (name, version)
else:
disttar = "%s-%s.tar.gz" % (name, version)
disttarpath = os.path.join(startdir, disttar)
if os.path.exists(disttarpath):
print "Removing existing distribution %s" % disttar
os.remove(disttarpath)
build_directory(dirstruct, force=True)
cmdargs = [sys.executable, 'setup.py', 'sdist', '-d', startdir]
if capture:
stdout = open(capture, 'w')
stderr = STDOUT
else: # pragma no cover
stdout = None
stderr = None
try:
retcode = call(cmdargs, stdout=stdout, stderr=stderr)
finally:
if stdout is not None:
stdout.close()
if retcode:
cmd = ' '.join(cmdargs)
sys.stderr.write("\nERROR: command '%s' returned error code: %s\n"
% (cmd, retcode))
return retcode
finally:
os.chdir(startdir)
if os.path.exists(disttar):
print "Created distribution %s" % disttar
return 0
else:
sys.stderr.write("\nERROR: failed to make distribution %s" % disttar)
return -1
# This brings up a browser window which can be a problem during testing.
def plugin_docs(parser, options, args=None): # pragma no cover
"""A command-line script (plugin docs) points to this. It brings up
the Sphinx documentation for the named plugin in a browser.
"""
if args:
print_sub_help(parser, 'docs')
return -1
if options.plugin_dist_name is None:
view_docs(options.browser)
else:
url = find_docs_url(options.plugin_dist_name)
wb = webbrowser.get(options.browser)
wb.open(url)
def find_docs_url(plugin_name=None, build_if_needed=True):
"""Returns a url for the Sphinx docs for the named plugin.
The plugin must be importable in the current environment.
plugin_name: str
Name of the plugin distribution, module, or class.
"""
parts = plugin_name.split('.')
if len(parts) == 1:
# assume it's a class name and try to find unambiguous module
modname = None
# loop over available types to find a class name that matches
for name, version in get_available_types():
mname, cname = name.rsplit('.', 1)
if cname == plugin_name:
if modname and modname != mname:
raise RuntimeError("Can't determine module for class '%s'"
" unambiguously. found in %s"
% (cname, [mname, modname]))
modname = mname
parts = modname.split('.')
if modname is None:
# didn't find a class, so assume plugin_name is a dist name
parts = [plugin_name, plugin_name]
for i in range(len(parts) - 1):
mname = '.'.join(parts[:len(parts) - i])
try:
__import__(mname)
mod = sys.modules[mname]
modname = mname
modfile = os.path.abspath(mod.__file__)
break
except ImportError:
# we may be able to locate the docs even if the import fails
modfile = find_module(mname)
modname = mname
if modfile:
break
else:
# Possibly something in contrib that's a directory.
try:
__import__(plugin_name)
mod = sys.modules[plugin_name]
modname = plugin_name
modfile = os.path.abspath(mod.__file__)
except ImportError:
raise RuntimeError("Can't locate package/module '%s'" % plugin_name)
url = 'file://'
if modname.startswith('openmdao.'): # lookup in builtin docs
import openmdao.main
fparts = mod.__file__.split(os.sep)
pkg = '.'.join(modname.split('.')[:2])
anchorpath = '/'.join(['srcdocs', 'packages',
'%s.html#module-%s' % (pkg, modname)])
if any([p.endswith('.egg') and p.startswith('openmdao.')
for p in fparts]):
# this is a release version, so use docs packaged with openmdao.main
htmldir = os.path.join(os.path.dirname(openmdao.main.__file__), "docs")
else: # it's a developer version, so use locally built docs
htmldir = os.path.join(get_ancestor_dir(sys.executable, 3), 'docs',
'_build', 'html')
if not os.path.isfile(os.path.join(htmldir, 'index.html')) and build_if_needed:
# make sure the local docs are built
print "local docs not found.\nbuilding them now...\n"
check_call(['openmdao', 'build_docs'])
url += os.path.join(htmldir, anchorpath)
else:
url += os.path.join(os.path.dirname(modfile),
'sphinx_build', 'html', 'index.html')
url = url.replace('\\', '/')
return url
def plugin_install(parser, options, args=None, capture=None):
"""A command-line script (plugin install) points to this. It installs
the specified plugin distribution into the current environment.
"""
if args:
print_sub_help(parser, 'install')
return -1
# Interact with github (but not when testing).
if options.github or options.all: # pragma no cover
plugin_url = 'https://api.github.com/orgs/OpenMDAO-Plugins/repos?type=public'
github_plugins = []
if options.all:
# go get names of all the github plugins
plugin_page = urllib2.urlopen(plugin_url)
for line in plugin_page.fp:
text = json.loads(line)
for item in sorted(text):
github_plugins.append(item['name'])
else:
# just use the name of the specific plugin requested
github_plugins.append(options.dist_name)
for plugin in github_plugins:
try:
print "Installing plugin:", plugin
_github_install(plugin, options.findlinks)
except Exception:
traceback.print_exc()
else: # Install plugin from local file or directory
develop = False
if not options.dist_name:
print "installing distribution from current directory as a 'develop' egg"
develop = True
if develop:
cmdargs = [sys.executable, 'setup.py', 'develop', '-N']
else:
cmdargs = ['easy_install', '-f', options.findlinks, options.dist_name]
cmd = ' '.join(cmdargs)
if capture:
stdout = open(capture, 'w')
stderr = STDOUT
else: # pragma no cover
stdout = None
stderr = None
try:
retcode = call(cmdargs, stdout=stdout, stderr=stderr)
finally:
if stdout is not None:
stdout.close()
if retcode:
sys.stderr.write("\nERROR: command '%s' returned error code: %s\n"
% (cmd, retcode))
return -1
return 0
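# Hedged command-line sketch (flags are defined in _get_plugin_parser below):
#   plugin install                    # current dir installed as a 'develop' egg
#   plugin install myplugin           # easy_install a named distribution
#   plugin install myplugin --github  # fetch from the OpenMDAO-Plugins org
#   plugin install --all              # install every OpenMDAO-Plugins repo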
def _github_install(dist_name, findLinks):
# Get plugin from github.
# FIXME: this should support all valid version syntax (>=, <=, etc.)
pieces = dist_name.split('==')
name = pieces[0]
# User specified version using easy_install style ("plugin==version")
if len(pieces) > 1:
version = pieces[1]
# Get most recent version from our tag list
else:
url = 'https://api.github.com/repos/OpenMDAO-Plugins/%s/tags' % name
try:
resp = urllib2.urlopen(url)
except urllib2.HTTPError:
print "\nERROR: plugin named '%s' not found in OpenMDAO-Plugins" % name
return -1
for line in resp.fp:
text = json.loads(line)
tags = []
for item in text:
tags.append(item['name'])
try:
tags.sort(key=lambda s: map(int, s.split('.')))
except ValueError:
print "\nERROR: the releases for the plugin named '%s' have" \
" not been tagged correctly for installation." % name
print "You may want to contact the repository owner"
return -1
if not tags:
print "\nERROR: plugin named '%s' has no tagged releases." % name
print "You may want to contact the repository owner to create a tag"
return -1
version = tags[-1]
url = 'https://nodeload.github.com/OpenMDAO-Plugins/%s/tarball/%s' \
% (name, version)
print url
build_docs_and_install(name, version, findLinks)
def _bld_sdist_and_install(deps=True):
check_call([sys.executable, 'setup.py', 'sdist', '-d', '.'])
if sys.platform.startswith('win'):
tars = fnmatch.filter(os.listdir('.'), "*.zip")
else:
tars = fnmatch.filter(os.listdir('.'), "*.tar.gz")
if len(tars) != 1:
raise RuntimeError("should have found a single archive file,"
" but found %s instead" % tars)
if deps:
opts = '-NZ'
else:
opts = '-Z'
check_call(['easy_install', opts, tars[0]])
return tars[0]
# This requires Internet connectivity to github.
def build_docs_and_install(name, version, findlinks): # pragma no cover
tdir = tempfile.mkdtemp()
startdir = os.getcwd()
os.chdir(tdir)
try:
tarpath = download_github_tar('OpenMDAO-Plugins', name, version)
# extract the repo tar file
tar = tarfile.open(tarpath)
tar.extractall()
tar.close()
files = os.listdir('.')
files.remove(os.path.basename(tarpath))
if len(files) != 1:
raise RuntimeError("after untarring, found multiple directories: %s"
% files)
os.chdir(files[0]) # should be in distrib directory now
cfg = SafeConfigParser(dict_type=OrderedDict)
cfg.readfp(open('setup.cfg', 'r'), 'setup.cfg')
if cfg.has_option('metadata', 'requires-dist'):
reqs = cfg.get('metadata', 'requires-dist').strip()
reqs = reqs.replace(',', ' ')
reqs = [n.strip() for n in reqs.split()]
else:
# couldn't find requires-dist in setup.cfg, so
# create an sdist so we can query metadata for distrib dependencies
tarname = _bld_sdist_and_install(deps=False)
# now find any dependencies
metadict = get_metadata(tarname)
reqs = metadict.get('requires', [])
# install dependencies (some may be needed by sphinx)
ws = WorkingSet()
for r in reqs:
print "Installing dependency '%s'" % r
req = Requirement.parse(r)
dist = ws.find(req)
if dist is None:
try:
check_call(['easy_install', '-Z', '-f', findlinks, r])
except Exception:
traceback.print_exc()
# build sphinx docs
check_call(['plugin', 'build_docs', files[0]])
# make a new sdist with docs in it and install it
tarname = _bld_sdist_and_install()
finally:
os.chdir(startdir)
shutil.rmtree(tdir, ignore_errors=True)
def _plugin_build_docs(destdir, cfg, src='src'):
"""Builds the Sphinx docs for the plugin distribution, assuming it has
a structure like the one created by plugin quickstart.
"""
name = cfg.get('metadata', 'name')
version = cfg.get('metadata', 'version')
docdir = os.path.join(destdir, 'docs')
srcdir = os.path.abspath(os.path.join(destdir, src))
sphinx.main(argv=['', '-E', '-a', '-b', 'html',
'-Dversion=%s' % version,
'-Drelease=%s' % version,
'-d', os.path.join(srcdir, name, 'sphinx_build', 'doctrees'),
docdir,
os.path.join(srcdir, name, 'sphinx_build', 'html')])
def plugin_build_docs(parser, options, args=None):
"""A command-line script (plugin build_docs) points to this. It builds the
Sphinx documentation for the specified distribution directory.
If no directory is specified, the current directory is assumed.
usage: plugin build_docs [dist_dir_path]
"""
if args is not None and len(args) > 1:
print_sub_help(parser, 'build_docs')
return -1
if args:
dist_dir = args[0]
else:
dist_dir = '.'
dist_dir = os.path.abspath(os.path.expandvars(os.path.expanduser(dist_dir)))
_verify_dist_dir(dist_dir)
# pfiles = fnmatch.filter(os.listdir(options.srcdir), '*.py')
# if not pfiles:
# options.srcdir = dist_dir
cfgfile = os.path.join(dist_dir, 'setup.cfg')
cfg = SafeConfigParser(dict_type=OrderedDict)
cfg.readfp(open(cfgfile, 'r'), cfgfile)
cfg.set('metadata', 'entry_points',
_get_entry_points(os.path.join(dist_dir, options.srcdir)))
templates, class_templates, test_template = _load_templates()
template_options = _get_template_options(dist_dir, cfg, srcdir=options.srcdir)
dirstruct = {
'docs': {
'conf.py': templates['conf.py'] % template_options,
'pkgdocs.rst': _get_pkgdocs(cfg),
'srcdocs.rst': _get_srcdocs(dist_dir,
template_options['name'],
srcdir=options.srcdir),
},
}
build_directory(dirstruct, force=True, topdir=dist_dir)
_plugin_build_docs(dist_dir, cfg, src=options.srcdir)
return 0
def plugin_list(parser, options, args=None):
""" List GitHub/external/built-in plugins. """
if args:
print_sub_help(parser, 'list')
return -1
# Requires Internet to access github.
if options.github: # pragma no cover
_list_github_plugins()
return 0
groups = []
for group in options.groups:
if not group.startswith('openmdao.'):
group = 'openmdao.' + group
groups.append(group)
show_all = (options.external == options.builtin)
if show_all:
title_type = ''
elif options.external:
title_type = 'external'
else:
title_type = 'built-in'
title_groups = ','.join([g.split('.')[1] for g in groups])
parts = title_groups.rsplit(',', 1)
if len(parts) > 1:
title_groups = ' and '.join(parts)
if not groups:
groups = None
all_types = get_available_types(groups)
plugins = set()
for type in all_types:
if show_all:
plugins.add((type[0], type[1]['version']))
else:
name = type[0].split('.')[0]
if name == 'openmdao':
if options.builtin:
plugins.add((type[0], type[1]['version']))
else:
if options.external:
plugins.add((type[0], type[1]['version']))
title = "Installed %s %s plugins" % (title_type, title_groups)
title = title.replace('  ', ' ')
under = '-' * len(title)
print ""
print title
print under
print ""
for plugin in sorted(plugins):
print plugin[0], plugin[1]
print "\n"
return 0
def print_sub_help(parser, subname):
"""Prints a usage message for the given subparser name."""
for obj in parser._subparsers._actions:
if obj.dest != 'help':
obj.choices[subname].print_help()
return
raise NameError("unknown subparser name '%s'" % subname)
# Requires Internet to access github.
def _list_github_plugins(): # pragma no cover
url = 'https://api.github.com/orgs/OpenMDAO-Plugins/repos?type=public'
print "\nAvailable plugin distributions"
print "==============================\n"
resp = urllib2.urlopen(url)
for line in resp.fp:
text = json.loads(line)
for item in sorted(text):
print '%20s -- %s' % (item['name'], item['description'])
print '\n'
def _get_plugin_parser():
"""Sets up the plugin arg parser and all of its subcommand parsers."""
top_parser = ArgumentParser()
subparsers = top_parser.add_subparsers(title='commands')
parser = subparsers.add_parser('list', help="List installed plugins")
parser.usage = "plugin list [options]"
parser.add_argument("--github",
help='List plugins in the official Openmdao-Plugins'
' repository on github',
action='store_true')
parser.add_argument("-b", "--builtin",
help='List all installed plugins that are part of the'
' OpenMDAO distribution',
action='store_true')
parser.add_argument("-e", "--external",
help='List all installed plugins that are not part of'
' the OpenMDAO distribution',
action='store_true')
parser.add_argument("-g", "--group", action="append", type=str,
dest='groups', default=[],
choices=[p.split('.', 1)[1]
for p in plugin_groups.keys()],
help="specify plugin group")
parser.set_defaults(func=plugin_list)
parser = subparsers.add_parser('install',
help="install an OpenMDAO plugin into the"
" current environment")
parser.usage = "plugin install [plugin_distribution] [options]"
parser.add_argument('dist_name',
help='name of plugin distribution'
' (defaults to distrib found in current dir)',
nargs='?')
parser.add_argument("--github",
help='Find plugin in the official OpenMDAO-Plugins'
' repository on github',
action='store_true')
parser.add_argument("-f", "--find-links", action="store", type=str,
dest='findlinks', default='http://openmdao.org/dists',
help="URL of find-links server")
parser.add_argument("--all", help='Install all plugins in the official'
' OpenMDAO-Plugins repository on github',
action='store_true')
parser.set_defaults(func=plugin_install)
parser = subparsers.add_parser('build_docs',
help="build sphinx doc files for a plugin")
parser.usage = "plugin build_docs <dist_dir_path>"
parser.add_argument('dist_dir_path', default='.',
help='path to distribution source directory')
parser.add_argument("-s", "--srcdir", action="store", type=str,
dest='srcdir', default='src',
help="top directory in the distribution where python"
" source is located")
parser.set_defaults(func=plugin_build_docs)
parser = subparsers.add_parser('docs',
help="display docs for a plugin")
parser.usage = "plugin docs <plugin_dist_name>"
parser.add_argument('plugin_dist_name', help='name of plugin distribution')
parser.add_argument("-b", "--browser", action="store", type=str,
dest='browser', choices=webbrowser._browsers.keys(),
help="browser name")
parser.set_defaults(func=plugin_docs)
parser = subparsers.add_parser('quickstart', help="generate some skeleton"
" files for a plugin")
parser.usage = "plugin quickstart <dist_name> [options]"
parser.add_argument('dist_name', help='name of distribution')
parser.add_argument("-v", "--version", action="store", type=str,
dest='version', default='0.1',
help="version id of the plugin (defaults to 0.1)")
parser.add_argument("-c", "--class", action="store", type=str,
dest='classname', help="plugin class name")
parser.add_argument("-d", "--dest", action="store", type=str, dest='dest',
default='.',
help="directory where new plugin directory will be"
" created (defaults to current dir)")
parser.add_argument("-g", "--group", action="store", type=str, dest='group',
default='openmdao.component',
help="specify plugin group %s (defaults to"
" 'openmdao.component')" % plugin_groups.keys())
parser.set_defaults(func=plugin_quickstart)
parser = subparsers.add_parser('makedist', help="create a source"
" distribution for a plugin")
parser.usage = "plugin makedist [dist_dir_path]"
parser.add_argument('dist_dir_path', nargs='?',
default='.',
help='directory where plugin distribution is found'
' (defaults to current dir)')
parser.add_argument("-s", "--srcdir", action="store", type=str,
dest='srcdir', default='src',
help="top directory in the distribution where python"
" source is located")
parser.set_defaults(func=plugin_makedist)
return top_parser
# Calls sys.exit().
def plugin(): # pragma no cover
parser = _get_plugin_parser()
options, args = parser.parse_known_args()
sys.exit(options.func(parser, options, args))
|
|
import numpy as np
from STObject import STObject
from hagelslag.util.munkres import Munkres
class ObjectMatcher(object):
"""
ObjectMatcher calculates distances between two sets of objects and determines the optimal object assignments
based on the Hungarian object matching algorithm. ObjectMatcher supports the use of the weighted average of
multiple cost functions to determine the distance between objects. Upper limits to each distance component are used
to exclude the matching of objects that are too far apart.
"""
def __init__(self, cost_function_components, weights, max_values):
self.cost_function_components = cost_function_components
self.weights = weights
self.max_values = max_values
if self.weights.sum() != 1:
self.weights /= float(self.weights.sum())
return
def match_objects(self, set_a, set_b, time_a, time_b):
"""
Match two sets of objects at particular times.
:param set_a: list of STObjects
:param set_b: list of STObjects
:param time_a: time at which set_a is being evaluated for matching
:param time_b: time at which set_b is being evaluated for matching
:return: list of tuples containing (set_a index, set_b index) for each match
"""
costs = self.cost_matrix(set_a, set_b, time_a, time_b) * 100
min_row_costs = costs.min(axis=1)
min_col_costs = costs.min(axis=0)
good_rows = np.where(min_row_costs < 100)[0]
good_cols = np.where(min_col_costs < 100)[0]
assignments = []
if len(good_rows) > 0 and len(good_cols) > 0:
munk = Munkres()
initial_assignments = munk.compute(costs[np.meshgrid(good_rows, good_cols, indexing='ij')].tolist())
initial_assignments = [(good_rows[x[0]], good_cols[x[1]]) for x in initial_assignments]
for a in initial_assignments:
if costs[a[0], a[1]] < 100:
assignments.append(a)
return assignments
def cost_matrix(self, set_a, set_b, time_a, time_b):
costs = np.zeros((len(set_a), len(set_b)))
for a, item_a in enumerate(set_a):
for b, item_b in enumerate(set_b):
costs[a, b] = self.total_cost_function(item_a, item_b, time_a, time_b)
return costs
def total_cost_function(self, item_a, item_b, time_a, time_b):
distances = np.zeros(len(self.weights))
for c, component in enumerate(self.cost_function_components):
distances[c] = component(item_a, time_a, item_b, time_b, self.max_values[c])
total_distance = np.sum(self.weights * distances)
return total_distance
class TrackMatcher(object):
"""
    TrackMatcher calculates distances between two sets of tracks and determines the optimal track assignments
    using the Hungarian matching algorithm, analogous to ObjectMatcher but with track-level cost functions.
"""
def __init__(self, cost_function_components, weights, max_values):
self.cost_function_components = cost_function_components
self.weights = weights if weights.sum() == 1 else weights / weights.sum()
self.max_values = max_values
def match_tracks(self, set_a, set_b):
costs = self.track_cost_matrix(set_a, set_b) * 100
min_row_costs = costs.min(axis=1)
min_col_costs = costs.min(axis=0)
good_rows = np.where(min_row_costs < 100)[0]
good_cols = np.where(min_col_costs < 100)[0]
assignments = []
if len(good_rows) > 0 and len(good_cols) > 0:
munk = Munkres()
initial_assignments = munk.compute(costs[np.meshgrid(good_rows, good_cols, indexing='ij')].tolist())
initial_assignments = [(good_rows[x[0]], good_cols[x[1]]) for x in initial_assignments]
for a in initial_assignments:
if costs[a[0], a[1]] < 100:
assignments.append(a)
return assignments
def track_cost_matrix(self, set_a, set_b):
costs = np.zeros((len(set_a), len(set_b)))
for a, item_a in enumerate(set_a):
for b, item_b in enumerate(set_b):
costs[a, b] = self.track_cost_function(item_a, item_b)
return costs
def track_cost_function(self, item_a, item_b):
distances = np.zeros(len(self.weights))
for c, component in enumerate(self.cost_function_components):
distances[c] = component(item_a, item_b, self.max_values[c])
total_distance = np.sum(self.weights * distances)
return total_distance
def centroid_distance(item_a, time_a, item_b, time_b, max_value):
"""
    Euclidean distance between the centroids of item_a and item_b, scaled by max_value.
    :param item_a: STObject from the first set
    :param time_a: time at which item_a is evaluated
    :param item_b: STObject from the second set
    :param time_b: time at which item_b is evaluated
    :param max_value: distance at which the cost saturates; larger distances are capped
    :return: scaled distance in the range [0, 1]
"""
ax, ay = item_a.center_of_mass(time_a)
bx, by = item_b.center_of_mass(time_b)
return np.minimum(np.sqrt((ax - bx) ** 2 + (ay - by) ** 2), max_value) / float(max_value)
def shifted_centroid_distance(item_a, time_a, item_b, time_b, max_value):
    """
    Centroid distance after the object from the later time is shifted backward by its motion vector (u, v).
    """
ax, ay = item_a.center_of_mass(time_a)
bx, by = item_b.center_of_mass(time_b)
if time_a < time_b:
bx = bx - item_b.u
by = by - item_b.v
else:
ax = ax - item_a.u
ay = ay - item_a.v
return np.minimum(np.sqrt((ax - bx) ** 2 + (ay - by) ** 2), max_value) / float(max_value)
def closest_distance(item_a, time_a, item_b, time_b, max_value):
"""
Euclidean distance between the pixels in item_a and item_b closest to each other.
"""
return np.minimum(item_a.closest_distance(time_a, item_b, time_b), max_value) / float(max_value)
def percentile_distance(item_a, time_a, item_b, time_b, max_value, percentile=2):
return np.minimum(item_a.percentile_distance(time_a, item_b, time_b, percentile), max_value) / float(max_value)
def ellipse_distance(item_a, time_a, item_b, time_b, max_value):
"""
Calculate differences in the properties of ellipses fitted to each object.
"""
ts = np.array([0, np.pi])
ell_a = item_a.get_ellipse_model(time_a)
ell_b = item_b.get_ellipse_model(time_b)
ends_a = ell_a.predict_xy(ts)
ends_b = ell_b.predict_xy(ts)
distances = np.sqrt((ends_a[:, 0:1] - ends_b[:, 0:1].T) ** 2 + (ends_a[:, 1:] - ends_b[:, 1:].T) ** 2)
return np.minimum(distances[0, 1], max_value) / float(max_value)
def nonoverlap(item_a, time_a, item_b, time_b, max_value):
"""
    Fraction of pixels in each object that does not overlap with the other object, capped at max_value.
"""
return np.minimum(1 - item_a.count_overlap(time_a, item_b, time_b), max_value)
def max_intensity(item_a, time_a, item_b, time_b, max_value):
"""
    Absolute difference between the maximum intensities of the two objects, scaled by max_value.
"""
intensity_a = item_a.max_intensity(time_a)
intensity_b = item_b.max_intensity(time_b)
diff = np.sqrt((intensity_a - intensity_b) ** 2)
return np.minimum(diff, max_value) / float(max_value)
def area_difference(item_a, time_a, item_b, time_b, max_value):
"""
    Absolute difference in object areas, scaled by max_value.
"""
size_a = item_a.size(time_a)
size_b = item_b.size(time_b)
diff = np.sqrt((size_a - size_b) ** 2)
return np.minimum(diff, max_value) / float(max_value)
def mean_minimum_centroid_distance(item_a, item_b, max_value):
"""
RMS difference in the minimum distances from the centroids of one track to the centroids of another track
"""
centroids_a = np.array([item_a.center_of_mass(t) for t in item_a.times])
centroids_b = np.array([item_b.center_of_mass(t) for t in item_b.times])
distance_matrix = (centroids_a[:, 0:1] - centroids_b.T[0:1]) ** 2 + (centroids_a[:, 1:] - centroids_b.T[1:]) ** 2
mean_min_distances = np.sqrt(distance_matrix.min(axis=0).mean() + distance_matrix.min(axis=1).mean())
return mean_min_distances / float(max_value)
def mean_min_time_distance(item_a, item_b, max_value):
times_a = item_a.times.reshape((item_a.times.size, 1))
times_b = item_b.times.reshape((1, item_b.times.size))
distance_matrix = (times_a - times_b) ** 2
mean_min_distances = np.sqrt(distance_matrix.min(axis=0).mean() + distance_matrix.min(axis=1).mean())
return mean_min_distances / float(max_value)
def duration_distance(item_a, item_b, max_value):
    """
    Absolute difference in the number of time steps in each track, scaled by max_value.
    """
duration_a = item_a.times.size
duration_b = item_b.times.size
return np.abs(duration_a - duration_b) / float(max_value)
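# Hedged usage sketch (editor's addition): wiring ObjectMatcher to two of the
# distance functions defined above. The weights, max values, and the STObject
# sets passed in are illustrative only; TrackMatcher is used analogously with
# the track-level cost functions (e.g. mean_minimum_centroid_distance).
def _example_object_matching(set_a, set_b, time_a, time_b):
    matcher = ObjectMatcher(cost_function_components=[centroid_distance, nonoverlap],
                            weights=np.array([0.7, 0.3]),
                            max_values=np.array([30.0, 1.0]))
    # Returns [(index into set_a, index into set_b), ...] for pairs whose
    # weighted, scaled cost falls below the cutoff.
    return matcher.match_objects(set_a, set_b, time_a, time_b)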
|
|
# -*- coding: utf-8 -*-
import copy
import hashlib
from bson.json_util import dumps
from datetime import datetime, timedelta
from eve.tests import TestBase
from eve.utils import parse_request, str_to_date, config, weak_date, \
date_to_str, querydef, document_etag, extract_key_values, \
debug_error_message
class TestUtils(TestBase):
""" collection, document and home_link methods (and resource_uri, which is
used by all of them) are tested in 'tests.methods' since we need an active
flaskapp context
"""
def setUp(self):
super(TestUtils, self).setUp()
self.dt_fmt = config.DATE_FORMAT
self.datestr = 'Tue, 18 Sep 2012 10:12:30 GMT'
self.valid = datetime.strptime(self.datestr, self.dt_fmt)
self.etag = '56eaadbbd9fa287e7270cf13a41083c94f52ab9b'
def test_parse_request_where(self):
self.app.config['DOMAIN'][self.known_resource]['allowed_filters'] = \
['ref']
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).where, None)
with self.app.test_request_context('/?where=hello'):
self.assertEqual(parse_request(self.known_resource).where, 'hello')
def test_parse_request_sort(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).sort, None)
with self.app.test_request_context('/?sort=hello'):
self.assertEqual(parse_request(self.known_resource).sort, 'hello')
def test_parse_request_page(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=2'):
self.assertEqual(parse_request(self.known_resource).page, 2)
with self.app.test_request_context('/?page=-1'):
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=0'):
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=1.1'):
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=string'):
self.assertEqual(parse_request(self.known_resource).page, 1)
def test_parse_request_max_results(self):
default = config.PAGINATION_DEFAULT
limit = config.PAGINATION_LIMIT
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=%d' % (limit + 1)):
self.assertEqual(parse_request(self.known_resource).max_results,
limit)
with self.app.test_request_context('/?max_results=2'):
self.assertEqual(parse_request(self.known_resource).max_results, 2)
with self.app.test_request_context('/?max_results=-1'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=0'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=1.1'):
self.assertEqual(parse_request(self.known_resource).max_results, 1)
with self.app.test_request_context('/?max_results=string'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
def test_parse_request_max_results_disabled_pagination(self):
self.app.config['DOMAIN'][self.known_resource]['pagination'] = False
default = 0
limit = config.PAGINATION_LIMIT
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=%d' % (limit + 1)):
self.assertEqual(parse_request(self.known_resource).max_results,
limit + 1)
with self.app.test_request_context('/?max_results=2'):
self.assertEqual(parse_request(self.known_resource).max_results, 2)
with self.app.test_request_context('/?max_results=-1'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=0'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=1.1'):
self.assertEqual(parse_request(self.known_resource).max_results, 1)
with self.app.test_request_context('/?max_results=string'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
def test_parse_request_if_modified_since(self):
ims = 'If-Modified-Since'
with self.app.test_request_context():
self.assertEqual(parse_request(
self.known_resource).if_modified_since, None)
with self.app.test_request_context(headers=None):
self.assertEqual(
parse_request(self.known_resource).if_modified_since, None)
with self.app.test_request_context(headers={ims: self.datestr}):
self.assertEqual(
parse_request(self.known_resource).if_modified_since,
self.valid + timedelta(seconds=1))
with self.app.test_request_context(headers={ims: 'not-a-date'}):
self.assertRaises(ValueError, parse_request, self.known_resource)
with self.app.test_request_context(
headers={ims:
self.datestr.replace('GMT', 'UTC')}):
self.assertRaises(ValueError, parse_request, self.known_resource)
def test_parse_request_if_none_match(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).if_none_match,
None)
with self.app.test_request_context(headers=None):
self.assertEqual(parse_request(self.known_resource).if_none_match,
None)
with self.app.test_request_context(headers={'If-None-Match':
self.etag}):
self.assertEqual(parse_request(self.known_resource).if_none_match,
self.etag)
def test_parse_request_if_match(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).if_match, None)
with self.app.test_request_context(headers=None):
self.assertEqual(parse_request(self.known_resource).if_match, None)
with self.app.test_request_context(headers={'If-Match': self.etag}):
self.assertEqual(parse_request(self.known_resource).if_match,
self.etag)
def test_weak_date(self):
with self.app.test_request_context():
self.app.config['DATE_FORMAT'] = '%Y-%m-%d'
self.assertEqual(weak_date(self.datestr), self.valid +
timedelta(seconds=1))
def test_str_to_date(self):
self.assertEqual(str_to_date(self.datestr), self.valid)
self.assertRaises(ValueError, str_to_date, 'not-a-date')
self.assertRaises(ValueError, str_to_date,
self.datestr.replace('GMT', 'UTC'))
def test_date_to_str(self):
self.assertEqual(date_to_str(self.valid), self.datestr)
def test_querydef(self):
self.assertEqual(querydef(max_results=10), '?max_results=10')
self.assertEqual(querydef(page=10), '?page=10')
self.assertEqual(querydef(where='wherepart'), '?where=wherepart')
self.assertEqual(querydef(sort='sortpart'), '?sort=sortpart')
self.assertEqual(querydef(where='wherepart', sort='sortpart'),
'?where=wherepart&sort=sortpart')
self.assertEqual(querydef(max_results=10, sort='sortpart'),
'?max_results=10&sort=sortpart')
def test_document_etag(self):
test = {'key1': 'value1', 'another': 'value2'}
challenge = dumps(test, sort_keys=True).encode('utf-8')
self.assertEqual(hashlib.sha1(challenge).hexdigest(),
document_etag(test))
def test_extract_key_values(self):
test = {
'key1': 'value1',
'key2': {
'key1': 'value2',
'nested': {
'key1': 'value3'
}
}
}
self.assertEqual(list(extract_key_values('key1', test)),
['value1', 'value2', 'value3'])
def test_debug_error_message(self):
with self.app.test_request_context():
self.app.config['DEBUG'] = False
self.assertEqual(debug_error_message('An error message'), None)
self.app.config['DEBUG'] = True
self.assertEqual(debug_error_message('An error message'),
'An error message')
class DummyEvent(object):
"""
Even handler that records the call parameters and asserts a check
Usage::
app = Eve()
app.on_my_event = DummyEvent(element_not_deleted)
In the test::
assert app.on_my_event.called[0] == expected_param_0
"""
def __init__(self, check, deepcopy=False):
"""
:param check: method checking the state of something during the event.
        :type check: callable returning bool
:param deepcopy: Do we need to store a copy of the argument calls? In
some events arguments are changed after the event, so keeping a
reference to the original object doesn't allow a test to check what
was passed. The default is False.
:type deepcopy: bool
"""
self.__called = None
self.__check = check
self.__deepcopy = deepcopy
def __call__(self, *args):
assert self.__check()
        # In some methods the arguments are changed after the event
if self.__deepcopy:
args = copy.deepcopy(args)
self.__called = args
@property
def called(self):
"""
The results of the call to the event.
        :rtype: None if the event hasn't been called, otherwise a tuple with
            the positional arguments of the last call.
"""
return self.__called
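# Hedged usage sketch (editor's addition) mirroring the DummyEvent docstring.
# The event payload below is illustrative, not a real Eve hook invocation.
def _example_dummy_event():  # pragma: no cover
    recorder = DummyEvent(lambda: True, deepcopy=True)
    recorder('contacts', [{'ref': 'abc'}])  # simulate the framework firing the hook
    resource, payload = recorder.called     # -> ('contacts', [{'ref': 'abc'}])
    return resource, payload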
|
|
__source__ = 'https://leetcode.com/problems/maximal-rectangle/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/maximal-rectangle.py
# Time: O(n^2)
# Space: O(n)
# DP
#
# Description: Leetcode # 85. Maximal Rectangle
#
# Given a 2D binary matrix filled with 0's and 1's,
# find the largest rectangle containing only 1's and return its area.
#
# For example, given the following matrix:
#
# 1 0 1 0 0
# 1 0 1 1 1
# 1 1 1 1 1
# 1 0 0 1 0
# Return 6.
#
# Companies
# Facebook
# Related Topics
# Array Hash Table Stack Dynamic Programming
# Similar Questions
# Largest Rectangle in Histogram Maximal Square
#
import unittest
class Solution:
# @param matrix, a list of lists of 1 length string
# @return an integer
def maximalRectangle(self, matrix):
if not matrix:
return 0
result = 0
m = len(matrix)
n = len(matrix[0])
L = [0 for _ in xrange(n)]
H = [0 for _ in xrange(n)]
R = [0 for _ in xrange(n)]
for i in xrange(m):
left = 0
for j in xrange(n):
if matrix[i][j] == '1':
L[j] = max(L[j], left)
H[j] += 1
else:
L[j] = 0
H[j] = 0
R[j] = n
left = j + 1
right = n
for j in reversed(xrange(n)):
if matrix[i][j] == '1':
R[j] = min(R[j], right)
result = max(result, H[j] * (R[j] - L[j]))
else:
right = j
#print i, j, L, left, result, right
print L
print H
print R
return result
# http://www.cnblogs.com/zuoyuan/p/3784252.html
# http://jelices.blogspot.com/2014/05/leetcode-python-maximal-rectangle.html diff solution create a temp matrix
class SolutionOther:
# @param matrix, a list of lists of 1 length string
# @return an integer
def maximalRectangle(self, matrix):
if matrix == []: return 0
a = [0 for i in range(len(matrix[0]))]
maxArea = 0
for i in range(len(matrix)):
for j in range(len(matrix[i])):
a[j] = a[j] + 1 if matrix[i][j] == '1' else 0
maxArea = max(maxArea, self.largestRectangleArea(a))
return maxArea
def largestRectangleArea(self, height):
stack = []
i = 0
area = 0
while i < len(height):
if stack == [] or height[i] > height[stack[len(stack) - 1]]:
stack.append(i)
else:
curr = stack.pop()
width = i if stack == [] else i - stack[len(stack) - 1] - 1
area = max(area, width * height[curr])
i -= 1
i += 1
while stack != []:
curr = stack.pop()
width = i if stack == [] else len(height) - stack[len(stack) - 1] - 1
area = max(area, width * height[curr])
return area
#test
class TestMethods(unittest.TestCase):
def test_Local(self):
test = SolutionOther()
matrix1 = ["1010", "1011", "1011", "1111"]
matrix2 = ["1111", "1111", "1111", "1111"]
print test.maximalRectangle(matrix2)
matrix = ["11101",
"11010",
"01110",
"11110",
"11111",
"00000"]
print Solution().maximalRectangle(matrix)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
An O(n^2) solution based on Largest Rectangle in Histogram.
This question is similar to [Largest Rectangle in Histogram]:
maintain an integer array H of row length that records the height of consecutive '1's ending at each column,
and scan row by row, updating H, to find the largest rectangle for each row.
For each row, if matrix[row][i] == '1' then H[i] += 1; otherwise reset H[i] to zero.
Then, following the algorithm of [Largest Rectangle in Histogram], update the maximum area.
# 13ms 69.94%
class Solution {
public int maximalRectangle(char[][] matrix) {
int m = matrix.length;
int n = m == 0 ? 0 : matrix[0].length;
if (m == 0 || n == 0) {
return 0;
}
int[] height = new int[n];
int result = 0;
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
if (matrix[i][j] == '1') {
height[j]++;
} else {
height[j] = 0;
}
}
for (int j = 0; j < n; j++) {
if (j < n - 1 && height[j] <= height[j + 1]) {
continue;
}
int minHeight = height[j];
for (int k = j; k >= 0; k--) {
minHeight = Math.min(minHeight, height[k]);
if (minHeight == 0) break; // become 74 %
result = Math.max(result, (j - k + 1) * minHeight);
}
}
}
return result;
}
}
# 45ms 14%
class Solution {
public int maximalRectangle(char[][] matrix) {
int m = matrix.length;
int n = m == 0 ? 0 : matrix[0].length;
if (m == 0 || n == 0) {
return 0;
}
int result = 0;
Stack<Integer> stack = new Stack<>();
int[] height = new int[n];
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
if (matrix[i][j] == '1') {
height[j]++;
} else {
height[j] = 0;
}
while (!stack.isEmpty() && height[stack.peek()] > height[j]) {
int top = stack.pop();
result = Math.max(result, height[top] * (stack.isEmpty() ? j : j - stack.peek() - 1));
}
stack.push(j);
}
while (!stack.isEmpty()) {
int top = stack.pop();
result = Math.max(result, height[top] * (stack.isEmpty() ? n : n - stack.peek() - 1));
}
}
return result;
}
}
Thought:
The DP solution proceeds row by row, starting from the first row. Let the maximal rectangle area
at row i and column j be computed by [right(i,j) - left(i,j)]*height(i,j).
All the 3 variables left, right, and height can be determined by the information from previous row,
and also information from the current row. So it can be regarded as a DP solution.
The transition equations are:
left(i,j) = max(left(i-1,j), cur_left), cur_left can be determined from the current row
right(i,j) = min(right(i-1,j), cur_right), cur_right can be determined from the current row
height(i,j) = height(i-1,j) + 1, if matrix[i][j]=='1';
height(i,j) = 0, if matrix[i][j]=='0'
If you think this algorithm is not easy to understand, you can try this example:
0 0 0 1 0 0 0
0 0 1 1 1 0 0
0 1 1 1 1 1 0
The vector "left" and "right" from row 0 to row 2 are as follows
row 0:
l: 0 0 0 3 0 0 0
r: 7 7 7 4 7 7 7
row 1:
l: 0 0 2 3 2 0 0
r: 7 7 5 4 5 7 7
row 2:
l: 0 1 2 3 2 1 0
r: 7 6 5 4 5 6 7
The vector "left" is computing the left boundary.
Take (i,j)=(1,3) for example. On current row 1,
the left boundary is at j=2. However, because matrix[1][3] is 1,
you need to consider the left boundary on previous row as well, which is 3. So the real left boundary at (1,3) is 3.
I hope this additional explanation makes things clearer.
# DP
# 11ms 76.83%
class Solution {
public int maximalRectangle(char[][] matrix) {
int m = matrix.length;
int n = m == 0 ? 0 : matrix[0].length;
if (m == 0 || n == 0) {
return 0;
}
int[] height = new int[n];
int[] left = new int[n];
int[] right = new int[n];
for (int j = 0; j < n; j++) right[j] = n;
int result = 0;
for (int i = 0; i < m; i++) {
int cur_left = 0, cur_right = n;
// compute height (can do this from either side)
for (int j = 0; j < n; j++) {
if (matrix[i][j] == '1') height[j]++;
else height[j] = 0;
}
// compute right (from right to left)
for (int j = n-1; j>=0; j--) {
if (matrix[i][j] == '1') right[j] = Math.min(right[j], cur_right);
else {
right[j] = n;
cur_right = j;
}
}
// compute left (from left to right)
for (int j = 0; j < n; j++) {
if (matrix[i][j] == '1') {
left[j] = Math.max(left[j], cur_left);
} else {
left[j] = 0;
cur_left = j + 1;
}
}
// compute the area of rectangle (can do this from either side)
for (int j = 0; j < n; j++) {
result = Math.max(result, (right[j] - left[j]) * height[j]);
}
}
return result;
}
}
# 8ms 88.93%
class Solution {
public int maximalRectangle(char[][] matrix) {
/**
* idea: using [LC84 Largest Rectangle in Histogram]. For each row
* of the matrix, construct the histogram based on the current row
* and the previous histogram (up to the previous row), then compute
* the largest rectangle area using LC84.
*/
int m = matrix.length, n;
if (m == 0 || (n = matrix[0].length) == 0)
return 0;
int i, j, res = 0;
int[] heights = new int[n];
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
if (matrix[i][j] == '0')
heights[j] = 0;
else
heights[j] += 1;
}
res = Math.max(res, largestRectangleArea(heights));
}
return res;
}
public int largestRectangleArea(int[] heights) {
/**
* idea: scan and store if a[i-1]<=a[i] (increasing), then as long
* as a[i]<a[i-1], then we can compute the largest rectangle area
* with base a[j], for j<=i-1, and a[j]>a[i], which is a[j]*(i-j).
* And meanwhile, all these bars (a[j]'s) are already done, and thus
* are throwable (using pop() with a stack).
*
* We can use an array nLeftGeq[] of size n to simulate a stack.
* nLeftGeq[i] = the number of elements to the left of [i] having
* value greater than or equal to a[i] (including a[i] itself). It
* is also the index difference between [i] and the next index on
* the top of the stack.
*/
int n = heights.length;
if (n == 0)
return 0;
int[] nLeftGeq = new int[n]; // the number of elements to the left
// of [i] with value >= heights[i]
nLeftGeq[0] = 1;
// preIdx=the index of stack.peek(), res=max area so far
int preIdx = 0, res = 0;
for (int i = 1; i < n; i++) {
nLeftGeq[i] = 1;
// notice that preIdx = i - 1 = peek()
while (preIdx >= 0 && heights[i] < heights[preIdx]) {
res = Math.max(res, heights[preIdx] * (nLeftGeq[preIdx] + i - preIdx - 1));
nLeftGeq[i] += nLeftGeq[preIdx]; // pop()
preIdx = preIdx - nLeftGeq[preIdx]; // peek() current top
}
if (preIdx >= 0 && heights[i] == heights[preIdx])
nLeftGeq[i] += nLeftGeq[preIdx]; // pop()
// otherwise nothing to do
preIdx = i;
}
// compute the rest largest rectangle areas with (indices of) bases
// on stack
while (preIdx >= 0 && 0 < heights[preIdx]) {
res = Math.max(res, heights[preIdx] * (nLeftGeq[preIdx] + n - preIdx - 1));
preIdx = preIdx - nLeftGeq[preIdx]; // peek() current top
}
return res;
}
}
'''
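# Hedged Python sketch (editor's addition) of the left/right/height DP described
# in the "Thought" section above; it mirrors the 11ms Java solution and is not
# one of the original submissions collected in this file.
def maximal_rectangle_dp(matrix):
    """Largest all-'1' rectangle area via per-row DP, O(m*n) time, O(n) space.

    Example: maximal_rectangle_dp(["10100", "10111", "11111", "10010"]) -> 6
    """
    if not matrix or not matrix[0]:
        return 0
    m, n = len(matrix), len(matrix[0])
    height, left, right = [0] * n, [0] * n, [n] * n
    best = 0
    for i in range(m):
        cur_left, cur_right = 0, n
        for j in range(n):                      # heights of consecutive '1's
            height[j] = height[j] + 1 if matrix[i][j] == '1' else 0
        for j in range(n - 1, -1, -1):          # right boundaries, right to left
            if matrix[i][j] == '1':
                right[j] = min(right[j], cur_right)
            else:
                right[j], cur_right = n, j
        for j in range(n):                      # left boundaries, left to right
            if matrix[i][j] == '1':
                left[j] = max(left[j], cur_left)
            else:
                left[j], cur_left = 0, j + 1
            best = max(best, (right[j] - left[j]) * height[j])
    return best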
|
|
# -*- coding: utf-8 -*-
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bootstrap for cbuildbot.
This script is intended to check out chromite on the branch specified by -b or
--branch (as normally accepted by cbuildbot), and then invoke cbuildbot. Most
arguments are not parsed, only passed along. If a branch is not specified, this
script will use 'master'.
Among other things, this allows us to invoke build configs that exist on a given
branch, but not on TOT.
"""
from __future__ import print_function
import base64
import functools
import os
import sys
import time
from chromite.cbuildbot import repository
from chromite.cbuildbot.stages import sync_stages
from chromite.lib import boto_compat
from chromite.lib import build_summary
from chromite.lib import config_lib
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import cros_sdk_lib
from chromite.lib import metrics
from chromite.lib import osutils
from chromite.lib import timeout_util
from chromite.lib import ts_mon_config
from chromite.scripts import cbuildbot
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# This number should be incremented when we change the layout of the buildroot
# in a non-backwards compatible way. This wipes all buildroots.
BUILDROOT_BUILDROOT_LAYOUT = 2
_DISTFILES_CACHE_EXPIRY_HOURS = 8 * 24
# Metrics reported to Monarch.
METRIC_PREFIX = 'chromeos/chromite/cbuildbot_launch/'
METRIC_ACTIVE = METRIC_PREFIX + 'active'
METRIC_INVOKED = METRIC_PREFIX + 'invoked'
METRIC_COMPLETED = METRIC_PREFIX + 'completed'
METRIC_PREP = METRIC_PREFIX + 'prep_completed'
METRIC_CLEAN = METRIC_PREFIX + 'clean_buildroot_durations'
METRIC_INITIAL = METRIC_PREFIX + 'initial_checkout_durations'
METRIC_CBUILDBOT = METRIC_PREFIX + 'cbuildbot_durations'
METRIC_CBUILDBOT_INSTANCE = METRIC_PREFIX + 'cbuildbot_instance_durations'
METRIC_CLOBBER = METRIC_PREFIX + 'clobber'
METRIC_BRANCH_CLEANUP = METRIC_PREFIX + 'branch_cleanup'
METRIC_DISTFILES_CLEANUP = METRIC_PREFIX + 'distfiles_cleanup'
METRIC_CHROOT_CLEANUP = METRIC_PREFIX + 'chroot_cleanup'
# Builder state
BUILDER_STATE_FILENAME = '.cbuildbot_build_state.json'
def StageDecorator(functor):
"""A Decorator that adds buildbot stage tags around a method.
It uses the method name as the stage name, and assumes failure on a true
return value, or an exception.
"""
@functools.wraps(functor)
def wrapped_functor(*args, **kwargs):
try:
logging.PrintBuildbotStepName(functor.__name__)
result = functor(*args, **kwargs)
except Exception:
logging.PrintBuildbotStepFailure()
raise
if result:
logging.PrintBuildbotStepFailure()
return result
return wrapped_functor
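# Hedged example (editor's addition): a decorated stage emits buildbot step
# markers around the call; raising, or returning a truthy value, marks the step
# as failed. The stage below is illustrative and not part of the launch flow.
@StageDecorator
def _ExampleStage(should_fail=False):  # pragma: no cover
  """Illustrative stage used only to demonstrate StageDecorator."""
  return should_fail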
def field(fields, **kwargs):
"""Helper for inserting more fields into a metrics fields dictionary.
Args:
fields: Dictionary of metrics fields.
kwargs: Each argument is a key/value pair to insert into dict.
Returns:
Copy of original dictionary with kwargs set as fields.
"""
f = fields.copy()
f.update(kwargs)
return f
def PrependPath(prepend):
"""Generate path with new directory at the beginning.
Args:
prepend: Directory to add at the beginning of the path.
Returns:
Extended path as a string.
"""
return os.pathsep.join([prepend, os.environ.get('PATH', os.defpath)])
def PreParseArguments(argv):
"""Extract the branch name from cbuildbot command line arguments.
Args:
argv: The command line arguments to parse.
Returns:
Branch as a string ('master' if nothing is specified).
"""
parser = cbuildbot.CreateParser()
options = cbuildbot.ParseCommandLine(parser, argv)
if not options.cache_dir:
options.cache_dir = os.path.join(options.buildroot,
'repository', '.cache')
options.Freeze()
# This option isn't required for cbuildbot, but is for us.
if not options.buildroot:
cros_build_lib.Die('--buildroot is a required option.')
return options
def GetCurrentBuildState(options, branch):
"""Extract information about the current build state from command-line args.
Args:
options: A parsed options object from a cbuildbot parser.
branch: The name of the branch this builder was called with.
Returns:
A BuildSummary object describing the current build.
"""
build_state = build_summary.BuildSummary(
status=constants.BUILDER_STATUS_INFLIGHT,
buildroot_layout=BUILDROOT_BUILDROOT_LAYOUT,
branch=branch)
if options.buildnumber:
build_state.build_number = options.buildnumber
if options.buildbucket_id:
build_state.buildbucket_id = options.buildbucket_id
if options.master_build_id:
build_state.master_build_id = options.master_build_id
return build_state
def GetLastBuildState(root):
"""Fetch the state of the last build run from |root|.
If the saved state file can't be read or doesn't contain valid JSON, a default
state will be returned.
Args:
root: Root of the working directory tree as a string.
Returns:
A BuildSummary object representing the previous build.
"""
state_file = os.path.join(root, BUILDER_STATE_FILENAME)
state = build_summary.BuildSummary()
try:
state_raw = osutils.ReadFile(state_file)
state.from_json(state_raw)
except IOError as e:
logging.warning('Unable to read %s: %s', state_file, e)
return state
except ValueError as e:
logging.warning('Saved state file %s is not valid JSON: %s', state_file, e)
return state
if not state.is_valid():
logging.warning('Previous build state is not valid. Ignoring.')
state = build_summary.BuildSummary()
return state
def SetLastBuildState(root, new_state):
"""Save the state of the last build under |root|.
Args:
root: Root of the working directory tree as a string.
new_state: BuildSummary object containing the state to be saved.
"""
state_file = os.path.join(root, BUILDER_STATE_FILENAME)
osutils.WriteFile(state_file, new_state.to_json())
# Remove old state file. Its contents have been migrated into the new file.
old_state_file = os.path.join(root, '.cbuildbot_launch_state')
osutils.SafeUnlink(old_state_file)
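# Hedged sketch (editor's addition): the two helpers above form a simple
# persistence round trip for BuildSummary. The root path is illustrative only.
def _ExampleStateRoundTrip(root='/tmp/example_buildroot'):  # pragma: no cover
  """Write a build state under |root| and read it back (illustrative only)."""
  osutils.SafeMakedirs(root)
  state = build_summary.BuildSummary(
      status=constants.BUILDER_STATUS_PASSED,
      buildroot_layout=BUILDROOT_BUILDROOT_LAYOUT,
      branch='master')
  SetLastBuildState(root, state)
  return GetLastBuildState(root)  # returns a default BuildSummary on read errors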
def _MaybeCleanDistfiles(cache_dir, distfiles_ts):
"""Cleans the distfiles directory if too old.
Args:
cache_dir: Directory of the cache, as a string.
distfiles_ts: A timestamp str for the last time distfiles was cleaned. May
be None.
Returns:
The new distfiles_ts to persist in state.
"""
# distfiles_ts can be None for a fresh environment, which means clean.
if distfiles_ts is None:
return time.time()
distfiles_age = (time.time() - distfiles_ts) / 3600.0
if distfiles_age < _DISTFILES_CACHE_EXPIRY_HOURS:
return distfiles_ts
logging.info('Remove old distfiles cache (cache expiry %d hours)',
_DISTFILES_CACHE_EXPIRY_HOURS)
osutils.RmDir(os.path.join(cache_dir, 'distfiles'),
ignore_missing=True, sudo=True)
metrics.Counter(METRIC_DISTFILES_CLEANUP).increment(
fields=field({}, reason='cache_expired'))
# Cleaned cache, so reset distfiles_ts
return time.time()
def SanitizeCacheDir(cache_dir):
"""Make certain the .cache directory is valid.
Args:
cache_dir: Directory of the cache, as a string.
"""
logging.info('Cleaning up cache dir at %s', cache_dir)
# Verify that .cache is writable by the current user.
try:
osutils.Touch(os.path.join(cache_dir, '.cbuildbot_launch'), makedirs=True)
except IOError:
logging.info('Bad Permissions for cache dir, wiping: %s', cache_dir)
osutils.RmDir(cache_dir, sudo=True)
osutils.Touch(os.path.join(cache_dir, '.cbuildbot_launch'), makedirs=True)
osutils.RmDir(os.path.join(cache_dir, 'paygen_cache'),
ignore_missing=True, sudo=True)
logging.info('Finished cleaning cache_dir.')
@StageDecorator
def CleanBuildRoot(root, repo, cache_dir, build_state):
"""Some kinds of branch transitions break builds.
This method ensures that cbuildbot's buildroot is a clean checkout on the
given branch when it starts. If necessary (a branch transition) it will wipe
assorted state that cannot be safely reused from the previous build.
Args:
root: Root directory owned by cbuildbot_launch.
repo: repository.RepoRepository instance.
cache_dir: Cache directory.
build_state: BuildSummary object containing the current build state that
will be saved into the cleaned root. The distfiles_ts property will
be updated if the distfiles cache is cleaned.
"""
previous_state = GetLastBuildState(root)
SetLastBuildState(root, build_state)
SanitizeCacheDir(cache_dir)
build_state.distfiles_ts = _MaybeCleanDistfiles(
cache_dir, previous_state.distfiles_ts)
if previous_state.buildroot_layout != BUILDROOT_BUILDROOT_LAYOUT:
logging.PrintBuildbotStepText('Unknown layout: Wiping buildroot.')
metrics.Counter(METRIC_CLOBBER).increment(
fields=field({}, reason='layout_change'))
chroot_dir = os.path.join(root, constants.DEFAULT_CHROOT_DIR)
if os.path.exists(chroot_dir) or os.path.exists(chroot_dir + '.img'):
cros_sdk_lib.CleanupChrootMount(chroot_dir, delete=True)
osutils.RmDir(root, ignore_missing=True, sudo=True)
osutils.RmDir(cache_dir, ignore_missing=True, sudo=True)
else:
if previous_state.branch != repo.branch:
logging.PrintBuildbotStepText('Branch change: Cleaning buildroot.')
logging.info('Unmatched branch: %s -> %s', previous_state.branch,
repo.branch)
metrics.Counter(METRIC_BRANCH_CLEANUP).increment(
fields=field({}, old_branch=previous_state.branch))
logging.info('Remove Chroot.')
chroot_dir = os.path.join(repo.directory, constants.DEFAULT_CHROOT_DIR)
if os.path.exists(chroot_dir) or os.path.exists(chroot_dir + '.img'):
cros_sdk_lib.CleanupChrootMount(chroot_dir, delete=True)
logging.info('Remove Chrome checkout.')
osutils.RmDir(os.path.join(repo.directory, '.cache', 'distfiles'),
ignore_missing=True, sudo=True)
try:
# If there is any failure doing the cleanup, wipe everything.
# The previous run might have been killed in the middle leaving stale git
# locks. Clean those up, first.
repo.PreLoad()
# If the previous build didn't exit normally, run an expensive step to
# cleanup abandoned git locks.
if previous_state.status not in (constants.BUILDER_STATUS_FAILED,
constants.BUILDER_STATUS_PASSED):
repo.CleanStaleLocks()
repo.BuildRootGitCleanup(prune_all=True)
except Exception:
logging.info('Checkout cleanup failed, wiping buildroot:', exc_info=True)
metrics.Counter(METRIC_CLOBBER).increment(
fields=field({}, reason='repo_cleanup_failure'))
repository.ClearBuildRoot(repo.directory)
# Ensure buildroot exists. Save the state we are prepped for.
osutils.SafeMakedirs(repo.directory)
SetLastBuildState(root, build_state)
@StageDecorator
def InitialCheckout(repo):
"""Preliminary ChromeOS checkout.
Perform a complete checkout of ChromeOS on the specified branch. This does NOT
match what the build needs, but ensures the buildroot both has a 'hot'
checkout, and is close enough that the branched cbuildbot can successfully get
the right checkout.
This checks out full ChromeOS, even if a ChromiumOS build is going to be
performed. This is because we have no knowledge of the build config to be
used.
Args:
repo: repository.RepoRepository instance.
"""
logging.PrintBuildbotStepText('Branch: %s' % repo.branch)
logging.info('Bootstrap script starting initial sync on branch: %s',
repo.branch)
repo.PreLoad('/preload/chromeos')
repo.Sync(detach=True)
def ShouldFixBotoCerts(options):
"""Decide if FixBotoCerts should be applied for this branch."""
try:
# Only apply to factory and firmware branches.
branch = options.branch or ''
prefix = branch.split('-')[0]
if prefix not in ('factory', 'firmware'):
return False
# Only apply to "old" branches.
if branch.endswith('.B'):
version = branch[:-2].split('-')[-1]
major = int(version.split('.')[0])
return major <= 9667 # This is the newest known to be failing.
return False
except Exception as e:
    logging.warning('ShouldFixBotoCerts failed: %s', e)
# Conservatively continue without the fix.
return False
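# Hedged illustration (editor's addition) of the branch heuristic above; the
# branch names are made up and used only to exercise the parsing logic.
def _ExampleShouldFixBotoCerts():  # pragma: no cover
  class _Opts(object):
    def __init__(self, branch):
      self.branch = branch
  assert ShouldFixBotoCerts(_Opts('firmware-9000.B'))     # old firmware branch
  assert not ShouldFixBotoCerts(_Opts('factory-9800.B'))  # newer than 9667
  assert not ShouldFixBotoCerts(_Opts('release-R70.B'))   # not factory/firmware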
@StageDecorator
def Cbuildbot(buildroot, depot_tools_path, argv):
"""Start cbuildbot in specified directory with all arguments.
Args:
buildroot: Directory to be passed to cbuildbot with --buildroot.
depot_tools_path: Directory for depot_tools to be used by cbuildbot.
argv: Command line options passed to cbuildbot_launch.
Returns:
Return code of cbuildbot as an integer.
"""
logging.info('Bootstrap cbuildbot in: %s', buildroot)
# Fixup buildroot parameter.
argv = argv[:]
for i, arg in enumerate(argv):
if arg in ('-r', '--buildroot'):
argv[i + 1] = buildroot
# This filters out command line arguments not supported by older versions
# of cbuildbot.
parser = cbuildbot.CreateParser()
options = cbuildbot.ParseCommandLine(parser, argv)
cbuildbot_path = os.path.join(buildroot, 'chromite', 'bin', 'cbuildbot')
cmd = sync_stages.BootstrapStage.FilterArgsForTargetCbuildbot(
buildroot, cbuildbot_path, options)
# We want cbuildbot to use branched depot_tools scripts from our manifest,
# so that depot_tools is branched to match cbuildbot.
logging.info('Adding depot_tools into PATH: %s', depot_tools_path)
extra_env = {'PATH': PrependPath(depot_tools_path)}
# TODO(crbug.com/845304): Remove once underlying boto issues are resolved.
fix_boto = ShouldFixBotoCerts(options)
with boto_compat.FixBotoCerts(activate=fix_boto):
result = cros_build_lib.run(
cmd, extra_env=extra_env, check=False, cwd=buildroot)
return result.returncode
@StageDecorator
def CleanupChroot(buildroot):
"""Unmount/clean up an image-based chroot without deleting the backing image.
Args:
buildroot: Directory containing the chroot to be cleaned up.
"""
chroot_dir = os.path.join(buildroot, constants.DEFAULT_CHROOT_DIR)
logging.info('Cleaning up chroot at %s', chroot_dir)
if os.path.exists(chroot_dir) or os.path.exists(chroot_dir + '.img'):
try:
cros_sdk_lib.CleanupChrootMount(chroot_dir, delete=False)
except timeout_util.TimeoutError:
logging.exception('Cleaning up chroot timed out')
# Dump debug info to help https://crbug.com/1000034.
cros_build_lib.run(['mount'], check=True)
cros_build_lib.run(['uname', '-a'], check=True)
cros_build_lib.sudo_run(['losetup', '-a'], check=True)
cros_build_lib.run(['dmesg'], check=True)
logging.warning('Assuming the bot is going to reboot, so ignoring this '
'failure; see https://crbug.com/1000034')
# NB: We ignore errors at this point because this stage runs last. If the
# chroot failed to unmount, we're going to reboot the system once we're done,
# and that will implicitly take care of cleaning things up. If the bots stop
# rebooting after every run, we'll need to make this fatal all the time.
#
# TODO(crbug.com/1000034): This should be fatal all the time.
def ConfigureGlobalEnvironment():
"""Setup process wide environmental changes."""
# Set umask to 022 so files created by buildbot are readable.
os.umask(0o22)
# These variables can interfere with LANG / locale behavior.
unwanted_local_vars = [
'LC_ALL', 'LC_CTYPE', 'LC_COLLATE', 'LC_TIME', 'LC_NUMERIC',
'LC_MONETARY', 'LC_MESSAGES', 'LC_PAPER', 'LC_NAME', 'LC_ADDRESS',
'LC_TELEPHONE', 'LC_MEASUREMENT', 'LC_IDENTIFICATION',
]
for v in unwanted_local_vars:
os.environ.pop(v, None)
# This variable is required for repo sync's to work in all cases.
os.environ['LANG'] = 'en_US.UTF-8'
def _main(options, argv):
"""main method of script.
Args:
options: preparsed options object for the build.
argv: All command line arguments to pass as list of strings.
Returns:
Return code of cbuildbot as an integer.
"""
branchname = options.branch or 'master'
root = options.buildroot
buildroot = os.path.join(root, 'repository')
workspace = os.path.join(root, 'workspace')
depot_tools_path = os.path.join(buildroot, constants.DEPOT_TOOLS_SUBPATH)
# Does the entire build pass or fail.
with metrics.Presence(METRIC_ACTIVE), \
metrics.SuccessCounter(METRIC_COMPLETED) as s_fields:
# Preliminary set, mostly command line parsing.
with metrics.SuccessCounter(METRIC_INVOKED):
if options.enable_buildbot_tags:
logging.EnableBuildbotMarkers()
ConfigureGlobalEnvironment()
# Prepare the buildroot with source for the build.
with metrics.SuccessCounter(METRIC_PREP):
manifest_url = config_lib.GetSiteParams().MANIFEST_INT_URL
repo = repository.RepoRepository(manifest_url, buildroot,
branch=branchname,
git_cache_dir=options.git_cache_dir)
previous_build_state = GetLastBuildState(root)
# Clean up the buildroot to a safe state.
with metrics.SecondsTimer(METRIC_CLEAN):
build_state = GetCurrentBuildState(options, branchname)
CleanBuildRoot(root, repo, options.cache_dir, build_state)
# Get a checkout close enough to the branch that cbuildbot can handle it.
if options.sync:
with metrics.SecondsTimer(METRIC_INITIAL):
InitialCheckout(repo)
# Run cbuildbot inside the full ChromeOS checkout, on the specified branch.
with metrics.SecondsTimer(METRIC_CBUILDBOT), \
metrics.SecondsInstanceTimer(METRIC_CBUILDBOT_INSTANCE):
if previous_build_state.is_valid():
argv.append('--previous-build-state')
argv.append(base64.b64encode(previous_build_state.to_json().encode(
'utf-8')).decode('utf-8'))
argv.extend(['--workspace', workspace])
if not options.cache_dir_specified:
argv.extend(['--cache-dir', options.cache_dir])
result = Cbuildbot(buildroot, depot_tools_path, argv)
s_fields['success'] = (result == 0)
build_state.status = (
constants.BUILDER_STATUS_PASSED
if result == 0 else constants.BUILDER_STATUS_FAILED)
SetLastBuildState(root, build_state)
with metrics.SecondsTimer(METRIC_CHROOT_CLEANUP):
CleanupChroot(buildroot)
return result
def main(argv):
options = PreParseArguments(argv)
metric_fields = {
'branch_name': options.branch or 'master',
'build_config': options.build_config_name,
'tryjob': options.remote_trybot,
}
# Enable Monarch metrics gathering.
with ts_mon_config.SetupTsMonGlobalState('cbuildbot_launch',
common_metric_fields=metric_fields,
indirect=True):
return _main(options, argv)
|
|
# -*- coding: utf-8 -*-
# stationarity tests
from pmdarima.arima.stationarity import ADFTest, PPTest, KPSSTest
from pmdarima.arima.utils import ndiffs
from pmdarima.utils.array import diff
from pmdarima.datasets import load_austres
from sklearn.utils import check_random_state
from numpy.testing import assert_array_almost_equal, assert_almost_equal, \
assert_array_equal
import numpy as np
import pytest
# for testing rand of len 400 for m==365
random_state = check_random_state(42)
austres = load_austres()
def test_ndiffs_stationary():
# show that for a stationary vector, ndiffs returns 0
x = np.ones(10)
assert ndiffs(x, alpha=0.05, test='kpss', max_d=2) == 0
assert ndiffs(x, alpha=0.05, test='pp', max_d=2) == 0
assert ndiffs(x, alpha=0.05, test='adf', max_d=2) == 0
@pytest.mark.parametrize("cls", (KPSSTest, PPTest, ADFTest))
def test_embedding(cls):
x = np.arange(5)
expected = np.array([
[1, 2, 3, 4],
[0, 1, 2, 3]
])
assert_array_almost_equal(cls._embed(x, 2), expected)
y = np.array([1, -1, 0, 2, -1, -2, 3])
assert_array_almost_equal(cls._embed(y, 1),
np.array([
[1, -1, 0, 2, -1, -2, 3]
]))
assert_array_almost_equal(cls._embed(y, 2).T,
np.array([
[-1, 1],
[0, -1],
[2, 0],
[-1, 2],
[-2, -1],
[3, -2]
]))
assert_array_almost_equal(cls._embed(y, 3).T,
np.array([
[0, -1, 1],
[2, 0, -1],
[-1, 2, 0],
[-2, -1, 2],
[3, -2, -1]
]))
    # Where k is close to the length of y
assert_array_almost_equal(cls._embed(y, 6).T,
np.array([
[-2, -1, 2, 0, -1, 1],
[3, -2, -1, 2, 0, -1]
]))
# Where k == y dim
assert_array_almost_equal(cls._embed(y, 7).T,
np.array([
[3, -2, -1, 2, 0, -1, 1]
]))
# Assert we fail when k > dim
with pytest.raises(ValueError):
cls._embed(y, 8)
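# Hedged reference sketch (editor's addition): _embed(x, k) behaves like a lag
# matrix whose row j is x shifted back by j samples. The pure-numpy equivalent
# below reproduces the shapes asserted above, but not the k > len(x) error check.
def _reference_embed(x, k):
    x = np.asarray(x)
    return np.array([x[k - 1 - j:len(x) - j] for j in range(k)])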
def test_adf_ols():
# Test the _ols function of the ADF test
x = np.array([1, -1, 0, 2, -1, -2, 3])
k = 2
y = diff(x)
assert_array_equal(y, [-2, 1, 2, -3, -1, 5])
z = ADFTest._embed(y, k).T
res = ADFTest._ols(x, y, z, k)
# Assert on the params of the OLS. The comparisons are those obtained
# from the R function.
expected = np.array([1.0522, -3.1825, -0.1609, 1.4690])
assert np.allclose(res.params, expected, rtol=0.001)
# Now assert on the standard error
stat = ADFTest._ols_std_error(res)
assert np.allclose(stat, -100.2895) # derived from R code
def test_adf_p_value():
# Assert on the ADF test's p-value
p_val, do_diff = \
ADFTest(alpha=0.05).should_diff(np.array([1, -1, 0, 2, -1, -2, 3]))
assert np.isclose(p_val, 0.01)
assert not do_diff
@pytest.mark.parametrize('null', ('level', 'trend'))
def test_kpss(null):
test = KPSSTest(alpha=0.05, null=null, lshort=True)
pval, do_diff = test.should_diff(austres)
assert do_diff # show it is significant
assert_almost_equal(pval, 0.01)
# Test on the data provided in issue #67
x = np.array([1, -1, 0, 2, -1, -2, 3])
pval2, do_diff2 = test.should_diff(x)
# We expect Trend to be significant, but NOT Level
if null == 'level':
assert not do_diff2
assert_almost_equal(pval2, 0.1)
else:
assert do_diff2
assert_almost_equal(pval2, 0.01)
# test the ndiffs with the KPSS test
assert ndiffs(austres, test='kpss', max_d=5, null=null) == 2
def test_non_default_kpss():
test = KPSSTest(alpha=0.05, null='trend', lshort=False)
pval, do_diff = test.should_diff(austres)
assert do_diff # show it is significant
assert np.allclose(pval, 0.01, atol=0.005)
# test the ndiffs with the KPSS test
assert ndiffs(austres, test='kpss', max_d=2) == 2
def test_kpss_corner():
test = KPSSTest(alpha=0.05, null='something-else', lshort=True)
with pytest.raises(ValueError):
test.should_diff(austres)
def test_pp():
test = PPTest(alpha=0.05, lshort=True)
pval, do_diff = test.should_diff(austres)
assert do_diff
# Result from R code: 0.9786066
# > pp.test(austres, lshort=TRUE)$p.value
assert_almost_equal(pval, 0.9786066, decimal=5)
# test n diffs
assert ndiffs(austres, test='pp', max_d=2) == 1
# If we use lshort is FALSE, it will be different
test = PPTest(alpha=0.05, lshort=False)
pval, do_diff = test.should_diff(austres)
assert do_diff
# Result from R code: 0.9514589
# > pp.test(austres, lshort=FALSE)$p.value
assert_almost_equal(pval, 0.9514589, decimal=5)
assert ndiffs(austres, test='pp', max_d=2, lshort=False) == 1
def test_adf():
# Test where k = 1
test = ADFTest(alpha=0.05, k=1)
pval, do_diff = test.should_diff(austres)
# R's value: 0.8488036
# > adf.test(austres, k=1, alternative='stationary')$p.value
assert np.isclose(pval, 0.8488036)
assert do_diff
# Test for k = 2. R's value: 0.7060733
# > adf.test(austres, k=2, alternative='stationary')$p.value
test = ADFTest(alpha=0.05, k=2)
pval, do_diff = test.should_diff(austres)
assert np.isclose(pval, 0.7060733)
assert do_diff
# Test for k is None. R's value: 0.3493465
# > adf.test(austres, alternative='stationary')$p.value
test = ADFTest(alpha=0.05, k=None)
pval, do_diff = test.should_diff(austres)
assert np.isclose(pval, 0.3493465, rtol=0.0001)
assert do_diff
def test_adf_corner():
with pytest.raises(ValueError):
ADFTest(alpha=0.05, k=-1)
# show we can fit with k is None
test = ADFTest(alpha=0.05, k=None)
test.should_diff(austres)
def test_ndiffs_corner_cases():
with pytest.raises(ValueError):
ndiffs(austres, max_d=0)
def test_base_cases():
classes = (ADFTest, KPSSTest, PPTest)
for cls in classes:
instance = cls()
# Also show we get a warning with the deprecated func
with pytest.warns(DeprecationWarning):
p_val, is_stationary = instance.is_stationary(None)
# results of base-case
assert np.isnan(p_val)
assert not is_stationary
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created: Wed Dec 18 11:50:03 2013
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(630, 687)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.splitter = QtGui.QSplitter(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.splitter.sizePolicy().hasHeightForWidth())
self.splitter.setSizePolicy(sizePolicy)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.groupBoxPreview = QtGui.QGroupBox(self.splitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBoxPreview.sizePolicy().hasHeightForWidth())
self.groupBoxPreview.setSizePolicy(sizePolicy)
self.groupBoxPreview.setObjectName(_fromUtf8("groupBoxPreview"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBoxPreview)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.groupBox_2 = QtGui.QGroupBox(self.splitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox_2)
self.verticalLayout.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.radioBrowser = QtGui.QRadioButton(self.groupBox_2)
self.radioBrowser.setChecked(True)
self.radioBrowser.setObjectName(_fromUtf8("radioBrowser"))
self.horizontalLayout_3.addWidget(self.radioBrowser)
self.radioRecent = QtGui.QRadioButton(self.groupBox_2)
self.radioRecent.setObjectName(_fromUtf8("radioRecent"))
self.horizontalLayout_3.addWidget(self.radioRecent)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.buttonBrowse = QtGui.QPushButton(self.groupBox_2)
self.buttonBrowse.setObjectName(_fromUtf8("buttonBrowse"))
self.verticalLayout.addWidget(self.buttonBrowse)
self.listFiles = QtGui.QListWidget(self.groupBox_2)
self.listFiles.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.listFiles.setProperty("showDropIndicator", False)
self.listFiles.setDragEnabled(True)
self.listFiles.setDragDropMode(QtGui.QAbstractItemView.NoDragDrop)
self.listFiles.setAlternatingRowColors(True)
self.listFiles.setViewMode(QtGui.QListView.ListMode)
self.listFiles.setObjectName(_fromUtf8("listFiles"))
self.verticalLayout.addWidget(self.listFiles)
self.verticalLayout_3.addWidget(self.splitter)
self.groupBox_3 = QtGui.QGroupBox(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_3.sizePolicy().hasHeightForWidth())
self.groupBox_3.setSizePolicy(sizePolicy)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.groupBox_3)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setSizeConstraint(QtGui.QLayout.SetMinimumSize)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.radioMerge = QtGui.QRadioButton(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.radioMerge.sizePolicy().hasHeightForWidth())
self.radioMerge.setSizePolicy(sizePolicy)
self.radioMerge.setChecked(True)
self.radioMerge.setObjectName(_fromUtf8("radioMerge"))
self.horizontalLayout.addWidget(self.radioMerge)
self.radioConcat = QtGui.QRadioButton(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.radioConcat.sizePolicy().hasHeightForWidth())
self.radioConcat.setSizePolicy(sizePolicy)
self.radioConcat.setObjectName(_fromUtf8("radioConcat"))
self.horizontalLayout.addWidget(self.radioConcat)
self.verticalLayout_4.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
self.horizontalLayout_2.setContentsMargins(-1, -1, 0, -1)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.widgetMerge = QtGui.QWidget(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widgetMerge.sizePolicy().hasHeightForWidth())
self.widgetMerge.setSizePolicy(sizePolicy)
self.widgetMerge.setObjectName(_fromUtf8("widgetMerge"))
self.verticalLayout_5 = QtGui.QVBoxLayout(self.widgetMerge)
self.verticalLayout_5.setMargin(0)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.gridLayoutMergePages = QtGui.QGridLayout()
self.gridLayoutMergePages.setContentsMargins(-1, 0, -1, -1)
self.gridLayoutMergePages.setObjectName(_fromUtf8("gridLayoutMergePages"))
self.buttonRemoveFront = QtGui.QPushButton(self.widgetMerge)
self.buttonRemoveFront.setEnabled(False)
self.buttonRemoveFront.setObjectName(_fromUtf8("buttonRemoveFront"))
self.gridLayoutMergePages.addWidget(self.buttonRemoveFront, 1, 0, 1, 1)
self.buttonRemoveBack = QtGui.QPushButton(self.widgetMerge)
self.buttonRemoveBack.setEnabled(False)
self.buttonRemoveBack.setObjectName(_fromUtf8("buttonRemoveBack"))
self.gridLayoutMergePages.addWidget(self.buttonRemoveBack, 1, 1, 1, 1)
self.label = QtGui.QLabel(self.widgetMerge)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayoutMergePages.addWidget(self.label, 0, 0, 1, 1)
self.label_2 = QtGui.QLabel(self.widgetMerge)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayoutMergePages.addWidget(self.label_2, 0, 1, 1, 1)
self.verticalLayout_5.addLayout(self.gridLayoutMergePages)
self.buttonExecuteMerge = QtGui.QPushButton(self.widgetMerge)
self.buttonExecuteMerge.setEnabled(False)
self.buttonExecuteMerge.setObjectName(_fromUtf8("buttonExecuteMerge"))
self.verticalLayout_5.addWidget(self.buttonExecuteMerge)
self.horizontalLayout_2.addWidget(self.widgetMerge)
self.widgetConcat = QtGui.QWidget(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widgetConcat.sizePolicy().hasHeightForWidth())
self.widgetConcat.setSizePolicy(sizePolicy)
self.widgetConcat.setMinimumSize(QtCore.QSize(0, 0))
self.widgetConcat.setObjectName(_fromUtf8("widgetConcat"))
self.verticalLayout_6 = QtGui.QVBoxLayout(self.widgetConcat)
self.verticalLayout_6.setMargin(0)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.label_3 = QtGui.QLabel(self.widgetConcat)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())
self.label_3.setSizePolicy(sizePolicy)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.verticalLayout_6.addWidget(self.label_3)
self.listWidget_3 = QtGui.QListWidget(self.widgetConcat)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.listWidget_3.sizePolicy().hasHeightForWidth())
self.listWidget_3.setSizePolicy(sizePolicy)
self.listWidget_3.setObjectName(_fromUtf8("listWidget_3"))
self.verticalLayout_6.addWidget(self.listWidget_3)
self.buttonExecuteConcat = QtGui.QPushButton(self.widgetConcat)
self.buttonExecuteConcat.setEnabled(False)
self.buttonExecuteConcat.setObjectName(_fromUtf8("buttonExecuteConcat"))
self.verticalLayout_6.addWidget(self.buttonExecuteConcat)
self.horizontalLayout_2.addWidget(self.widgetConcat)
self.verticalLayout_4.addLayout(self.horizontalLayout_2)
self.verticalLayout_3.addWidget(self.groupBox_3)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 630, 18))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.radioBrowser, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.buttonBrowse.setVisible)
QtCore.QObject.connect(self.radioMerge, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.widgetMerge.setVisible)
QtCore.QObject.connect(self.radioConcat, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.widgetConcat.setVisible)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "pysoftwareduplex", None))
self.groupBoxPreview.setTitle(_translate("MainWindow", "Preview", None))
self.groupBox_2.setTitle(_translate("MainWindow", "Files", None))
self.radioBrowser.setText(_translate("MainWindow", "Browser", None))
self.radioRecent.setText(_translate("MainWindow", "Recent activities", None))
self.buttonBrowse.setText(_translate("MainWindow", "Browse...", None))
self.groupBox_3.setTitle(_translate("MainWindow", "Actions", None))
self.radioMerge.setText(_translate("MainWindow", "Merge/Zip", None))
self.radioConcat.setText(_translate("MainWindow", "Concatenate", None))
self.buttonRemoveFront.setText(_translate("MainWindow", "Remove", None))
self.buttonRemoveBack.setText(_translate("MainWindow", "Remove", None))
self.label.setText(_translate("MainWindow", "Front Pages Document", None))
self.label_2.setText(_translate("MainWindow", "Back Pages Document", None))
self.buttonExecuteMerge.setText(_translate("MainWindow", "Execute...", None))
self.label_3.setText(_translate("MainWindow", "Documents", None))
self.buttonExecuteConcat.setText(_translate("MainWindow", "Execute...", None))
|
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import base64
import fileinput
import getpass
import stat
import tempfile
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
import os
import re
import shutil
import urllib2
import time
import sys
import logging
from ambari_commons.exceptions import FatalException, NonFatalException
from ambari_commons.logging_utils import print_warning_msg, print_error_msg, print_info_msg, get_verbose
from ambari_commons.os_check import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons.os_utils import is_root, set_file_permissions, \
run_os_command, search_file, is_valid_filepath, change_owner, get_ambari_repo_file_full_name, get_file_owner
from ambari_server.serverConfiguration import configDefaults, parse_properties_file, \
encrypt_password, find_jdk, find_properties_file, get_alias_string, get_ambari_properties, get_conf_dir, \
get_credential_store_location, get_is_persisted, get_is_secure, get_master_key_location, get_db_type, write_property, \
get_original_master_key, get_value_from_properties, get_java_exe_path, is_alias_string, read_ambari_user, \
read_passwd_for_alias, remove_password_file, save_passwd_for_alias, store_password_file, update_properties_2, \
BLIND_PASSWORD, BOOTSTRAP_DIR_PROPERTY, IS_LDAP_CONFIGURED, JDBC_PASSWORD_FILENAME, JDBC_PASSWORD_PROPERTY, \
JDBC_RCA_PASSWORD_ALIAS, JDBC_RCA_PASSWORD_FILE_PROPERTY, JDBC_USE_INTEGRATED_AUTH_PROPERTY, \
LDAP_MGR_PASSWORD_ALIAS, LDAP_MGR_PASSWORD_FILENAME, LDAP_MGR_PASSWORD_PROPERTY, LDAP_MGR_USERNAME_PROPERTY, \
LDAP_PRIMARY_URL_PROPERTY, SECURITY_IS_ENCRYPTION_ENABLED, SECURITY_KEY_ENV_VAR_NAME, SECURITY_KERBEROS_JASS_FILENAME, \
SECURITY_PROVIDER_KEY_CMD, SECURITY_MASTER_KEY_FILENAME, SSL_TRUSTSTORE_PASSWORD_ALIAS, \
SSL_TRUSTSTORE_PASSWORD_PROPERTY, SSL_TRUSTSTORE_PATH_PROPERTY, SSL_TRUSTSTORE_TYPE_PROPERTY, \
SSL_API, SSL_API_PORT, DEFAULT_SSL_API_PORT, CLIENT_API_PORT, JDK_NAME_PROPERTY, JCE_NAME_PROPERTY, JAVA_HOME_PROPERTY, \
get_resources_location, SECURITY_MASTER_KEY_LOCATION, SETUP_OR_UPGRADE_MSG, CHECK_AMBARI_KRB_JAAS_CONFIGURATION_PROPERTY
from ambari_server.serverUtils import is_server_runing, get_ambari_server_api_base
from ambari_server.setupActions import SETUP_ACTION, LDAP_SETUP_ACTION
from ambari_server.userInput import get_validated_string_input, get_prompt_default, read_password, get_YN_input, quit_if_has_answer
from ambari_server.serverClassPath import ServerClassPath
from ambari_server.dbConfiguration import DBMSConfigFactory, check_jdbc_drivers, \
get_jdbc_driver_path, ensure_jdbc_driver_is_installed, LINUX_DBMS_KEYS_LIST
logger = logging.getLogger(__name__)
REGEX_IP_ADDRESS = r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
REGEX_HOSTNAME = r"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$"
REGEX_HOSTNAME_PORT = "^(.*:[0-9]{1,5}$)"
REGEX_TRUE_FALSE = "^(true|false)?$"
REGEX_SKIP_CONVERT = "^(skip|convert)?$"
REGEX_REFERRAL = "^(follow|ignore)?$"
REGEX_ANYTHING = ".*"
LDAP_TO_PAM_MIGRATION_HELPER_CMD = "{0} -cp {1} " + \
"org.apache.ambari.server.security.authentication.LdapToPamMigrationHelper" + \
" >> " + configDefaults.SERVER_OUT_FILE + " 2>&1"
CLIENT_SECURITY_KEY = "client.security"
AUTO_GROUP_CREATION = "auto.group.creation"
SERVER_API_LDAP_URL = 'ldap_sync_events'
PAM_CONFIG_FILE = 'pam.configuration'
def read_master_key(isReset=False, options = None):
passwordPattern = ".*"
passwordPrompt = "Please provide master key for locking the credential store: "
passwordDescr = "Invalid characters in password. Use only alphanumeric or "\
"_ or - characters"
passwordDefault = ""
if isReset:
passwordPrompt = "Enter new Master Key: "
input = True
while(input):
masterKey = get_validated_string_input(passwordPrompt, passwordDefault, passwordPattern, passwordDescr,
True, True, answer = options.master_key)
if not masterKey:
print "Master Key cannot be empty!"
continue
masterKey2 = get_validated_string_input("Re-enter master key: ", passwordDefault, passwordPattern, passwordDescr,
True, True, answer = options.master_key)
if masterKey != masterKey2:
print "Master key did not match!"
continue
input = False
return masterKey
def save_master_key(options, master_key, key_location, persist=True):
if master_key:
jdk_path = find_jdk()
if jdk_path is None:
print_error_msg("No JDK found, please run the \"setup\" "
"command to install a JDK automatically or install any "
"JDK manually to " + configDefaults.JDK_INSTALL_DIR)
return 1
serverClassPath = ServerClassPath(get_ambari_properties(), options)
command = SECURITY_PROVIDER_KEY_CMD.format(get_java_exe_path(),
serverClassPath.get_full_ambari_classpath_escaped_for_shell(), master_key, key_location, persist)
(retcode, stdout, stderr) = run_os_command(command)
print_info_msg("Return code from credential provider save KEY: " +
str(retcode))
else:
print_error_msg("Master key cannot be None.")
def adjust_directory_permissions(ambari_user):
properties = get_ambari_properties()
bootstrap_dir = os.path.abspath(get_value_from_properties(properties, BOOTSTRAP_DIR_PROPERTY))
print_info_msg("Cleaning bootstrap directory ({0}) contents...".format(bootstrap_dir))
if os.path.exists(bootstrap_dir):
shutil.rmtree(bootstrap_dir) #Ignore the non-existent dir error
if not os.path.exists(bootstrap_dir):
try:
os.makedirs(bootstrap_dir)
except Exception, ex:
print_warning_msg("Failed recreating the bootstrap directory: {0}".format(str(ex)))
pass
else:
print_warning_msg("Bootstrap directory lingering around after 5s. Unable to complete the cleanup.")
pass
  # Add the master key and credential store files, if they exist
keyLocation = get_master_key_location(properties)
masterKeyFile = search_file(SECURITY_MASTER_KEY_FILENAME, keyLocation)
if masterKeyFile:
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((masterKeyFile, configDefaults.MASTER_KEY_FILE_PERMISSIONS, "{0}", False))
credStoreFile = get_credential_store_location(properties)
if os.path.exists(credStoreFile):
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((credStoreFile, configDefaults.CREDENTIALS_STORE_FILE_PERMISSIONS, "{0}", False))
trust_store_location = properties[SSL_TRUSTSTORE_PATH_PROPERTY]
if trust_store_location:
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((trust_store_location, configDefaults.TRUST_STORE_LOCATION_PERMISSIONS, "{0}", False))
# Update JDK and JCE permissions
resources_dir = get_resources_location(properties)
jdk_file_name = properties.get_property(JDK_NAME_PROPERTY)
jce_file_name = properties.get_property(JCE_NAME_PROPERTY)
java_home = properties.get_property(JAVA_HOME_PROPERTY)
if jdk_file_name:
jdk_file_path = os.path.abspath(os.path.join(resources_dir, jdk_file_name))
if(os.path.exists(jdk_file_path)):
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((jdk_file_path, "644", "{0}", False))
if jce_file_name:
jce_file_path = os.path.abspath(os.path.join(resources_dir, jce_file_name))
if(os.path.exists(jce_file_path)):
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((jce_file_path, "644", "{0}", False))
if java_home:
jdk_security_dir = os.path.abspath(os.path.join(java_home, configDefaults.JDK_SECURITY_DIR))
if(os.path.exists(jdk_security_dir)):
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((jdk_security_dir + "/*", "644", "{0}", True))
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((jdk_security_dir, "755", "{0}", False))
# Grant read permissions to all users. This is required when a non-admin user is configured to setup ambari-server.
# However, do not change ownership of the repo file to ambari user.
ambari_repo_file = get_ambari_repo_file_full_name()
if ambari_repo_file:
if (os.path.exists(ambari_repo_file)):
ambari_repo_file_owner = get_file_owner(ambari_repo_file)
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((ambari_repo_file, "644", ambari_repo_file_owner, False))
print "Adjusting ambari-server permissions and ownership..."
for pack in configDefaults.NR_ADJUST_OWNERSHIP_LIST:
file = pack[0]
mod = pack[1]
user = pack[2].format(ambari_user)
recursive = pack[3]
print_info_msg("Setting file permissions: {0} {1} {2} {3}".format(file, mod, user, recursive))
set_file_permissions(file, mod, user, recursive)
for pack in configDefaults.NR_CHANGE_OWNERSHIP_LIST:
path = pack[0]
user = pack[1].format(ambari_user)
recursive = pack[2]
print_info_msg("Changing ownership: {0} {1} {2}".format(path, user, recursive))
change_owner(path, user, recursive)
def configure_ldap_password(options):
passwordDefault = ""
passwordPrompt = 'Enter Manager Password* : '
passwordPattern = ".*"
passwordDescr = "Invalid characters in password."
password = read_password(passwordDefault, passwordPattern, passwordPrompt, passwordDescr, options.ldap_manager_password)
return password
#
# Get the principal names from the given CSV file and set them on the given LDAP event specs.
#
def get_ldap_event_spec_names(file, specs, new_specs):
try:
if os.path.exists(file):
new_spec = new_specs[0]
with open(file, 'r') as names_file:
names = names_file.read()
new_spec['names'] = names.replace('\n', '').replace('\t', '')
names_file.close()
specs += new_specs
else:
err = 'Sync event creation failed. File ' + file + ' not found.'
raise FatalException(1, err)
except Exception as exception:
err = 'Caught exception reading file ' + file + ' : ' + str(exception)
raise FatalException(1, err)
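# Illustrative sketch, not part of the upstream script: demonstrates how
# get_ldap_event_spec_names() folds a CSV of principal names into a sync spec.
# The file contents and names below are made up for the example.
def _example_get_ldap_event_spec_names():
  specs = []
  new_specs = [{"principal_type": "users", "sync_type": "specific", "names": ""}]
  with tempfile.NamedTemporaryFile(delete=False) as names_file:
    names_file.write("alice,bob,carol\n")
  get_ldap_event_spec_names(names_file.name, specs, new_specs)
  # '\n' and '\t' are stripped from the file contents, so specs now holds:
  #   [{"principal_type": "users", "sync_type": "specific", "names": "alice,bob,carol"}]
  os.remove(names_file.name)
  return specs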
class LdapSyncOptions:
def __init__(self, options):
try:
self.ldap_sync_all = options.ldap_sync_all
except AttributeError:
self.ldap_sync_all = False
try:
self.ldap_sync_existing = options.ldap_sync_existing
except AttributeError:
self.ldap_sync_existing = False
try:
self.ldap_sync_users = options.ldap_sync_users
except AttributeError:
self.ldap_sync_users = None
try:
self.ldap_sync_groups = options.ldap_sync_groups
except AttributeError:
self.ldap_sync_groups = None
try:
self.ldap_sync_admin_name = options.ldap_sync_admin_name
except AttributeError:
self.ldap_sync_admin_name = None
try:
self.ldap_sync_admin_password = options.ldap_sync_admin_password
except AttributeError:
self.ldap_sync_admin_password = None
def no_ldap_sync_options_set(self):
return not self.ldap_sync_all and not self.ldap_sync_existing and self.ldap_sync_users is None and self.ldap_sync_groups is None
#
# Sync users and groups with configured LDAP
#
def sync_ldap(options):
logger.info("Sync users and groups with configured LDAP.")
properties = get_ambari_properties()
if get_value_from_properties(properties,CLIENT_SECURITY_KEY,"") == 'pam':
err = "PAM is configured. Can not sync LDAP."
raise FatalException(1, err)
server_status, pid = is_server_runing()
if not server_status:
err = 'Ambari Server is not running.'
raise FatalException(1, err)
if properties == -1:
raise FatalException(1, "Failed to read properties file.")
ldap_configured = properties.get_property(IS_LDAP_CONFIGURED)
if ldap_configured != 'true':
err = "LDAP is not configured. Run 'ambari-server setup-ldap' first."
raise FatalException(1, err)
# set ldap sync options
ldap_sync_options = LdapSyncOptions(options)
if ldap_sync_options.no_ldap_sync_options_set():
err = 'Must specify a sync option (all, existing, users or groups). Please invoke ambari-server.py --help to print the options.'
raise FatalException(1, err)
admin_login = ldap_sync_options.ldap_sync_admin_name\
if ldap_sync_options.ldap_sync_admin_name is not None and ldap_sync_options.ldap_sync_admin_name \
else get_validated_string_input(prompt="Enter Ambari Admin login: ", default=None,
pattern=None, description=None,
is_pass=False, allowEmpty=False)
admin_password = ldap_sync_options.ldap_sync_admin_password \
if ldap_sync_options.ldap_sync_admin_password is not None and ldap_sync_options.ldap_sync_admin_password \
else get_validated_string_input(prompt="Enter Ambari Admin password: ", default=None,
pattern=None, description=None,
is_pass=True, allowEmpty=False)
url = get_ambari_server_api_base(properties) + SERVER_API_LDAP_URL
admin_auth = base64.encodestring('%s:%s' % (admin_login, admin_password)).replace('\n', '')
request = urllib2.Request(url)
request.add_header('Authorization', 'Basic %s' % admin_auth)
request.add_header('X-Requested-By', 'ambari')
if ldap_sync_options.ldap_sync_all:
sys.stdout.write('Syncing all.')
bodies = [{"Event":{"specs":[{"principal_type":"users","sync_type":"all"},{"principal_type":"groups","sync_type":"all"}]}}]
elif ldap_sync_options.ldap_sync_existing:
sys.stdout.write('Syncing existing.')
bodies = [{"Event":{"specs":[{"principal_type":"users","sync_type":"existing"},{"principal_type":"groups","sync_type":"existing"}]}}]
else:
sys.stdout.write('Syncing specified users and groups.')
bodies = [{"Event":{"specs":[]}}]
body = bodies[0]
events = body['Event']
specs = events['specs']
if ldap_sync_options.ldap_sync_users is not None:
new_specs = [{"principal_type":"users","sync_type":"specific","names":""}]
get_ldap_event_spec_names(ldap_sync_options.ldap_sync_users, specs, new_specs)
if ldap_sync_options.ldap_sync_groups is not None:
new_specs = [{"principal_type":"groups","sync_type":"specific","names":""}]
get_ldap_event_spec_names(ldap_sync_options.ldap_sync_groups, specs, new_specs)
if get_verbose():
sys.stdout.write('\nCalling API ' + url + ' : ' + str(bodies) + '\n')
request.add_data(json.dumps(bodies))
request.get_method = lambda: 'POST'
try:
response = urllib2.urlopen(request)
except Exception as e:
err = 'Sync event creation failed. Error details: %s' % e
raise FatalException(1, err)
response_status_code = response.getcode()
if response_status_code != 201:
err = 'Error during syncing. Http status code - ' + str(response_status_code)
raise FatalException(1, err)
response_body = json.loads(response.read())
url = response_body['resources'][0]['href']
request = urllib2.Request(url)
request.add_header('Authorization', 'Basic %s' % admin_auth)
request.add_header('X-Requested-By', 'ambari')
body = [{"LDAP":{"synced_groups":"*","synced_users":"*"}}]
request.add_data(json.dumps(body))
request.get_method = lambda: 'GET'
request_in_progress = True
while request_in_progress:
sys.stdout.write('.')
sys.stdout.flush()
try:
response = urllib2.urlopen(request)
except Exception as e:
request_in_progress = False
err = 'Sync event check failed. Error details: %s' % e
raise FatalException(1, err)
response_status_code = response.getcode()
if response_status_code != 200:
err = 'Error during syncing. Http status code - ' + str(response_status_code)
raise FatalException(1, err)
response_body = json.loads(response.read())
sync_info = response_body['Event']
if sync_info['status'] == 'ERROR':
raise FatalException(1, str(sync_info['status_detail']))
elif sync_info['status'] == 'COMPLETE':
print '\n\nCompleted LDAP Sync.'
print 'Summary:'
for principal_type, summary in sync_info['summary'].iteritems():
print ' {0}:'.format(principal_type)
for action, amount in summary.iteritems():
print ' {0} = {1!s}'.format(action, amount)
request_in_progress = False
else:
time.sleep(1)
sys.stdout.write('\n')
sys.stdout.flush()
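# Illustrative sketch, not part of the upstream script: the request bodies that
# sync_ldap() POSTs to the 'ldap_sync_events' endpoint for each sync mode. The
# "alice,bob" names value is a made-up example of what get_ldap_event_spec_names()
# would fill in from a names file.
def _example_ldap_sync_request_bodies():
  sync_all = [{"Event": {"specs": [{"principal_type": "users", "sync_type": "all"},
                                   {"principal_type": "groups", "sync_type": "all"}]}}]
  sync_existing = [{"Event": {"specs": [{"principal_type": "users", "sync_type": "existing"},
                                        {"principal_type": "groups", "sync_type": "existing"}]}}]
  sync_specific = [{"Event": {"specs": [{"principal_type": "users", "sync_type": "specific",
                                         "names": "alice,bob"}]}}]
  # sync_ldap() expects HTTP 201 for the POST, then polls the returned resource
  # href with GET until Event.status is COMPLETE or ERROR.
  return sync_all, sync_existing, sync_specific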
def setup_master_key(options):
if not is_root():
warn = 'ambari-server setup-https is run as ' \
'non-root user, some sudo privileges might be required'
print warn
properties = get_ambari_properties()
if properties == -1:
raise FatalException(1, "Failed to read properties file.")
db_windows_auth_prop = properties.get_property(JDBC_USE_INTEGRATED_AUTH_PROPERTY)
db_sql_auth = False if db_windows_auth_prop and db_windows_auth_prop.lower() == 'true' else True
db_password = properties.get_property(JDBC_PASSWORD_PROPERTY)
  # "encrypt-passwords" cannot be called before "setup"
if db_sql_auth and not db_password:
print 'Please call "setup" before "encrypt-passwords". Exiting...'
return 1
# Check configuration for location of master key
isSecure = get_is_secure(properties)
(isPersisted, masterKeyFile) = get_is_persisted(properties)
# Read clear text DB password from file
if db_sql_auth and not is_alias_string(db_password) and os.path.isfile(db_password):
with open(db_password, 'r') as passwdfile:
db_password = passwdfile.read()
ldap_password = properties.get_property(LDAP_MGR_PASSWORD_PROPERTY)
if ldap_password:
# Read clear text LDAP password from file
if not is_alias_string(ldap_password) and os.path.isfile(ldap_password):
with open(ldap_password, 'r') as passwdfile:
ldap_password = passwdfile.read()
ts_password = properties.get_property(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
resetKey = False
masterKey = None
if isSecure:
print "Password encryption is enabled."
resetKey = True if options.security_option is not None else get_YN_input("Do you want to reset Master Key? [y/n] (n): ", False)
  # For encrypting only the unencrypted passwords without resetting the key,
  # ask for the master key if it is not persisted.
if isSecure and not isPersisted and not resetKey:
print "Master Key not persisted."
masterKey = get_original_master_key(properties, options)
pass
# Make sure both passwords are clear-text if master key is lost
if resetKey:
if not isPersisted:
print "Master Key not persisted."
masterKey = get_original_master_key(properties, options)
      # Unable to get the right master key, or the question was skipped with <enter>
if not masterKey:
print "To disable encryption, do the following:"
print "- Edit " + find_properties_file() + \
" and set " + SECURITY_IS_ENCRYPTION_ENABLED + " = " + "false."
err = "{0} is already encrypted. Please call {1} to store unencrypted" \
" password and call 'encrypt-passwords' again."
if db_sql_auth and db_password and is_alias_string(db_password):
print err.format('- Database password', "'" + SETUP_ACTION + "'")
if ldap_password and is_alias_string(ldap_password):
print err.format('- LDAP manager password', "'" + LDAP_SETUP_ACTION + "'")
if ts_password and is_alias_string(ts_password):
print err.format('TrustStore password', "'" + LDAP_SETUP_ACTION + "'")
return 1
pass
pass
pass
# Read back any encrypted passwords
if db_sql_auth and db_password and is_alias_string(db_password):
db_password = read_passwd_for_alias(JDBC_RCA_PASSWORD_ALIAS, masterKey)
if ldap_password and is_alias_string(ldap_password):
ldap_password = read_passwd_for_alias(LDAP_MGR_PASSWORD_ALIAS, masterKey)
if ts_password and is_alias_string(ts_password):
ts_password = read_passwd_for_alias(SSL_TRUSTSTORE_PASSWORD_ALIAS, masterKey)
# Read master key, if non-secure or reset is true
if resetKey or not isSecure:
masterKey = read_master_key(resetKey, options)
persist = get_YN_input("Do you want to persist master key. If you choose " \
"not to persist, you need to provide the Master " \
"Key while starting the ambari server as an env " \
"variable named " + SECURITY_KEY_ENV_VAR_NAME + \
" or the start will prompt for the master key."
" Persist [y/n] (y)? ", True, options.master_key_persist)
if persist:
save_master_key(options, masterKey, get_master_key_location(properties) + os.sep +
SECURITY_MASTER_KEY_FILENAME, persist)
elif not persist and masterKeyFile:
try:
os.remove(masterKeyFile)
print_info_msg("Deleting master key file at location: " + str(
masterKeyFile))
except Exception, e:
print 'ERROR: Could not remove master key file. %s' % e
# Blow up the credential store made with previous key, if any
store_file = get_credential_store_location(properties)
if os.path.exists(store_file):
try:
os.remove(store_file)
except:
print_warning_msg("Failed to remove credential store file.")
pass
pass
pass
propertyMap = {SECURITY_IS_ENCRYPTION_ENABLED: 'true'}
# Encrypt only un-encrypted passwords
if db_password and not is_alias_string(db_password):
retCode = save_passwd_for_alias(JDBC_RCA_PASSWORD_ALIAS, db_password, masterKey)
if retCode != 0:
print 'Failed to save secure database password.'
else:
propertyMap[JDBC_PASSWORD_PROPERTY] = get_alias_string(JDBC_RCA_PASSWORD_ALIAS)
remove_password_file(JDBC_PASSWORD_FILENAME)
if properties.get_property(JDBC_RCA_PASSWORD_FILE_PROPERTY):
propertyMap[JDBC_RCA_PASSWORD_FILE_PROPERTY] = get_alias_string(JDBC_RCA_PASSWORD_ALIAS)
pass
if ldap_password and not is_alias_string(ldap_password):
retCode = save_passwd_for_alias(LDAP_MGR_PASSWORD_ALIAS, ldap_password, masterKey)
if retCode != 0:
print 'Failed to save secure LDAP password.'
else:
propertyMap[LDAP_MGR_PASSWORD_PROPERTY] = get_alias_string(LDAP_MGR_PASSWORD_ALIAS)
remove_password_file(LDAP_MGR_PASSWORD_FILENAME)
pass
if ts_password and not is_alias_string(ts_password):
retCode = save_passwd_for_alias(SSL_TRUSTSTORE_PASSWORD_ALIAS, ts_password, masterKey)
if retCode != 0:
print 'Failed to save secure TrustStore password.'
else:
propertyMap[SSL_TRUSTSTORE_PASSWORD_PROPERTY] = get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS)
pass
update_properties_2(properties, propertyMap)
# Since files for store and master are created we need to ensure correct
# permissions
ambari_user = read_ambari_user()
if ambari_user:
adjust_directory_permissions(ambari_user)
return 0
def setup_ambari_krb5_jaas(options):
jaas_conf_file = search_file(SECURITY_KERBEROS_JASS_FILENAME, get_conf_dir())
if os.path.exists(jaas_conf_file):
print 'Setting up Ambari kerberos JAAS configuration to access ' + \
'secured Hadoop daemons...'
principal = get_validated_string_input('Enter ambari server\'s kerberos '
'principal name ([email protected]): ', '[email protected]', '.*', '', False,
False, answer = options.jaas_principal)
keytab = get_validated_string_input('Enter keytab path for ambari '
'server\'s kerberos principal: ',
'/etc/security/keytabs/ambari.keytab', '.*', False, False,
validatorFunction=is_valid_filepath, answer = options.jaas_keytab)
for line in fileinput.FileInput(jaas_conf_file, inplace=1):
line = re.sub('keyTab=.*$', 'keyTab="' + keytab + '"', line)
line = re.sub('principal=.*$', 'principal="' + principal + '"', line)
print line,
write_property(CHECK_AMBARI_KRB_JAAS_CONFIGURATION_PROPERTY, "true")
else:
raise NonFatalException('No jaas config file found at location: ' +
jaas_conf_file)
class LdapPropTemplate:
def __init__(self, properties, i_option, i_prop_name, i_prop_val_pattern, i_prompt_regex, i_allow_empty_prompt, i_prop_name_default=None):
self.prop_name = i_prop_name
self.option = i_option
self.ldap_prop_name = get_value_from_properties(properties, i_prop_name, i_prop_name_default)
self.ldap_prop_val_prompt = i_prop_val_pattern.format(get_prompt_default(self.ldap_prop_name))
self.prompt_regex = i_prompt_regex
self.allow_empty_prompt = i_allow_empty_prompt
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def init_ldap_properties_list_reqd(properties, options):
# python2.x dict is not ordered
ldap_properties = [
LdapPropTemplate(properties, options.ldap_url, "authentication.ldap.primaryUrl", "Primary URL* {{host:port}} {0}: ", REGEX_HOSTNAME_PORT, False),
LdapPropTemplate(properties, options.ldap_secondary_url, "authentication.ldap.secondaryUrl", "Secondary URL {{host:port}} {0}: ", REGEX_HOSTNAME_PORT, True),
LdapPropTemplate(properties, options.ldap_ssl, "authentication.ldap.useSSL", "Use SSL* [true/false] {0}: ", REGEX_TRUE_FALSE, False, "false"),
LdapPropTemplate(properties, options.ldap_user_attr, "authentication.ldap.usernameAttribute", "User name attribute* {0}: ", REGEX_ANYTHING, False, "uid"),
LdapPropTemplate(properties, options.ldap_base_dn, "authentication.ldap.baseDn", "Base DN* {0}: ", REGEX_ANYTHING, False),
LdapPropTemplate(properties, options.ldap_referral, "authentication.ldap.referral", "Referral method [follow/ignore] {0}: ", REGEX_REFERRAL, True),
    LdapPropTemplate(properties, options.ldap_bind_anonym, "authentication.ldap.bindAnonymously", "Bind anonymously* [true/false] {0}: ", REGEX_TRUE_FALSE, False, "false")
]
return ldap_properties
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def init_ldap_properties_list_reqd(properties, options):
ldap_properties = [
LdapPropTemplate(properties, options.ldap_url, LDAP_PRIMARY_URL_PROPERTY, "Primary URL* {{host:port}} {0}: ", REGEX_HOSTNAME_PORT, False),
LdapPropTemplate(properties, options.ldap_secondary_url, "authentication.ldap.secondaryUrl", "Secondary URL {{host:port}} {0}: ", REGEX_HOSTNAME_PORT, True),
LdapPropTemplate(properties, options.ldap_ssl, "authentication.ldap.useSSL", "Use SSL* [true/false] {0}: ", REGEX_TRUE_FALSE, False, "false"),
LdapPropTemplate(properties, options.ldap_user_class, "authentication.ldap.userObjectClass", "User object class* {0}: ", REGEX_ANYTHING, False, "posixAccount"),
LdapPropTemplate(properties, options.ldap_user_attr, "authentication.ldap.usernameAttribute", "User name attribute* {0}: ", REGEX_ANYTHING, False, "uid"),
LdapPropTemplate(properties, options.ldap_group_class, "authentication.ldap.groupObjectClass", "Group object class* {0}: ", REGEX_ANYTHING, False, "posixGroup"),
LdapPropTemplate(properties, options.ldap_group_attr, "authentication.ldap.groupNamingAttr", "Group name attribute* {0}: ", REGEX_ANYTHING, False, "cn"),
LdapPropTemplate(properties, options.ldap_member_attr, "authentication.ldap.groupMembershipAttr", "Group member attribute* {0}: ", REGEX_ANYTHING, False, "memberUid"),
LdapPropTemplate(properties, options.ldap_dn, "authentication.ldap.dnAttribute", "Distinguished name attribute* {0}: ", REGEX_ANYTHING, False, "dn"),
LdapPropTemplate(properties, options.ldap_base_dn, "authentication.ldap.baseDn", "Base DN* {0}: ", REGEX_ANYTHING, False),
LdapPropTemplate(properties, options.ldap_referral, "authentication.ldap.referral", "Referral method [follow/ignore] {0}: ", REGEX_REFERRAL, True),
LdapPropTemplate(properties, options.ldap_bind_anonym, "authentication.ldap.bindAnonymously", "Bind anonymously* [true/false] {0}: ", REGEX_TRUE_FALSE, False, "false"),
LdapPropTemplate(properties, options.ldap_sync_username_collisions_behavior, "ldap.sync.username.collision.behavior", "Handling behavior for username collisions [convert/skip] for LDAP sync* {0}: ", REGEX_SKIP_CONVERT, False, "convert"),
]
return ldap_properties
def setup_ldap(options):
logger.info("Setup LDAP.")
if not is_root():
err = 'Ambari-server setup-ldap should be run with ' \
'root-level privileges'
raise FatalException(4, err)
properties = get_ambari_properties()
if get_value_from_properties(properties,CLIENT_SECURITY_KEY,"") == 'pam':
query = "PAM is currently configured, do you wish to use LDAP instead [y/n] (n)? "
if get_YN_input(query, False):
pass
else:
err = "PAM is configured. Can not setup LDAP."
raise FatalException(1, err)
isSecure = get_is_secure(properties)
ldap_property_list_reqd = init_ldap_properties_list_reqd(properties, options)
ldap_property_list_opt = ["authentication.ldap.managerDn",
LDAP_MGR_PASSWORD_PROPERTY,
SSL_TRUSTSTORE_TYPE_PROPERTY,
SSL_TRUSTSTORE_PATH_PROPERTY,
SSL_TRUSTSTORE_PASSWORD_PROPERTY]
ldap_property_list_truststore=[SSL_TRUSTSTORE_TYPE_PROPERTY,
SSL_TRUSTSTORE_PATH_PROPERTY,
SSL_TRUSTSTORE_PASSWORD_PROPERTY]
ldap_property_list_passwords=[LDAP_MGR_PASSWORD_PROPERTY,
SSL_TRUSTSTORE_PASSWORD_PROPERTY]
LDAP_MGR_DN_DEFAULT = get_value_from_properties(properties, ldap_property_list_opt[0])
SSL_TRUSTSTORE_TYPE_DEFAULT = get_value_from_properties(properties, SSL_TRUSTSTORE_TYPE_PROPERTY, "jks")
SSL_TRUSTSTORE_PATH_DEFAULT = get_value_from_properties(properties, SSL_TRUSTSTORE_PATH_PROPERTY)
ldap_property_value_map = {}
for ldap_prop in ldap_property_list_reqd:
input = get_validated_string_input(ldap_prop.ldap_prop_val_prompt, ldap_prop.ldap_prop_name, ldap_prop.prompt_regex,
"Invalid characters in the input!", False, ldap_prop.allow_empty_prompt,
answer = ldap_prop.option)
if input is not None and input != "":
ldap_property_value_map[ldap_prop.prop_name] = input
bindAnonymously = ldap_property_value_map["authentication.ldap.bindAnonymously"]
anonymous = (bindAnonymously and bindAnonymously.lower() == 'true')
mgr_password = None
# Ask for manager credentials only if bindAnonymously is false
if not anonymous:
username = get_validated_string_input("Manager DN* {0}: ".format(
get_prompt_default(LDAP_MGR_DN_DEFAULT)), LDAP_MGR_DN_DEFAULT, ".*",
"Invalid characters in the input!", False, False, answer = options.ldap_manager_dn)
ldap_property_value_map[LDAP_MGR_USERNAME_PROPERTY] = username
mgr_password = configure_ldap_password(options)
ldap_property_value_map[LDAP_MGR_PASSWORD_PROPERTY] = mgr_password
useSSL = ldap_property_value_map["authentication.ldap.useSSL"]
ldaps = (useSSL and useSSL.lower() == 'true')
ts_password = None
if ldaps:
truststore_default = "n"
truststore_set = bool(SSL_TRUSTSTORE_PATH_DEFAULT)
if truststore_set:
truststore_default = "y"
custom_trust_store = True if options.trust_store_path is not None and options.trust_store_path else False
if not custom_trust_store:
custom_trust_store = get_YN_input("Do you want to provide custom TrustStore for Ambari [y/n] ({0})?".
format(truststore_default),
truststore_set)
if custom_trust_store:
ts_type = get_validated_string_input("TrustStore type [jks/jceks/pkcs12] {0}:".format(get_prompt_default(SSL_TRUSTSTORE_TYPE_DEFAULT)),
SSL_TRUSTSTORE_TYPE_DEFAULT, "^(jks|jceks|pkcs12)?$", "Wrong type", False, answer=options.trust_store_type)
ts_path = None
while True:
ts_path = get_validated_string_input("Path to TrustStore file {0}:".format(get_prompt_default(SSL_TRUSTSTORE_PATH_DEFAULT)),
SSL_TRUSTSTORE_PATH_DEFAULT, ".*", False, False, answer = options.trust_store_path)
if os.path.exists(ts_path):
break
else:
print 'File not found.'
hasAnswer = options.trust_store_path is not None and options.trust_store_path
quit_if_has_answer(hasAnswer)
ts_password = read_password("", ".*", "Password for TrustStore:", "Invalid characters in password", options.trust_store_password)
ldap_property_value_map[SSL_TRUSTSTORE_TYPE_PROPERTY] = ts_type
ldap_property_value_map[SSL_TRUSTSTORE_PATH_PROPERTY] = ts_path
ldap_property_value_map[SSL_TRUSTSTORE_PASSWORD_PROPERTY] = ts_password
pass
elif properties.get_property(SSL_TRUSTSTORE_TYPE_PROPERTY):
print 'The TrustStore is already configured: '
print ' ' + SSL_TRUSTSTORE_TYPE_PROPERTY + ' = ' + properties.get_property(SSL_TRUSTSTORE_TYPE_PROPERTY)
print ' ' + SSL_TRUSTSTORE_PATH_PROPERTY + ' = ' + properties.get_property(SSL_TRUSTSTORE_PATH_PROPERTY)
print ' ' + SSL_TRUSTSTORE_PASSWORD_PROPERTY + ' = ' + properties.get_property(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
if get_YN_input("Do you want to remove these properties [y/n] (y)? ", True, options.trust_store_reconfigure):
properties.removeOldProp(SSL_TRUSTSTORE_TYPE_PROPERTY)
properties.removeOldProp(SSL_TRUSTSTORE_PATH_PROPERTY)
properties.removeOldProp(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
pass
pass
print '=' * 20
print 'Review Settings'
print '=' * 20
  for ldap_prop in ldap_property_list_reqd:
    if ldap_prop.prop_name in ldap_property_value_map:
      print("%s: %s" % (ldap_prop.prop_name, ldap_property_value_map[ldap_prop.prop_name]))
for property in ldap_property_list_opt:
if ldap_property_value_map.has_key(property):
if property not in ldap_property_list_passwords:
print("%s: %s" % (property, ldap_property_value_map[property]))
else:
print("%s: %s" % (property, BLIND_PASSWORD))
save_settings = True if options.ldap_save_settings is not None else get_YN_input("Save settings [y/n] (y)? ", True)
if save_settings:
ldap_property_value_map[CLIENT_SECURITY_KEY] = 'ldap'
if isSecure:
if mgr_password:
encrypted_passwd = encrypt_password(LDAP_MGR_PASSWORD_ALIAS, mgr_password, options)
if mgr_password != encrypted_passwd:
ldap_property_value_map[LDAP_MGR_PASSWORD_PROPERTY] = encrypted_passwd
pass
if ts_password:
encrypted_passwd = encrypt_password(SSL_TRUSTSTORE_PASSWORD_ALIAS, ts_password, options)
if ts_password != encrypted_passwd:
ldap_property_value_map[SSL_TRUSTSTORE_PASSWORD_PROPERTY] = encrypted_passwd
pass
pass
# Persisting values
ldap_property_value_map[IS_LDAP_CONFIGURED] = "true"
if mgr_password:
ldap_property_value_map[LDAP_MGR_PASSWORD_PROPERTY] = store_password_file(mgr_password, LDAP_MGR_PASSWORD_FILENAME)
update_properties_2(properties, ldap_property_value_map)
print 'Saving...done'
return 0
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def generate_env(options, ambari_user, current_user):
properties = get_ambari_properties()
isSecure = get_is_secure(properties)
(isPersisted, masterKeyFile) = get_is_persisted(properties)
environ = os.environ.copy()
# Need to handle master key not persisted scenario
if isSecure and not masterKeyFile:
prompt = False
masterKey = environ.get(SECURITY_KEY_ENV_VAR_NAME)
if masterKey is not None and masterKey != "":
pass
else:
keyLocation = environ.get(SECURITY_MASTER_KEY_LOCATION)
if keyLocation is not None:
try:
# Verify master key can be read by the java process
with open(keyLocation, 'r'):
pass
except IOError:
print_warning_msg("Cannot read Master key from path specified in "
"environemnt.")
prompt = True
else:
# Key not provided in the environment
prompt = True
if prompt:
import pwd
masterKey = get_original_master_key(properties)
environ[SECURITY_KEY_ENV_VAR_NAME] = masterKey
tempDir = tempfile.gettempdir()
tempFilePath = tempDir + os.sep + "masterkey"
save_master_key(options, masterKey, tempFilePath, True)
if ambari_user != current_user:
uid = pwd.getpwnam(ambari_user).pw_uid
gid = pwd.getpwnam(ambari_user).pw_gid
os.chown(tempFilePath, uid, gid)
else:
os.chmod(tempFilePath, stat.S_IREAD | stat.S_IWRITE)
if tempFilePath is not None:
environ[SECURITY_MASTER_KEY_LOCATION] = tempFilePath
return environ
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def generate_env(options, ambari_user, current_user):
return os.environ.copy()
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def ensure_can_start_under_current_user(ambari_user):
#Ignore the requirement to run as root. In Windows, by default the child process inherits the security context
# and the environment from the parent process.
return ""
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def ensure_can_start_under_current_user(ambari_user):
current_user = getpass.getuser()
if ambari_user is None:
err = "Unable to detect a system user for Ambari Server.\n" + SETUP_OR_UPGRADE_MSG
raise FatalException(1, err)
if current_user != ambari_user and not is_root():
err = "Unable to start Ambari Server as user {0}. Please either run \"ambari-server start\" " \
"command as root, as sudo or as user \"{1}\"".format(current_user, ambari_user)
raise FatalException(1, err)
return current_user
class PamPropTemplate:
def __init__(self, properties, i_option, i_prop_name, i_prop_val_pattern, i_prompt_regex, i_allow_empty_prompt, i_prop_name_default=None):
self.prop_name = i_prop_name
self.option = i_option
self.pam_prop_name = get_value_from_properties(properties, i_prop_name, i_prop_name_default)
self.pam_prop_val_prompt = i_prop_val_pattern.format(get_prompt_default(self.pam_prop_name))
self.prompt_regex = i_prompt_regex
self.allow_empty_prompt = i_allow_empty_prompt
def init_pam_properties_list_reqd(properties, options):
properties = [
PamPropTemplate(properties, options.pam_config_file, PAM_CONFIG_FILE, "PAM configuration file* {0}: ", REGEX_ANYTHING, False, "/etc/pam.d/ambari"),
PamPropTemplate(properties, options.pam_auto_create_groups, AUTO_GROUP_CREATION, "Do you want to allow automatic group creation* [true/false] {0}: ", REGEX_TRUE_FALSE, False, "false"),
]
return properties
def setup_pam(options):
if not is_root():
err = 'Ambari-server setup-pam should be run with root-level privileges'
raise FatalException(4, err)
properties = get_ambari_properties()
if get_value_from_properties(properties,CLIENT_SECURITY_KEY,"") == 'ldap':
query = "LDAP is currently configured, do you wish to use PAM instead [y/n] (n)? "
if get_YN_input(query, False):
pass
else:
err = "LDAP is configured. Can not setup PAM."
raise FatalException(1, err)
pam_property_list_reqd = init_pam_properties_list_reqd(properties, options)
pam_property_value_map = {}
pam_property_value_map[CLIENT_SECURITY_KEY] = 'pam'
for pam_prop in pam_property_list_reqd:
input = get_validated_string_input(pam_prop.pam_prop_val_prompt, pam_prop.pam_prop_name, pam_prop.prompt_regex,
"Invalid characters in the input!", False, pam_prop.allow_empty_prompt,
answer = pam_prop.option)
if input is not None and input != "":
pam_property_value_map[pam_prop.prop_name] = input
# Verify that the PAM config file exists, else show warning...
pam_config_file = pam_property_value_map[PAM_CONFIG_FILE]
if not os.path.exists(pam_config_file):
print_warning_msg("The PAM configuration file, {0} does not exist. " \
"Please create it before restarting Ambari.".format(pam_config_file))
update_properties_2(properties, pam_property_value_map)
print 'Saving...done'
return 0
#
# Migration of LDAP users & groups to PAM
#
def migrate_ldap_pam(args):
properties = get_ambari_properties()
if get_value_from_properties(properties,CLIENT_SECURITY_KEY,"") != 'pam':
err = "PAM is not configured. Please configure PAM authentication first."
raise FatalException(1, err)
db_title = get_db_type(properties).title
confirm = get_YN_input("Ambari Server configured for %s. Confirm "
"you have made a backup of the Ambari Server database [y/n] (y)? " % db_title, True)
if not confirm:
print_error_msg("Database backup is not confirmed")
return 1
jdk_path = get_java_exe_path()
if jdk_path is None:
print_error_msg("No JDK found, please run the \"setup\" "
"command to install a JDK automatically or install any "
"JDK manually to " + configDefaults.JDK_INSTALL_DIR)
return 1
# At this point, the args does not have the ambari database information.
# Augment the args with the correct ambari database information
parse_properties_file(args)
ensure_jdbc_driver_is_installed(args, properties)
print 'Migrating LDAP Users & Groups to PAM'
serverClassPath = ServerClassPath(properties, args)
class_path = serverClassPath.get_full_ambari_classpath_escaped_for_shell()
command = LDAP_TO_PAM_MIGRATION_HELPER_CMD.format(jdk_path, class_path)
ambari_user = read_ambari_user()
current_user = ensure_can_start_under_current_user(ambari_user)
environ = generate_env(args, ambari_user, current_user)
(retcode, stdout, stderr) = run_os_command(command, env=environ)
print_info_msg("Return code from LDAP to PAM migration command, retcode = " + str(retcode))
if stdout:
print "Console output from LDAP to PAM migration command:"
print stdout
print
if stderr:
print "Error output from LDAP to PAM migration command:"
print stderr
print
if retcode > 0:
print_error_msg("Error executing LDAP to PAM migration, please check the server logs.")
else:
print_info_msg('LDAP to PAM migration completed')
return retcode
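# Illustrative sketch, not part of the upstream script: how migrate_ldap_pam()
# expands LDAP_TO_PAM_MIGRATION_HELPER_CMD. The java path and classpath below
# are hypothetical placeholders for get_java_exe_path() and the escaped
# ServerClassPath value.
def _example_ldap_to_pam_command():
  jdk_path = "/usr/jdk64/jdk1.8.0_112/bin/java"
  class_path = "'/etc/ambari-server/conf:/usr/lib/ambari-server/*'"
  # Yields: "<java> -cp <classpath> org.apache.ambari.server.security.authentication.LdapToPamMigrationHelper
  #          >> <ambari-server out file> 2>&1"
  return LDAP_TO_PAM_MIGRATION_HELPER_CMD.format(jdk_path, class_path)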
|
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
try:
# Django >= 1.7
from django.test import override_settings
except ImportError:
# Django <= 1.6
from django.test.utils import override_settings
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.utils.timezone import utc, localtime
from django.utils import timezone
import pytz
from notifications import notify
from notifications.models import Notification
from notifications.utils import id2slug
class NotificationTest(TestCase):
@override_settings(USE_TZ=True)
@override_settings(TIME_ZONE='Asia/Shanghai')
def test_use_timezone(self):
from_user = User.objects.create(username="from", password="pwd", email="[email protected]")
to_user = User.objects.create(username="to", password="pwd", email="[email protected]")
notify.send(from_user, recipient=to_user, verb='commented', action_object=from_user)
notification = Notification.objects.get(recipient=to_user)
delta = timezone.now().replace(tzinfo=utc) - localtime(notification.timestamp,pytz.timezone(settings.TIME_ZONE))
self.assertTrue(delta.seconds < 60)
        # The delta between the two events is still less than a second despite the different timezones:
        # now() and the notification timestamp are taken moments apart, so the difference reflects
        # elapsed time rather than the 8-hour offset an incorrect conversion would produce.
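        # For instance, 12:00:30 UTC rendered via localtime() in Asia/Shanghai is
        # 20:00:30+08:00 - the same instant - so subtracting the two aware datetimes
        # gives the real elapsed seconds, not the wall-clock offset.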
@override_settings(USE_TZ=False)
@override_settings(TIME_ZONE='Asia/Shanghai')
def test_disable_timezone(self):
from_user = User.objects.create(username="from2", password="pwd", email="[email protected]")
to_user = User.objects.create(username="to2", password="pwd", email="[email protected]")
notify.send(from_user, recipient=to_user, verb='commented', action_object=from_user)
notification = Notification.objects.get(recipient=to_user)
delta = timezone.now() - notification.timestamp
self.assertTrue(delta.seconds < 60)
class NotificationManagersTest(TestCase):
def setUp(self):
self.message_count = 10
self.from_user = User.objects.create(username="from2", password="pwd", email="[email protected]")
self.to_user = User.objects.create(username="to2", password="pwd", email="[email protected]")
for i in range(self.message_count):
notify.send(self.from_user, recipient=self.to_user, verb='commented', action_object=self.from_user)
def test_unread_manager(self):
self.assertEqual(Notification.objects.unread().count(), self.message_count)
n = Notification.objects.filter(recipient=self.to_user).first()
n.mark_as_read()
self.assertEqual(Notification.objects.unread().count(), self.message_count-1)
for n in Notification.objects.unread():
self.assertTrue(n.unread)
def test_read_manager(self):
self.assertEqual(Notification.objects.unread().count(), self.message_count)
n = Notification.objects.filter(recipient=self.to_user).first()
n.mark_as_read()
self.assertEqual(Notification.objects.read().count(),1)
for n in Notification.objects.read():
self.assertFalse(n.unread)
def test_mark_all_as_read_manager(self):
self.assertEqual(Notification.objects.unread().count(), self.message_count)
Notification.objects.filter(recipient=self.to_user).mark_all_as_read()
self.assertEqual(Notification.objects.unread().count(),0)
def test_mark_all_as_unread_manager(self):
self.assertEqual(Notification.objects.unread().count(), self.message_count)
Notification.objects.filter(recipient=self.to_user).mark_all_as_read()
self.assertEqual(Notification.objects.unread().count(),0)
Notification.objects.filter(recipient=self.to_user).mark_all_as_unread()
self.assertEqual(Notification.objects.unread().count(), self.message_count)
def test_mark_all_deleted_manager_without_soft_delete(self):
self.assertRaises(ImproperlyConfigured, Notification.objects.active)
self.assertRaises(ImproperlyConfigured, Notification.objects.active)
self.assertRaises(ImproperlyConfigured, Notification.objects.mark_all_as_deleted)
self.assertRaises(ImproperlyConfigured, Notification.objects.mark_all_as_active)
@override_settings(NOTIFICATIONS_SOFT_DELETE=True)
def test_mark_all_deleted_manager(self):
n = Notification.objects.filter(recipient=self.to_user).first()
n.mark_as_read()
self.assertEqual(Notification.objects.read().count(), 1)
self.assertEqual(Notification.objects.unread().count(), self.message_count-1)
self.assertEqual(Notification.objects.active().count(), self.message_count)
self.assertEqual(Notification.objects.deleted().count(), 0)
Notification.objects.mark_all_as_deleted()
self.assertEqual(Notification.objects.read().count(), 0)
self.assertEqual(Notification.objects.unread().count(), 0)
self.assertEqual(Notification.objects.active().count(), 0)
self.assertEqual(Notification.objects.deleted().count(), self.message_count)
Notification.objects.mark_all_as_active()
self.assertEqual(Notification.objects.read().count(), 1)
self.assertEqual(Notification.objects.unread().count(), self.message_count-1)
self.assertEqual(Notification.objects.active().count(), self.message_count)
self.assertEqual(Notification.objects.deleted().count(), 0)
class NotificationTestPages(TestCase):
def setUp(self):
self.message_count = 10
self.from_user = User.objects.create_user(username="from", password="pwd", email="[email protected]")
self.to_user = User.objects.create_user(username="to", password="pwd", email="[email protected]")
self.to_user.is_staff = True
self.to_user.save()
for i in range(self.message_count):
notify.send(self.from_user, recipient=self.to_user, verb='commented', action_object=self.from_user)
def logout(self):
self.client.post(reverse('admin:logout')+'?next=/', {})
def login(self,username,password):
self.logout()
response = self.client.post(reverse('login'), {'username': username, 'password': password})
self.assertEqual(response.status_code,302)
return response
def test_all_messages_page(self):
self.login('to','pwd')
response = self.client.get(reverse('notifications:all'))
self.assertEqual(response.status_code,200)
self.assertEqual(len(response.context['notifications']),len(self.to_user.notifications.all()))
def test_unread_messages_pages(self):
self.login('to','pwd')
response = self.client.get(reverse('notifications:unread'))
self.assertEqual(response.status_code,200)
self.assertEqual(len(response.context['notifications']),len(self.to_user.notifications.unread()))
self.assertEqual(len(response.context['notifications']), self.message_count)
for i,n in enumerate(self.to_user.notifications.all()):
if i%3 == 0:
response = self.client.get(reverse('notifications:mark_as_read',args=[id2slug(n.id)]))
self.assertEqual(response.status_code,302)
response = self.client.get(reverse('notifications:unread'))
self.assertEqual(response.status_code,200)
self.assertEqual(len(response.context['notifications']),len(self.to_user.notifications.unread()))
self.assertTrue(len(response.context['notifications']) < self.message_count)
response = self.client.get(reverse('notifications:mark_all_as_read'))
self.assertRedirects(response,reverse('notifications:all'))
response = self.client.get(reverse('notifications:unread'))
self.assertEqual(len(response.context['notifications']),len(self.to_user.notifications.unread()))
self.assertEqual(len(response.context['notifications']),0)
def test_next_pages(self):
self.login('to','pwd')
response = self.client.get(reverse('notifications:mark_all_as_read')+"?next="+reverse('notifications:unread'))
self.assertRedirects(response,reverse('notifications:unread'))
slug = id2slug(self.to_user.notifications.first().id)
response = self.client.get(reverse('notifications:mark_as_read',args=[slug])+"?next="+reverse('notifications:unread'))
self.assertRedirects(response,reverse('notifications:unread'))
slug = id2slug(self.to_user.notifications.first().id)
response = self.client.get(reverse('notifications:mark_as_unread',args=[slug])+"?next="+reverse('notifications:unread'))
self.assertRedirects(response,reverse('notifications:unread'))
def test_delete_messages_pages(self):
self.login('to', 'pwd')
slug = id2slug(self.to_user.notifications.first().id)
response = self.client.get(reverse('notifications:delete', args=[slug]))
self.assertRedirects(response, reverse('notifications:all'))
response = self.client.get(reverse('notifications:all'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.all()))
self.assertEqual(len(response.context['notifications']), self.message_count-1)
response = self.client.get(reverse('notifications:unread'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.unread()))
self.assertEqual(len(response.context['notifications']), self.message_count-1)
@override_settings(NOTIFICATIONS_SOFT_DELETE=True)
def test_soft_delete_messages_manager(self):
self.login('to', 'pwd')
slug = id2slug(self.to_user.notifications.first().id)
response = self.client.get(reverse('notifications:delete', args=[slug]))
self.assertRedirects(response, reverse('notifications:all'))
response = self.client.get(reverse('notifications:all'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.active()))
self.assertEqual(len(response.context['notifications']), self.message_count-1)
response = self.client.get(reverse('notifications:unread'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.unread()))
self.assertEqual(len(response.context['notifications']), self.message_count-1)
|
|
# -*- coding: utf-8 -*-
#
# GUI and Layout created by: Qt Designer 4.8.7
# Code for UI-Elements generated by: PyQt4 UI code generator 4.11.4
#
import sys
import os
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import SIGNAL
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.pyplot as plt
import networkx as nx
from tsp_worker import Problem
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
PROBLEMS_DIR = os.path.join(ROOT_DIR, 'problems')
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Tsp(QtGui.QWidget):
def __init__(self):
self.problem = None
QtGui.QWidget.__init__(self)
self.setupUi(self)
def setupUi(self, TSP):
TSP.setObjectName(_fromUtf8("TSP"))
TSP.resize(1072, 761)
ten_font = QtGui.QFont()
ten_font.setPointSize(10)
bold_font = QtGui.QFont()
bold_font.setBold(True)
bold_font.setWeight(75)
bold_ten_font = QtGui.QFont()
bold_ten_font.setPointSize(10)
bold_ten_font.setBold(True)
bold_ten_font.setWeight(75)
self.verticalLayout_3 = QtGui.QVBoxLayout(TSP)
self.verticalLayout_3.setMargin(5)
self.verticalLayout_3.setSpacing(5)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.widget = QtGui.QWidget(TSP)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setObjectName(_fromUtf8("widget"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.widget)
self.verticalLayout_4.setMargin(5)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.gridLayout_4 = QtGui.QGridLayout()
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.iterationLabel = QtGui.QLabel(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(2)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.iterationLabel.sizePolicy().hasHeightForWidth())
self.iterationLabel.setSizePolicy(sizePolicy)
self.iterationLabel.setFont(bold_font)
self.iterationLabel.setAlignment(QtCore.Qt.AlignCenter)
self.iterationLabel.setObjectName(_fromUtf8("iterationLabel"))
self.gridLayout_4.addWidget(self.iterationLabel, 0, 2, 1, 1)
spacerItem = QtGui.QSpacerItem(80, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem, 0, 1, 1, 1)
spacerItem1 = QtGui.QSpacerItem(60, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem1, 0, 4, 1, 1)
spacerItem2 = QtGui.QSpacerItem(100, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem2, 0, 8, 1, 1)
self.noimproveBox = QtGui.QSpinBox(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.noimproveBox.sizePolicy().hasHeightForWidth())
self.noimproveBox.setSizePolicy(sizePolicy)
self.noimproveBox.setMinimum(0)
self.noimproveBox.setMaximum(9999)
self.noimproveBox.setSingleStep(10)
self.noimproveBox.setProperty("value", 100)
self.noimproveBox.setObjectName(_fromUtf8("noimproveBox"))
self.gridLayout_4.addWidget(self.noimproveBox, 0, 7, 1, 1)
self.fileComboBox = QtGui.QComboBox(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(2)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.fileComboBox.sizePolicy().hasHeightForWidth())
self.fileComboBox.setSizePolicy(sizePolicy)
self.fileComboBox.setObjectName(_fromUtf8("fileComboBox"))
self.gridLayout_4.addWidget(self.fileComboBox, 0, 0, 1, 1)
self.iterationBox = QtGui.QSpinBox(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.iterationBox.sizePolicy().hasHeightForWidth())
self.iterationBox.setSizePolicy(sizePolicy)
self.iterationBox.setMinimum(0)
self.iterationBox.setMaximum(999999)
self.iterationBox.setSingleStep(50)
self.iterationBox.setProperty("value", 400)
self.iterationBox.setObjectName(_fromUtf8("iterationBox"))
self.gridLayout_4.addWidget(self.iterationBox, 0, 3, 1, 1)
self.number_group=QtGui.QButtonGroup(self.widget)
self.radio_opt=QtGui.QRadioButton("local optimum")
self.radio_opt.setChecked(True)
#self.radio_opt.setFont(bold_font)
self.number_group.addButton(self.radio_opt)
self.radio_alt=QtGui.QRadioButton("No local improvement limit:")
#self.radio_alt.setFont(bold_font)
self.number_group.addButton(self.radio_alt)
self.gridLayout_4.addWidget(self.radio_opt, 0, 5, 1, 1)
self.gridLayout_4.addWidget(self.radio_alt, 0, 6, 1, 1)
self.runTsp_btn = QtGui.QPushButton(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(2)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.runTsp_btn.sizePolicy().hasHeightForWidth())
self.runTsp_btn.setSizePolicy(sizePolicy)
self.runTsp_btn.setObjectName(_fromUtf8("runTsp_btn"))
self.gridLayout_4.addWidget(self.runTsp_btn, 0, 9, 1, 1)
self.verticalLayout_4.addLayout(self.gridLayout_4)
self.line = QtGui.QFrame(self.widget)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.verticalLayout_4.addWidget(self.line)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.infoLabel = QtGui.QLabel(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.infoLabel.sizePolicy().hasHeightForWidth())
self.infoLabel.setSizePolicy(sizePolicy)
self.infoLabel.setFont(bold_ten_font)
self.infoLabel.setAlignment(QtCore.Qt.AlignCenter)
self.infoLabel.setObjectName(_fromUtf8("infoLabel"))
self.gridLayout.addWidget(self.infoLabel, 0, 4, 1, 1)
self.iterText = QtGui.QLabel(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.iterText.sizePolicy().hasHeightForWidth())
self.iterText.setSizePolicy(sizePolicy)
self.iterText.setFont(ten_font)
self.iterText.setObjectName(_fromUtf8("iterText"))
self.gridLayout.addWidget(self.iterText, 0, 1, 1, 1)
self.runtimeText = QtGui.QLabel(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(2)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.runtimeText.sizePolicy().hasHeightForWidth())
self.runtimeText.setSizePolicy(sizePolicy)
self.runtimeText.setFont(ten_font)
self.runtimeText.setObjectName(_fromUtf8("runtimeText"))
self.gridLayout.addWidget(self.runtimeText, 0, 3, 1, 1)
self.infoText = QtGui.QLabel(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(2)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.infoText.sizePolicy().hasHeightForWidth())
self.infoText.setSizePolicy(sizePolicy)
self.infoText.setFont(ten_font)
self.infoText.setObjectName(_fromUtf8("infoText"))
self.gridLayout.addWidget(self.infoText, 0, 5, 1, 1)
self.runtimeLabel = QtGui.QLabel(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.runtimeLabel.sizePolicy().hasHeightForWidth())
self.runtimeLabel.setSizePolicy(sizePolicy)
self.runtimeLabel.setFont(bold_ten_font)
self.runtimeLabel.setAlignment(QtCore.Qt.AlignCenter)
self.runtimeLabel.setObjectName(_fromUtf8("runtimeLabel"))
self.gridLayout.addWidget(self.runtimeLabel, 0, 2, 1, 1)
self.iterLabel = QtGui.QLabel(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.iterLabel.sizePolicy().hasHeightForWidth())
self.iterLabel.setSizePolicy(sizePolicy)
self.iterLabel.setFont(bold_ten_font)
self.iterLabel.setAlignment(QtCore.Qt.AlignCenter)
self.iterLabel.setObjectName(_fromUtf8("iterLabel"))
self.gridLayout.addWidget(self.iterLabel, 0, 0, 1, 1)
self.verticalLayout_4.addLayout(self.gridLayout)
self.verticalLayout_2.addWidget(self.widget)
self.widget_2 = QtGui.QWidget(TSP)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(4)
sizePolicy.setHeightForWidth(self.widget_2.sizePolicy().hasHeightForWidth())
self.widget_2.setSizePolicy(sizePolicy)
self.widget_2.setObjectName(_fromUtf8("widget_2"))
self.verticalLayout = QtGui.QVBoxLayout(self.widget_2)
self.verticalLayout.setMargin(5)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setSpacing(5)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(5)
sizePolicy.setVerticalStretch(0)
self.figure = plt.figure()
self.canvas = FigureCanvas(self.figure)
self.canvas.setSizePolicy(sizePolicy)
self.horizontalLayout_2.addWidget(self.canvas)
self.solutionList = QtGui.QListWidget(self.widget_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.solutionList.sizePolicy().hasHeightForWidth())
self.solutionList.setSizePolicy(sizePolicy)
self.solutionList.setEditTriggers(QtGui.QAbstractItemView.DoubleClicked | QtGui.QAbstractItemView.SelectedClicked)
self.solutionList.setObjectName(_fromUtf8("listWidget"))
self.horizontalLayout_2.addWidget(self.solutionList)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.verticalLayout_2.addWidget(self.widget_2)
self.verticalLayout_3.addLayout(self.verticalLayout_2)
self.retranslateUi(TSP)
QtCore.QMetaObject.connectSlotsByName(TSP)
def retranslateUi(self, TSP):
TSP.setWindowTitle(_translate("TSP", "Python TSP Heuristic", None))
self.iterationLabel.setText(_translate("TSP", "Set iteration limit:", None))
self.runTsp_btn.setText(_translate("TSP", "Run!", None))
self.infoLabel.setText(_translate("TSP", "Info:", None))
self.runtimeLabel.setText(_translate("TSP", "Runtime:", None))
self.iterLabel.setText(_translate("TSP", "Iterations:", None))
for problem in collect_problems():
self.fileComboBox.addItem(_fromUtf8(problem))
self.problem_changed()
self.runTsp_btn.clicked.connect(self.run_tsp)
self.fileComboBox.currentIndexChanged.connect(self.problem_changed)
self.solutionList.currentItemChanged.connect(self.solution_changed)
def problem_changed(self):
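        # Load the problem file currently selected in the combo box,
        # unless the solver thread is still running.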
if self.problem and self.problem.isRunning():
QtGui.QMessageBox.information(self, "Warning!", "Solver is still running!", QtGui.QMessageBox.Ok)
else:
problem = self.fileComboBox.currentText()
file_path = os.path.join(PROBLEMS_DIR, str(problem))
try:
self.problem = Problem(file_path)
self.connect(self.problem, SIGNAL("finished()"), self.done)
self.connect(self.problem, SIGNAL("iter"), self.update_info)
self.infoText.setText("ready...")
except Exception as e:
                print(e)
self.infoText.setText("Error while reading problem.")
def solution_changed(self):
if self.problem and self.problem.isRunning():
QtGui.QMessageBox.information(self, "Warning!", "Solver is still running!", QtGui.QMessageBox.Ok)
selected = self.solutionList.currentIndex().row()
if selected < len(self.problem.solutions):
solution = self.problem.solutions[selected]['tour']
self.draw_solution(solution)
def run_tsp(self):
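        # Hand the UI parameters to the solver, clear the status labels
        # and start the solver thread.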
self.problem.setParameters(self.iterationBox.value(), self.radio_alt.isChecked(), self.noimproveBox.value())
self.infoText.setText("Solving TSP '{0}'...".format(self.problem.meta['name']))
self.infoText.repaint()
self.iterText.setText("")
self.iterText.repaint()
self.runtimeText.setText("")
self.runtimeText.repaint()
self.problem.start()
def done(self):
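        # Slot for the solver's finished() signal: show runtime/iteration
        # statistics, list all solutions and draw the best tour.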
try:
self.runtimeText.setText(str(self.problem.runtime) + " (best after " + str(self.problem.best_solution['runtime']) + ")")
self.iterText.setText(str(self.problem.iterations) + " (best at " + str(self.problem.best_solution['iteration']) + ")")
self.infoText.setText("Tour-Distance: " + str(self.problem.best_solution['distance']))
self.write_list(self.problem.solutions)
self.draw_solution(self.problem.best_solution['tour'])
        except Exception:
            self.infoText.setText("Error occurred while running... :(")
def update_info(self, iterations):
self.infoText.setText("Solving TSP '{0}': {1}/{2}".format(self.problem.meta['name'], iterations, self.problem.iteration_limit))
self.infoText.repaint()
def draw_solution(self, tour):
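        # Render the given tour as a directed graph over the problem
        # coordinates using networkx on the embedded matplotlib canvas.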
edges = self.problem.get_edge_list(tour)
ax = self.figure.add_subplot(111)
ax.hold(False)
G = nx.DiGraph()
G.add_nodes_from(range(0, len(self.problem.data)))
G.add_edges_from(edges)
nx.draw_networkx_nodes(G, self.problem.data, node_size=20, node_color='k')
nx.draw_networkx_edges(G, self.problem.data, width=0.5, arrows=True, edge_color='r')
self.figure.tight_layout(pad=1.2)
plt.title(self.problem.meta['name'])
plt.xlim(0)
plt.ylim(0)
plt.xlabel('X-Axis')
plt.ylabel('Y-Axis')
plt.savefig(self.problem.img)
self.canvas.draw()
def write_list(self, solutions):
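        # Fill the solution list; later entries that match the best
        # distance are highlighted in gold.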
self.solutionList.clear()
gold = QtGui.QBrush(QtGui.QColor(255, 191, 0))
gold.setStyle(QtCore.Qt.SolidPattern)
for i in range(0, len(solutions)):
item = QtGui.QListWidgetItem()
distance = solutions[i]['distance']
item.setText("{0}: {1}".format(str(i+1), str(distance)))
if i > 0:
if distance == self.problem.best_solution['distance']:
item.setBackground(gold)
self.solutionList.addItem(item)
def collect_problems():
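    # Yield every *.tsp file in the problems directory.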
for file in os.listdir(PROBLEMS_DIR):
if file.endswith('.tsp'):
yield file
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
ui = Ui_Tsp()
ui.show()
sys.exit(app.exec_())
|
"""
Custom widgets used by the URL form fields.
"""
from __future__ import unicode_literals
import re
import django
from django.contrib import admin
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.core.exceptions import ValidationError
from django.db.models.fields.related import ManyToOneRel
from django.forms import widgets
from django.forms.utils import flatatt
from django.template.defaultfilters import slugify
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
try:
from django.urls import reverse, NoReverseMatch # Django 1.10+
except ImportError:
from django.core.urlresolvers import reverse, NoReverseMatch
RE_CLEANUP_CLASS = re.compile('[^a-z0-9-_]')
if django.VERSION < (1, 11):
from django.forms.widgets import RadioFieldRenderer
class HorizontalRadioFieldRenderer(RadioFieldRenderer):
"""
Render a :class:`~django.forms.RadioSelect` horizontally in the Django admin interface.
        This produces a layout similar to the one that ``radio_fields = {'field': admin.HORIZONTAL}`` produces in the admin interface.
        It can be used as the ``renderer`` argument for the :class:`~django.forms.RadioSelect` widget:
.. code-block:: python
widget = widgets.RadioSelect(choices=choices, renderer=HorizontalRadioFieldRenderer)
"""
def __init__(self, name, value, attrs, choices):
extraclasses = 'radiolist inline'
            if extraclasses not in attrs.get('class', ''):
attrs = attrs.copy()
if 'class' in attrs:
attrs['class'] += ' ' + extraclasses
else:
attrs['class'] = extraclasses
super(HorizontalRadioFieldRenderer, self).__init__(name, value, attrs, choices)
def render(self):
return mark_safe(u'<ul%s>\n%s\n</ul>' % (
flatatt(self.attrs),
u'\n'.join([u'<li>%s</li>' % force_text(w) for w in self]))
)
class UrlTypeSelect(widgets.RadioSelect):
"""
Horizontal radio select
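    For example (the choices shown here are illustrative; the form field builds them from the URL type registry):
    .. code-block:: python
        widget = UrlTypeSelect(choices=[('http', 'External URL'), ('page', 'Page')])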
"""
if django.VERSION >= (1, 11):
template_name = "any_urlfield/widgets/url_type_select.html"
def __init__(self, *args, **kwargs):
kwargs.setdefault('attrs', {})
kwargs['attrs'].setdefault('class', 'any_urlfield-url_type radiolist inline')
if django.VERSION < (1, 11):
kwargs.setdefault('renderer', HorizontalRadioFieldRenderer)
super(UrlTypeSelect, self).__init__(*args, **kwargs)
if django.VERSION >= (1, 11):
def get_context(self, name, value, attrs):
context = super(UrlTypeSelect, self).get_context(name, value, attrs)
context['widget']['flatatt'] = flatatt(context['widget']['attrs'])
return context
class AnyUrlWidget(widgets.MultiWidget):
"""
The URL widget, rendering the URL selector.
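    It is normally constructed by the form field; a minimal sketch, assuming
    ``url_type_registry`` is the registry instance the field already holds:
    .. code-block:: python
        widget = AnyUrlWidget(url_type_registry)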
"""
template_name = 'any_urlfield/widgets/any_urlfield.html'
class Media:
js = ('any_urlfield/any_urlfield.js',)
css = {'all': ('any_urlfield/any_urlfield.css',)}
def __init__(self, url_type_registry, attrs=None):
type_choices = [(urltype.prefix, urltype.title) for urltype in url_type_registry]
# Expose sub widgets for form field.
self.url_type_registry = url_type_registry
self.url_type_widget = UrlTypeSelect(choices=type_choices)
self.url_widgets = []
        # Combine the widgets into a list; this fixes the ordering used for the values list later.
subwidgets = []
for urltype in url_type_registry:
widget = urltype.get_widget()
subwidgets.append(widget)
subwidgets.insert(0, self.url_type_widget)
# init MultiWidget base
super(AnyUrlWidget, self).__init__(subwidgets, attrs=attrs)
def decompress(self, value):
        # Split the value into a dictionary with one key per prefix.
        # value is an AnyUrlValue object.
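        # Illustrative shape of the result, assuming two registered prefixes
        # ('http' first, then a raw-id type such as 'page'):
        #   external URL   -> ['http', 'http://example.org', None]
        #   page reference -> ['page', None, <bound page value>]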
result = [None]
values = {}
if value is None:
values['http'] = ''
result[0] = 'http'
else:
            # Expand the AnyUrlValue to the array of widget values.
            # This is why the widgets are ordered by ID: it makes this expansion easy.
result[0] = value.type_prefix
if value.type_prefix == 'http':
values['http'] = value.type_value
else:
# Instead of just passing the ID, make sure our SimpleRawIdWidget
# doesn't have to perform a query while we already have prefetched data.
values[value.type_prefix] = value.bound_type_value
# Append all values in the proper ordering,
# for every registered widget type shown in this multiwidget.
for urltype in self.url_type_registry:
result.append(values.get(urltype.prefix, None))
return result
if django.VERSION < (1, 11):
def format_output(self, rendered_widgets):
"""
Custom rendering of the widgets.
"""
urltypes = list(self.url_type_registry)
url_type_html = rendered_widgets.pop(0)
output = [url_type_html]
# Wrap remaining options in <p> for scripting.
for i, widget_html in enumerate(rendered_widgets):
prefix = slugify(urltypes[i].prefix) # can use [i], same order of adding items.
output.append(u'<p class="any_urlfield-url-{0}" style="clear:left">{1}</p>'.format(prefix, widget_html))
return u'<div class="any-urlfield-wrapper related-widget-wrapper">{0}</div>'.format(u''.join(output))
else:
def get_context(self, name, value, attrs):
context = super(AnyUrlWidget, self).get_context(name, value, attrs)
# BEGIN Django 1.11 code!
if not isinstance(value, list):
value = self.decompress(value)
final_attrs = context['widget']['attrs']
input_type = final_attrs.pop('type', None)
id_ = final_attrs.get('id')
subwidgets = []
for i, widget in enumerate(self.widgets):
if input_type is not None:
widget.input_type = input_type
widget_name = '%s_%s' % (name, i)
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
widget_attrs = final_attrs.copy()
widget_attrs['id'] = '%s_%s' % (id_, i)
else:
widget_attrs = final_attrs
# FIX Django 1.11 "bug" of lost context for fields!
subwidgets.append(widget.get_context(widget_name, widget_value, widget_attrs))
context['widget']['subwidgets'] = subwidgets
# END
            # Each subwidget corresponds to a registered URL type.
            # Make sure the template can render the proper IDs for JavaScript.
for i, urltype in enumerate(self.url_type_registry):
subwidgets[i + 1]['prefix'] = RE_CLEANUP_CLASS.sub('', urltype.prefix)
return context
class SimpleRawIdWidget(ForeignKeyRawIdWidget):
"""
A wrapper class to create raw ID widgets.
    It produces the same layout as the ``raw_id_fields = ('field',)`` option does in the admin interface.
    This class wraps the functionality of the Django admin application
    into a usable format that is compatible with both Django 1.3 and 1.4.
The basic invocation only requires the model:
.. code-block:: python
widget = SimpleRawIdWidget(MyModel)
"""
def __init__(self, model, limit_choices_to=None, admin_site=None, attrs=None, using=None):
"""
Instantiate the class.
"""
if admin_site is None:
admin_site = admin.site
rel = ManyToOneRel(None, model, model._meta.pk.name, limit_choices_to=limit_choices_to)
super(SimpleRawIdWidget, self).__init__(rel=rel, admin_site=admin_site, attrs=attrs, using=using)
if django.VERSION >= (1, 11):
def label_and_url_for_value(self, value):
"""Optimize retrieval of the data.
            Because AnyUrlField.decompose() secretly returns both the ID
            and its prefetched object, there is no need to refetch the object here.
"""
try:
obj = value.prefetched_object # ResolvedTypeValue
except AttributeError:
return super(SimpleRawIdWidget, self).label_and_url_for_value(value)
# Standard Django logic follows:
try:
url = reverse(
'{admin}:{app}_{model}_change'.format(
admin=self.admin_site.name,
app=obj._meta.app_label,
model=obj._meta.object_name.lower()
),
args=(obj.pk,)
)
except NoReverseMatch:
url = '' # Admin not registered for target model.
return Truncator(obj).words(14, truncate='...'), url
else:
def label_for_value(self, value):
try:
obj = value.prefetched_object # ResolvedTypeValue
except AttributeError:
return super(SimpleRawIdWidget, self).label_for_value(value)
try:
return ' <strong>%s</strong>' % escape(Truncator(obj).words(14, truncate='...'))
except ValueError:
return ''
|