from elasticsearch import Elasticsearch, client
from snorkel import SnorkelSession
from snorkel.models import Document, Sentence, Span
from snorkel.viewer import SentenceNgramViewer
import os
import json
es = Elasticsearch()
session = SnorkelSession()
class ElasticSession:
#define document and index names
def __init__(self,**keyword_parameters):
self.indexName = "corpus"
self.docType = "articles"
self.fieldName = "sentence"
self.elastic_index()
if "cands" in keyword_parameters:
self.generate_tags(keyword_parameters['cands'])
def set_cand(self, Cands):
self.cands = Cands
#get the index mapping
def get_map(self):
mapping = es.indices.get_mapping(self.indexName)
print 'Index Mapping'
print(json.dumps(mapping, indent=2))
#get all index information
def get_index(self):
print 'Index Information: '
print ' '
print es.cat.indices(v='true')
#get a document by its id number
def get_doc(self,iden):
return es.get(index=self.indexName, doc_type=self.docType, id=iden)
#Elasticsearch to SQL mapping:
#Index - Database
#Type - Table
#Document - Row
#Values are the data to be added to each document
def elastic_index(self):
#Define our index mapping
request_body = {
'settings' : {
'number_of_shards': 5,
'number_of_replicas': 1,
'analysis':{
'char_filter': {
'quotes': {
#Standardize apostrophes
'type': 'mapping',
'mappings': [
'\u0091=>\u0027',
'\u0092=>\u0027',
'\u2018=>\u0027',
'\u2019=>\u0027',
'\u201B=>\u0027'
]
}
},
'analyzer':{
'my_analyzer':{
'type':'custom',
'tokenizer':'standard',
'char_filter': ['quotes'],
#Remove apostrophes and perform asciifolding
'filter':['apostrophe','asciifolding']
},
#used to remove the unicode marker
'my_stop': {
'type':'stop',
'stopwords': ['u']
}
}
}
},
#define field properties
'mappings': {
self.docType: {
'properties': {
'lineNum':{'type':'integer'},
self.fieldName: {'type': 'text','analyzer':'my_analyzer'},
'tagged':{'type':'text','analyzer':'my_stop'},
'fillCand':{'type':'text','analyzer':'my_stop','search_analyzer':'my_stop'}
}}}}
#create the index
es.indices.create(index = self.indexName, body = request_body)
print 'Begin indexing'
docCount=0
for p in session.query(Document):
docCount+=1
for i in p.sentences:
#analyze the string and create an array of 'o's of the same length;
#this will be used for the candidate layer
value=len((es.indices.analyze(index=self.indexName,body={'analyzer':'standard','text':i.text}))['tokens'])
es.index(index=self.indexName, doc_type=self.docType, id=i.id,
body = {
'lineNum': i.id,
self.fieldName: i.text,
'fillCand':['o']*value
})
self.get_index()
print '%d items indexed'%docCount
print ""
def generate_tags(self,Cands):
self.set_cand(Cands)
print "Begin generating tags"
unique=[]
total=0
#Get all the sentences in our candidate set
for c in session.query(Cands).all():
total+=1
unique.append(c[0].sentence_id)
#Turn it into a set to get only the unique sentences
unique = set(unique)
#Used to keep track of the candidates that could not be tagged
flagNum=0
flagged=[]
for sent in unique:
#Get all candidates that correspond to a particular sentence
q = session.query(Cands)\
.join(Span, getattr(Cands, Cands.__argnames__[0] + '_id') == Span.id)\
.join(Span.sentence).filter(Sentence.id == sent).all()
#Get the term vector of the sentence. We will use this to determine
#where the candidate is in the sentence
vector=es.termvectors(
index=self.indexName,
doc_type=self.docType,
id=sent,
body ={
'fields' : [self.fieldName],
'positions' : 'true'
})
temp = []
for p in q:
for num in range(0,2):
candidate= p[num].get_span()
#Candidates can be more than one word so we asciifold and split the candidates
#on the spaces
value=es.indices.analyze(index=self.indexName,body={'analyzer':'my_analyzer','text':candidate})['tokens']
for vectorized in value:
temp.append(vectorized['token'])
#Get the candidate array that we will modify
hold=es.get(index=self.indexName, doc_type=self.docType, id=sent)['_source']['fillCand']
for tagObj in temp:
try:
#Candidates can appear multiple times in a sentence so we get the
#total number of occurrences
limit = vector['term_vectors'][self.fieldName]['terms'][tagObj]['term_freq']
for i in range(0,limit):
#Find the candidate position and tag that index
index=vector['term_vectors'][self.fieldName]['terms'][tagObj]['tokens'][i]['position']
hold[index]='OBJECT'
#Used to handle candidates that could not be found
except KeyError:
flagNum+=1
flagged.append([sent,tagObj])
#Arrays have an implicit 100-position gap between indices, which
#makes the search queries behave unexpectedly. To compensate we change
#the array to a string and add it to a new field.
turnAr = ' '.join((e).decode('utf-8') for e in hold)
es.update(index=self.indexName, doc_type=self.docType, id=sent,
body={'doc':{'fillCand':hold,'tagged':turnAr}})
#Most candidates that cannot be tagged correspond to punctuation and spaces,
#which are automatically stripped when the string is tokenized
print '%d candidates of %d tagged'%((total-flagNum),(total))
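#For illustration (hypothetical sentence and candidate pair): a sentence such as
#"Barack married Michelle", with both names as candidate spans, would end up stored as
#  fillCand = ['OBJECT', 'o', 'OBJECT']
#  tagged   = 'OBJECT o OBJECT'
#which is what the between_cand / before_cand / after_cand queries below rely on.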
def search_index(self,keyWord,*args,**keyword_parameters):
check = 0
if keyWord == 'match':
for hold,query in enumerate(args):
#Match phrase if there is a slop value
if 'slop' in keyword_parameters:
sQuery={
'match_phrase':{
self.fieldName:{
'query':query,
'slop':keyword_parameters['slop']
}
}
}
else:
#Match query if no slop is defined
sQuery={
'match': {
self.fieldName: {
'query':query
}
}
}
#Query a specific field where we care about the order:
#position(value1) < position(value2) < position(value3), etc.
elif keyWord=='position':
holdVal=[]
if 'slop' in keyword_parameters:
dist = keyword_parameters['slop']
else:
dist=0
for hold,values in enumerate(args):
holdVal.append({ 'span_term' : { self.fieldName : values } })
sQuery={
'span_near' : {
'clauses' : holdVal,
'slop' : dist,
'in_order' : 'true'
}
}
#Query two fields in parallel, respecting order:
#the mask searches the tagged field for OBJECT, then switches to the fieldName field
#to search for the value, before switching back to tagged to search for OBJECT again
elif keyWord=='between_cand':
check=1
if 'slop' in keyword_parameters:
dist = keyword_parameters['slop']
else:
dist=0
for hold,value in enumerate(args):
sQuery={
'span_near': {
'clauses': [
{'span_term': {'tagged': 'object'}},
{'field_masking_span': {
'query': {
'span_term': {
self.fieldName: value
}
},
'field': 'tagged'}
},
{'span_term': {'tagged': 'object'}},
],
'slop': dist,
'in_order': 'true'
}
}
#Query two fields in parallel, respecting order:
#searches the fieldName field first for the value, then switches to the tagged
#field for the OBJECT tag
elif keyWord == 'before_cand':
check=1
holdVal=[]
if 'slop' in keyword_parameters:
dist = keyword_parameters['slop']
else:
dist=0
for hold,values in enumerate(args):
sQuery={
'span_near': {
'clauses': [
{'span_term':
{ self.fieldName : values }},
{'field_masking_span': {
'query': {
'span_term': {
'tagged': 'object'
}
},
'field': self.fieldName}
}
],
'slop': dist,
'in_order': 'true'
}
}
#Query two fields in parallel, respecting order:
#searches the tagged field first for OBJECT, then switches to the fieldName field
#for the value
elif keyWord == 'after_cand':
check=1
if 'slop' in keyword_parameters:
dist = keyword_parameters['slop']
else:
dist=0
for hold,values in enumerate(args):
sQuery={
'span_near': {
'clauses': [
{'span_term':
{'tagged': 'object'}},
{'field_masking_span': {
'query': {
'span_term': {
self.fieldName : values
}
},
'field': 'tagged'}
}
],
'slop': dist,
'in_order': 'true'
}
}
else:
print 'QUERY TYPE NOT FOUND'
return
#Size indicates how many search results to return
if 'size' in keyword_parameters:
numRes = keyword_parameters['size']
else:
numRes=5
#Perform the query
searchResult = es.search(
size =numRes,
index=self.indexName,
doc_type=self.docType,
body={
'query': sQuery
})
temp=[]
print "Number of hits: %d" %searchResult['hits']['total']
#get sentence numbers from the search results
for i in searchResult['hits']['hits']:
temp.append(i['_source']['lineNum'])
holdCands=[]
if check ==1:
for i in temp:
#query the candidate set for all spans with the sentence number
q = session.query(self.cands)\
.join(Span, getattr(self.cands, self.cands.__argnames__[0] + '_id') == Span.id)\
.join(Span.sentence).filter(Sentence.id == i).all()
for span in q:
holdCands.append(span)
else:
for i in temp:
#get sentence using sentence number
q=session.query(Sentence).filter(Sentence.id ==i).all()
holdCands.append(q[0])
#returns candidate object
return holdCands
#deletes an Elasticsearch index, taking the index name as a parameter;
#the _all flag will delete all indices
def delete_index(indexName):
print es.indices.delete(index=indexName,ignore=404)
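#A hedged end-to-end usage sketch (assumes a running Elasticsearch node, a populated
#Snorkel session, and a candidate subclass -- 'Spouse' below is purely illustrative):
#  es_session = ElasticSession(cands=Spouse)                #index documents and tag candidates
#  hits = es_session.search_index('match', 'wedding', slop=2, size=10)
#  ordered = es_session.search_index('position', 'married', 'in', 'paris')
#  spans = es_session.search_index('between_cand', 'married', slop=5)
#  SentenceNgramViewer(spans, session)                      #inspect the returned candidates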
# -*- coding: utf-8 -*-
import datetime
import os.path
import functools
from unittest import skipIf
from django.conf import settings
from django.core.cache import cache
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseNotFound
from django.utils.timezone import now as tz_now
from django.utils.translation import override as force_language
from cms import constants
from cms.api import create_page, add_plugin, create_title, publish_page
from cms.exceptions import PublicIsUnmodifiable, PublicVersionNeeded
from cms.forms.validators import validate_url_uniqueness
from cms.models import Page, Title
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.sitemaps import CMSSitemap
from cms.test_utils.testcases import CMSTestCase, TransactionCMSTestCase
from cms.utils.conf import get_cms_setting
from cms.utils.page import (
get_available_slug,
get_current_site,
get_page_from_request,
)
class PageMigrationTestCase(CMSTestCase):
def test_content_type(self):
"""
Test correct content type is set for Page object
"""
from django.contrib.contenttypes.models import ContentType
self.assertEqual(ContentType.objects.filter(model='page', app_label='cms').count(), 1)
def has_no_custom_user():
return get_user_model().USERNAME_FIELD != 'email'
class PagesTestCase(TransactionCMSTestCase):
def tearDown(self):
cache.clear()
def test_absolute_url(self):
user = self.get_superuser()
page = self.create_homepage("page", "nav_playground.html", "en", published=True)
create_title("fr", "french home", page)
page_2 = create_page("inner", "nav_playground.html", "en", published=True, parent=page)
create_title("fr", "french inner", page_2)
publish_page(page_2, user, "fr")
self.assertEqual(page_2.get_absolute_url(), '/en/inner/')
self.assertEqual(page_2.get_absolute_url(language='en'), '/en/inner/')
self.assertEqual(page_2.get_absolute_url(language='fr'), '/fr/french-inner/')
with force_language('fr'):
self.assertEqual(page_2.get_absolute_url(), '/fr/french-inner/')
self.assertEqual(page_2.get_absolute_url(language='en'), '/en/inner/')
self.assertEqual(page_2.get_absolute_url(language='fr'), '/fr/french-inner/')
def test_get_root_page(self):
_create = functools.partial(
create_page,
template='nav_playground.html',
language='en',
published=True,
)
page_a = _create('page_a')
page_a_a = _create('page_a_a_a', parent=page_a)
page_a_a_a = _create('page_a_a_a', parent=page_a_a)
page_tree_with_root = [
(page_a, page_a),
(page_a_a, page_a),
(page_a_a_a, page_a),
]
for page, root in page_tree_with_root:
self.assertEqual(page.get_root(), root)
self.assertEqual(page.publisher_public.get_root(), root.publisher_public)
def test_treebeard_delete(self):
"""
This is a test for #4102
When deleting a page, parent must be updated too, to reflect the new tree status.
This is handled by MP_NodeQuerySet (which was not used before the fix)
"""
page1 = create_page('home', 'nav_playground.html', 'en', published=True)
page2 = create_page('page2', 'nav_playground.html', 'en', parent=page1, published=True)
page3 = create_page('page3', 'nav_playground.html', 'en', parent=page2, published=True)
self.assertEqual(page1.node.depth, 1)
self.assertEqual(page1.node.numchild, 1)
self.assertFalse(page1.node.is_leaf())
self.assertEqual(page2.node.depth, 2)
self.assertEqual(page2.node.numchild, 1)
self.assertFalse(page2.node.is_leaf())
self.assertEqual(page3.node.depth, 3)
self.assertEqual(page3.node.numchild, 0)
self.assertTrue(page3.node.is_leaf())
page3.delete()
page1 = page1.reload().get_draft_object()
page2 = page2.reload().get_draft_object()
self.assertEqual(page2.node.depth, 2)
self.assertEqual(page2.node.numchild, 0)
self.assertTrue(page2.node.is_leaf())
page3 = create_page('page3', 'nav_playground.html', 'en', parent=page2, reverse_id='page3')
self.assertEqual(page2.node.depth, 2)
self.assertEqual(page2.node.numchild, 1)
self.assertFalse(page2.node.is_leaf())
self.assertEqual(page3.node.depth, 3)
self.assertEqual(page3.node.numchild, 0)
self.assertTrue(page3.node.is_leaf())
page1.publish('en')
page2.publish('en')
page3.publish('en')
self.assertEqual(page1.node.depth, 1)
self.assertEqual(page1.node.numchild, 1)
self.assertFalse(page1.node.is_leaf())
self.assertEqual(page2.node.depth, 2)
self.assertEqual(page2.node.numchild, 1)
self.assertFalse(page2.node.is_leaf())
self.assertEqual(page3.node.depth, 3)
self.assertEqual(page3.node.numchild, 0)
self.assertTrue(page3.node.is_leaf())
def test_create_page_api(self):
page_data = {
'title': 'root',
'slug': 'root',
'language': settings.LANGUAGES[0][0],
'template': 'nav_playground.html',
}
page = self.create_homepage(**page_data)
page = page.reload()
page.publish('en')
self.assertEqual(Page.objects.count(), 2)
self.assertTrue(page.is_home)
self.assertTrue(page.publisher_public.is_home)
self.assertEqual(list(Title.objects.drafts().values_list('path', flat=True)), [u''])
self.assertEqual(list(Title.objects.public().values_list('path', flat=True)), [u''])
@skipIf(has_no_custom_user(), 'No custom user')
def test_create_page_api_with_long_username(self):
page_data = {
'title': 'root',
'slug': 'root',
'language': settings.LANGUAGES[0][0],
'template': 'nav_playground.html',
'created_by': self._create_user(
'V' * constants.PAGE_USERNAME_MAX_LENGTH + 'ERY-LONG-USERNAME',
is_staff=True,
is_superuser=True,
),
}
page = create_page(**page_data)
self.assertEqual(Page.objects.count(), 1)
self.assertLessEqual(len(page.created_by), constants.PAGE_USERNAME_MAX_LENGTH)
self.assertRegexpMatches(page.created_by, r'V+\.{3} \(id=\d+\)')
self.assertLessEqual(len(page.changed_by), constants.PAGE_USERNAME_MAX_LENGTH)
self.assertRegexpMatches(page.changed_by, r'V+\.{3} \(id=\d+\)')
self.assertEqual(list(Title.objects.drafts().values_list('path', flat=True)), [u'root'])
def test_delete_page_no_template(self):
page_data = {
'title': 'root',
'slug': 'root',
'language': settings.LANGUAGES[0][0],
'template': 'nav_playground.html',
}
page = create_page(**page_data)
page.template = 'no_such_template.html'
page.delete()
self.assertEqual(Page.objects.count(), 0)
def test_get_available_slug_recursion(self):
""" Checks cms.utils.page.get_available_slug for infinite recursion
"""
site = get_current_site()
for x in range(0, 12):
create_page('test copy', 'nav_playground.html', 'en', published=True)
new_slug = get_available_slug(site, 'test-copy', 'en')
self.assertTrue(new_slug, 'test-copy-11')
def test_path_collisions_api_1(self):
""" Checks for slug collisions on sibling pages - uses API to create pages
"""
site = get_current_site()
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
page1_1 = create_page('test page 1_1', 'nav_playground.html', 'en',
published=True, parent=page1, slug="foo")
page1_2 = create_page('test page 1_2', 'nav_playground.html', 'en',
published=True, parent=page1, slug="foo")
# both sibling pages have the same slug, so both pages have an invalid slug
self.assertRaises(
ValidationError,
validate_url_uniqueness,
site=site,
path=page1_1.get_path('en'),
language='en',
exclude_page=page1_1,
)
self.assertRaises(
ValidationError,
validate_url_uniqueness,
site=site,
path=page1_2.get_path('en'),
language='en',
exclude_page=page1_2,
)
def test_path_collisions_api_2(self):
""" Checks for slug collisions on root (not home) page and a home page child - uses API to create pages
"""
site = get_current_site()
page1 = self.create_homepage('test page 1', 'nav_playground.html', 'en',
published=True)
page1_1 = create_page('test page 1_1', 'nav_playground.html', 'en',
published=True, parent=page1, slug="foo")
page2 = create_page('test page 1_1', 'nav_playground.html', 'en',
published=True, slug="foo")
# Root (non home) page and child page have the same slug, both are invalid
self.assertRaises(
ValidationError,
validate_url_uniqueness,
site=site,
path=page1_1.get_path('en'),
language='en',
exclude_page=page1_1,
)
self.assertRaises(
ValidationError,
validate_url_uniqueness,
site=site,
path=page2.get_path('en'),
language='en',
exclude_page=page2,
)
def test_path_collisions_api_3(self):
""" Checks for slug collisions on children of a non root page - uses API to create pages
"""
site = get_current_site()
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
page1_1 = create_page('test page 1_1', 'nav_playground.html', 'en',
published=True, parent=page1, slug="foo")
page1_1_1 = create_page('test page 1_1_1', 'nav_playground.html', 'en',
published=True, parent=page1_1, slug="bar")
page1_1_2 = create_page('test page 1_1_1', 'nav_playground.html', 'en',
published=True, parent=page1_1, slug="bar")
page1_2 = create_page('test page 1_2', 'nav_playground.html', 'en',
published=True, parent=page1, slug="bar")
# Direct children of home have different slugs, so it's ok.
self.assertTrue(validate_url_uniqueness(
site,
path=page1_1.get_path('en'),
language='en',
exclude_page=page1_1,
))
self.assertTrue(validate_url_uniqueness(
site,
path=page1_2.get_path('en'),
language='en',
exclude_page=page1_2,
))
# children of page1_1 have the same slug -> you lose!
self.assertRaises(
ValidationError,
validate_url_uniqueness,
site=site,
path=page1_1_1.get_path('en'),
language='en',
exclude_page=page1_1_1,
)
self.assertRaises(
ValidationError,
validate_url_uniqueness,
site=site,
path=page1_1_2.get_path('en'),
language='en',
exclude_page=page1_1_2,
)
def test_details_view(self):
"""
Test the details view
"""
superuser = self.get_superuser()
self.assertEqual(Page.objects.all().count(), 0)
with self.login_user_context(superuser):
page = self.create_homepage('test page 1', "nav_playground.html", "en")
page.publish('en')
response = self.client.get(self.get_pages_root())
self.assertEqual(response.status_code, 200)
self.assertTrue(page.publish('en'))
page2 = create_page("test page 2", "nav_playground.html", "en",
parent=page, published=True)
homepage = Page.objects.get_home()
self.assertTrue(homepage.get_slug(), 'test-page-1')
self.assertEqual(page2.get_absolute_url(), '/en/test-page-2/')
response = self.client.get(page2.get_absolute_url())
self.assertEqual(response.status_code, 200)
def test_public_exceptions(self):
page_a = create_page("page_a", "nav_playground.html", "en", published=True)
page_b = create_page("page_b", "nav_playground.html", "en")
page = page_a.publisher_public
self.assertRaises(PublicIsUnmodifiable, page.copy_with_descendants, page_b, 'last-child')
self.assertRaises(PublicIsUnmodifiable, page.unpublish, 'en')
self.assertRaises(PublicIsUnmodifiable, page.revert_to_live, 'en')
self.assertRaises(PublicIsUnmodifiable, page.publish, 'en')
self.assertTrue(page.get_draft_object().publisher_is_draft)
self.assertRaises(PublicVersionNeeded, page_b.revert_to_live, 'en')
def test_move_page_regression_left_to_right_5752(self):
# ref: https://github.com/divio/django-cms/issues/5752
# Tests tree integrity when moving sibling pages from left
# to right under the same parent.
home = create_page("Home", "nav_playground.html", "en", published=True)
alpha = create_page(
"Alpha",
"nav_playground.html",
"en",
published=True,
parent=home,
)
beta = create_page(
"Beta",
"nav_playground.html",
"en",
published=True,
parent=home,
)
beta.move_page(alpha.node, position='left')
# Draft
self.assertEqual(home.node.path, '0001')
self.assertEqual(beta.node.path, '00010001')
self.assertEqual(alpha.node.path, '00010002')
def test_move_page_regression_right_to_left_5752(self):
# ref: https://github.com/divio/django-cms/issues/5752
# Tests tree integrity when moving sibling pages from right
# to left under the same parent.
home = create_page("Home", "nav_playground.html", "en", published=True)
alpha = create_page(
"Alpha",
"nav_playground.html",
"en",
published=True,
parent=home,
)
beta = create_page(
"Beta",
"nav_playground.html",
"en",
published=True,
parent=home,
)
beta.move_page(alpha.node, position='left')
alpha.refresh_from_db()
beta.refresh_from_db()
# Draft
self.assertEqual(home.node.path, '0001')
self.assertEqual(beta.node.path, '00010001')
self.assertEqual(alpha.node.path, '00010002')
def test_move_page_regression_5640(self):
# ref: https://github.com/divio/django-cms/issues/5640
alpha = create_page("Alpha", "nav_playground.html", "en", published=True)
beta = create_page("Beta", "nav_playground.html", "en", published=False)
alpha.move_page(beta.node, position='right')
self.assertEqual(beta.node.path, '0002')
self.assertEqual(alpha.node.path, '0003')
def test_move_page_regression_nested_5640(self):
# ref: https://github.com/divio/django-cms/issues/5640
alpha = create_page("Alpha", "nav_playground.html", "en", published=True)
beta = create_page("Beta", "nav_playground.html", "en", published=False)
gamma = create_page("Gamma", "nav_playground.html", "en", published=False)
delta = create_page("Delta", "nav_playground.html", "en", published=True)
theta = create_page("Theta", "nav_playground.html", "en", published=True)
beta.move_page(alpha.node, position='last-child')
gamma.move_page(beta.reload().node, position='last-child')
delta.move_page(gamma.reload().node, position='last-child')
theta.move_page(delta.reload().node, position='last-child')
tree = [
(alpha, '0001'),
(beta, '00010001'),
(gamma, '000100010001'),
(delta, '0001000100010001'),
(theta, '00010001000100010001'),
]
for page, path in tree:
self.assertEqual(page.reload().node.path, path)
def test_move_page_regression_5643(self):
# ref: https://github.com/divio/django-cms/issues/5643
alpha = create_page("Alpha", "nav_playground.html", "en", published=True)
beta = create_page("Beta", "nav_playground.html", "en", published=False)
gamma = create_page("Gamma", "nav_playground.html", "en", published=False)
delta = create_page("Delta", "nav_playground.html", "en", published=True)
theta = create_page("Theta", "nav_playground.html", "en", published=True)
beta.move_page(alpha.node, position='last-child')
gamma.move_page(beta.node, position='last-child')
delta.move_page(gamma.node, position='last-child')
theta.move_page(delta.node, position='last-child')
self.assertPublished(alpha.reload())
self.assertNeverPublished(beta.reload())
self.assertNeverPublished(gamma.reload())
self.assertPending(delta.reload())
self.assertPending(theta.reload())
def test_publish_page_regression_5642(self):
# ref: https://github.com/divio/django-cms/issues/5642
alpha = create_page("Alpha", "nav_playground.html", "en", published=True)
beta = create_page("Beta", "nav_playground.html", "en", published=False)
gamma = create_page("Gamma", "nav_playground.html", "en", published=False)
delta = create_page("Delta", "nav_playground.html", "en", published=True)
theta = create_page("Theta", "nav_playground.html", "en", published=True)
beta.move_page(alpha.node, position='last-child')
gamma.move_page(beta.reload().node, position='last-child')
delta.move_page(gamma.reload().node, position='last-child')
theta.move_page(delta.reload().node, position='last-child')
beta.reload().publish('en')
# The delta and theta pages should remain pending publication
# because gamma is still unpublished
self.assertPublished(beta.reload())
self.assertNeverPublished(gamma.reload())
self.assertPending(delta.reload())
self.assertPending(theta.reload())
gamma.reload().publish('en')
self.assertPublished(gamma.reload())
self.assertPublished(delta.reload())
self.assertPublished(theta.reload())
def test_publish_page_regression_6188(self):
# ref: https://github.com/divio/django-cms/issues/6188
page = create_page("en-page", "nav_playground.html", "en", published=False)
create_title('de', 'de-page', page)
create_title('fr', 'fr-page', page)
# Publishing the en language should set "en" as the only language
# on the public version of the page.
page.publish("en")
self.assertListEqual(sorted(page.publisher_public.get_languages()), ['en'])
page.publish("de")
# Now there should be "en" and "de" on the public page
self.assertSequenceEqual(sorted(page.publisher_public.get_languages()), ['de', 'en'])
page.publish("fr")
# Now there should be "en", "de" and "fr" on the public page
self.assertSequenceEqual(sorted(page.publisher_public.get_languages()), ['de', 'en', 'fr'])
def test_move_page_inherit(self):
parent = create_page("Parent", 'col_three.html', "en")
child = create_page("Child", constants.TEMPLATE_INHERITANCE_MAGIC,
"en", parent=parent)
self.assertEqual(child.get_template(), parent.get_template())
child.move_page(parent.node, 'left')
child = Page.objects.get(pk=child.pk)
self.assertEqual(child.get_template(), parent.get_template())
def test_add_placeholder(self):
# create page
page = create_page("Add Placeholder", "nav_playground.html", "en",
position="last-child", published=True, in_navigation=True)
page.template = 'add_placeholder.html'
page.save()
page.publish('en')
url = page.get_absolute_url()
response = self.client.get(url)
self.assertEqual(200, response.status_code)
path = os.path.join(settings.TEMPLATES[0]['DIRS'][0], 'add_placeholder.html')
with open(path, 'r') as fobj:
old = fobj.read()
try:
new = old.replace(
'<!-- SECOND_PLACEHOLDER -->',
'{% placeholder second_placeholder %}'
)
with open(path, 'w') as fobj:
fobj.write(new)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
finally:
with open(path, 'w') as fobj:
fobj.write(old)
def test_sitemap_login_required_pages(self):
"""
Test that CMSSitemap object contains only published, public (login_required=False) pages
"""
create_page("page", "nav_playground.html", "en", login_required=True,
published=True, in_navigation=True)
self.assertEqual(CMSSitemap().items().count(), 0)
def test_sitemap_includes_last_modification_date(self):
one_day_ago = tz_now() - datetime.timedelta(days=1)
page = create_page("page", "nav_playground.html", "en", published=True, publication_date=one_day_ago)
page.creation_date = one_day_ago
page.save()
page.publish('en')
sitemap = CMSSitemap()
self.assertEqual(sitemap.items().count(), 1)
actual_last_modification_time = sitemap.lastmod(sitemap.items()[0])
self.assertTrue(actual_last_modification_time > one_day_ago)
def test_sitemap_uses_publication_date_when_later_than_modification(self):
now = tz_now()
now -= datetime.timedelta(microseconds=now.microsecond)
one_day_ago = now - datetime.timedelta(days=1)
page = create_page("page", "nav_playground.html", "en", published=True, publication_date=now)
title = page.get_title_obj('en')
page.creation_date = one_day_ago
page.changed_date = one_day_ago
sitemap = CMSSitemap()
actual_last_modification_time = sitemap.lastmod(title)
self.assertEqual(actual_last_modification_time.date(), now.date())
def test_templates(self):
"""
Test the inheritance magic for templates
"""
parent = create_page("parent", "nav_playground.html", "en")
child = create_page("child", "nav_playground.html", "en", parent=parent)
grand_child = create_page("grand child", "nav_playground.html", "en", parent=child)
child2 = create_page("child2", "col_two.html", "en", parent=parent)
grand_child2 = create_page("grand child2", "nav_playground.html", "en", parent=child2)
child.template = constants.TEMPLATE_INHERITANCE_MAGIC
grand_child.template = constants.TEMPLATE_INHERITANCE_MAGIC
child.save()
grand_child.save()
grand_child2.template = constants.TEMPLATE_INHERITANCE_MAGIC
grand_child2.save()
self.assertFalse(hasattr(grand_child, '_template_cache'))
with self.assertNumQueries(1):
self.assertEqual(child.template, constants.TEMPLATE_INHERITANCE_MAGIC)
self.assertEqual(parent.get_template_name(), grand_child.get_template_name())
# test template cache
with self.assertNumQueries(0):
grand_child.get_template()
self.assertFalse(hasattr(grand_child2, '_template_cache'))
with self.assertNumQueries(1):
self.assertEqual(child2.template, 'col_two.html')
self.assertEqual(child2.get_template_name(), grand_child2.get_template_name())
# test template cache
with self.assertNumQueries(0):
grand_child2.get_template()
parent.template = constants.TEMPLATE_INHERITANCE_MAGIC
parent.save()
self.assertEqual(parent.template, constants.TEMPLATE_INHERITANCE_MAGIC)
self.assertEqual(parent.get_template(), get_cms_setting('TEMPLATES')[0][0])
self.assertEqual(parent.get_template_name(), get_cms_setting('TEMPLATES')[0][1])
def test_delete_with_plugins(self):
"""
Check that plugins and placeholders get correctly deleted when we delete
a page!
"""
Text = self.get_plugin_model('TextPlugin')
home = create_page("home", "nav_playground.html", "en")
page = create_page("page", "nav_playground.html", "en")
page.rescan_placeholders() # create placeholders
placeholder = page.placeholders.all()[0]
plugin_base = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=settings.LANGUAGES[0][0]
)
plugin_base = plugin_base.add_root(instance=plugin_base)
plugin = Text(body='')
plugin_base.set_base_attr(plugin)
plugin.save()
self.assertEqual(CMSPlugin.objects.count(), 1)
self.assertEqual(Text.objects.count(), 1)
self.assertTrue(Placeholder.objects.count() > 2)
page.delete()
home.delete()
self.assertEqual(CMSPlugin.objects.count(), 0)
self.assertEqual(Text.objects.count(), 0)
self.assertEqual(Placeholder.objects.count(), 0)
self.assertEqual(Page.objects.count(), 0)
def test_get_page_from_request_nopage(self):
request = self.get_request('/')
page = get_page_from_request(request)
self.assertEqual(page, None)
def test_get_page_from_request_with_page_404(self):
page = create_page("page", "nav_playground.html", "en", published=True)
page.publish('en')
request = self.get_request('/does-not-exist/')
found_page = get_page_from_request(request)
self.assertEqual(found_page, None)
def test_get_page_without_final_slash(self):
root = create_page("root", "nav_playground.html", "en", slug="root",
published=True)
page = create_page("page", "nav_playground.html", "en", slug="page",
published=True, parent=root)
root.publish('en')
page = page.reload()
page.publish('en')
request = self.get_request('/en/root/page')
found_page = get_page_from_request(request)
self.assertIsNotNone(found_page)
self.assertFalse(found_page.publisher_is_draft)
def test_ancestor_expired(self):
yesterday = tz_now() - datetime.timedelta(days=1)
tomorrow = tz_now() + datetime.timedelta(days=1)
root = create_page("root", "nav_playground.html", "en", slug="root",
published=True)
page_past = create_page("past", "nav_playground.html", "en", slug="past",
publication_end_date=yesterday,
published=True, parent=root)
page_test = create_page("test", "nav_playground.html", "en", slug="test",
published=True, parent=page_past)
page_future = create_page("future", "nav_playground.html", "en", slug="future",
publication_date=tomorrow,
published=True, parent=root)
page_test_2 = create_page("test", "nav_playground.html", "en", slug="test",
published=True, parent=page_future)
request = self.get_request(page_test.get_absolute_url())
page = get_page_from_request(request)
self.assertEqual(page, None)
request = self.get_request(page_test_2.get_absolute_url())
page = get_page_from_request(request)
self.assertEqual(page, None)
def test_page_already_expired(self):
"""
Test that a page which has an end date in the past gives a 404, not a
500.
"""
yesterday = tz_now() - datetime.timedelta(days=1)
with self.settings(CMS_PERMISSION=False):
page = create_page('page', 'nav_playground.html', 'en',
publication_end_date=yesterday, published=True)
resp = self.client.get(page.get_absolute_url('en'))
self.assertEqual(resp.status_code, 404)
def test_page_urls(self):
page1 = self.create_homepage('test page 1', 'nav_playground.html', 'en', published=True)
page2 = create_page('test page 2', 'nav_playground.html', 'en',
published=True, parent=page1)
page3 = create_page('test page 3', 'nav_playground.html', 'en',
published=True, parent=page2)
page4 = create_page('test page 4', 'nav_playground.html', 'en',
published=True)
page5 = create_page('test page 5', 'nav_playground.html', 'en',
published=True, parent=page4)
page1 = page1.reload()
page2 = page2.reload()
page3 = page3.reload()
page4 = page4.reload()
page5 = page5.reload()
self.assertEqual(page3.node.parent_id, page2.node.pk)
self.assertEqual(page2.node.parent_id, page1.node.pk)
self.assertEqual(page5.node.parent_id, page4.node.pk)
self.assertEqual(page1.get_absolute_url(),
self.get_pages_root() + '')
self.assertEqual(page2.get_absolute_url(),
self.get_pages_root() + 'test-page-2/')
self.assertEqual(page3.get_absolute_url(),
self.get_pages_root() + 'test-page-2/test-page-3/')
self.assertEqual(page4.get_absolute_url(),
self.get_pages_root() + 'test-page-4/')
self.assertEqual(page5.get_absolute_url(),
self.get_pages_root() + 'test-page-4/test-page-5/')
page3 = self.move_page(page3, page1)
self.assertEqual(page3.get_absolute_url(),
self.get_pages_root() + 'test-page-3/')
page3 = page3.reload()
page2 = page2.reload()
page5 = page5.reload()
page5 = self.move_page(page5, page2)
self.assertEqual(page5.get_absolute_url(),
self.get_pages_root() + 'test-page-2/test-page-5/')
page3 = page3.reload()
page4 = page4.reload()
page3 = self.move_page(page3, page4)
self.assertEqual(page3.get_absolute_url(),
self.get_pages_root() + 'test-page-4/test-page-3/')
def test_page_and_title_repr(self):
non_saved_page = Page()
self.assertIsNone(non_saved_page.pk)
self.assertIn('id=None', repr(non_saved_page))
saved_page = create_page('test saved page', 'nav_playground.html', 'en')
self.assertIsNotNone(saved_page.pk)
self.assertIn('id={}'.format(saved_page.pk), repr(saved_page))
self.assertIn('is_draft={}'.format(saved_page.publisher_is_draft), repr(saved_page))
non_saved_title = Title()
self.assertIsNone(non_saved_title.pk)
self.assertIn('id=None', repr(non_saved_title))
saved_title = saved_page.get_title_obj()
self.assertIsNotNone(saved_title.pk)
self.assertIn('id={}'.format(saved_title.pk), repr(saved_title))
self.assertIn('is_draft={}'.format(saved_title.publisher_is_draft), repr(saved_title))
def test_page_overwrite_urls(self):
page1 = self.create_homepage('test page 1', 'nav_playground.html', 'en', published=True)
page2 = create_page('test page 2', 'nav_playground.html', 'en',
published=True, parent=page1)
page3 = create_page('test page 3', 'nav_playground.html', 'en',
published=True, parent=page2, overwrite_url='i-want-another-url')
superuser = self.get_superuser()
self.assertEqual(page2.get_absolute_url(),
self.get_pages_root() + 'test-page-2/')
self.assertEqual(page3.get_absolute_url(),
self.get_pages_root() + 'i-want-another-url/')
endpoint = self.get_admin_url(Page, 'change', page2.pk)
with self.login_user_context(superuser):
response = self.client.post(endpoint, {'title': 'test page 2', 'slug': 'page-test-2'})
self.assertRedirects(response, self.get_admin_url(Page, 'changelist'))
page2 = Page.objects.get(pk=page2.pk)
page3 = Page.objects.get(pk=page3.pk)
self.assertEqual(page2.get_absolute_url(),
self.get_pages_root() + 'page-test-2/')
self.assertEqual(page3.get_absolute_url(),
self.get_pages_root() + 'i-want-another-url/')
# tests a bug found in 2.2 where saving an ancestor page
# wiped out the overwrite_url for child pages
page2.save()
self.assertEqual(page3.get_absolute_url(),
self.get_pages_root() + 'i-want-another-url/')
def test_slug_url_overwrite_clash(self):
""" Tests if a URL-Override clashes with a normal page url
"""
site = get_current_site()
with self.settings(CMS_PERMISSION=False):
create_page('home', 'nav_playground.html', 'en', published=True)
bar = create_page('bar', 'nav_playground.html', 'en', published=False)
foo = create_page('foo', 'nav_playground.html', 'en', published=True)
# Tests to ensure validate_url_uniqueness is ok on plain pages
self.assertTrue(validate_url_uniqueness(
site,
path=bar.get_path('en'),
language='en',
exclude_page=bar,
))
self.assertTrue(validate_url_uniqueness(
site,
path=foo.get_path('en'),
language='en',
exclude_page=foo,
))
# Set url_overwrite for page foo
title = foo.get_title_obj(language='en')
title.has_url_overwrite = True
title.path = 'bar'
title.save()
foo.publish('en')
self.assertRaises(
ValidationError,
validate_url_uniqueness,
site,
path=bar.get_path('en'),
language='en',
exclude_page=bar,
)
def test_valid_url_multisite(self):
site1 = Site.objects.get_current()
site3 = Site.objects.create(domain="sample3.com", name="sample3.com")
home = create_page('home', 'nav_playground.html', 'de', published=True, site=site1)
bar = create_page('bar', 'nav_playground.html', 'de', slug="bar", published=True, parent=home, site=site1)
home_s3 = create_page('home', 'nav_playground.html', 'de', published=True, site=site3)
bar_s3 = create_page('bar', 'nav_playground.html', 'de', slug="bar", published=True, parent=home_s3, site=site3)
self.assertTrue(validate_url_uniqueness(
site1,
path=bar.get_path('de'),
language='de',
exclude_page=bar,
))
self.assertTrue(validate_url_uniqueness(
site3,
path=bar_s3.get_path('de'),
language='de',
exclude_page=bar_s3,
))
def test_home_slug_not_accessible(self):
with self.settings(CMS_PERMISSION=False):
page = self.create_homepage('page', 'nav_playground.html', 'en', published=True)
self.assertEqual(page.get_absolute_url('en'), '/en/')
resp = self.client.get('/en/')
self.assertEqual(resp.status_code, HttpResponse.status_code)
resp = self.client.get('/en/page/')
self.assertEqual(resp.status_code, HttpResponseNotFound.status_code)
def test_plugin_loading_queries(self):
with self.settings(
CMS_TEMPLATES=(('placeholder_tests/base.html', 'tpl'), ),
):
page = create_page('home', 'placeholder_tests/base.html', 'en', published=True, slug='home')
placeholders = list(page.placeholders.all())
for i, placeholder in enumerate(placeholders):
for j in range(5):
add_plugin(placeholder, 'TextPlugin', 'en', body='text-%d-%d' % (i, j))
add_plugin(placeholder, 'LinkPlugin', 'en', name='link-%d-%d' % (i, j))
# trigger the apphook query so that it doesn't get in our way
reverse('pages-root')
# trigger the get_languages query so it doesn't get in our way
context = self.get_context(page=page)
context['request'].current_page.get_languages()
renderer = self.get_content_renderer(context['request'])
with self.assertNumQueries(4):
for i, placeholder in enumerate(placeholders):
content = renderer.render_page_placeholder(
placeholder.slot,
context,
inherit=False,
)
for j in range(5):
self.assertIn('text-%d-%d' % (i, j), content)
self.assertIn('link-%d-%d' % (i, j), content)
def test_xframe_options_allow(self):
"""Test that no X-Frame-Options is set when page's xframe_options is set to allow"""
page = create_page(
title='home',
template='nav_playground.html',
language='en',
published=True,
slug='home',
xframe_options=Page.X_FRAME_OPTIONS_ALLOW
)
resp = self.client.get(page.get_absolute_url('en'))
self.assertEqual(resp.get('X-Frame-Options'), None)
def test_xframe_options_sameorigin(self):
"""Test that X-Frame-Options is 'SAMEORIGIN' when xframe_options is set to origin"""
page = create_page(
title='home',
template='nav_playground.html',
language='en',
published=True,
slug='home',
xframe_options=Page.X_FRAME_OPTIONS_SAMEORIGIN
)
resp = self.client.get(page.get_absolute_url('en'))
self.assertEqual(resp.get('X-Frame-Options'), 'SAMEORIGIN')
def test_xframe_options_deny(self):
"""Test that X-Frame-Options is 'DENY' when xframe_options is set to deny"""
page = create_page(
title='home',
template='nav_playground.html',
language='en',
published=True,
slug='home',
xframe_options=Page.X_FRAME_OPTIONS_DENY
)
resp = self.client.get(page.get_absolute_url('en'))
self.assertEqual(resp.get('X-Frame-Options'), 'DENY')
def test_xframe_options_inherit_with_parent(self):
"""Test that X-Frame-Options is set to parent page's setting when inherit is set"""
parent = create_page(
title='home',
template='nav_playground.html',
language='en',
published=True,
slug='home',
xframe_options=Page.X_FRAME_OPTIONS_DENY
)
child1 = create_page(
title='subpage',
template='nav_playground.html',
language='en',
published=True,
slug='subpage',
parent=parent,
xframe_options=Page.X_FRAME_OPTIONS_INHERIT
)
child2 = create_page(
title='subpage',
template='nav_playground.html',
language='en',
published=True,
slug='subpage',
parent=child1,
xframe_options=Page.X_FRAME_OPTIONS_ALLOW
)
child3 = create_page(
title='subpage',
template='nav_playground.html',
language='en',
published=True,
slug='subpage',
parent=child2,
xframe_options=Page.X_FRAME_OPTIONS_INHERIT
)
resp = self.client.get(parent.get_absolute_url('en'))
self.assertEqual(resp.get('X-Frame-Options'), 'DENY')
resp = self.client.get(child1.get_absolute_url('en'))
self.assertEqual(resp.get('X-Frame-Options'), 'DENY')
resp = self.client.get(child2.get_absolute_url('en'))
self.assertEqual(resp.get('X-Frame-Options'), None)
resp = self.client.get(child3.get_absolute_url('en'))
self.assertEqual(resp.get('X-Frame-Options'), None)
def test_top_level_page_inherited_xframe_options_are_applied(self):
if getattr(settings, 'MIDDLEWARE', None):
override = {
'MIDDLEWARE': settings.MIDDLEWARE + [
'django.middleware.clickjacking.XFrameOptionsMiddleware']
}
else:
override = {
'MIDDLEWARE_CLASSES': settings.MIDDLEWARE_CLASSES + [
'django.middleware.clickjacking.XFrameOptionsMiddleware']
}
with self.settings(**override):
page = create_page('test page 1', 'nav_playground.html', 'en', published=True)
resp = self.client.get(page.get_absolute_url('en'))
self.assertEqual(resp.get('X-Frame-Options'), 'SAMEORIGIN')
def test_page_used_on_request(self):
"""
The rendered page changes depending on request and
user permissions.
"""
superuser = self.get_superuser()
staff_with_no_permissions = self.get_staff_user_with_no_permissions()
draft_text = '<p>text only in draft</p>'
public_text = '<p>text in draft & live</p>'
cms_page = create_page(
title='home',
template='nav_playground.html',
language='en',
published=True,
slug='home',
xframe_options=Page.X_FRAME_OPTIONS_DENY
)
placeholder = cms_page.placeholders.all()[0]
add_plugin(cms_page.placeholders.all()[0], 'TextPlugin', 'en', body=public_text)
cms_page.publish('en')
add_plugin(placeholder, 'TextPlugin', 'en', body=draft_text)
endpoint = cms_page.get_absolute_url('en')
with self.login_user_context(superuser):
# staff user with change permissions
# draft page is always used
resp = self.client.get(endpoint)
self.assertContains(resp, public_text)
self.assertContains(resp, draft_text)
with self.login_user_context(superuser):
# staff user with change permissions
# draft page is used regardless of edit
resp = self.client.get(endpoint + '?edit_off')
self.assertContains(resp, public_text)
self.assertContains(resp, draft_text)
with self.login_user_context(superuser):
# staff user with change permissions
# draft page is used regardless of edit
resp = self.client.get(endpoint + '?toolbar_off')
self.assertContains(resp, public_text)
self.assertContains(resp, draft_text)
with self.login_user_context(superuser):
# staff user with change permissions
# public page is used because of explicit ?preview
resp = self.client.get(endpoint + '?preview')
self.assertContains(resp, public_text)
self.assertNotContains(resp, draft_text)
with self.login_user_context(superuser):
# staff user with change permissions
# public page is used because of preview disables edit
resp = self.client.get(endpoint + '?preview&edit')
self.assertContains(resp, public_text)
self.assertNotContains(resp, draft_text)
with self.login_user_context(staff_with_no_permissions):
# staff user with no change permissions
# public page is always used
resp = self.client.get(endpoint)
self.assertContains(resp, public_text)
self.assertNotContains(resp, draft_text)
def test_page_preview_persists(self):
"""
Page preview persists in the user session to allow users
to navigate the site in public mode.
"""
superuser = self.get_superuser()
draft_text = '<p>text only in draft</p>'
public_text = '<p>text in draft & live</p>'
cms_page = create_page(
title='home',
template='nav_playground.html',
language='en',
published=True,
slug='home',
xframe_options=Page.X_FRAME_OPTIONS_DENY
)
placeholder = cms_page.placeholders.all()[0]
add_plugin(cms_page.placeholders.all()[0], 'TextPlugin', 'en', body=public_text)
cms_page.publish('en')
add_plugin(placeholder, 'TextPlugin', 'en', body=draft_text)
endpoint = cms_page.get_absolute_url('en')
with self.login_user_context(superuser):
# staff user with change permissions
# public page is used because of explicit ?preview
resp = self.client.get(endpoint + '?preview')
self.assertContains(resp, public_text)
self.assertNotContains(resp, draft_text)
resp = self.client.get(endpoint)
self.assertContains(resp, public_text)
self.assertNotContains(resp, draft_text)
class PageTreeTests(CMSTestCase):
def test_rename_node(self):
superuser = self.get_superuser()
home = create_page('grandpa', 'nav_playground.html', 'en', slug='home', published=True)
home.publish('en')
parent = create_page('parent', 'nav_playground.html', 'en', slug='parent', published=True)
parent.publish('en')
child = create_page('child', 'nav_playground.html', 'en', slug='child', published=True, parent=parent)
child.publish('en')
endpoint = self.get_admin_url(Page, 'change', parent.pk)
with self.login_user_context(superuser):
response = self.client.post(endpoint, {'title': 'parent', 'slug': 'father'})
self.assertRedirects(response, self.get_admin_url(Page, 'changelist'))
parent = Page.objects.get(pk=parent.pk)
parent.publish('en')
child = Page.objects.get(pk=child.pk)
self.assertEqual(child.get_absolute_url(language='en'), '/en/father/child/')
self.assertEqual(child.publisher_public.get_absolute_url(language='en'), '/en/father/child/')
def test_rename_node_alters_descendants(self):
superuser = self.get_superuser()
create_page('grandpa', 'nav_playground.html', 'en', slug='home', published=True)
parent = create_page('parent', 'nav_playground.html', 'en', slug='parent', published=True)
child = create_page('child', 'nav_playground.html', 'en', slug='child', published=True, parent=parent)
grandchild_1 = create_page('grandchild-1', 'nav_playground.html', 'en', slug='grandchild-1', published=True,
parent=child)
grandchild_2 = create_page('grandchild-2', 'nav_playground.html', 'en', slug='grandchild-2', published=True,
parent=child.reload())
grandchild_3 = create_page('grandchild-3', 'nav_playground.html', 'en', slug='grandchild-3', published=True,
parent=child.reload())
endpoint = self.get_admin_url(Page, 'change', parent.pk)
with self.login_user_context(superuser):
response = self.client.post(endpoint, {'title': 'parent', 'slug': 'father'})
self.assertRedirects(response, self.get_admin_url(Page, 'changelist'))
# Draft pages
self.assertEqual(grandchild_1.get_absolute_url(language='en'), '/en/father/child/grandchild-1/')
self.assertEqual(grandchild_2.get_absolute_url(language='en'), '/en/father/child/grandchild-2/')
self.assertEqual(grandchild_3.get_absolute_url(language='en'), '/en/father/child/grandchild-3/')
parent.reload().publish('en')
# Public pages
self.assertEqual(grandchild_1.publisher_public.get_absolute_url(language='en'), '/en/father/child/grandchild-1/')
self.assertEqual(grandchild_2.publisher_public.get_absolute_url(language='en'), '/en/father/child/grandchild-2/')
self.assertEqual(grandchild_3.publisher_public.get_absolute_url(language='en'), '/en/father/child/grandchild-3/')
def test_move_node(self):
home = create_page('grandpa', 'nav_playground.html', 'en', slug='home', published=True)
home.publish('en')
parent = create_page('parent', 'nav_playground.html', 'en', slug='parent', published=True)
parent.publish('en')
child = create_page('child', 'nav_playground.html', 'en', slug='child', published=True, parent=home)
child.publish('en')
child.move_page(parent.node)
child = child.reload()
child.publish('en')
child.reload()
self.assertEqual(child.get_absolute_url(language='en'), '/en/parent/child/')
self.assertEqual(child.publisher_public.get_absolute_url(language='en'), '/en/parent/child/')
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnSitesOperations(object):
"""VpnSitesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VpnSite"
"""Retrieves the details of a VPN site.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite being retrieved.
:type vpn_site_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnSite, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.VpnSite
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSite"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
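    # A minimal usage sketch (resource names are illustrative; in practice this operations
    # class is reached through NetworkManagementClient rather than instantiated directly):
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.network import NetworkManagementClient
    #
    #   network_client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    #   vpn_site = network_client.vpn_sites.get('my-resource-group', 'my-vpn-site')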
def _create_or_update_initial(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
vpn_site_parameters, # type: "_models.VpnSite"
**kwargs # type: Any
):
# type: (...) -> "_models.VpnSite"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSite"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_site_parameters, 'VpnSite')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnSite', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
vpn_site_parameters, # type: "_models.VpnSite"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnSite"]
"""Creates a VpnSite resource if it doesn't exist else updates the existing VpnSite.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite being created or updated.
:type vpn_site_name: str
:param vpn_site_parameters: Parameters supplied to create or update VpnSite.
:type vpn_site_parameters: ~azure.mgmt.network.v2019_06_01.models.VpnSite
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnSite or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_06_01.models.VpnSite]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSite"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
vpn_site_name=vpn_site_name,
vpn_site_parameters=vpn_site_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
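    # Illustrative usage sketch (added comment, not generated code; the client and
    # model variables below are hypothetical):
    #   poller = network_client.vpn_sites.begin_create_or_update(
    #       'my-resource-group', 'my-vpn-site', vpn_site_model)
    #   vpn_site = poller.result()  # blocks until the long-running operation completes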
def _update_tags_initial(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
vpn_site_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.VpnSite"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSite"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_site_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnSite', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
vpn_site_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnSite"]
"""Updates VpnSite tags.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite being updated.
:type vpn_site_name: str
:param vpn_site_parameters: Parameters supplied to update VpnSite tags.
:type vpn_site_parameters: ~azure.mgmt.network.v2019_06_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnSite or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_06_01.models.VpnSite]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSite"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
vpn_site_name=vpn_site_name,
vpn_site_parameters=vpn_site_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a VpnSite.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite being deleted.
:type vpn_site_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
vpn_site_name=vpn_site_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnSitesResult"]
"""Lists all the vpnSites in a resource group.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnSitesResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_06_01.models.ListVpnSitesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnSitesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnSitesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnSitesResult"]
"""Lists all the VpnSites in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnSitesResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_06_01.models.ListVpnSitesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnSitesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnSitesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnSites'} # type: ignore
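    # Illustrative usage sketch (added comment, not generated code; 'network_client'
    # is a hypothetical management client instance):
    #   for vpn_site in network_client.vpn_sites.list():
    #       print(vpn_site.name)  # ItemPaged follows next_link pages transparently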
|
|
#=======================================================================
__version__ = '''0.0.28'''
__sub_version__ = '''20040212122352'''
__copyright__ = '''(c) Alex A. Naanou 2003'''
#-----------------------------------------------------------------------
import re
##import acl
from pli.functional import rcurry
import pli.misc.filterlang as filterlang
##!!! REVIZE !!!##
#-----------------------------------------------------------------------
# WARNING: the selectors are quite slow!
#-----------------------------------------------------------_ls__iter---
# WARNING: this is a LL class. do not use directly...
class _ls__iter(object):
'''
this is a bidirectional iterator class...
NOTE: not intended for direct use...
'''
def __init__(self, top):
self._top = top
self._position = 0
def __iter__(self):
return self
def next(self):
isaccessible = self._top._isaccesible
end = len(self._top._slct_store)
while self._position < end:
if self._position < 0:
self._position = 0
e = self._top._slct_store[self._position]
if self._position < end:
self._position += 1
## if False not in [f(e) for f in self._top._slct_filter]:
if isaccessible(e):
return e
else:
break
raise StopIteration
def prev(self):
isaccessible = self._top._isaccesible
while self._position >= 0:
if self._position == len(self._top._slct_store):
self._position -= 1
e = self._top._slct_store[self._position]
if self._position >= 0:
self._position -= 1
## if False not in [f(e) for f in self._top._slct_filter]:
if isaccessible(e):
return e
else:
break
raise StopIteration
#--------------------------------------------------------ListSelector---
class ListSelector(object):
'''
this is a generic list selector.
'''
## __acl__ = None
def __init__(self, store, *filters):
'''
'''
self._slct_store = store
self._slct_filter = filters
self._slct_iter = self.__iter__()
self.slct_slice_length = None
self._slct_slice_start = 0
self._slct_slice_end = -1
def _isaccesible(self, obj):
'''
test if an object is accessible/visible through the selector...
'''
if False not in [f(obj) for f in self._slct_filter]:
return True
return False
_ls__iter = _ls__iter
def __iter__(self):
'''
'''
return self._ls__iter(self)
def filter(self, predicate):
'''
this will add a filter/predicate to the selector.
NOTE: this will reset the selector (?).
'''
self.reset()
self._slct_filter += (predicate,)
def unfilter(self):
'''
this will drop all filters.
'''
self._slct_filter = ()
def slice(self, length=None):
'''
this will set the slice size.
        if no argument is given, the length is reset so that the whole set is returned.
'''
self.slct_slice_length = length
def next(self):
'''
this will return the next bunch and move the cursor to the start of the next bunch.
'''
res = []
if self.slct_slice_length == None:
# return the whole set...
self.reset()
## if hasattr(self, '__acl__') and self.__acl__ != None:
## acl = self.__acl__
## res = [ e for e in self._slct_iter if acl.isaccessible(e) and acl.isvisible(e) ]
## else:
## res = list(self._slct_iter)
res = list(self._slct_iter)
self.reset()
return res
try:
i = 0
## if hasattr(self, '__acl__') and self.__acl__ != None:
## acl = self.__acl__
## while i < self.slct_slice_length:
## e = self._slct_iter.next()
## if acl.isaccessible(e) and acl.isvisible(e):
## res += [e]
## i += 1
## else:
## while i < self.slct_slice_length:
## res += [self._slct_iter.next()]
## i += 1
while i < self.slct_slice_length:
res += [self._slct_iter.next()]
i += 1
except StopIteration:
pass
return res
def prev(self):
'''
        this will return the previous bunch and move the cursor to the start of the previous bunch.
'''
res = []
if self.slct_slice_length == None:
# return the whole set...
self.reset()
## if hasattr(self, '__acl__') and self.__acl__ != None:
## acl = self.__acl__
## res = [ e for e in self._slct_iter if acl.isaccessible(e) and acl.isvisible(e)]
## else:
## res = list(self._slct_iter)
res = list(self._slct_iter)
self.reset()
return res
try:
i = 0
## if hasattr(self, '__acl__') and self.__acl__ != None:
## acl = self.__acl__
## while i < self.slct_slice_length:
## e = self._slct_iter.prev()
## if acl.isaccessible(e) and acl.isvisible(e):
## res += [e]
## i += 1
## else:
## while i < self.slct_slice_length:
## res += [self._slct_iter.prev()]
## i += 1
while i < self.slct_slice_length:
res += [self._slct_iter.prev()]
i += 1
except StopIteration:
pass
res.reverse()
return res
def reset(self):
'''
        this will move the cursor to the start of the result set.
'''
self._slct_iter._position = 0
def position(self, pos=None):
'''
'''
if pos != None:
self._slct_iter._position = pos
return self._slct_iter._position
def isatstart(self):
'''
'''
if self.slct_slice_length == None:
return True
return self._slct_iter._position == 0
def isatend(self):
'''
'''
if self.slct_slice_length == None:
return True
return self._slct_iter._position == len(self._slct_store)
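# Illustrative usage sketch (added comment; the element type and the predicate
# below are hypothetical):
#   sel = ListSelector(items, lambda o: o.visible)   # only "visible" items pass the filter
#   sel.slice(10)                                    # page through 10 items at a time
#   page = sel.next()                                # next bunch of accessible items
#   sel.prev()                                       # step back one bunch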
#--------------------------------------------------ListSortedSelector---
# WARNING: this selector will not get auto-updated on store update!
# use the refresh method...
# NOTE: the following are memory hungry!
# TODO make the sorter a mixin... (?)
class ListSortedSelector(ListSelector):
'''
'''
def __init__(self, store, filters=(), sort=()):
'''
'''
ListSelector.__init__(self, store[:], *filters)
self._slct_orig_store = store
self._slct_sort = sort
if sort != ():
self._slct_store.sort(self._attr_cmp)
def reverse(self):
'''
'''
self._slct_store.reverse()
self.reset()
def sort(self, *p):
'''
'''
self._slct_sort = p
self.reset()
if p != ():
self._slct_store.sort(self._attr_cmp)
def refresh(self):
'''
'''
self._slct_store = self._slct_orig_store[:]
## self.reset()
if self._slct_sort != ():
self._slct_store.sort(self._attr_cmp)
def _attr_cmp(self, a, b):
'''
'''
if len(self._slct_sort) == 0:
return cmp(a, b)
for attr in self._slct_sort:
pa, pb = hasattr(a, attr), hasattr(b, attr)
if False in (pa, pb):
return cmp(pa, pb)
aa, ba = getattr(a, attr), getattr(b, attr)
if aa != ba:
return cmp(aa, ba)
return 0
#--------------------------------------------------DictSortedSelector---
class DictSortedSelector(ListSortedSelector):
'''
'''
def __init__(self, store, filters=(), sort=()):
'''
'''
ListSortedSelector.__init__(self, store.values(), filters, sort)
self._slct_orig_store = store
def refresh(self):
'''
'''
self._slct_store = self._slct_orig_store.values()
## self.reset()
if self._slct_sort != ():
self._slct_store.sort(self._attr_cmp)
#----------------------------------------------DictSortedAttrSelector---
# if this flag is set the unavailable attributes will be returned as
# empty strings ('')...
RETURN_UNKNOWN_ATTRS = 1
# TODO make this a mixin... (do not inherit... wrap!)
# TODO make persistent store-specific selector sets...
class DictSortedAttrSelector(DictSortedSelector):
'''
'''
# this will define a getattr-like callable to be used to access
# stored object attributes....
__attribute_accessor__ = None
def __init__(self, store, filters=(), sort=(), attrs=(), flags=RETURN_UNKNOWN_ATTRS):
'''
'''
DictSortedSelector.__init__(self, store, filters, sort)
self._slct_attrs = attrs
self._slct_flags = flags
def attrs(self, *attrs):
'''
'''
self._slct_attrs = attrs
def _getres(self, lst):
'''
'''
if hasattr(self, '__attribute_accessor__') and self.__attribute_accessor__ != None:
            __getattr = self.__attribute_accessor__
else:
__getattr = getattr
res = []
for e in lst:
set = {}
if len(self._slct_attrs) == 0:
##!!!
return [{} for i in lst]
for a in self._slct_attrs:
try:
set[a] = __getattr(e, a)
except AttributeError:
if self._slct_flags&RETURN_UNKNOWN_ATTRS:
##!!!
set[a] = ''
res += [set]
return res
def next(self):
'''
'''
return self._getres(DictSortedSelector.next(self))
def prev(self):
'''
'''
return self._getres(DictSortedSelector.prev(self))
#--------------------------------DictSortedAttrSelectorWithAttrFilter---
# TODO make this a mixin... (do not inherit... wrap!)
##!! revize !!##
class DictSortedAttrSelectorWithAttrFilter(DictSortedAttrSelector):
'''
'''
__private_attrs__ = (
'_slct_filter', # WARNING: this attr is not safe...
)
if hasattr(DictSortedAttrSelector, '__private_attrs__'):
__private_attrs__ = DictSortedAttrSelector.__private_attrs__ + __private_attrs__
def __init__(self, store, filters={}, sort=(), attrs=(), flags=RETURN_UNKNOWN_ATTRS):
'''
'''
DictSortedAttrSelector.__init__(self, store, (), sort, attrs, flags)
        # make sure we are safe...
self.filter(filters)
def _isaccesible(self, obj):
'''
'''
if type(self._slct_filter) is dict:
if False not in [(hasattr(obj, attr) and getattr(obj, attr) == val) \
for attr, val in self._slct_filter.items()]:
return True
return False
else:
return self._slct_filter(obj)
def filter(self, filters={}):
'''
'''
if type(filters) is str:
self._slct_filter = filterlang.Filter(filters)
elif type(self._slct_filter) is not dict:
self._slct_filter = filters
else:
self._slct_filter.update(filters)
def unfilter(self):
'''
'''
self._slct_filter = {}
#-------------------------------DictSortedAttrSelectorWithFancyFilter---
class DictSortedAttrSelectorWithFancyFilter(DictSortedAttrSelector):
'''
'''
def __init__(self, store, filters=[], sort=(), attrs=(), flags=RETURN_UNKNOWN_ATTRS):
'''
'''
        DictSortedAttrSelector.__init__(self, store, (), sort, attrs, flags)
for f in filters:
self.filter(f)
    def _filter_func(self, obj, code):
        '''
        evaluate the compiled filter expression with the stored object bound
        to the name "obj" and return its (boolean) result.
        '''
        return eval(code, {}, {'obj': obj})
    def filter(self, filter_str):
        '''
        '''
        # check the filter string...
        ##!!!
        # replace attrs with qualified names...
        ##!!!
        filter_str = filter_str.replace('@', 'obj.')
        # compile the filter as an expression (compile needs a filename and a mode)...
        code = compile(filter_str, '<filter>', 'eval')
        self._slct_filter += (rcurry(self._filter_func, code),)
#=======================================================================
# vim:set ts=4 sw=4 nowrap :
|
|
#Assignment 1
#Author: Dylan Kingston
#Date started: 16/10/14
#Date submitted : 30/11/14
import httplib2 #Needed to download file from internet, not simply reading a file on the hard drive.
YearsOfAge=0 #global variables
Workclass=1
#fnlwgt=2 Not needed.
#Education=3 Not needed.
Education_Number=4
Marital_Status=5
Occupation=6
Relationship=7
Race=8
Sex=9
Capital_Gain=10
Capital_Loss=11
Hours_Per_Week=12
#Native_Country=13 Not needed.
Outcome=14
#Start of functions.
#Start of counting().
def counting(number):
for row in file:
filearray.append(row)
        number+=1 #Increment by 1 for each record in the file.
return(number)
#End of counting().
#Start of weights().
def weights(docarray,number,position):
counter=0 #A simple counter.
ref={} #A dictionary (Currently empty).
attArray = []
while(counter<number):
        split=docarray[counter].split(", ")
if split[position] in ref:
ref[split[position]]+=1
else:
ref[split[position]]=1
attArray.append(position)
counter+=1
    #Convert the raw counts into relative frequencies of the training portion.
    for key in ref:
        ref[key]=ref[key]/sev
return(ref)
#End of weights().
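#Hedged illustration (added comment, not part of the assignment): if the first
#sev=4 training records had Workclass values ['Private', 'Private', 'State-gov',
#'Private'], weights() would return {'Private': 0.75, 'State-gov': 0.25}, i.e.
#each category's relative frequency within the training portion of the file.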
#Start of separateXYnum().
def separateXYnum(records,attributepos):
X=0
Y=0
i=0
countU=0
countO=0
while(i<sev):
record=records[i]
recordarray=record.split(", ")
if recordarray[Outcome].startswith('>'):
X+=int(recordarray[attributepos]) #Earns more.
countO+=1
else:
Y+=int(recordarray[attributepos]) #Earns less.
countU+=1
i+=1
average_X=X/countO
average_Y=Y/countU
midpoint=average_X+average_Y
midpoint = midpoint/2
return(midpoint)
#End of separateXYnum().
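#Hedged illustration (added comment, not part of the assignment): if records
#labelled '>50K' average 45 hours per week and records labelled '<=50K' average
#35, separateXYnum() returns (45 + 35) / 2 = 40 as the decision midpoint for
#that attribute.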
#Start of separate().
def separate(diction,file,number,n):
i=0
pos=0
neg=0
midp=0
midn=0
above={}
below={}
while(i<number):
line=file[i].split(', ')
weight=diction[line[n]]
if(file[i].find('>50K')!=-1):
midp=midp + weight
pos+=1
elif(file[i].find('<=50K')!=-1):
midn=midn+weight
neg+=1
i+=1
midpoint=((midp/pos)+(midn/neg))/2
return(midpoint)
#End of separate().
#End of functions.
#Start of Main().
filedown = "http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data" #Download the file
h = httplib2.Http(".cache")
file_headers, file = h.request(filedown)
file = file.decode()
file = file.split('\n')
filearray=[]
count=0
print('Calculating...') #So the user knows the program is working, if it is running slowly.
count=counting(count) #Run the function counting().
sev=int(count*0.50)
#Training set.
#The following are all text based data.
workweight=weights(filearray,sev,Workclass)
maritalweight=weights(filearray,sev,Marital_Status)
occuweight=weights(filearray,sev,Occupation)
raceweight=weights(filearray,sev,Race)
sexweight=weights(filearray,sev,Sex)
relationweight=weights(filearray,sev,Relationship)
#The following are all integer based data.
age_mid=separateXYnum(filearray,YearsOfAge)
work_mid=separate(workweight,filearray,sev,Workclass)
edu_mid=separateXYnum(filearray,Education_Number)
marital_mid=separate(maritalweight,filearray,sev,Marital_Status)
occu_mid=separate(occuweight,filearray,sev,Occupation)
relation_mid=separate(relationweight,filearray,sev,Relationship)
race_mid=separate(raceweight,filearray,sev,Race)
sex_mid=separate(sexweight,filearray,sev,Sex)
gain_mid=separateXYnum(filearray,Capital_Gain)
loss_mid=separateXYnum(filearray,Capital_Loss)
hrs_mid=separateXYnum(filearray,Hours_Per_Week)
#Testing set
counter = 0
correct = 0
while(sev<count-2): #Errors resulted if it wasn't at -2.
More=0
Less=0
attribute=filearray[sev].split(", ")
#print("Check?:",type(attribute[age]),attribute[age]) Print until error, program was hitting the end of the file. Fixed now.
if (int(attribute[YearsOfAge]))>age_mid:
More+=-2
else: #I know these two are a little hardcoded, but it gave higher accuracy :)
Less+=2
if int(attribute[Education_Number])>edu_mid:
More+=1
else:
Less+=1
if int(attribute[Hours_Per_Week])>hrs_mid:
More+=1
else:
Less+=1
if int(attribute[Capital_Gain])>gain_mid:
More+=1
else:
Less+=1
if int(attribute[Capital_Loss])>loss_mid:
More+=1
else:
Less+=1
if (float(workweight[attribute[Workclass]])<work_mid):
More+=1
else:
Less+=1
if (float(maritalweight[attribute[Marital_Status]])>marital_mid):
More+=1
else:
Less+=1
if (float(occuweight[attribute[Occupation]])>occu_mid):
More+=1
else:
Less+=1
if (float(raceweight[attribute[Race]])>race_mid):
More+=1
else:
Less+=1
if (float(sexweight[attribute[Sex]])>sex_mid):
More+=1
else:
Less+=1
if (float(relationweight[attribute[Relationship]])>relation_mid):
More+=1
else:
Less+=1
if (More>Less):
answer='>50K'
else:
answer='<=50K'
if(filearray[sev].find(answer) != -1):
correct +=1
else:
correct +=0
counter+=1
sev+=1
accuracy = ((correct/counter)*100) #Calculate the accuracy.
total = 100 #Provide a total % value to compare the output against.
char = '/' #Used to separate the output value and the total value.
print('Accuracy is:',accuracy, char, total) #Print out the accuracy. Final program output.
#End of Main().
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the analytics internals in models/analytics/*.py."""
__author__ = 'Mike Gainer ([email protected])'
import collections
import datetime
import unittest
import jinja2
from common import jinja_utils
from models import analytics
from models import data_sources
from models import jobs
#-------------------------------------------------------------------------------
# Mock objects to simulate models/jobs subsystem
class MockAppContext(object):
def __init__(self, namespace_name):
self._namespace_name = namespace_name
def get_namespace_name(self):
return self._namespace_name
class MockJobEntity(object):
def __init__(self, status=jobs.STATUS_CODE_STARTED, output=None):
self._status = status
self._output = output
self._updated_on = datetime.datetime.utcnow()
def complete(self, output):
self._complete(output, jobs.STATUS_CODE_COMPLETED)
def fail(self, output):
self._complete(output, jobs.STATUS_CODE_FAILED)
def _complete(self, output, status):
self._output = output
self._status = status
now = datetime.datetime.utcnow()
self._execution_time_sec = int((now - self._updated_on).total_seconds())
self._updated_on = now
def start(self):
self._status = jobs.STATUS_CODE_STARTED
self._updated_on = datetime.datetime.utcnow()
@property
def output(self):
return self._output
@property
def status_code(self):
return self._status
@property
def has_finished(self):
return self._status in (
jobs.STATUS_CODE_COMPLETED, jobs.STATUS_CODE_FAILED)
@property
def updated_on(self):
return self._updated_on
@property
def execution_time_sec(self):
return self._execution_time_sec
class MockJobBase(jobs.DurableJobBase):
_jobs = {} # Mock persistent store
@classmethod
def clear_jobs(cls):
cls._jobs.clear()
def __init__(self, app_context):
super(MockJobBase, self).__init__(app_context)
def submit(self):
job = self.load()
if not job:
job = self._create_job()
if job.has_finished:
job.start()
def load(self):
return self._jobs.get(self._get_name(), None)
@classmethod
def _create_job(cls):
job = MockJobEntity()
cls._jobs[cls._get_name()] = job
return job
@classmethod
def _get_name(cls):
return cls.__name__
def cancel(self):
job = self.load()
if job and not job.has_finished:
job.fail('Canceled')
#-------------------------------------------------------------------------------
# Mock objects to simulate page-display level constructs
class MockXsrfCreator(object):
def create_xsrf_token(self, action):
return 'xsrf_' + action
class MockHandler(object):
def __init__(self, app_context):
self._templates = {}
self._app_context = app_context
self.request = collections.namedtuple('X', ['url'])('/foo/bar/baz')
def get_template(self, template_name, template_dirs):
jinja_environment = jinja_utils.create_jinja_environment(
loader=jinja2.FileSystemLoader(template_dirs))
return jinja_environment.get_template(template_name)
@property
def app_context(self):
return self._app_context
#-------------------------------------------------------------------------------
# Generators and data source classes for use in visualizations.
class GenOne(MockJobBase):
@staticmethod
def get_description():
return 'gen one'
class GenTwo(MockJobBase):
@staticmethod
def get_description():
return 'gen two'
class GenThree(MockJobBase):
@staticmethod
def get_description():
return 'gen three'
class NoGenSource(data_sources.SynchronousQuery):
@staticmethod
def fill_values(app_context, template_values):
template_values['no_gen_source'] = 'no_gen_value'
class OneGenSource(data_sources.SynchronousQuery):
@staticmethod
def required_generators():
return [GenOne]
@staticmethod
def fill_values(app_context, template_values, gen_one_job):
template_values['one_gen_source_gen_one'] = (
gen_one_job.output)
class TwoGenSource(data_sources.SynchronousQuery):
@staticmethod
def required_generators():
return [GenOne, GenTwo]
@staticmethod
def fill_values(app_context, template_values, gen_one_job, gen_two_job):
template_values['two_gen_source_gen_one'] = (
gen_one_job.output)
template_values['two_gen_source_gen_two'] = (
gen_two_job.output)
class ThreeGenSource(data_sources.SynchronousQuery):
@staticmethod
def required_generators():
return [GenOne, GenTwo, GenThree]
@staticmethod
def fill_values(app_context, template_values, gen_one_job, gen_two_job,
gen_three_job):
template_values['three_gen_source_gen_one'] = (
gen_one_job.output)
template_values['three_gen_source_gen_two'] = (
gen_two_job.output)
template_values['three_gen_source_gen_three'] = (
gen_three_job.output)
data_sources.Registry.register(NoGenSource)
data_sources.Registry.register(OneGenSource)
data_sources.Registry.register(TwoGenSource)
data_sources.Registry.register(ThreeGenSource)
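# Illustrative note (added comment; the 'demo' visualization below is hypothetical):
# a Visualization binds a name, a title, a template, and the data source classes
# whose fill_values() output populates that template, e.g.
#     analytics.Visualization('demo', 'Demo Statistics',
#                             'models_analytics_section.html',
#                             [NoGenSource, OneGenSource])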
#-------------------------------------------------------------------------------
# Actual tests.
class AnalyticsTests(unittest.TestCase):
def setUp(self):
analytics.by_name.clear()
MockJobBase.clear_jobs()
self._mock_app_context = MockAppContext('testing')
self._mock_handler = MockHandler(self._mock_app_context)
self._mock_xsrf = MockXsrfCreator()
def _generate_analytics_page(self, visualizations):
return analytics.generate_display_html(
self._mock_handler, self._mock_xsrf, visualizations)
def _run_generators_for_visualizations(self, app_context, visualizations):
for gen_class in analytics.utils._generators_for_visualizations(
visualizations):
gen_class(app_context).submit()
def _cancel_generators_for_visualizations(self, app_context,
visualizations):
for gen_class in analytics.utils._generators_for_visualizations(
visualizations):
gen_class(app_context).cancel()
def test_illegal_name(self):
with self.assertRaisesRegexp(ValueError, 'name "A" must contain only'):
analytics.Visualization('A', 'Foo', 'foo.html')
with self.assertRaisesRegexp(ValueError, 'name " " must contain only'):
analytics.Visualization(' ', 'Foo', 'foo.html')
with self.assertRaisesRegexp(ValueError, 'name "#" must contain only'):
analytics.Visualization('#', 'Foo', 'foo.html')
def test_illegal_generator(self):
with self.assertRaisesRegexp(
ValueError,
'All data source classes used in visualizations must be'):
analytics.Visualization('foo', 'foo', 'foo', [MockHandler])
def test_no_generator_display(self):
name = 'no_generator'
analytic = analytics.Visualization(
name, name, 'models_analytics_section.html', [NoGenSource])
result = self._generate_analytics_page([analytic])
# Statistic reports result to page
self.assertIn('no_generator_no_gen_source: "no_gen_value"', result)
# Statistic does not have a run/cancel button; it has no generators
# which depend on jobs.
self.assertNotIn('gdb-run-analytic-simple', result)
self.assertNotIn('gdb-cancel-analytic-simple', result)
def test_generator_run_cancel_state_display(self):
name = 'foo'
analytic = analytics.Visualization(
name, name, 'models_analytics_section.html', [OneGenSource])
result = self._generate_analytics_page([analytic])
self.assertIn('Statistics for gen one have not been', result)
self.assertIn('Update', result)
self.assertIn('action=run_visualizations', result)
self._run_generators_for_visualizations(self._mock_app_context,
[analytic])
result = self._generate_analytics_page([analytic])
self.assertIn('Job for gen one statistics started at', result)
self.assertIn('Cancel', result)
self.assertIn('action=cancel_visualizations', result)
self._cancel_generators_for_visualizations(self._mock_app_context,
[analytic])
result = self._generate_analytics_page([analytic])
self.assertIn('There was an error updating gen one statistics', result)
self.assertIn('<pre>Canceled</pre>', result)
self.assertIn('Update', result)
self.assertIn('action=run_visualizations', result)
self._run_generators_for_visualizations(self._mock_app_context,
[analytic])
result = self._generate_analytics_page([analytic])
self.assertIn('Job for gen one statistics started at', result)
self.assertIn('Cancel', result)
self.assertIn('action=cancel_visualizations', result)
GenOne(self._mock_app_context).load().complete('run_state_display')
result = self._generate_analytics_page([analytic])
self.assertIn('Statistics for gen one were last updated at', result)
self.assertIn('in about 0 sec', result)
self.assertIn('Update', result)
self.assertIn('action=run_visualizations', result)
self.assertIn('foo_one_gen_source_gen_one: "run_state_display"', result)
def test_multiple_visualizations_multiple_generators_multiple_sources(self):
visualizations = []
visualizations.append(analytics.Visualization(
'trivial', 'Trivial Statistics', 'models_analytics_section.html',
[NoGenSource]))
visualizations.append(analytics.Visualization(
'simple', 'Simple Statistics', 'models_analytics_section.html',
[OneGenSource]))
visualizations.append(analytics.Visualization(
'complex', 'Complex Statistics', 'models_analytics_section.html',
[NoGenSource, OneGenSource, TwoGenSource, ThreeGenSource]))
self._run_generators_for_visualizations(self._mock_app_context,
visualizations)
        # Verify that even though some generators have completed, 'complex'
        # still reports no data, because one of the generators it relies on
        # (GenThree) is still running.
GenOne(self._mock_app_context).load().complete('gen_one_data')
GenTwo(self._mock_app_context).load().complete('gen_two_data')
result = self._generate_analytics_page(visualizations)
self.assertIn('simple_one_gen_source_gen_one: "gen_one_data"', result)
self.assertIn('Statistics for gen one were last updated', result)
self.assertIn('Statistics for gen two were last updated', result)
self.assertIn('Job for gen three statistics started at', result)
self.assertNotIn('complex_three_gen_source', result)
# Finish last generator; should now have all data from all sources.
GenThree(self._mock_app_context).load().complete('gen_three_data')
result = self._generate_analytics_page(visualizations)
self.assertIn('trivial_no_gen_source: "no_gen_value"', result)
self.assertIn('simple_one_gen_source_gen_one: "gen_one_data"', result)
self.assertIn('complex_no_gen_source: "no_gen_value"', result)
self.assertIn('complex_one_gen_source_gen_one: "gen_one_data"', result)
self.assertIn('complex_two_gen_source_gen_one: "gen_one_data"', result)
self.assertIn('complex_two_gen_source_gen_two: "gen_two_data"', result)
self.assertIn('complex_three_gen_source_gen_one: "gen_one_data"',
result)
self.assertIn('complex_three_gen_source_gen_two: "gen_two_data"',
result)
self.assertIn('complex_three_gen_source_gen_three: "gen_three_data"',
result)
# Verify that we _don't_ have data for sections that didn't specify
# that source.
self.assertIn('trivial_one_gen_source_gen_one: ""', result)
self.assertIn('simple_no_gen_source: ""', result)
# We should have all headers
self.assertIn('Trivial Statistics', result)
self.assertIn('Simple Statistics', result)
self.assertIn('Complex Statistics', result)
# And submission forms for analytics w/ generators
self.assertNotIn(
'<input type="hidden" name="visualization" value="trivial"',
result)
self.assertIn(
'<input type="hidden" name="visualization" value="simple"',
result)
self.assertIn(
'<input type="hidden" name="visualization" value="complex"',
result)
|
|
#!/usr/bin/python2
#-------------------------------------------------------------------------------
# Filename: asteroids.py
#
# Author: David C. Drake (https://davidcdrake.com)
#
# Description: Contains an 'AsteroidsGame' class for managing a modified version
# of the classic game Asteroids and a 'main' function for running
# it. Developed using Python 2.7 and Pygame 1.9.
#-------------------------------------------------------------------------------
import random
import pygame
from pygame import mixer, mouse
import game
import shapes
from config import *
#-------------------------------------------------------------------------------
# Class: AsteroidsGame
#
# Description: Manages a modified version of the classic Asteroids game.
#
# Methods: __init__, game_logic, paint, spawn_asteroids
#-------------------------------------------------------------------------------
class AsteroidsGame(game.Game):
#---------------------------------------------------------------------------
# Method: __init__
#
# Description: Creates game objects and starts background music.
#
# Inputs: fps - Desired frames per second.
#
# Outputs: None.
#---------------------------------------------------------------------------
def __init__(self, fps=FRAMES_PER_SECOND):
game.Game.__init__(self, fps)
mouse.set_visible(False)
# Create the ship and place it in the center of the screen:
center = shapes.Point(self.width / 2, self.height / 2)
self.ship = shapes.Ship(center, SHIP_INITIAL_ROTATION, SHIP_COLOR)
# Create bullet and upgrade lists:
self.bullets = []
self.upgrades = []
# Create asteroids and background stars:
self.asteroids = []
self.spawn_asteroids()
self.stars = []
while len(self.stars) < (self.width * STAR_DENSITY):
self.stars.append(shapes.Star(self.get_random_point()))
# Initialize mixer and start looping background music:
mixer.init()
mixer.music.load(BACKGROUND_MUSIC)
mixer.music.play(-1)
#---------------------------------------------------------------------------
# Method: game_logic
#
# Description: Determines game behavior based on keyboard input and object
# interactions.
#
# Inputs: keys - Keys that are currently pressed down.
# new_keys - Keys that have just begun to be pressed down.
#
# Outputs: None.
#---------------------------------------------------------------------------
def game_logic(self, keys, new_keys):
# Ship:
self.ship.game_logic(keys, new_keys)
self.ship.boundary_check(self.width, self.height)
# Bullets:
if ((pygame.K_SPACE in new_keys or pygame.K_RETURN in new_keys or
pygame.K_KP_ENTER in new_keys or pygame.K_LCTRL in new_keys or
pygame.K_RCTRL in new_keys) and self.ship.active):
if self.ship.upgrade_level != 1:
self.bullets.append(shapes.Bullet(self.ship.get_points()[0],
self.ship.rotation))
if self.ship.upgrade_level > 0:
self.bullets.append(shapes.Bullet(self.ship.get_points()[3],
self.ship.rotation))
self.bullets.append(shapes.Bullet(self.ship.get_points()[9],
self.ship.rotation))
if self.ship.upgrade_level > 2:
self.bullets.append(shapes.Bullet(self.ship.get_points()[3],
self.ship.rotation + 45))
self.bullets.append(shapes.Bullet(self.ship.get_points()[9],
self.ship.rotation - 45))
if self.ship.upgrade_level > 3:
self.bullets.append(shapes.Bullet(self.ship.get_points()[3],
self.ship.rotation + 90))
self.bullets.append(shapes.Bullet(self.ship.get_points()[9],
self.ship.rotation - 90))
if self.ship.upgrade_level > 4:
self.bullets.append(shapes.Bullet(self.ship.get_points()[4],
self.ship.rotation + 135))
self.bullets.append(shapes.Bullet(self.ship.get_points()[8],
self.ship.rotation - 135))
if self.ship.upgrade_level > 5:
self.bullets.append(shapes.Bullet(self.ship.get_points()[6],
self.ship.rotation + 180))
        # Iterate over a copy so removing an off-screen bullet doesn't skip the next one:
        for b in self.bullets[:]:
            b.game_logic(keys, new_keys)
            if (b.position.x > self.width or b.position.x < 0 or
                    b.position.y > self.height or b.position.y < 0):
                self.bullets.remove(b)
# Upgrades:
        # Iterate over a copy so removing a collected upgrade doesn't skip the next one:
        for u in self.upgrades[:]:
            u.game_logic()
            if self.ship.active and self.ship.intersects(u):
                self.ship.upgrade()
                self.upgrades.remove(u)
# Asteroids:
if self.asteroid_count > 0:
for a in self.asteroids:
if a.active:
a.game_logic(keys, new_keys)
a.boundary_check(self.width, self.height)
if self.ship.active and self.ship.intersects(a):
self.ship.take_damage()
self.destroy_asteroid(a)
else:
for b in self.bullets:
if b.intersects(a):
self.bullets.remove(b)
self.destroy_asteroid(a)
break
elif self.asteroid_respawn_timer > 0:
self.asteroid_respawn_timer -= 1
else:
self.spawn_asteroids()
# Stars:
for s in self.stars:
s.twinkle()
#---------------------------------------------------------------------------
# Method: paint
#
# Description: Draws the background color and all active objects onto the
# screen.
#
# Inputs: surface - The surface onto which images will be drawn.
#
# Outputs: None.
#---------------------------------------------------------------------------
def paint(self, surface):
surface.fill(BACKGROUND_COLOR)
for s in self.stars:
s.paint(surface)
for u in self.upgrades:
u.paint(surface)
self.ship.paint(surface)
for b in self.bullets:
b.paint(surface)
for a in self.asteroids:
a.paint(surface)
#---------------------------------------------------------------------------
# Method: spawn_asteroids
#
# Description: Creates a new set of large asteroids. Also makes player
# temporarily invincible to avoid unfair deaths.
#
# Inputs: None.
#
# Outputs: None.
#---------------------------------------------------------------------------
def spawn_asteroids(self):
self.asteroid_count = int(self.width * ASTEROID_DENSITY)
while len(self.asteroids):
self.asteroids.pop()
self.ship.invincibility_timer = VULNERABILITY_DELAY
while len(self.asteroids) < self.asteroid_count:
self.asteroids.append(shapes.Asteroid(ASTEROID_MAX_RADIUS,
self.get_random_point()))
self.asteroid_respawn_timer = 0
#---------------------------------------------------------------------------
# Method: destroy_asteroid
#
# Description: Handles the destruction of a given asteroid and the resulting
# aftermath, including possibly creating two smaller asteroids
# in its place.
#
# Inputs: asteroid - The asteroid to be destroyed.
#
# Outputs: None.
#---------------------------------------------------------------------------
def destroy_asteroid(self, asteroid):
asteroid.active = False
self.ship.asteroids_destroyed += 1
if self.ship.asteroids_destroyed % UPGRADE_REQ == 0:
self.upgrades.append(shapes.Upgrade(asteroid.position))
half_radius = asteroid.average_radius / 2
self.asteroid_count -= 1
if half_radius >= ASTEROID_MIN_RADIUS:
self.asteroids.append(shapes.Asteroid(half_radius,
asteroid.position))
self.asteroids.append(shapes.Asteroid(half_radius,
asteroid.position))
self.asteroid_count += 2
elif self.asteroid_count <= 0:
self.asteroid_respawn_timer = RESPAWN_DELAY
#---------------------------------------------------------------------------
# Method: get_random_point
#
# Description: Generates a random spawn point (for a star or asteroid).
#
# Inputs: None.
#
# Outputs: Tuple containing the random coordinates.
#---------------------------------------------------------------------------
def get_random_point(self):
random_point = shapes.Point(int(random.uniform(0, self.width - 1)),
int(random.uniform(0, self.height - 1)))
return random_point
def main():
game = AsteroidsGame()
game.main_loop()
if __name__ == '__main__':
main()
|
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Contains generic performance metric base class, metric comparison class, and
other utility functions.
"""
import hashlib
import math
import os
import sys
import neon
from neon.util.param import opt_param
from neon.util.persist import YAMLable, ensure_dirs_exist
class Metric(YAMLable):
"""
A Metric quantitatively measures some aspect of model performance by
contrasting the predictions generated by the model with actual expected
outputs.
Though metrics may examine device buffers, all computation is carried out
on host, and the results are returned in host buffers.
This base class defines which operations each metric must support
to be used within our framework.
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
opt_param(self, ['epoch_interval'])
self.clear()
def __str__(self):
return self.__class__.__name__
def add(self, reference, outputs):
"""
Add the expected reference and predicted outputs passed to the set
of values used to calculate this metric.
Arguments:
reference (neon.backend.Tensor): Ground truth, expected outcomes.
If each outcome is a vector, we
expect it to be a column vector,
with each case in a separate
column.
outputs (neon.backend.Tensor): Predicted outputs. Must have the
same dimensions as reference.
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
def report(self):
"""
Report this metric's current calculated value(s).
Returns:
float or array-like: computed metric value
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
def clear(self):
"""
Reset this metric's calculated value(s)
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
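# Hedged illustration (added comment, not part of neon): a concrete Metric
# subclass is expected to accumulate state in add(), summarize it in report(),
# and reset it in clear(). A minimal sketch, assuming predictions and targets
# can be pulled to host as numpy arrays (the asnumpyarray() call below is an
# assumption about the backend Tensor API):
#
#     class MisclassRate(Metric):
#         def add(self, reference, outputs):
#             ref = reference.asnumpyarray().argmax(axis=0)
#             out = outputs.asnumpyarray().argmax(axis=0)
#             self.errors += (ref != out).sum()
#             self.count += ref.shape[0]
#         def report(self):
#             return self.errors / float(self.count)
#         def clear(self):
#             self.errors, self.count = 0, 0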
def dump_metrics(dump_file, experiment_file, start_time, elapsed_time,
backend_name, metrics, field_sep="\t"):
"""
Write or append collected metric values to the specified flat file.
Arguments:
dump_file (str): path to file to write. Will be created if doesn't
exist, or appended to (without header if it does)
experiment_file (str): path to yaml file used to run this experiment
start_time (str): date and time at which experiment was started.
        elapsed_time (float): time taken to run the experiment.
        backend_name (str): name of the backend the experiment was run against.
        metrics (dict): Collection of metric values, as returned from
FitPredictErrorExperiment.run() call.
field_sep (str, optional): string used to separate each field in
dump_file. Defaults to tab character.
"""
if dump_file is None or dump_file == '':
        df = sys.stdout  # write to standard output when no dump file is given
elif not os.path.exists(dump_file) or os.path.getsize(dump_file) == 0:
ensure_dirs_exist(dump_file)
df = open(dump_file, 'w')
metric_names = []
if isinstance(metrics, dict):
metric_names = ["%s-%s" % (metric.lower(), dset.lower())
for metric in sorted(metrics.keys())
for dset in sorted(metrics[metric].keys())]
df.write(field_sep.join(["host", "architecture", "os",
"os_kernel_release", "neon_version",
"backend",
"yaml_name", "yaml_sha1", "start_time",
"elapsed_time"] + metric_names) + "\n")
else:
df = open(dump_file, 'a')
info = os.uname()
trunc_exp_name = ("..." + os.path.sep +
os.path.dirname(experiment_file).split(os.path.sep)[-1] +
os.path.sep +
os.path.basename(experiment_file))
# TODO: better handle situation where metrics recorded differ from those
# already in file
metric_vals = []
if isinstance(metrics, dict):
metric_vals = ["%.5f" % metrics[metric][dset] for metric in
sorted(metrics.keys()) for dset in
sorted(metrics[metric].keys())]
df.write(field_sep.join([x.replace("\t", " ") for x in
[info[1], info[4], info[0], info[2],
neon.__version__, backend_name, trunc_exp_name,
hashlib.sha1(open(experiment_file,
'rb').read()).hexdigest(),
start_time, "%.3f" % elapsed_time] +
metric_vals]) + "\n")
df.close()
class MetricComparison(object):
"""
    Embodies the results of comparing the most recent run of a particular type
    of experiment against a prior set of runs.
Arguments:
dump_file (str): path of file to compare.
experiment_file (str): path to yaml file used to run this experiment
max_comps (int, optional): collect and compare statistics against
max_comps most recent prior runs of the
same example. Defaults to 10.
match_backend (bool, optional): Only compare metric results of the same
backend. Defaults to True.
field_sep (str, optional): Dump file field separator. Defaults to tab
character.
"""
def __init__(self, dump_file, experiment_file, max_comps=10,
match_backend=True, field_sep='\t'):
self.dump_file = dump_file
self.experiment_file = experiment_file
self.experiment = None
self.backend = None
self.results = []
if not os.path.exists(dump_file):
raise OSError("file: %s doesn't exist. Can't run comparisons" %
dump_file)
data = open(dump_file).readlines()
if len(data) < 1 or not data[0].startswith("host"):
raise OSError("file: %s seems to have invalid format" % dump_file)
self.experiment = ("..." + os.path.sep +
os.path.dirname(experiment_file)
.split(os.path.sep)[-1] + os.path.sep +
os.path.basename(experiment_file))
line_num = len(data) - 1
header = {x[1]: x[0] for x in
enumerate(data[0].rstrip('\r\n').split(field_sep))}
latest = None
comps = []
while line_num > 0 and len(comps) < max_comps:
if self.experiment in data[line_num]:
this_line = data[line_num].rstrip('\r\n').split(field_sep)
if (not match_backend or latest is None or
self.backend == this_line[header['backend']]):
if latest is None:
latest = this_line
self.backend = latest[header['backend']]
else:
comps.append(this_line)
line_num -= 1
if latest is None:
raise ValueError("unable to find any lines containing %s" %
self.experiment)
for name, idx in [(x, header[x]) for x in sorted(header.keys())
if x == "elapsed_time" or x.startswith("train-") or
x.startswith("test-") or
x.startswith("validation-")]:
val = float(latest[idx])
comp_sum = 0.0
comp_count = 0
for comp in comps:
if comp[idx] != "nan":
comp_sum += float(comp[idx])
comp_count += 1
if comp_count == 0:
comp_mean = float("nan")
else:
comp_mean = comp_sum / comp_count
self.results.append({"metric": name, "value": val,
"comp_mean": comp_mean, "num_comps":
comp_count})
def __str__(self):
return ("Experiment: {0}, Backend: {1} ".format(self.experiment,
self.backend) +
str(self.results))
def print_results(self, field_sep="\t", escape_colors=True,
color_threshold=.01, header=True, min_exp_field_width=1,
min_metric_name_field_width=1):
"""
Prints metric comparison results to the console, formatted based on
parameters passed.
Arguments:
field_sep (str, optional): string used to separate each field in
dump_file. Defaults to tab character.
escape_colors (bool, optional): Should we dump diffs in a different
color? Default is True.
color_threshold (float, optional): How different does a value have
to be from the comp mean to
warrant being colored? Specify
as a percentage of the mean (as
a value between 0 and 1).
Defaults to .01 (i.e. 1%)
header (bool, optional): Print the list of field names on the first
line. Defaults to True.
min_exp_field_width (int, optional): Left pad the experiment field
with spaces to ensure it is at
least the specified number of
characters. Defaults to 1
(i.e. don't pad).
min_metric_name_field_width (int, optional): Right pad the metric
name field with spaces
to give it a
particular length.
Defaults to 1 (i.e.
don't pad).
"""
def make_red(string):
return "\033[31m%s\033[0m" % string
def make_green(string):
return "\033[32m%s\033[0m" % string
def make_yellow(string):
return "\033[93m%s\033[0m" % string
if header:
print(field_sep.join(["experiment".rjust(min_exp_field_width),
"backend",
"metric".ljust(min_metric_name_field_width),
"value", "comp_mean", "num_comps"]))
for res in self.results:
val = res["value"]
comp_mean = res["comp_mean"]
if math.isnan(val):
val = make_yellow("nan") if escape_colors else "nan"
elif escape_colors:
if (comp_mean - val) > color_threshold * comp_mean:
# val has dropped enough to warrant coloring
if res["metric"].lower().startswith("auc"):
val = make_red("{:05f}".format(val))
else:
val = make_green("{:05f}".format(val))
elif (val - comp_mean) > color_threshold * comp_mean:
# val has increased enough to warrant coloring
if res["metric"].lower().startswith("auc"):
val = make_green("{:05f}".format(val))
else:
val = make_red("{:05f}".format(val))
else:
# no coloring needed
val = "{:05f}".format(val)
else:
val = "{:05f}".format(val)
if res["num_comps"] == 0:
comp_mean = make_yellow("nan") if escape_colors else "nan"
else:
comp_mean = "{:05f}".format(comp_mean)
print(field_sep.join([self.experiment.rjust(min_exp_field_width),
self.backend, res["metric"]
.ljust(min_metric_name_field_width),
val, comp_mean, str(res["num_comps"])]))
|
|
# Copyright (c) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hmac
import unittest
from hashlib import sha1
from time import time
from swift.common.middleware import tempauth, tempurl
from swift.common.swob import Request, Response, HeaderKeyDict
from swift.common.utils import split_path
class FakeApp(object):
def __init__(self, status_headers_body_iter=None):
self.calls = 0
self.status_headers_body_iter = status_headers_body_iter
if not self.status_headers_body_iter:
self.status_headers_body_iter = iter([('404 Not Found', {
'x-test-header-one-a': 'value1',
'x-test-header-two-a': 'value2',
'x-test-header-two-b': 'value3'}, '')])
self.request = None
def __call__(self, env, start_response):
self.calls += 1
self.request = Request.blank('', environ=env)
if 'swift.authorize' in env:
resp = env['swift.authorize'](self.request)
if resp:
return resp(env, start_response)
status, headers, body = self.status_headers_body_iter.next()
return Response(status=status, headers=headers,
body=body)(env, start_response)
class TestTempURL(unittest.TestCase):
def setUp(self):
self.app = FakeApp()
self.auth = tempauth.filter_factory({})(self.app)
self.auth.reseller_prefix = 'a'
self.tempurl = tempurl.filter_factory({})(self.auth)
def _make_request(self, path, environ=None, keys=(), **kwargs):
if environ is None:
environ = {}
_junk, account, _junk, _junk = split_path(path, 2, 4)
self._fake_cache_environ(environ, account, keys)
req = Request.blank(path, environ=environ, **kwargs)
return req
def _fake_cache_environ(self, environ, account, keys):
"""
Fake out the caching layer for get_account_info(). Injects account data
into environ such that keys are the tempurl keys, if set.
"""
meta = {'swash': 'buckle'}
for idx, key in enumerate(keys):
meta_name = 'Temp-URL-key' + (("-%d" % (idx + 1) if idx else ""))
if key:
meta[meta_name] = key
environ['swift.account/' + account] = {
'status': 204,
'container_count': '0',
'total_object_count': '0',
'bytes': '0',
'meta': meta}
def test_passthrough(self):
resp = self._make_request('/v1/a/c/o').get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' not in resp.body)
def test_allow_options(self):
self.app.status_headers_body_iter = iter([('200 Ok', {}, '')])
resp = self._make_request(
'/v1/a/c/o?temp_url_sig=abcde&temp_url_expires=12345',
environ={'REQUEST_METHOD': 'OPTIONS'}).get_response(self.tempurl)
self.assertEquals(resp.status_int, 200)
def assert_valid_sig(self, expires, path, keys, sig):
req = self._make_request(path, keys=keys,
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.headers['content-disposition'],
'attachment; filename="o"')
self.assertEquals(req.environ['swift.authorize_override'], True)
self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_get_valid(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
self.assert_valid_sig(expires, path, [key], sig)
def test_get_valid_key2(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key1 = 'abc123'
key2 = 'def456'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig1 = hmac.new(key1, hmac_body, sha1).hexdigest()
sig2 = hmac.new(key2, hmac_body, sha1).hexdigest()
for sig in (sig1, sig2):
self.assert_valid_sig(expires, path, [key1, key2], sig)
def test_get_valid_with_filename(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'filename=bob%%20%%22killer%%22.txt' % (sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.headers['content-disposition'],
'attachment; filename="bob \\\"killer\\\".txt"')
self.assertEquals(req.environ['swift.authorize_override'], True)
self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_obj_trailing_slash(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o/'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.headers['content-disposition'],
'attachment; filename="o"')
self.assertEquals(req.environ['swift.authorize_override'], True)
self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_filename_trailing_slash(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'filename=/i/want/this/just/as/it/is/' % (sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.headers['content-disposition'],
'attachment; filename="/i/want/this/just/as/it/is/"')
self.assertEquals(req.environ['swift.authorize_override'], True)
self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_get_valid_but_404(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertFalse('content-disposition' in resp.headers)
self.assertEquals(req.environ['swift.authorize_override'], True)
self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_put_not_allowed_by_get(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'REQUEST_METHOD': 'PUT',
'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
def test_put_valid(self):
method = 'PUT'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'REQUEST_METHOD': 'PUT',
'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertEquals(req.environ['swift.authorize_override'], True)
self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_get_not_allowed_by_put(self):
method = 'PUT'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
def test_missing_sig(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'QUERY_STRING': 'temp_url_expires=%s' % expires})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
def test_missing_expires(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s' % sig})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
def test_bad_path(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
def test_no_key(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[],
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
def test_head_allowed_by_get(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'REQUEST_METHOD': 'HEAD',
'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertEquals(req.environ['swift.authorize_override'], True)
self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_head_allowed_by_put(self):
method = 'PUT'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'REQUEST_METHOD': 'HEAD',
'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertEquals(req.environ['swift.authorize_override'], True)
self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_head_otherwise_not_allowed(self):
method = 'PUT'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
# Deliberately fudge expires to show HEADs aren't just automatically
# allowed.
expires += 1
req = self._make_request(path, keys=[key],
environ={'REQUEST_METHOD': 'HEAD',
'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
def test_post_not_allowed(self):
method = 'POST'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'REQUEST_METHOD': 'POST',
'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
def test_delete_not_allowed(self):
method = 'DELETE'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'REQUEST_METHOD': 'DELETE',
'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
def test_delete_allowed_with_conf(self):
self.tempurl.methods.append('DELETE')
method = 'DELETE'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'REQUEST_METHOD': 'DELETE',
'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
def test_unknown_not_allowed(self):
method = 'UNKNOWN'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'REQUEST_METHOD': 'UNKNOWN',
'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
def test_changed_path_invalid(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path + '2', keys=[key],
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
def test_changed_sig_invalid(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
if sig[-1] != '0':
sig = sig[:-1] + '0'
else:
sig = sig[:-1] + '1'
req = self._make_request(path, keys=[key],
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
def test_changed_expires_invalid(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' %
(sig, expires + 1)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
def test_different_key_invalid(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key + '2'],
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
def test_removed_incoming_header(self):
self.tempurl = tempurl.filter_factory({
'incoming_remove_headers': 'x-remove-this'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
headers={'x-remove-this': 'value'},
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertTrue('x-remove-this' not in self.app.request.headers)
def test_removed_incoming_headers_match(self):
self.tempurl = tempurl.filter_factory({
'incoming_remove_headers': 'x-remove-this-*',
'incoming_allow_headers': 'x-remove-this-except-this'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
headers={'x-remove-this-one': 'value1',
'x-remove-this-except-this': 'value2'},
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertTrue('x-remove-this-one' not in self.app.request.headers)
self.assertEquals(
self.app.request.headers['x-remove-this-except-this'], 'value2')
def test_removed_outgoing_header(self):
self.tempurl = tempurl.filter_factory({
'outgoing_remove_headers': 'x-test-header-one-a'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertTrue('x-test-header-one-a' not in resp.headers)
self.assertEquals(resp.headers['x-test-header-two-a'], 'value2')
def test_removed_outgoing_headers_match(self):
self.tempurl = tempurl.filter_factory({
'outgoing_remove_headers': 'x-test-header-two-*',
'outgoing_allow_headers': 'x-test-header-two-b'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key],
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertEquals(resp.headers['x-test-header-one-a'], 'value1')
self.assertTrue('x-test-header-two-a' not in resp.headers)
self.assertEquals(resp.headers['x-test-header-two-b'], 'value3')
def test_get_account(self):
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'HEAD', 'PATH_INFO': '/v1/a/c/o'}), 'a')
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/o'}), 'a')
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/v1/a/c/o'}), 'a')
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'POST', 'PATH_INFO': '/v1/a/c/o'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'DELETE', 'PATH_INFO': '/v1/a/c/o'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'UNKNOWN', 'PATH_INFO': '/v1/a/c/o'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c//////'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c///o///'}), 'a')
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a//o'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1//c/o'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '//a/c/o'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v2/a/c/o'}), None)
def test_get_temp_url_info(self):
s = 'f5d5051bddf5df7e27c628818738334f'
e = int(time() + 86400)
self.assertEquals(self.tempurl._get_temp_url_info({'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (s, e)}), (s, e, None))
self.assertEquals(self.tempurl._get_temp_url_info({
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'filename=bobisyouruncle' % (s, e)}), (s, e, 'bobisyouruncle'))
self.assertEquals(self.tempurl._get_temp_url_info({}),
(None, None, None))
self.assertEquals(self.tempurl._get_temp_url_info({'QUERY_STRING':
'temp_url_expires=%s' % e}), (None, e, None))
self.assertEquals(self.tempurl._get_temp_url_info({'QUERY_STRING':
'temp_url_sig=%s' % s}), (s, None, None))
self.assertEquals(self.tempurl._get_temp_url_info({'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=bad' % s}), (s, 0, None))
e = int(time() - 1)
self.assertEquals(self.tempurl._get_temp_url_info({'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s' % (s, e)}), (s, 0, None))
def test_get_hmac(self):
self.assertEquals(self.tempurl._get_hmac(
{'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/o'},
1, 'abc'),
'026d7f7cc25256450423c7ad03fc9f5ffc1dab6d')
self.assertEquals(self.tempurl._get_hmac(
{'REQUEST_METHOD': 'HEAD', 'PATH_INFO': '/v1/a/c/o'},
1, 'abc', request_method='GET'),
'026d7f7cc25256450423c7ad03fc9f5ffc1dab6d')
def test_invalid(self):
def _start_response(status, headers, exc_info=None):
            self.assertEquals(status, '401 Unauthorized')
self.assertTrue('Temp URL invalid' in
''.join(self.tempurl._invalid({'REQUEST_METHOD': 'GET'},
_start_response)))
self.assertEquals('',
''.join(self.tempurl._invalid({'REQUEST_METHOD': 'HEAD'},
_start_response)))
def test_clean_incoming_headers(self):
irh = ''
iah = ''
env = {'HTTP_TEST_HEADER': 'value'}
tempurl.TempURL(None, {'incoming_remove_headers': irh,
'incoming_allow_headers': iah})._clean_incoming_headers(env)
self.assertTrue('HTTP_TEST_HEADER' in env)
irh = 'test-header'
iah = ''
env = {'HTTP_TEST_HEADER': 'value'}
tempurl.TempURL(None, {'incoming_remove_headers': irh,
'incoming_allow_headers': iah})._clean_incoming_headers(env)
self.assertTrue('HTTP_TEST_HEADER' not in env)
irh = 'test-header-*'
iah = ''
env = {'HTTP_TEST_HEADER_ONE': 'value',
'HTTP_TEST_HEADER_TWO': 'value'}
tempurl.TempURL(None, {'incoming_remove_headers': irh,
'incoming_allow_headers': iah})._clean_incoming_headers(env)
self.assertTrue('HTTP_TEST_HEADER_ONE' not in env)
self.assertTrue('HTTP_TEST_HEADER_TWO' not in env)
irh = 'test-header-*'
iah = 'test-header-two'
env = {'HTTP_TEST_HEADER_ONE': 'value',
'HTTP_TEST_HEADER_TWO': 'value'}
tempurl.TempURL(None, {'incoming_remove_headers': irh,
'incoming_allow_headers': iah})._clean_incoming_headers(env)
self.assertTrue('HTTP_TEST_HEADER_ONE' not in env)
self.assertTrue('HTTP_TEST_HEADER_TWO' in env)
irh = 'test-header-* test-other-header'
iah = 'test-header-two test-header-yes-*'
env = {'HTTP_TEST_HEADER_ONE': 'value',
'HTTP_TEST_HEADER_TWO': 'value',
'HTTP_TEST_OTHER_HEADER': 'value',
'HTTP_TEST_HEADER_YES': 'value',
'HTTP_TEST_HEADER_YES_THIS': 'value'}
tempurl.TempURL(None, {'incoming_remove_headers': irh,
'incoming_allow_headers': iah})._clean_incoming_headers(env)
self.assertTrue('HTTP_TEST_HEADER_ONE' not in env)
self.assertTrue('HTTP_TEST_HEADER_TWO' in env)
self.assertTrue('HTTP_TEST_OTHER_HEADER' not in env)
self.assertTrue('HTTP_TEST_HEADER_YES' not in env)
self.assertTrue('HTTP_TEST_HEADER_YES_THIS' in env)
def test_clean_outgoing_headers(self):
orh = ''
oah = ''
hdrs = {'test-header': 'value'}
hdrs = HeaderKeyDict(tempurl.TempURL(None,
{'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
)._clean_outgoing_headers(hdrs.iteritems()))
self.assertTrue('test-header' in hdrs)
orh = 'test-header'
oah = ''
hdrs = {'test-header': 'value'}
hdrs = HeaderKeyDict(tempurl.TempURL(None,
{'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
)._clean_outgoing_headers(hdrs.iteritems()))
self.assertTrue('test-header' not in hdrs)
orh = 'test-header-*'
oah = ''
hdrs = {'test-header-one': 'value',
'test-header-two': 'value'}
hdrs = HeaderKeyDict(tempurl.TempURL(None,
{'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
)._clean_outgoing_headers(hdrs.iteritems()))
self.assertTrue('test-header-one' not in hdrs)
self.assertTrue('test-header-two' not in hdrs)
orh = 'test-header-*'
oah = 'test-header-two'
hdrs = {'test-header-one': 'value',
'test-header-two': 'value'}
hdrs = HeaderKeyDict(tempurl.TempURL(None,
{'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
)._clean_outgoing_headers(hdrs.iteritems()))
self.assertTrue('test-header-one' not in hdrs)
self.assertTrue('test-header-two' in hdrs)
orh = 'test-header-* test-other-header'
oah = 'test-header-two test-header-yes-*'
hdrs = {'test-header-one': 'value',
'test-header-two': 'value',
'test-other-header': 'value',
'test-header-yes': 'value',
'test-header-yes-this': 'value'}
hdrs = HeaderKeyDict(tempurl.TempURL(None,
{'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
)._clean_outgoing_headers(hdrs.iteritems()))
self.assertTrue('test-header-one' not in hdrs)
self.assertTrue('test-header-two' in hdrs)
self.assertTrue('test-other-header' not in hdrs)
self.assertTrue('test-header-yes' not in hdrs)
self.assertTrue('test-header-yes-this' in hdrs)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
"""
Copyright (c) 2015-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
import eth_ep
import gmii_ep
module = 'eth_mac_1g_gmii_fifo'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/lfsr.v")
srcs.append("../rtl/axis_gmii_rx.v")
srcs.append("../rtl/axis_gmii_tx.v")
srcs.append("../rtl/eth_mac_1g.v")
srcs.append("../rtl/eth_mac_1g_gmii.v")
srcs.append("../rtl/gmii_phy_if.v")
srcs.append("../rtl/oddr.v")
srcs.append("../rtl/ssio_sdr_in.v")
srcs.append("../rtl/ssio_sdr_out.v")
srcs.append("../lib/axis/rtl/axis_async_fifo.v")
srcs.append("../lib/axis/rtl/axis_async_fifo_adapter.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
TARGET = "SIM"
IODDR_STYLE = "IODDR2"
CLOCK_INPUT_STYLE = "BUFIO2"
AXIS_DATA_WIDTH = 8
AXIS_KEEP_ENABLE = (AXIS_DATA_WIDTH>8)
AXIS_KEEP_WIDTH = (AXIS_DATA_WIDTH/8)
ENABLE_PADDING = 1
MIN_FRAME_LENGTH = 64
TX_FIFO_DEPTH = 4096
TX_FIFO_PIPELINE_OUTPUT = 2
TX_FRAME_FIFO = 1
TX_DROP_BAD_FRAME = TX_FRAME_FIFO
TX_DROP_WHEN_FULL = 0
RX_FIFO_DEPTH = 4096
RX_FIFO_PIPELINE_OUTPUT = 2
RX_FRAME_FIFO = 1
RX_DROP_BAD_FRAME = RX_FRAME_FIFO
RX_DROP_WHEN_FULL = RX_FRAME_FIFO
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
gtx_clk = Signal(bool(0))
gtx_rst = Signal(bool(0))
logic_clk = Signal(bool(0))
logic_rst = Signal(bool(0))
tx_axis_tdata = Signal(intbv(0)[AXIS_DATA_WIDTH:])
tx_axis_tkeep = Signal(intbv(1)[AXIS_KEEP_WIDTH:])
tx_axis_tvalid = Signal(bool(0))
tx_axis_tlast = Signal(bool(0))
tx_axis_tuser = Signal(bool(0))
rx_axis_tready = Signal(bool(0))
gmii_rx_clk = Signal(bool(0))
gmii_rxd = Signal(intbv(0)[8:])
gmii_rx_dv = Signal(bool(0))
gmii_rx_er = Signal(bool(0))
mii_tx_clk = Signal(bool(0))
ifg_delay = Signal(intbv(0)[8:])
# Outputs
tx_axis_tready = Signal(bool(0))
rx_axis_tdata = Signal(intbv(0)[AXIS_DATA_WIDTH:])
rx_axis_tkeep = Signal(intbv(1)[AXIS_KEEP_WIDTH:])
rx_axis_tvalid = Signal(bool(0))
rx_axis_tlast = Signal(bool(0))
rx_axis_tuser = Signal(bool(0))
gmii_tx_clk = Signal(bool(0))
gmii_txd = Signal(intbv(0)[8:])
gmii_tx_en = Signal(bool(0))
gmii_tx_er = Signal(bool(0))
tx_error_underflow = Signal(bool(0))
tx_fifo_overflow = Signal(bool(0))
tx_fifo_bad_frame = Signal(bool(0))
tx_fifo_good_frame = Signal(bool(0))
rx_error_bad_frame = Signal(bool(0))
rx_error_bad_fcs = Signal(bool(0))
rx_fifo_overflow = Signal(bool(0))
rx_fifo_bad_frame = Signal(bool(0))
rx_fifo_good_frame = Signal(bool(0))
    speed = Signal(intbv(0)[2:])
# sources and sinks
axis_source_pause = Signal(bool(0))
axis_sink_pause = Signal(bool(0))
mii_select = Signal(bool(0))
gmii_source = gmii_ep.GMIISource()
gmii_source_logic = gmii_source.create_logic(
gmii_rx_clk,
rst,
txd=gmii_rxd,
tx_en=gmii_rx_dv,
tx_er=gmii_rx_er,
mii_select=mii_select,
name='gmii_source'
)
gmii_sink = gmii_ep.GMIISink()
gmii_sink_logic = gmii_sink.create_logic(
gmii_tx_clk,
rst,
rxd=gmii_txd,
rx_dv=gmii_tx_en,
rx_er=gmii_tx_er,
mii_select=mii_select,
name='gmii_sink'
)
axis_source = axis_ep.AXIStreamSource()
axis_source_logic = axis_source.create_logic(
logic_clk,
logic_rst,
tdata=tx_axis_tdata,
tkeep=tx_axis_tkeep,
tvalid=tx_axis_tvalid,
tready=tx_axis_tready,
tlast=tx_axis_tlast,
tuser=tx_axis_tuser,
pause=axis_source_pause,
name='axis_source'
)
axis_sink = axis_ep.AXIStreamSink()
axis_sink_logic = axis_sink.create_logic(
logic_clk,
logic_rst,
tdata=rx_axis_tdata,
tkeep=rx_axis_tkeep,
tvalid=rx_axis_tvalid,
tready=rx_axis_tready,
tlast=rx_axis_tlast,
tuser=rx_axis_tuser,
pause=axis_sink_pause,
name='axis_sink'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
gtx_clk=gtx_clk,
gtx_rst=gtx_rst,
logic_clk=logic_clk,
logic_rst=logic_rst,
tx_axis_tdata=tx_axis_tdata,
tx_axis_tkeep=tx_axis_tkeep,
tx_axis_tvalid=tx_axis_tvalid,
tx_axis_tready=tx_axis_tready,
tx_axis_tlast=tx_axis_tlast,
tx_axis_tuser=tx_axis_tuser,
rx_axis_tdata=rx_axis_tdata,
rx_axis_tkeep=rx_axis_tkeep,
rx_axis_tvalid=rx_axis_tvalid,
rx_axis_tready=rx_axis_tready,
rx_axis_tlast=rx_axis_tlast,
rx_axis_tuser=rx_axis_tuser,
gmii_rx_clk=gmii_rx_clk,
gmii_rxd=gmii_rxd,
gmii_rx_dv=gmii_rx_dv,
gmii_rx_er=gmii_rx_er,
gmii_tx_clk=gmii_tx_clk,
mii_tx_clk=mii_tx_clk,
gmii_txd=gmii_txd,
gmii_tx_en=gmii_tx_en,
gmii_tx_er=gmii_tx_er,
tx_error_underflow=tx_error_underflow,
tx_fifo_overflow=tx_fifo_overflow,
tx_fifo_bad_frame=tx_fifo_bad_frame,
tx_fifo_good_frame=tx_fifo_good_frame,
rx_error_bad_frame=rx_error_bad_frame,
rx_error_bad_fcs=rx_error_bad_fcs,
rx_fifo_overflow=rx_fifo_overflow,
rx_fifo_bad_frame=rx_fifo_bad_frame,
rx_fifo_good_frame=rx_fifo_good_frame,
speed=speed,
ifg_delay=ifg_delay
)
@always(delay(4))
def clkgen():
clk.next = not clk
gtx_clk.next = not clk
logic_clk.next = not clk
rx_clk_hp = Signal(int(4))
@instance
def rx_clk_gen():
while True:
yield delay(int(rx_clk_hp))
gmii_rx_clk.next = not gmii_rx_clk
mii_tx_clk.next = not gmii_rx_clk
rx_error_bad_frame_asserted = Signal(bool(0))
rx_error_bad_fcs_asserted = Signal(bool(0))
@always(clk.posedge)
def monitor():
if (rx_error_bad_frame):
rx_error_bad_frame_asserted.next = 1
if (rx_error_bad_fcs):
rx_error_bad_fcs_asserted.next = 1
clk_enable_rate = Signal(int(0))
clk_enable_div = Signal(int(0))
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
gtx_rst.next = 1
logic_rst.next = 1
yield clk.posedge
rst.next = 0
gtx_rst.next = 0
logic_rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
ifg_delay.next = 12
# testbench stimulus
for rate, mii in [(4, 0), (20, 1), (200, 1)]:
rx_clk_hp.next = rate
mii_select.next = mii
yield delay(1000)
yield clk.posedge
print("test 1: test rx packet")
current_test.next = 1
test_frame = eth_ep.EthFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x8000
test_frame.payload = bytearray(range(32))
test_frame.update_fcs()
axis_frame = test_frame.build_axis_fcs()
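            # prepend 7 preamble octets (0x55) and the SFD (0xD5) before the frame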
gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+bytearray(axis_frame))
yield axis_sink.wait()
rx_frame = axis_sink.recv()
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis(rx_frame)
eth_frame.update_fcs()
assert eth_frame == test_frame
yield delay(100)
yield clk.posedge
print("test 2: test tx packet")
current_test.next = 2
test_frame = eth_ep.EthFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x8000
test_frame.payload = bytearray(range(32))
test_frame.update_fcs()
axis_frame = test_frame.build_axis()
axis_source.send(axis_frame)
yield gmii_sink.wait()
rx_frame = gmii_sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis_fcs(rx_frame.data[8:])
print(hex(eth_frame.eth_fcs))
print(hex(eth_frame.calc_fcs()))
assert len(eth_frame.payload.data) == 46
assert eth_frame.eth_fcs == eth_frame.calc_fcs()
assert eth_frame.eth_dest_mac == test_frame.eth_dest_mac
assert eth_frame.eth_src_mac == test_frame.eth_src_mac
assert eth_frame.eth_type == test_frame.eth_type
assert eth_frame.payload.data.index(test_frame.payload.data) == 0
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
|
|
###
# Copyright (c) 2004-2005, Jeremiah Fincher
# Copyright (c) 2009-2010, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import re
import os
import time
import json
import codecs
import string
import textwrap
from . import utils, i18n
from .utils import minisix
_ = i18n.PluginInternationalization()
def error(s):
"""Replace me with something better from another module!"""
print('***', s)
def exception(s):
"""Ditto!"""
print('***', s, 'A bad exception.')
class RegistryException(Exception):
pass
class InvalidRegistryFile(RegistryException):
pass
class InvalidRegistryName(RegistryException):
pass
class InvalidRegistryValue(RegistryException):
pass
class NonExistentRegistryEntry(RegistryException, AttributeError):
# If we use hasattr() on a configuration group/value, Python 3 calls
# __getattr__ and looks for an AttributeError, so __getattr__ has to
# raise an AttributeError if a registry entry does not exist.
pass
ENCODING = 'string_escape' if minisix.PY2 else 'unicode_escape'
decoder = codecs.getdecoder(ENCODING)
encoder = codecs.getencoder(ENCODING)
_cache = utils.InsensitivePreservingDict()
_lastModified = 0
def open_registry(filename, clear=False):
"""Initializes the module by loading the registry file into memory."""
global _lastModified
if clear:
_cache.clear()
_fd = open(filename)
fd = utils.file.nonCommentNonEmptyLines(_fd)
acc = ''
slashEnd = re.compile(r'\\*$')
for line in fd:
line = line.rstrip('\r\n')
# XXX There should be some way to determine whether or not we're
# starting a new variable or not. As it is, if there's a backslash
# at the end of every line in a variable, it won't be read, and
# worse, the error will pass silently.
#
# If the line ends in an odd number of backslashes, then there is a
        # line-continuation.
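        #
        # For example (illustrative key name, not a real setting): a line such as
        #     some.config.name: a long value \
        # ends in a single (odd) backslash and is therefore continued onto the next
        # line, whereas a line ending in two backslashes (an escaped backslash) is not.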
m = slashEnd.search(line)
if m and len(m.group(0)) % 2:
acc += line[:-1]
continue
else:
acc += line
try:
(key, value) = re.split(r'(?<!\\):', acc, 1)
key = key.strip()
value = value.strip()
value = decoder(value)[0]
acc = ''
except ValueError:
raise InvalidRegistryFile('Error unpacking line %r' % acc)
_cache[key] = value
_lastModified = time.time()
_fd.close()
CONF_FILE_HEADER = """
######
# Although it is technically possible to do so, we do not recommend that
# you edit this file with a text editor.
# Whenever possible, do it on IRC using the Config plugin, which
# checks that the values you set are valid before writing them to the
# configuration.
# Moreover, if you edit this file while the bot is running, your
# changes may be lost.
######
"""
def close(registry, filename, private=True):
first = True
fd = utils.file.AtomicFile(filename)
fd.write(CONF_FILE_HEADER)
for (name, value) in registry.getValues(getChildren=True):
help = value.help()
if help:
lines = textwrap.wrap(value._help)
for (i, line) in enumerate(lines):
lines[i] = '# %s\n' % line
lines.insert(0, '###\n')
if first:
first = False
else:
lines.insert(0, '\n')
if hasattr(value, 'value'):
if value._showDefault:
lines.append('#\n')
try:
x = value.__class__(value._default, value._help)
except Exception as e:
exception('Exception instantiating default for %s:' %
value._name)
try:
lines.append('# Default value: %s\n' % x)
except Exception:
exception('Exception printing default value of %s:' %
value._name)
lines.append('###\n')
fd.writelines(lines)
if hasattr(value, 'value'): # This lets us print help for non-values.
try:
if private or not value._private:
s = value.serialize()
else:
s = 'CENSORED'
fd.write('%s: %s\n' % (name, s))
except Exception:
exception('Exception printing value:')
fd.close()
def isValidRegistryName(name):
# Now we can have . and : in names. I'm still gonna call shenanigans on
# anyone who tries to have spaces (though technically I can't see any
# reason why it wouldn't work). We also reserve all names starting with
# underscores for internal use.
return len(name.split()) == 1 and not name.startswith('_')
def escape(name):
name = encoder(name)[0].decode()
name = name.replace(':', '\\:')
name = name.replace('.', '\\.')
return name
def unescape(name):
name = name.replace('\\.', '.')
name = name.replace('\\:', ':')
name = decoder(name.encode())[0]
return name
_splitRe = re.compile(r'(?<!\\)\.')
def split(name):
return list(map(unescape, _splitRe.split(name)))
def join(names):
return '.'.join(map(escape, names))
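# For example (sketch): a dot inside a single name is escaped so that it survives
# a round trip through join()/split():
#   escape('a.b')           -> 'a\\.b'
#   join(['group', 'a.b'])  -> 'group.a\\.b'
#   split('group.a\\.b')    -> ['group', 'a.b']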
class Group(object):
"""A group; it doesn't hold a value unless handled by a subclass."""
def __init__(self, help='', supplyDefault=False,
orderAlphabetically=False, private=False):
self._help = utils.str.normalizeWhitespace(help)
self._name = 'unset'
self._added = []
self._children = utils.InsensitivePreservingDict()
self._lastModified = 0
self._private = private
self._supplyDefault = supplyDefault
self._orderAlphabetically = orderAlphabetically
OriginalClass = self.__class__
class X(OriginalClass):
"""This class exists to differentiate those values that have
been changed from their default from those that haven't."""
def set(self, *args):
self.__class__ = OriginalClass
self.set(*args)
def setValue(self, *args):
self.__class__ = OriginalClass
self.setValue(*args)
self.X = X
def __call__(self):
raise ValueError('Groups have no value.')
def __nonExistentEntry(self, attr):
s = '%r is not a valid entry in %r' % (attr, self._name)
raise NonExistentRegistryEntry(s)
def __makeChild(self, attr, s):
v = self.__class__(self._default, self._help)
v.set(s)
v.__class__ = self.X
v._supplyDefault = False
v._help = '' # Clear this so it doesn't print a bazillion times.
self.register(attr, v)
return v
def __hasattr__(self, attr):
return attr in self._children
def __getattr__(self, attr):
if attr in self._children:
return self._children[attr]
elif self._supplyDefault:
return self.__makeChild(attr, str(self))
else:
self.__nonExistentEntry(attr)
def help(self):
return i18n.PluginInternationalization().__call__(self._help)
def get(self, attr):
# Not getattr(self, attr) because some nodes might have groups that
# are named the same as their methods.
return self.__getattr__(attr)
def setName(self, name):
#print '***', name
self._name = name
if name in _cache and self._lastModified < _lastModified:
#print '***>', _cache[name]
self.set(_cache[name])
if self._supplyDefault:
for (k, v) in _cache.items():
if k.startswith(self._name):
rest = k[len(self._name)+1:] # +1 is for .
parts = split(rest)
if len(parts) == 1 and parts[0] == name:
try:
self.__makeChild(name, v)
except InvalidRegistryValue:
# It's probably supposed to be registered later.
pass
def register(self, name, node=None):
if not isValidRegistryName(name):
raise InvalidRegistryName(name)
if node is None:
node = Group(private=self._private)
else:
node._private = node._private or self._private
# We tried in any number of horrible ways to make it so that
# re-registering something would work. It doesn't, plain and simple.
# For the longest time, we had an "Is this right?" comment here, but
# from experience, we now know that it most definitely *is* right.
if name not in self._children:
self._children[name] = node
self._added.append(name)
names = split(self._name)
names.append(name)
fullname = join(names)
node.setName(fullname)
else:
# We do this in order to reload the help, if it changed.
if node._help != '' and node._help != self._children[name]._help:
self._children[name]._help = node._help
# We do this so the return value from here is at least useful;
# otherwise, we're just returning a useless, unattached node
# that's simply a waste of space.
node = self._children[name]
return node
def unregister(self, name):
try:
node = self._children[name]
del self._children[name]
# We do this because we need to remove case-insensitively.
name = name.lower()
for elt in reversed(self._added):
if elt.lower() == name:
self._added.remove(elt)
if node._name in _cache:
del _cache[node._name]
return node
except KeyError:
self.__nonExistentEntry(name)
def rename(self, old, new):
node = self.unregister(old)
self.register(new, node)
def getValues(self, getChildren=False, fullNames=True):
L = []
if self._orderAlphabetically:
self._added.sort()
for name in self._added:
node = self._children[name]
if hasattr(node, 'value') or hasattr(node, 'help'):
if node.__class__ is not self.X:
L.append((node._name, node))
if getChildren:
L.extend(node.getValues(getChildren, fullNames))
if not fullNames:
L = [(split(s)[-1], node) for (s, node) in L]
return L
class _NoValueGiven:
# Special value for Value.error()
pass
class Value(Group):
"""Invalid registry value. If you're getting this message, report it,
because we forgot to put a proper help string here."""
def __init__(self, default, help, setDefault=True,
showDefault=True, **kwargs):
self.__parent = super(Value, self)
self.__parent.__init__(help, **kwargs)
self._default = default
self._showDefault = showDefault
self._help = utils.str.normalizeWhitespace(help.strip())
self._callbacks = []
if setDefault:
self.setValue(default)
def error(self, value=_NoValueGiven):
if hasattr(self, 'errormsg') and value is not _NoValueGiven:
try:
s = self.errormsg % value
except TypeError:
s = self.errormsg
elif self.__doc__:
s = self.__doc__
else:
s = """%s has no docstring. If you're getting this message,
report it, because we forgot to put a proper help string here."""%\
self._name
e = InvalidRegistryValue(utils.str.normalizeWhitespace(s))
e.value = self
raise e
def setName(self, *args):
if self._name == 'unset':
self._lastModified = 0
self.__parent.setName(*args)
self._lastModified = time.time()
def set(self, s):
"""Override this with a function to convert a string to whatever type
you want, and call self.setValue to set the value."""
self.setValue(s)
def setValue(self, v):
"""Check conditions on the actual value type here. I.e., if you're a
IntegerLessThanOneHundred (all your values must be integers less than
100) convert to an integer in set() and check that the integer is less
than 100 in this method. You *must* call this parent method in your
own setValue."""
self._lastModified = time.time()
self.value = v
if self._supplyDefault:
for (name, v) in list(self._children.items()):
if v.__class__ is self.X:
self.unregister(name)
# We call the callback once everything is clean
for callback, args, kwargs in self._callbacks:
callback(*args, **kwargs)
def context(self, value):
"""Return a context manager object, which sets this variable to a
temporary value, and set the previous value back when exiting the
context."""
class Context:
def __enter__(self2):
self2._old_value = self.value
self.setValue(value)
def __exit__(self2, exc_type, exc_value, traceback):
self.setValue(self2._old_value)
return Context()
def addCallback(self, callback, *args, **kwargs):
"""Add a callback to the list. A callback is a function that will be
called when the value is changed. You can give this function as many
extra arguments as you wish, they will be passed to the callback."""
self._callbacks.append((callback, args, kwargs))
def removeCallback(self, callback):
"""Remove all occurences of this callbacks from the callback list."""
self._callbacks = [x for x in self._callbacks if x[0] is not callback]
def __str__(self):
return repr(self())
def serialize(self):
return encoder(str(self))[0].decode()
# We tried many, *many* different syntactic methods here, and this one was
# simply the best -- not very intrusive, easily overridden by subclasses,
# etc.
def __call__(self):
if _lastModified > self._lastModified:
if self._name in _cache:
self.set(_cache[self._name])
return self.value
class Boolean(Value):
"""Value must be either True or False (or On or Off)."""
errormsg = _('Value must be either True or False (or On or Off), not %r.')
def set(self, s):
try:
v = utils.str.toBool(s)
except ValueError:
if s.strip().lower() == 'toggle':
v = not self.value
else:
self.error(s)
self.setValue(v)
def setValue(self, v):
super(Boolean, self).setValue(bool(v))
class Integer(Value):
"""Value must be an integer."""
errormsg = _('Value must be an integer, not %r.')
def set(self, s):
try:
self.setValue(int(s))
except ValueError:
self.error(s)
class NonNegativeInteger(Integer):
"""Value must be a non-negative integer."""
errormsg = _('Value must be a non-negative integer, not %r.')
def setValue(self, v):
if v < 0:
self.error(v)
super(NonNegativeInteger, self).setValue(v)
class PositiveInteger(NonNegativeInteger):
"""Value must be positive (non-zero) integer."""
errormsg = _('Value must be positive (non-zero) integer, not %r.')
def setValue(self, v):
if not v:
self.error(v)
super(PositiveInteger, self).setValue(v)
class Float(Value):
"""Value must be a floating-point number."""
errormsg = _('Value must be a floating-point number, not %r.')
def set(self, s):
try:
self.setValue(float(s))
except ValueError:
self.error(s)
def setValue(self, v):
try:
super(Float, self).setValue(float(v))
except ValueError:
self.error(v)
class PositiveFloat(Float):
"""Value must be a floating-point number greater than zero."""
errormsg = _('Value must be a floating-point number greater than zero, '
'not %r.')
def setValue(self, v):
if v <= 0:
self.error(v)
else:
super(PositiveFloat, self).setValue(v)
class Probability(Float):
"""Value must be a floating point number in the range [0, 1]."""
errormsg = _('Value must be a floating point number in the range [0, 1], '
'not %r.')
def __init__(self, *args, **kwargs):
self.__parent = super(Probability, self)
self.__parent.__init__(*args, **kwargs)
def setValue(self, v):
if 0 <= v <= 1:
self.__parent.setValue(v)
else:
self.error(v)
class String(Value):
"""Value is not a valid Python string."""
    errormsg = _('Value must be a valid Python string, not %r.')
def set(self, s):
v = s
if not v:
v = '""'
elif v[0] != v[-1] or v[0] not in '\'"':
v = repr(v)
try:
v = utils.safeEval(v)
if not isinstance(v, minisix.string_types):
raise ValueError
self.setValue(v)
except ValueError: # This catches utils.safeEval(s) errors too.
self.error(s)
_printable = string.printable[:-4]
def _needsQuoting(self, s):
return any([x not in self._printable for x in s]) and s.strip() != s
def __str__(self):
s = self.value
if self._needsQuoting(s):
s = repr(s)
return s
class OnlySomeStrings(String):
validStrings = ()
def __init__(self, *args, **kwargs):
assert self.validStrings, 'There must be some valid strings. ' \
'This is a bug.'
self.__parent = super(OnlySomeStrings, self)
self.__parent.__init__(*args, **kwargs)
self.__doc__ = format(_('Valid values include %L.'),
list(map(repr, self.validStrings)))
self.errormsg = format(_('Valid values include %L, not %%r.'),
list(map(repr, self.validStrings)))
def help(self):
strings = [s for s in self.validStrings if s]
return format('%s Valid strings: %L.', self._help, strings)
def normalize(self, s):
lowered = s.lower()
L = list(map(str.lower, self.validStrings))
try:
i = L.index(lowered)
except ValueError:
return s # This is handled in setValue.
return self.validStrings[i]
def setValue(self, s):
v = self.normalize(s)
if s in self.validStrings:
self.__parent.setValue(v)
else:
self.error(v)
class NormalizedString(String):
def __init__(self, default, *args, **kwargs):
default = self.normalize(default)
self.__parent = super(NormalizedString, self)
self.__parent.__init__(default, *args, **kwargs)
self._showDefault = False
def normalize(self, s):
return utils.str.normalizeWhitespace(s.strip())
def set(self, s):
s = self.normalize(s)
self.__parent.set(s)
def setValue(self, s):
s = self.normalize(s)
self.__parent.setValue(s)
def serialize(self):
s = self.__parent.serialize()
prefixLen = len(self._name) + 2
lines = textwrap.wrap(s, width=76-prefixLen)
last = len(lines)-1
for (i, line) in enumerate(lines):
if i != 0:
line = ' '*prefixLen + line
if i != last:
line += '\\'
lines[i] = line
ret = os.linesep.join(lines)
return ret
class StringSurroundedBySpaces(String):
def setValue(self, v):
if v and v.lstrip() == v:
            v = ' ' + v
if v.rstrip() == v:
v += ' '
super(StringSurroundedBySpaces, self).setValue(v)
class StringWithSpaceOnRight(String):
def setValue(self, v):
if v and v.rstrip() == v:
v += ' '
super(StringWithSpaceOnRight, self).setValue(v)
class Regexp(Value):
"""Value must be a valid regular expression."""
errormsg = _('Value must be a valid regular expression, not %r.')
def __init__(self, *args, **kwargs):
kwargs['setDefault'] = False
self.sr = ''
self.value = None
self.__parent = super(Regexp, self)
self.__parent.__init__(*args, **kwargs)
def error(self, e):
s = 'Value must be a regexp of the form m/.../ or /.../. %s' % e
e = InvalidRegistryValue(s)
e.value = self
raise e
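    # For example (sketch): set('m/^foo.*bar$/') or set('/^foo.*bar$/') stores a
    # compiled pattern (via utils.str.perlReToPythonRe), while set('') resets the
    # value back to None.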
def set(self, s):
try:
if s:
self.setValue(utils.str.perlReToPythonRe(s), sr=s)
else:
self.setValue(None)
except ValueError as e:
self.error(e)
def setValue(self, v, sr=None):
if v is None:
self.sr = ''
self.__parent.setValue(None)
elif sr is not None:
self.sr = sr
self.__parent.setValue(v)
else:
raise InvalidRegistryValue('Can\'t setValue a regexp, there would be an inconsistency '\
'between the regexp and the recorded string value.')
def __str__(self):
self() # Gotta update if we've been reloaded.
return self.sr
class SeparatedListOf(Value):
List = list
Value = Value
sorted = False
def splitter(self, s):
"""Override this with a function that takes a string and returns a list
of strings."""
raise NotImplementedError
def joiner(self, L):
"""Override this to join the internal list for output."""
raise NotImplementedError
def set(self, s):
L = self.splitter(s)
for (i, s) in enumerate(L):
v = self.Value(s, '')
L[i] = v()
self.setValue(L)
def setValue(self, v):
super(SeparatedListOf, self).setValue(self.List(v))
def __str__(self):
values = self()
if self.sorted:
values = sorted(values)
if values:
return self.joiner(values)
else:
# We must return *something* here, otherwise down along the road we
# can run into issues showing users the value if they've disabled
# nick prefixes in any of the numerous ways possible. Since the
# config parser doesn't care about this space, we'll use it :)
return ' '
class SpaceSeparatedListOf(SeparatedListOf):
def splitter(self, s):
return s.split()
joiner = ' '.join
class SpaceSeparatedListOfStrings(SpaceSeparatedListOf):
Value = String
class SpaceSeparatedSetOfStrings(SpaceSeparatedListOfStrings):
List = set
class CommaSeparatedListOfStrings(SeparatedListOf):
Value = String
def splitter(self, s):
return re.split(r'\s*,\s*', s)
joiner = ', '.join
class CommaSeparatedSetOfStrings(SeparatedListOf):
List = set
Value = String
def splitter(self, s):
return re.split(r'\s*,\s*', s)
joiner = ', '.join
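# For example (sketch):
#   v = CommaSeparatedListOfStrings(['x'], 'some help text')
#   v.set('a, b,c')   # value becomes ['a', 'b', 'c']
#   str(v)            # -> 'a, b, c'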
class TemplatedString(String):
requiredTemplates = []
def __init__(self, *args, **kwargs):
assert self.requiredTemplates, \
'There must be some templates. This is a bug.'
self.__parent = super(String, self)
self.__parent.__init__(*args, **kwargs)
def setValue(self, v):
def hasTemplate(s):
return re.search(r'\$%s\b|\${%s}' % (s, s), v) is not None
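        # e.g. a required template name 'who' is satisfied by either '$who' or
        # '${who}' appearing somewhere in v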
if utils.iter.all(hasTemplate, self.requiredTemplates):
self.__parent.setValue(v)
else:
self.error(v)
class Json(String):
# Json-serializable data
def set(self, v):
self.setValue(json.loads(v))
def setValue(self, v):
super(Json, self).setValue(json.dumps(v))
def __call__(self):
return json.loads(super(Json, self).__call__())
class _Context:
def __init__(self, var):
self._var = var
def __enter__(self):
self._dict = self._var()
return self._dict
def __exit__(self, *args):
self._var.setValue(self._dict)
def editable(self):
"""Return an editable dict usable within a 'with' statement and
committed to the configuration variable at the end."""
return self._Context(self)
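# Illustrative sketch (hypothetical value name; assumes the variable's
# __call__ returns a dict, which is what editable() expects):
#
#   with someDictValue.editable() as d:
#       d['key'] = 'value'
#   # leaving the block calls setValue(d), committing the change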
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
|
"""
Copyright (C) 2005 - 2011 Eric Van Dewoestine
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
@author: Anton Sharonov
@author: Eric Van Dewoestine
"""
import socket
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
class Nailgun(object):
"""
Client used to communicate with a nailgun server.
"""
def __init__(self, **kwargs):
self.socket = None
self.port = kwargs.get('port')
self.keepAlive = int(kwargs.get('keepAlive', 0))
self.reconnectCounter = 0
def send(self, cmdline):
"""
Sends a complete command to the nailgun server. Handles connecting to the
server if not currently connected.
@param cmdline the command to send to the server, for instance
"-command ping".
@return tuple consisting of:
- retcode from server (0 for success, non-0 for failure)
- string response from server
"""
if not self.isConnected():
# with keepAlive do only first reconnect
if not self.keepAlive or self.reconnectCounter == 0:
(retcode, result) = self.reconnect()
if retcode:
return (retcode, result)
if not self.isConnected(): # Only for keepAlive
return (-1, "connect: ERROR - socket is not connected (nailgun.py)")
try: # outer try for pre python 2.5 support.
try:
for arg in self.parseArgs(cmdline):
self.sendChunk("A", arg)
if self.keepAlive:
self.sendChunk("K")
self.sendChunk("C", "org.eclim.command.Main")
(retcode, result) = self.processResponse()
if self.keepAlive and retcode:
# force reconnect on error (may not be necessary)
self.reconnect()
return (retcode, result)
except socket.error, ex:
args = ex.args
if len(args) > 1:
retcode, msg = args[0], args[1]
elif len(args):
retcode, msg = 1, args[0]
else:
retcode, msg = 1, 'No message'
return (retcode, 'send: %s' % msg)
finally:
if not self.keepAlive:
try:
self.close()
except:
# don't let an error on close mask any previous error.
pass
def connect(self, port=None):
"""
Establishes the connection to the specified port or, if not supplied,
to the default.
"""
port = port or self.port
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', port))
except socket.error, ex:
args = ex.args
if len(args) > 1:
retcode, msg = args[0], args[1]
elif len(args):
retcode, msg = 1, args[0]
else:
retcode, msg = 1, 'No message'
return (retcode, 'connect: %s' % msg)
self.socket = sock
return (0, '')
def reconnect(self):
if self.socket is not None:
self.close()
self.reconnectCounter += 1
return self.connect()
def close(self):
self.socket.close()
self.socket = None
def isConnected(self):
return self.socket is not None
def parseArgs(self, cmdline):
# FIXME: doesn't handle escaping of spaces/quotes yet (may never need to)
args = []
arg = ''
quote = ''
for char in cmdline:
if char == ' ' and not quote:
if arg:
args.append(arg)
arg = ''
elif char == '"' or char == "'":
if quote and char == quote:
quote = ''
elif not quote:
quote = char
else:
arg += char
else:
arg += char
if arg:
args.append(arg)
return args
def sendChunk(self, chunkType, text=''):
"""
Sends a nailgun 'chunk' to the server.
"""
#print("sendChunk " + chunkType + " " + text)
length = len(text)
str = "%c%c%c%c%c" % (
(length / (65536*256)) % 256,
(length / 65536) % 256,
(length / 256) % 256,
length % 256,
chunkType)
nbytes = self.socket.sendall(str)
nbytes = self.socket.sendall(text)
def processResponse(self):
result = StringIO()
exit = 0
exitFlag = 1 # expecting 1 times exit chunk
while exitFlag > 0:
answer = self.recvBlocked(5)
if len(answer) < 5:
print("error: socket closed unexpectedly\n")
return None
lenPayload = ord(answer[0]) * 65536 * 256 \
+ ord(answer[1]) * 65536 \
+ ord(answer[2]) * 256 \
+ ord(answer[3])
#print("lenPayload detected : %d" % lenPayload)
chunkType = answer[4]
if chunkType == "1":
# STDOUT
result.write(self.recvToFD(1, answer, lenPayload))
elif chunkType == "2":
# STDERR
result.write(self.recvToFD(2, answer, lenPayload))
elif chunkType == "X":
exitFlag = exitFlag - 1
exit = int(self.recvToFD(2, answer, lenPayload))
else:
print("error: unknown chunk type = %d\n" % chunkType)
exitFlag = 0
return [exit, result.getvalue()]
def recvBlocked(self, lenPayload):
"""
Receives until all data is read - necessary because a plain recv sometimes
returns fewer bytes than requested.
"""
received = ""
while (len(received) < lenPayload):
received = received + self.socket.recv(lenPayload - len(received))
return received
def recvToFD(self, destFD, buf, lenPayload):
"""
This function just mimics the function with the same name from the C
client. We don't really care which file descriptor the server tells us to
write to - STDOUT and STDERR are the same on VIM side (see eclim.bat,
"2>&1" at the end of command).
"""
received = self.recvBlocked(lenPayload)
return received
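# Illustrative usage sketch (assumes an eclim nailgun server is listening on
# the configured port; the port and command string are only examples):
#
#   client = Nailgun(port=9091)
#   (retcode, response) = client.send('-command ping')
#   # retcode is 0 on success; response holds the server's text output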
|
|
import time
import unittest
import six
if six.PY3:
from unittest import mock
else:
import mock
from engineio import packet
from engineio import payload
from engineio import socket
class TestSocket(unittest.TestCase):
def _get_mock_server(self):
mock_server = mock.Mock()
mock_server.ping_timeout = 0.1
mock_server.ping_interval = 0.1
try:
import queue
except ImportError:
import Queue as queue
import threading
mock_server.async = {'threading': threading,
'thread_class': 'Thread',
'queue': queue,
'queue_class': 'Queue',
'websocket': None}
return mock_server
def test_create(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
self.assertEqual(s.server, mock_server)
self.assertEqual(s.sid, 'sid')
self.assertFalse(s.upgraded)
self.assertFalse(s.closed)
self.assertTrue(hasattr(s.queue, 'get'))
self.assertTrue(hasattr(s.queue, 'put'))
self.assertTrue(hasattr(s.queue, 'task_done'))
self.assertTrue(hasattr(s.queue, 'join'))
def test_empty_poll(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
self.assertRaises(IOError, s.poll)
def test_poll(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
s.send(pkt1)
s.send(pkt2)
self.assertEqual(s.poll(), [pkt1, pkt2])
def test_ping_pong(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.receive(packet.Packet(packet.PING, data='abc'))
r = s.poll()
self.assertEqual(len(r), 1)
self.assertEqual(r[0].encode(), b'3abc')
def test_invalid_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
self.assertRaises(ValueError, s.receive, packet.Packet(packet.OPEN))
def test_timeout(self):
mock_server = self._get_mock_server()
mock_server.ping_interval = -0.1
s = socket.Socket(mock_server, 'sid')
s.last_ping = time.time() - 1
s.close = mock.MagicMock()
s.send('packet')
s.close.assert_called_once_with(wait=False, abort=True)
def test_polling_read(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
s.send(pkt1)
s.send(pkt2)
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
start_response = mock.MagicMock()
packets = s.handle_get_request(environ, start_response)
self.assertEqual(packets, [pkt1, pkt2])
def test_polling_read_error(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
start_response = mock.MagicMock()
self.assertRaises(IOError, s.handle_get_request, environ,
start_response)
def test_polling_write(self):
mock_server = self._get_mock_server()
mock_server.max_http_buffer_size = 1000
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
s = socket.Socket(mock_server, 'foo')
s.receive = mock.MagicMock()
environ = {'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p), 'wsgi.input': six.BytesIO(p)}
s.handle_post_request(environ)
self.assertEqual(s.receive.call_count, 2)
def test_polling_write_too_large(self):
mock_server = self._get_mock_server()
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
mock_server.max_http_buffer_size = len(p) - 1
s = socket.Socket(mock_server, 'foo')
s.receive = mock.MagicMock()
environ = {'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p), 'wsgi.input': six.BytesIO(p)}
self.assertRaises(ValueError, s.handle_post_request, environ)
def test_upgrade_handshake(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
s._upgrade_websocket = mock.MagicMock()
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo',
'HTTP_CONNECTION': 'Foo,Upgrade,Bar',
'HTTP_UPGRADE': 'websocket'}
start_response = mock.MagicMock()
s.handle_get_request(environ, start_response)
s._upgrade_websocket.assert_called_once_with(environ, start_response)
def test_upgrade(self):
mock_server = self._get_mock_server()
mock_server.async['websocket'] = mock.MagicMock()
mock_server.async['websocket_class'] = 'WebSocket'
mock_ws = mock.MagicMock()
mock_server.async['websocket'].WebSocket.configure_mock(
return_value=mock_ws)
s = socket.Socket(mock_server, 'sid')
environ = "foo"
start_response = "bar"
s._upgrade_websocket(environ, start_response)
mock_server.async['websocket'].WebSocket.assert_called_once_with(
s._websocket_handler)
mock_ws.assert_called_once_with(environ, start_response)
def test_upgrade_twice(self):
mock_server = self._get_mock_server()
mock_server.async['websocket'] = mock.MagicMock()
s = socket.Socket(mock_server, 'sid')
s.upgraded = True
environ = "foo"
start_response = "bar"
self.assertRaises(IOError, s._upgrade_websocket,
environ, start_response)
def test_upgrade_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.receive(packet.Packet(packet.UPGRADE))
r = s.poll()
self.assertEqual(len(r), 1)
self.assertEqual(r[0].encode(), b'6')
def test_upgrade_no_probe(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
ws = mock.MagicMock()
ws.wait.return_value = packet.Packet(packet.NOOP).encode(
always_bytes=False)
s._websocket_handler(ws)
self.assertFalse(s.upgraded)
def test_upgrade_no_upgrade_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
ws = mock.MagicMock()
probe = six.text_type('probe')
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.NOOP).encode(always_bytes=False)]
s._websocket_handler(ws)
ws.send.assert_called_once_with(packet.Packet(
packet.PONG, data=probe).encode(always_bytes=False))
self.assertEqual(s.queue.get().packet_type, packet.NOOP)
self.assertFalse(s.upgraded)
def test_websocket_read_write(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
s._websocket_handler(ws)
time.sleep(0)
self.assertTrue(s.upgraded)
self.assertEqual(mock_server._trigger_event.call_count, 2)
mock_server._trigger_event.assert_has_calls([
mock.call('message', 'sid', 'foo'),
mock.call('disconnect', 'sid')])
ws.send.assert_called_with('4bar')
def test_websocket_read_write_fail(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
RuntimeError]
ws.send.side_effect = [None, RuntimeError]
s._websocket_handler(ws)
time.sleep(0)
self.assertEqual(s.closed, True)
def test_websocket_ignore_invalid_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.OPEN).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
s._websocket_handler(ws)
time.sleep(0)
self.assertTrue(s.upgraded)
self.assertEqual(mock_server._trigger_event.call_count, 2)
mock_server._trigger_event.assert_has_calls([
mock.call('message', 'sid', foo),
mock.call('disconnect', 'sid')])
ws.send.assert_called_with('4bar')
def test_send_after_close(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.close(wait=False)
self.assertRaises(IOError, s.send, packet.Packet(packet.NOOP))
def test_close_and_wait(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.close(wait=True)
s.queue.join.assert_called_once_with()
def test_close_without_wait(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.close(wait=False)
self.assertEqual(s.queue.join.call_count, 0)
|
|
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from pandas.compat import range, text_type, zip
from pandas import compat
from functools import partial
import itertools
import re
import numpy as np
from pandas.core.dtypes.common import (
_ensure_platform_int,
is_list_like, is_bool_dtype,
needs_i8_conversion, is_sparse)
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.missing import notna
import pandas.core.dtypes.concat as _concat
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.sparse.api import SparseDataFrame, SparseSeries
from pandas.core.sparse.array import SparseArray
from pandas._libs.sparse import IntIndex
from pandas.core.categorical import Categorical, _factorize_from_iterable
from pandas.core.sorting import (get_group_index, get_compressed_ids,
compress_group_index, decons_obs_group_ids)
import pandas.core.algorithms as algos
from pandas._libs import algos as _algos, reshape as _reshape
from pandas.core.frame import _shared_docs
from pandas.util._decorators import Appender
from pandas.core.index import Index, MultiIndex, _get_na_value
class _Unstacker(object):
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
Examples
--------
>>> import pandas as pd
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, values, index, level=-1, value_columns=None,
fill_value=None):
self.is_categorical = None
self.is_sparse = is_sparse(values)
if values.ndim == 1:
if isinstance(values, Categorical):
self.is_categorical = values
values = np.array(values)
elif self.is_sparse:
# XXX: Makes SparseArray *dense*, but it's supposedly
# a single column at a time, so it's "doable"
values = values.values
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
self.fill_value = fill_value
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
self.index = index
if isinstance(self.index, MultiIndex):
if index._reference_duplicate_name(level):
msg = ("Ambiguous reference to {level}. The index "
"names are not unique.".format(level=level))
raise ValueError(msg)
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.labels[self.level] else 0
self.new_index_levels = list(index.levels)
self.new_index_names = list(index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self._make_sorted_values_labels()
self._make_selectors()
def _make_sorted_values_labels(self):
v = self.level
labs = list(self.index.labels)
levs = list(self.index.levels)
to_sort = labs[:v] + labs[v + 1:] + [labs[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = _algos.groupsort_indexer(comp_index, ngroups)[0]
indexer = _ensure_platform_int(indexer)
self.sorted_values = algos.take_nd(self.values, indexer, axis=0)
self.sorted_labels = [l.take(indexer) for l in to_sort]
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = _ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError('Index contains duplicate entries, '
'cannot reshape')
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
# TODO: find a better way than this masking business
values, value_mask = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
# filter out missing levels
if values.shape[1] > 0:
col_inds, obs_ids = compress_group_index(self.sorted_labels[-1])
# rare case, level values not observed
if len(obs_ids) < self.full_shape[1]:
inds = (value_mask.sum(0) > 0).nonzero()[0]
values = algos.take_nd(values, inds, axis=1)
columns = columns[inds]
# may need to coerce categoricals here
if self.is_categorical is not None:
categories = self.is_categorical.categories
ordered = self.is_categorical.ordered
values = [Categorical(values[:, i], categories=categories,
ordered=ordered)
for i in range(values.shape[-1])]
klass = SparseDataFrame if self.is_sparse else DataFrame
return klass(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
mask = self.mask
mask_all = mask.all()
# we can simply reshape if we don't have a mask
if mask_all and len(values):
new_values = (self.sorted_values
.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
# if our mask is all True, then we can use our existing dtype
if mask_all:
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = maybe_promote(values.dtype, self.fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
name = np.dtype(dtype).name
sorted_values = self.sorted_values
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
# e.g. ints -> floats
if needs_i8_conversion(values):
sorted_values = sorted_values.view('i8')
new_values = new_values.view('i8')
name = 'int64'
elif is_bool_dtype(values):
sorted_values = sorted_values.astype('object')
new_values = new_values.astype('object')
name = 'object'
else:
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
f = getattr(_reshape, "unstack_{name}".format(name=name))
f(sorted_values,
mask.view('u1'),
stride,
length,
width,
new_values,
new_mask.view('u1'))
# reconstruct dtype if needed
if needs_i8_conversion(values):
new_values = new_values.view(values.dtype)
return new_values, new_mask
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, _get_na_value(lev.dtype.type))
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level,)
new_names = self.value_columns.names + (self.removed_name,)
new_labels = [lab.take(propagator)
for lab in self.value_columns.labels]
else:
new_levels = [self.value_columns, self.removed_level]
new_names = [self.value_columns.name, self.removed_name]
new_labels = [propagator]
new_labels.append(np.tile(np.arange(stride) - self.lift, width))
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def get_new_index(self):
result_labels = [lab.take(self.compressor)
for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
lev, lab = self.new_index_levels[0], result_labels[0]
if (lab == -1).any():
lev = lev.insert(len(lev), _get_na_value(lev.dtype.type))
return lev.take(lab)
return MultiIndex(levels=self.new_index_levels, labels=result_labels,
names=self.new_index_names, verify_integrity=False)
def _unstack_multiple(data, clocs, fill_value=None):
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
clabels = [index.labels[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rlabels = [index.labels[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(clabels, shape, sort=False, xnull=False)
comp_ids, obs_ids = compress_group_index(group_index, sort=False)
recons_labels = decons_obs_group_ids(comp_ids, obs_ids, shape, clabels,
xnull=False)
if rlocs == []:
# Everything is in clocs, so the dummy df has a regular index
dummy_index = Index(obs_ids, name='__placeholder__')
else:
dummy_index = MultiIndex(levels=rlevels + [obs_ids],
labels=rlabels + [comp_ids],
names=rnames + ['__placeholder__'],
verify_integrity=False)
if isinstance(data, Series):
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__', fill_value=fill_value)
new_levels = clevels
new_names = cnames
new_labels = recons_labels
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val)
clocs = [v if i > v else v - 1 for v in clocs]
return result
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__', fill_value=fill_value)
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_labels = [unstcols.labels[0]]
for rec in recons_labels:
new_labels.append(rec.take(unstcols.labels[-1]))
new_columns = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def pivot(self, index=None, columns=None, values=None):
"""
See DataFrame.pivot
"""
if values is None:
cols = [columns] if index is None else [index, columns]
append = index is None
indexed = self.set_index(cols, append=append)
return indexed.unstack(columns)
else:
if index is None:
index = self.index
else:
index = self[index]
indexed = Series(self[values].values,
index=MultiIndex.from_arrays([index, self[columns]]))
return indexed.unstack(columns)
def pivot_simple(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : ndarray
Labels to use to make new frame's index
columns : ndarray
Labels to use to make new frame's columns
values : ndarray
Values to use for populating new frame's values
Notes
-----
Obviously, all 3 of the input arguments must have the same length
Returns
-------
DataFrame
See also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair
"""
if (len(index) != len(columns)) or (len(columns) != len(values)):
raise AssertionError('Length of index, columns, and values must be the'
' same')
if len(index) == 0:
return DataFrame(index=[])
hindex = MultiIndex.from_arrays([index, columns])
series = Series(values.ravel(), index=hindex)
series = series.sort_index(level=0)
return series.unstack()
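# Illustrative example for pivot_simple (a minimal sketch; values hypothetical):
#
#   >>> index = np.array(['one', 'one', 'two', 'two'])
#   >>> columns = np.array(['a', 'b', 'a', 'b'])
#   >>> values = np.array([1., 2., 3., 4.])
#   >>> pivot_simple(index, columns, values)
#          a    b
#   one  1.0  2.0
#   two  3.0  4.0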
def _slow_pivot(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : string or object
Column name to use to make new frame's index
columns : string or object
Column name to use to make new frame's columns
values : string or object
Column name to use for populating new frame's values
Could benefit from some Cython here.
"""
tree = {}
for i, (idx, col) in enumerate(zip(index, columns)):
if col not in tree:
tree[col] = {}
branch = tree[col]
branch[idx] = values[i]
return DataFrame(tree)
def unstack(obj, level, fill_value=None):
if isinstance(level, (tuple, list)):
if len(level) != 1:
# _unstack_multiple only handles MultiIndexes,
# and isn't needed for a single level
return _unstack_multiple(obj, level, fill_value=fill_value)
else:
level = level[0]
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value)
else:
return obj.T.stack(dropna=False)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
fill_value=fill_value)
return unstacker.get_result()
def _unstack_frame(obj, level, fill_value=None):
if obj._is_mixed_type:
unstacker = partial(_Unstacker, index=obj.index,
level=level, fill_value=fill_value)
blocks = obj._data.unstack(unstacker)
klass = type(obj)
return klass(blocks)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
value_columns=obj.columns,
fill_value=fill_value)
return unstacker.get_result()
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
codes, categories = _factorize_from_iterable(index)
return categories, codes
N, K = frame.shape
if isinstance(frame.columns, MultiIndex):
if frame.columns._reference_duplicate_name(level):
msg = ("Ambiguous reference to {level}. The column "
"names are not unique.".format(level=level))
raise ValueError(msg)
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_labels = [lab.repeat(K) for lab in frame.index.labels]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_labels.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
else:
levels, (ilab, clab) = zip(*map(factorize, (frame.index,
frame.columns)))
labels = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(levels=levels, labels=labels,
names=[frame.index.name, frame.columns.name],
verify_integrity=False)
new_values = frame.values.ravel()
if dropna:
mask = notna(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
klass = type(frame)._constructor_sliced
return klass(new_values, index=new_index)
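# Illustrative example for stack (a minimal sketch; values hypothetical):
#
#   >>> df = DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['x', 'y'])
#   >>> stack(df)
#   x  a    1
#      b    3
#   y  a    2
#      b    4
#   dtype: int64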
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError("level should contain all level names or all level "
"numbers, not a mixture of the two.")
return result
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something we can safely pass
to swaplevel:
We generally want to convert the level number into a level name, except
when columns do not have names, in which case we must leave as a level
number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sort_index(level=level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(zip(*[lev.take(lab)
for lev, lab in zip(this.columns.levels[:-1],
this.columns.labels[:-1])]))
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = unique_groups = this.columns.levels[0]
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_labels = sorted(set(this.columns.labels[-1]))
level_vals_used = level_vals[level_labels]
levsize = len(level_labels)
drop_cols = []
for key in unique_groups:
loc = this.columns.get_loc(key)
# can make more efficient?
# we almost always return a slice
# but if unsorted can get a boolean
# indexer
if not isinstance(loc, slice):
slice_len = len(loc)
else:
slice_len = loc.stop - loc.start
if slice_len == 0:
drop_cols.append(key)
continue
elif slice_len != levsize:
chunk = this.loc[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.labels[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_mixed_type:
value_slice = this.loc[:, this.columns[loc]].values
else:
value_slice = this.values[:, loc]
new_data[key] = value_slice.ravel()
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_labels = [lab.repeat(levsize) for lab in this.index.labels]
else:
new_levels = [this.index]
new_labels = [np.arange(N).repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(level_vals)
new_labels.append(np.tile(level_labels, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
result = DataFrame(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how='all')
return result
@Appender(_shared_docs['melt'] %
dict(caller='pd.melt(df, ',
versionadded="",
other='DataFrame.melt'))
def melt(frame, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
# TODO: what about the existing index?
if id_vars is not None:
if not is_list_like(id_vars):
id_vars = [id_vars]
elif (isinstance(frame.columns, MultiIndex) and
not isinstance(id_vars, list)):
raise ValueError('id_vars must be a list of tuples when columns'
' are a MultiIndex')
else:
id_vars = list(id_vars)
else:
id_vars = []
if value_vars is not None:
if not is_list_like(value_vars):
value_vars = [value_vars]
elif (isinstance(frame.columns, MultiIndex) and
not isinstance(value_vars, list)):
raise ValueError('value_vars must be a list of tuples when'
' columns are a MultiIndex')
else:
value_vars = list(value_vars)
frame = frame.loc[:, id_vars + value_vars]
else:
frame = frame.copy()
if col_level is not None: # allow list or other?
# frame is a copy
frame.columns = frame.columns.get_level_values(col_level)
if var_name is None:
if isinstance(frame.columns, MultiIndex):
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
var_name = ['variable_{i}'.format(i=i)
for i in range(len(frame.columns.names))]
else:
var_name = [frame.columns.name if frame.columns.name is not None
else 'variable']
if isinstance(var_name, compat.string_types):
var_name = [var_name]
N, K = frame.shape
K -= len(id_vars)
mdata = {}
for col in id_vars:
mdata[col] = np.tile(frame.pop(col).values, K)
mcolumns = id_vars + var_name + [value_name]
mdata[value_name] = frame.values.ravel('F')
for i, col in enumerate(var_name):
# asanyarray will keep the columns as an Index
mdata[col] = np.asanyarray(frame.columns
._get_level_values(i)).repeat(N)
return DataFrame(mdata, columns=mcolumns)
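# Illustrative example for melt (a minimal sketch; values hypothetical):
#
#   >>> df = DataFrame({'A': ['a', 'b'], 'B': [1, 2]})
#   >>> melt(df, id_vars=['A'], value_vars=['B'])
#      A variable  value
#   0  a        B      1
#   1  b        B      2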
def lreshape(data, groups, dropna=True, label=None):
"""
Reshape long-format data to wide. Generalized inverse of DataFrame.pivot
Parameters
----------
data : DataFrame
groups : dict
{new_name : list_of_columns}
dropna : boolean, default True
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526],
... 'team': ['Red Sox', 'Yankees'],
... 'year1': [2007, 2007], 'year2': [2008, 2008]})
>>> data
hr1 hr2 team year1 year2
0 514 545 Red Sox 2007 2008
1 573 526 Yankees 2007 2008
>>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']})
team year hr
0 Red Sox 2007 514
1 Yankees 2007 573
2 Red Sox 2008 545
3 Yankees 2008 526
Returns
-------
reshaped : DataFrame
"""
if isinstance(groups, dict):
keys = list(groups.keys())
values = list(groups.values())
else:
keys, values = zip(*groups)
all_cols = list(set.union(*[set(x) for x in values]))
id_cols = list(data.columns.difference(all_cols))
K = len(values[0])
for seq in values:
if len(seq) != K:
raise ValueError('All column lists must be same length')
mdata = {}
pivot_cols = []
for target, names in zip(keys, values):
to_concat = [data[col].values for col in names]
mdata[target] = _concat._concat_compat(to_concat)
pivot_cols.append(target)
for col in id_cols:
mdata[col] = np.tile(data[col].values, K)
if dropna:
mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool)
for c in pivot_cols:
mask &= notna(mdata[c])
if not mask.all():
mdata = dict((k, v[mask]) for k, v in compat.iteritems(mdata))
return DataFrame(mdata, columns=id_cols + pivot_cols)
def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'):
r"""
Wide panel to long format. Less flexible but more user-friendly than melt.
With stubnames ['A', 'B'], this function expects to find one or more
groups of columns with format Asuffix1, Asuffix2,..., Bsuffix1, Bsuffix2,...
You specify what you want to call this suffix in the resulting long format
with `j` (for example `j='year'`)
Each row of these wide variables is assumed to be uniquely identified by
`i` (can be a single column name or a list of column names)
All remaining variables in the data frame are left intact.
Parameters
----------
df : DataFrame
The wide-format DataFrame
stubnames : str or list-like
The stub name(s). The wide format variables are assumed to
start with the stub names.
i : str or list-like
Column(s) to use as id variable(s)
j : str
The name of the subobservation variable. What you wish to name your
suffix in the long format.
sep : str, default ""
A character indicating the separation of the variable names
in the wide format, to be stripped from the names in the long format.
For example, if your column names are A-suffix1, A-suffix2, you
can strip the hyphen by specifying `sep='-'`
.. versionadded:: 0.20.0
suffix : str, default '\\d+'
A regular expression capturing the wanted suffixes. '\\d+' captures
numeric suffixes. Suffixes with no numbers could be specified with the
negated character class '\\D+'. You can also further disambiguate
suffixes, for example, if your wide variables are of the form
Aone, Btwo,.., and you have an unrelated column Arating, you can
ignore the last one by specifying `suffix='(!?one|two)'`
.. versionadded:: 0.20.0
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable, with new index
(i, j)
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> pd.wide_to_long(df, ["A", "B"], i="id", j="year")
... # doctest: +NORMALIZE_WHITESPACE
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
With multiple id columns
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
birth famid ht1 ht2
0 1 1 2.8 3.4
1 2 1 2.9 3.8
2 3 1 2.2 2.9
3 1 2 2.0 3.2
4 2 2 1.8 2.8
5 3 2 1.9 2.4
6 1 3 2.2 3.3
7 2 3 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 1 2.8
2 3.4
2 1 2.9
2 3.8
3 1 2.2
2 2.9
2 1 1 2.0
2 3.2
2 1 1.8
2 2.8
3 1 1.9
2 2.4
3 1 1 2.2
2 3.3
2 1 2.3
2 3.4
3 1 2.1
2 2.9
Going from long back to wide just takes some creative use of `unstack`
>>> w = l.reset_index().set_index(['famid', 'birth', 'age']).unstack()
>>> w.columns = pd.Index(w.columns).str.join('')
>>> w.reset_index()
famid birth ht1 ht2
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
Less wieldy column names are also handled
>>> np.random.seed(0)
>>> df = pd.DataFrame({'A(quarterly)-2010': np.random.rand(3),
... 'A(quarterly)-2011': np.random.rand(3),
... 'B(quarterly)-2010': np.random.rand(3),
... 'B(quarterly)-2011': np.random.rand(3),
... 'X' : np.random.randint(3, size=3)})
>>> df['id'] = df.index
>>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
A(quarterly)-2010 A(quarterly)-2011 B(quarterly)-2010 ...
0 0.548814 0.544883 0.437587 ...
1 0.715189 0.423655 0.891773 ...
2 0.602763 0.645894 0.963663 ...
X id
0 0 0
1 1 1
2 1 2
>>> pd.wide_to_long(df, ['A(quarterly)', 'B(quarterly)'], i='id',
... j='year', sep='-')
... # doctest: +NORMALIZE_WHITESPACE
X A(quarterly) B(quarterly)
id year
0 2010 0 0.548814 0.437587
1 2010 1 0.715189 0.891773
2 2010 1 0.602763 0.963663
0 2011 0 0.544883 0.383442
1 2011 1 0.423655 0.791725
2 2011 1 0.645894 0.528895
If we have many columns, we could also use a regex to find our
stubnames and pass that list on to wide_to_long
>>> stubnames = sorted(
... set([match[0] for match in df.columns.str.findall(
... r'[A-B]\(.*\)').values if match != [] ])
... )
>>> list(stubnames)
['A(quarterly)', 'B(quarterly)']
Notes
-----
All extra variables are left untouched. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
in a typical case.
"""
def get_var_names(df, stub, sep, suffix):
regex = "^{stub}{sep}{suffix}".format(
stub=re.escape(stub), sep=re.escape(sep), suffix=suffix)
return df.filter(regex=regex).columns.tolist()
def melt_stub(df, stub, i, j, value_vars, sep):
newdf = melt(df, id_vars=i, value_vars=value_vars,
value_name=stub.rstrip(sep), var_name=j)
newdf[j] = Categorical(newdf[j])
newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "")
return newdf.set_index(i + [j])
if any(map(lambda s: s in df.columns.tolist(), stubnames)):
raise ValueError("stubname can't be identical to a column name")
if not is_list_like(stubnames):
stubnames = [stubnames]
else:
stubnames = list(stubnames)
if not is_list_like(i):
i = [i]
else:
i = list(i)
if df[i].duplicated().any():
raise ValueError("the id variables need to uniquely identify each row")
value_vars = list(map(lambda stub:
get_var_names(df, stub, sep, suffix), stubnames))
value_vars_flattened = [e for sublist in value_vars for e in sublist]
id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened))
melted = []
for s, v in zip(stubnames, value_vars):
melted.append(melt_stub(df, s, i, j, v, sep))
melted = melted[0].join(melted[1:], how='outer')
if len(i) == 1:
new = df[id_vars].set_index(i).join(melted)
return new
new = df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j])
return new
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False, drop_first=False):
"""
Convert categorical variable into dummy/indicator variables
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy columns should be sparse or not. Returns
SparseDataFrame if `data` is a Series or if all columns are included.
Otherwise returns a DataFrame with some SparseBlocks.
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
.. versionadded:: 0.18.0
Returns
-------
dummies : DataFrame or SparseDataFrame
Examples
--------
>>> import pandas as pd
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
See Also
--------
Series.str.get_dummies
"""
from pandas.core.reshape.concat import concat
from itertools import cycle
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
columns_to_encode = data.select_dtypes(
include=['object', 'category']).columns
else:
columns_to_encode = columns
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
len_msg = ("Length of '{name}' ({len_item}) did not match the "
"length of the columns being encoded ({len_enc}).")
if is_list_like(item):
if not len(item) == len(columns_to_encode):
len_msg = len_msg.format(name=name, len_item=len(item),
len_enc=len(columns_to_encode))
raise ValueError(len_msg)
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in columns_to_encode]
if prefix is None:
prefix = columns_to_encode
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in columns_to_encode]
if set(columns_to_encode) == set(data.columns):
with_dummies = []
else:
with_dummies = [data.drop(columns_to_encode, axis=1)]
for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep):
dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse,
drop_first=drop_first)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
sparse=sparse, drop_first=drop_first)
return result
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False,
sparse=False, drop_first=False):
# Series avoids inconsistent NaN handling
codes, levels = _factorize_from_iterable(Series(data))
def get_empty_Frame(data, sparse):
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
if not sparse:
return DataFrame(index=index)
else:
return SparseDataFrame(index=index, default_fill_value=0)
# if all NaN
if not dummy_na and len(levels) == 0:
return get_empty_Frame(data, sparse)
codes = codes.copy()
if dummy_na:
codes[codes == -1] = len(levels)
levels = np.append(levels, np.nan)
# if dummy_na, we just fake a nan level. drop_first will drop it again
if drop_first and len(levels) == 1:
return get_empty_Frame(data, sparse)
number_of_cols = len(levels)
if prefix is not None:
dummy_strs = [u'{prefix}{sep}{level}' if isinstance(v, text_type)
else '{prefix}{sep}{level}' for v in levels]
dummy_cols = [dummy_str.format(prefix=prefix, sep=prefix_sep, level=v)
for dummy_str, v in zip(dummy_strs, levels)]
else:
dummy_cols = levels
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
sparse_series = {}
N = len(data)
sp_indices = [[] for _ in range(len(dummy_cols))]
for ndx, code in enumerate(codes):
if code == -1:
# Blank entries if not dummy_na and code == -1, #GH4446
continue
sp_indices[code].append(ndx)
if drop_first:
# remove first categorical level to avoid perfect collinearity
# GH12042
sp_indices = sp_indices[1:]
dummy_cols = dummy_cols[1:]
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(np.ones(len(ixs), dtype=np.uint8),
sparse_index=IntIndex(N, ixs), fill_value=0,
dtype=np.uint8)
sparse_series[col] = SparseSeries(data=sarr, index=index)
out = SparseDataFrame(sparse_series, index=index, columns=dummy_cols,
default_fill_value=0,
dtype=np.uint8)
return out
else:
dummy_mat = np.eye(number_of_cols, dtype=np.uint8).take(codes, axis=0)
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
def make_axis_dummies(frame, axis='minor', transform=None):
"""
Construct 1-0 dummy variables corresponding to designated axis
labels
Parameters
----------
frame : DataFrame
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
get "day of week" dummies in a time series regression
you might call::
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
-------
dummies : DataFrame
Column names taken from chosen axis
"""
numbers = {'major': 0, 'minor': 1}
num = numbers.get(axis, axis)
items = frame.index.levels[num]
labels = frame.index.labels[num]
if transform is not None:
mapped_items = items.map(transform)
labels, items = _factorize_from_iterable(mapped_items.take(labels))
values = np.eye(len(items), dtype=float)
values = values.take(labels, axis=0)
return DataFrame(values, columns=items, index=frame.index)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=arguments-differ
""" losses for training neural networks """
from __future__ import absolute_import
__all__ = ['Loss', 'L2Loss', 'L1Loss',
'SigmoidBinaryCrossEntropyLoss', 'SigmoidBCELoss',
'SoftmaxCrossEntropyLoss', 'SoftmaxCELoss',
'KLDivLoss', 'CTCLoss', 'HuberLoss', 'HingeLoss',
'SquaredHingeLoss', 'LogisticLoss', 'TripletLoss', 'PoissonNLLLoss', 'CosineEmbeddingLoss']
import numpy as np
from .. import ndarray
from ..base import numeric_types
from .block import HybridBlock
def _apply_weighting(F, loss, weight=None, sample_weight=None):
"""Apply weighting to loss.
Parameters
----------
loss : Symbol
The loss to be weighted.
weight : float or None
Global scalar weight for loss.
sample_weight : Symbol or None
Per sample weighting. Must be broadcastable to
the same shape as loss. For example, if loss has
shape (64, 10) and you want to weight each sample
in the batch separately, `sample_weight` should have
shape (64, 1).
Returns
-------
loss : Symbol
Weighted loss
"""
if sample_weight is not None:
loss = F.broadcast_mul(loss, sample_weight)
if weight is not None:
assert isinstance(weight, numeric_types), "weight must be a number"
loss = loss * weight
return loss
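# Illustrative note: with a loss of shape (64, 10) and a sample_weight of shape
# (64, 1), broadcast_mul scales every element in a sample's row by that
# sample's weight before the optional global scalar `weight` is applied.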
def _reshape_like(F, x, y):
"""Reshapes x to the same shape as y."""
return x.reshape(y.shape) if F is ndarray else F.reshape_like(x, y)
class Loss(HybridBlock):
"""Base class for loss.
Parameters
----------
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
"""
def __init__(self, weight, batch_axis, **kwargs):
super(Loss, self).__init__(**kwargs)
self._weight = weight
self._batch_axis = batch_axis
def __repr__(self):
s = '{name}(batch_axis={_batch_axis}, w={_weight})'
return s.format(name=self.__class__.__name__, **self.__dict__)
def hybrid_forward(self, F, x, *args, **kwargs):
"""Overrides to construct symbolic graph for this `Block`.
Parameters
----------
x : Symbol or NDArray
The first input tensor.
*args : list of Symbol or list of NDArray
Additional input tensors.
"""
# pylint: disable= invalid-name
raise NotImplementedError
class L2Loss(Loss):
r"""Calculates the mean squared error between `label` and `pred`.
.. math:: L = \frac{1}{2} \sum_i \vert {label}_i - {pred}_i \vert^2.
`label` and `pred` can have arbitrary shape as long as they have the same
number of elements.
Parameters
----------
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape
- **label**: target tensor with the same size as pred.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
- **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, weight=1., batch_axis=0, **kwargs):
super(L2Loss, self).__init__(weight, batch_axis, **kwargs)
def hybrid_forward(self, F, pred, label, sample_weight=None):
label = _reshape_like(F, label, pred)
loss = F.square(label - pred)
loss = _apply_weighting(F, loss, self._weight / 2, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
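# Illustrative usage sketch (assumes mxnet.ndarray is imported as `nd`; the
# numbers are only an example):
#
#   loss = L2Loss()
#   pred = nd.array([[1.0, 2.0]])
#   label = nd.array([[1.5, 1.5]])
#   loss(pred, label)   # 0.5 * mean((label - pred)**2) per sample -> [0.125]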
class L1Loss(Loss):
r"""Calculates the mean absolute error between `label` and `pred`.
.. math:: L = \sum_i \vert {label}_i - {pred}_i \vert.
`label` and `pred` can have arbitrary shape as long as they have the same
number of elements.
Parameters
----------
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape
- **label**: target tensor with the same size as pred.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
- **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, weight=None, batch_axis=0, **kwargs):
super(L1Loss, self).__init__(weight, batch_axis, **kwargs)
def hybrid_forward(self, F, pred, label, sample_weight=None):
label = _reshape_like(F, label, pred)
loss = F.abs(label - pred)
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
class SigmoidBinaryCrossEntropyLoss(Loss):
r"""The cross-entropy loss for binary classification. (alias: SigmoidBCELoss)
BCE loss is useful when training logistic regression. If `from_sigmoid`
is False (default), this loss computes:
.. math::
prob = \frac{1}{1 + \exp(-{pred})}
L = - \sum_i {label}_i * \log({prob}_i) * pos\_weight +
(1 - {label}_i) * \log(1 - {prob}_i)
If `from_sigmoid` is True, this loss computes:
.. math::
L = - \sum_i {label}_i * \log({pred}_i) * pos\_weight +
(1 - {label}_i) * \log(1 - {pred}_i)
A tensor `pos_weight > 1` decreases the false negative count, hence increasing
the recall.
Conversely setting `pos_weight < 1` decreases the false positive count and
increases the precision.
`pred` and `label` can have arbitrary shape as long as they have the same
number of elements.
Parameters
----------
from_sigmoid : bool, default is `False`
        Whether the input is from the output of sigmoid. Setting this to False makes
        the loss compute the sigmoid and BCE together, which is more numerically
        stable through the log-sum-exp trick.
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape
- **label**: target tensor with values in range `[0, 1]`. Must have the
same size as `pred`.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
- **pos_weight**: a weighting tensor of positive examples. Must be a vector with length
          equal to the number of classes. For example, if pred has shape (64, 10),
pos_weight should have shape (1, 10).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, from_sigmoid=False, weight=None, batch_axis=0, **kwargs):
super(SigmoidBinaryCrossEntropyLoss, self).__init__(
weight, batch_axis, **kwargs)
self._from_sigmoid = from_sigmoid
def hybrid_forward(self, F, pred, label, sample_weight=None, pos_weight=None):
label = _reshape_like(F, label, pred)
if not self._from_sigmoid:
if pos_weight is None:
# We use the stable formula: max(x, 0) - x * z + log(1 + exp(-abs(x)))
loss = F.relu(pred) - pred * label + \
F.Activation(-F.abs(pred), act_type='softrelu')
else:
# We use the stable formula: x - x * z + (1 + z * pos_weight - z) * \
# (log(1 + exp(-abs(x))) + max(-x, 0))
log_weight = 1 + F.broadcast_mul(pos_weight - 1, label)
loss = pred - pred * label + log_weight * \
(F.Activation(-F.abs(pred), act_type='softrelu') + F.relu(-pred))
else:
eps = 1e-12
if pos_weight is None:
loss = -(F.log(pred + eps) * label
+ F.log(1. - pred + eps) * (1. - label))
else:
loss = -(F.broadcast_mul(F.log(pred + eps) * label, pos_weight)
+ F.log(1. - pred + eps) * (1. - label))
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
SigmoidBCELoss = SigmoidBinaryCrossEntropyLoss
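# --- Illustrative usage sketch (not part of the original module) ---
# SigmoidBCELoss applied to raw logits (the default from_sigmoid=False path),
# assuming a working MXNet installation; labels are per-element targets in [0, 1].
def _example_sigmoid_bce():
    import mxnet as mx
    loss_fn = SigmoidBCELoss()
    pred = mx.nd.array([[1.2, -0.7], [-2.0, 0.3]])   # raw scores, sigmoid applied inside
    label = mx.nd.array([[1.0, 0.0], [0.0, 1.0]])
    return loss_fn(pred, label)                      # shape (2,)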
class SoftmaxCrossEntropyLoss(Loss):
r"""Computes the softmax cross entropy loss. (alias: SoftmaxCELoss)
If `sparse_label` is `True` (default), label should contain integer
category indicators:
.. math::
\DeclareMathOperator{softmax}{softmax}
p = \softmax({pred})
L = -\sum_i \log p_{i,{label}_i}
`label`'s shape should be `pred`'s shape with the `axis` dimension removed.
i.e. for `pred` with shape (1,2,3,4) and `axis = 2`, `label`'s shape should
be (1,2,4).
If `sparse_label` is `False`, `label` should contain probability distribution
and `label`'s shape should be the same with `pred`:
.. math::
p = \softmax({pred})
L = -\sum_i \sum_j {label}_j \log p_{ij}
Parameters
----------
axis : int, default -1
The axis to sum over when computing softmax and entropy.
sparse_label : bool, default True
Whether label is an integer array instead of probability distribution.
from_logits : bool, default False
Whether input is a log probability (usually from log_softmax) instead
of unnormalized numbers.
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: the prediction tensor, where the `batch_axis` dimension
ranges over batch size and `axis` dimension ranges over the number
of classes.
- **label**: the truth tensor. When `sparse_label` is True, `label`'s
shape should be `pred`'s shape with the `axis` dimension removed.
i.e. for `pred` with shape (1,2,3,4) and `axis = 2`, `label`'s shape
should be (1,2,4) and values should be integers between 0 and 2. If
`sparse_label` is False, `label`'s shape must be the same as `pred`
and values should be floats in the range `[0, 1]`.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as label. For example, if label has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, axis=-1, sparse_label=True, from_logits=False, weight=None,
batch_axis=0, **kwargs):
super(SoftmaxCrossEntropyLoss, self).__init__(
weight, batch_axis, **kwargs)
self._axis = axis
self._sparse_label = sparse_label
self._from_logits = from_logits
def hybrid_forward(self, F, pred, label, sample_weight=None):
if not self._from_logits:
pred = F.log_softmax(pred, self._axis)
if self._sparse_label:
loss = -F.pick(pred, label, axis=self._axis, keepdims=True)
else:
label = _reshape_like(F, label, pred)
loss = -F.sum(pred * label, axis=self._axis, keepdims=True)
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
SoftmaxCELoss = SoftmaxCrossEntropyLoss
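# --- Illustrative usage sketch (not part of the original module) ---
# SoftmaxCELoss with sparse integer labels (the default), assuming a working
# MXNet installation; each label entry is the index of the true class.
def _example_softmax_ce():
    import mxnet as mx
    loss_fn = SoftmaxCELoss()
    pred = mx.nd.array([[2.0, 0.5, 0.1], [0.3, 0.2, 2.5]])   # unnormalized scores
    label = mx.nd.array([0, 2])                              # class indices
    return loss_fn(pred, label)                              # shape (2,)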
class KLDivLoss(Loss):
r"""The Kullback-Leibler divergence loss.
    KL divergence measures the distance between continuous distributions. It
can be used to minimize information loss when approximating a distribution.
If `from_logits` is True (default), loss is defined as:
.. math::
L = \sum_i {label}_i * \big[\log({label}_i) - {pred}_i\big]
If `from_logits` is False, loss is defined as:
.. math::
\DeclareMathOperator{softmax}{softmax}
prob = \softmax({pred})
        L = \sum_i {label}_i * \big[\log({label}_i) - \log({prob}_i)\big]
`label` and `pred` can have arbitrary shape as long as they have the same
number of elements.
Parameters
----------
from_logits : bool, default is `True`
Whether the input is log probability (usually from log_softmax) instead
of unnormalized numbers.
axis : int, default -1
        The dimension along which to compute softmax. Only used when `from_logits`
is False.
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape. If `from_logits` is
True, `pred` should be log probabilities. Otherwise, it should be
unnormalized predictions, i.e. from a dense layer.
- **label**: truth tensor with values in range `(0, 1)`. Must have
the same size as `pred`.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
References
----------
`Kullback-Leibler divergence
<https://en.wikipedia.org/wiki/Kullback-Leibler_divergence>`_
"""
def __init__(self, from_logits=True, axis=-1, weight=None, batch_axis=0,
**kwargs):
super(KLDivLoss, self).__init__(weight, batch_axis, **kwargs)
self._from_logits = from_logits
self._axis = axis
def hybrid_forward(self, F, pred, label, sample_weight=None):
if not self._from_logits:
pred = F.log_softmax(pred, self._axis)
loss = label * (F.log(label + 1e-12) - pred)
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
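# --- Illustrative usage sketch (not part of the original module) ---
# KLDivLoss with from_logits=False, so the softmax over `pred` is taken
# internally; `label` must already be a probability distribution. Assumes a
# working MXNet installation.
def _example_kl_div():
    import mxnet as mx
    loss_fn = KLDivLoss(from_logits=False)
    pred = mx.nd.array([[1.0, 2.0, 3.0]])
    label = mx.nd.array([[0.2, 0.3, 0.5]])
    return loss_fn(pred, label)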
class CTCLoss(Loss):
r"""Connectionist Temporal Classification Loss.
Parameters
----------
layout : str, default 'NTC'
Layout of prediction tensor. 'N', 'T', 'C' stands for batch size,
sequence length, and alphabet_size respectively.
label_layout : str, default 'NT'
Layout of the labels. 'N', 'T' stands for batch size, and sequence
length respectively.
weight : float or None
Global scalar weight for loss.
Inputs:
- **pred**: unnormalized prediction tensor (before softmax).
Its shape depends on `layout`. If `layout` is 'TNC', pred
should have shape `(sequence_length, batch_size, alphabet_size)`.
Note that in the last dimension, index `alphabet_size-1` is reserved
for internal use as blank label. So `alphabet_size` is one plus the
actual alphabet size.
- **label**: zero-based label tensor. Its shape depends on `label_layout`.
If `label_layout` is 'TN', `label` should have shape
`(label_sequence_length, batch_size)`.
- **pred_lengths**: optional (default None), used for specifying the
length of each entry when different `pred` entries in the same batch
have different lengths. `pred_lengths` should have shape `(batch_size,)`.
- **label_lengths**: optional (default None), used for specifying the
length of each entry when different `label` entries in the same batch
have different lengths. `label_lengths` should have shape `(batch_size,)`.
Outputs:
- **loss**: output loss has shape `(batch_size,)`.
**Example**: suppose the vocabulary is `[a, b, c]`, and in one batch we
have three sequences 'ba', 'cbb', and 'abac'. We can index the labels as
`{'a': 0, 'b': 1, 'c': 2, blank: 3}`. Then `alphabet_size` should be 4,
where label 3 is reserved for internal use by `CTCLoss`. We then need to
pad each sequence with `-1` to make a rectangular `label` tensor::
[[1, 0, -1, -1],
[2, 1, 1, -1],
[0, 1, 0, 2]]
References
----------
`Connectionist Temporal Classification: Labelling Unsegmented
Sequence Data with Recurrent Neural Networks
<http://www.cs.toronto.edu/~graves/icml_2006.pdf>`_
"""
def __init__(self, layout='NTC', label_layout='NT', weight=None, **kwargs):
assert layout in ['NTC', 'TNC'],\
"Only 'NTC' and 'TNC' layouts for pred are supported. Got: %s" % layout
assert label_layout in ['NT', 'TN'],\
"Only 'NT' and 'TN' layouts for label are supported. Got: %s" % label_layout
self._layout = layout
self._label_layout = label_layout
batch_axis = label_layout.find('N')
super(CTCLoss, self).__init__(weight, batch_axis, **kwargs)
def hybrid_forward(self, F, pred, label,
pred_lengths=None, label_lengths=None, sample_weight=None):
if self._layout == 'NTC':
pred = F.swapaxes(pred, 0, 1)
if self._batch_axis == 1:
label = F.swapaxes(label, 0, 1)
loss = F.CTCLoss(pred, label, pred_lengths, label_lengths,
use_data_lengths=pred_lengths is not None,
use_label_lengths=label_lengths is not None,
blank_label='last')
return _apply_weighting(F, loss, self._weight, sample_weight)
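# --- Illustrative usage sketch (not part of the original module) ---
# CTCLoss on the padded label tensor from the docstring example ('ba', 'cbb',
# 'abac' over {'a': 0, 'b': 1, 'c': 2}, blank = 3). Assumes a working MXNet
# installation; the predictions here are just random scores over alphabet_size=4.
def _example_ctc_loss():
    import mxnet as mx
    loss_fn = CTCLoss(layout='NTC', label_layout='NT')
    pred = mx.nd.random.uniform(shape=(3, 4, 4))   # (batch, seq_len, alphabet_size)
    label = mx.nd.array([[1, 0, -1, -1],
                         [2, 1, 1, -1],
                         [0, 1, 0, 2]])
    return loss_fn(pred, label)                    # shape (3,)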
class HuberLoss(Loss):
r"""Calculates smoothed L1 loss that is equal to L1 loss if absolute error
exceeds rho but is equal to L2 loss otherwise. Also called SmoothedL1 loss.
.. math::
L = \sum_i \begin{cases} \frac{1}{2 {rho}} ({label}_i - {pred}_i)^2 &
\text{ if } |{label}_i - {pred}_i| < {rho} \\
|{label}_i - {pred}_i| - \frac{{rho}}{2} &
\text{ otherwise }
\end{cases}
`label` and `pred` can have arbitrary shape as long as they have the same
number of elements.
Parameters
----------
rho : float, default 1
Threshold for trimmed mean estimator.
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape
- **label**: target tensor with the same size as pred.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, rho=1, weight=None, batch_axis=0, **kwargs):
super(HuberLoss, self).__init__(weight, batch_axis, **kwargs)
self._rho = rho
def hybrid_forward(self, F, pred, label, sample_weight=None):
label = _reshape_like(F, label, pred)
loss = F.abs(label - pred)
loss = F.where(loss > self._rho, loss - 0.5 * self._rho,
(0.5 / self._rho) * F.square(loss))
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
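# --- Illustrative usage sketch (not part of the original module) ---
# HuberLoss switches from a quadratic to a linear penalty once the absolute
# error exceeds rho; with rho=1 the first error below (0.5) stays quadratic
# and the second (3.0) is treated linearly. Assumes a working MXNet installation.
def _example_huber_loss():
    import mxnet as mx
    loss_fn = HuberLoss(rho=1)
    pred = mx.nd.array([[0.5], [3.0]])
    label = mx.nd.array([[0.0], [0.0]])
    return loss_fn(pred, label)   # approximately [0.125, 2.5]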
class HingeLoss(Loss):
r"""Calculates the hinge loss function often used in SVMs:
.. math::
L = \sum_i max(0, {margin} - {pred}_i \cdot {label}_i)
where `pred` is the classifier prediction and `label` is the target tensor
containing values -1 or 1. `label` and `pred` must have the same number of
elements.
Parameters
----------
margin : float
The margin in hinge loss. Defaults to 1.0
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape.
- **label**: truth tensor with values -1 or 1. Must have the same size
as pred.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, margin=1, weight=None, batch_axis=0, **kwargs):
super(HingeLoss, self).__init__(weight, batch_axis, **kwargs)
self._margin = margin
def hybrid_forward(self, F, pred, label, sample_weight=None):
label = _reshape_like(F, label, pred)
loss = F.relu(self._margin - pred * label)
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
class SquaredHingeLoss(Loss):
r"""Calculates the soft-margin loss function used in SVMs:
.. math::
L = \sum_i max(0, {margin} - {pred}_i \cdot {label}_i)^2
where `pred` is the classifier prediction and `label` is the target tensor
containing values -1 or 1. `label` and `pred` can have arbitrary shape as
long as they have the same number of elements.
Parameters
----------
margin : float
The margin in hinge loss. Defaults to 1.0
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape
- **label**: truth tensor with values -1 or 1. Must have the same size
as pred.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, margin=1, weight=None, batch_axis=0, **kwargs):
super(SquaredHingeLoss, self).__init__(weight, batch_axis, **kwargs)
self._margin = margin
def hybrid_forward(self, F, pred, label, sample_weight=None):
label = _reshape_like(F, label, pred)
loss = F.square(F.relu(self._margin - pred * label))
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
class LogisticLoss(Loss):
r"""Calculates the logistic loss (for binary losses only):
.. math::
L = \sum_i \log(1 + \exp(- {pred}_i \cdot {label}_i))
where `pred` is the classifier prediction and `label` is the target tensor
containing values -1 or 1 (0 or 1 if `label_format` is binary).
`label` and `pred` can have arbitrary shape as long as they have the same number of elements.
Parameters
----------
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
label_format : str, default 'signed'
Can be either 'signed' or 'binary'. If the label_format is 'signed', all label values should
be either -1 or 1. If the label_format is 'binary', all label values should be either
0 or 1.
Inputs:
- **pred**: prediction tensor with arbitrary shape.
- **label**: truth tensor with values -1/1 (label_format is 'signed')
or 0/1 (label_format is 'binary'). Must have the same size as pred.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, weight=None, batch_axis=0, label_format='signed', **kwargs):
super(LogisticLoss, self).__init__(weight, batch_axis, **kwargs)
self._label_format = label_format
if self._label_format not in ["signed", "binary"]:
            raise ValueError("label_format can only be signed or binary, received %s."
% label_format)
def hybrid_forward(self, F, pred, label, sample_weight=None):
label = _reshape_like(F, label, pred)
if self._label_format == 'signed':
label = (label + 1.0) / 2.0 # Transform label to be either 0 or 1
# Use a stable formula in computation
loss = F.relu(pred) - pred * label + \
F.Activation(-F.abs(pred), act_type='softrelu')
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
class TripletLoss(Loss):
r"""Calculates triplet loss given three input tensors and a positive margin.
Triplet loss measures the relative similarity between a positive
example, a negative example, and prediction:
.. math::
        L = \sum_i \max(\Vert {pos}_i - {pred}_i \Vert_2^2 -
                        \Vert {neg}_i - {pred}_i \Vert_2^2 + {margin}, 0)
    `positive`, `negative`, and `pred` can have arbitrary shape as long as they
have the same number of elements.
Parameters
----------
margin : float
Margin of separation between correct and incorrect pair.
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape
- **positive**: positive example tensor with arbitrary shape. Must have
the same size as pred.
- **negative**: negative example tensor with arbitrary shape Must have
the same size as pred.
Outputs:
- **loss**: loss tensor with shape (batch_size,).
"""
def __init__(self, margin=1, weight=None, batch_axis=0, **kwargs):
super(TripletLoss, self).__init__(weight, batch_axis, **kwargs)
self._margin = margin
def hybrid_forward(self, F, pred, positive, negative):
positive = _reshape_like(F, positive, pred)
negative = _reshape_like(F, negative, pred)
loss = F.sum(F.square(positive - pred) - F.square(negative - pred),
axis=self._batch_axis, exclude=True)
loss = F.relu(loss + self._margin)
return _apply_weighting(F, loss, self._weight, None)
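# --- Illustrative usage sketch (not part of the original module) ---
# TripletLoss pulls `pred` toward the positive example and away from the
# negative one; the loss is clipped at zero once the squared-distance gap
# exceeds the margin, as it does here. Assumes a working MXNet installation.
def _example_triplet_loss():
    import mxnet as mx
    loss_fn = TripletLoss(margin=1)
    pred = mx.nd.array([[0.0, 0.0]])
    positive = mx.nd.array([[0.1, 0.1]])   # close to pred
    negative = mx.nd.array([[2.0, 2.0]])   # far from pred
    return loss_fn(pred, positive, negative)   # [0.] for this gap and margin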
class PoissonNLLLoss(Loss):
    r"""For a target (random variable) drawn from a Poisson distribution, the function calculates the negative
    log likelihood loss.
    PoissonNLLLoss measures the loss accrued from a Poisson regression prediction made by the model.
.. math::
        L = \text{pred} - \text{target} * \log(\text{pred}) + \log(\text{target}!)
    `target` and `pred` can have arbitrary shape as long as they have the same number of elements.
Parameters
----------
from_logits : boolean, default True
        Indicates whether the log of the predicted value has already been computed. If True, the loss is computed as
        :math:`\exp(\text{pred}) - \text{target} * \text{pred}`, and if False, the loss is computed as
        :math:`\text{pred} - \text{target} * \log(\text{pred} + \text{epsilon})`. The default value is True.
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
compute_full: boolean, default False
        Indicates whether to add an approximation (Stirling factor) for the factorial term in the formula for the loss.
The Stirling factor is:
:math:`\text{target} * \log(\text{target}) - \text{target} + 0.5 * \log(2 * \pi * \text{target})`
epsilon: float, default 1e-08
This is to avoid calculating log(0) which is not defined.
Inputs:
- **pred**: Predicted value
- **target**: Random variable(count or number) which belongs to a Poisson distribution.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
- **loss**: Average loss (shape=(1,1)) of the loss tensor with shape (batch_size,).
"""
def __init__(self, weight=None, from_logits=True, batch_axis=0, compute_full=False, **kwargs):
super(PoissonNLLLoss, self).__init__(weight, batch_axis, **kwargs)
self._from_logits = from_logits
self._compute_full = compute_full
def hybrid_forward(self, F, pred, target, sample_weight=None, epsilon=1e-08):
target = _reshape_like(F, target, pred)
if self._from_logits:
loss = F.exp(pred) - target * pred
else:
loss = pred - target * F.log(pred + epsilon)
if self._compute_full:
# Using numpy's pi value
stirling_factor = target * \
F.log(target) - target + 0.5 * F.log(2 * target * np.pi)
target_gt_1 = target > 1
stirling_factor *= target_gt_1
loss += stirling_factor
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss)
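# --- Illustrative usage sketch (not part of the original module) ---
# PoissonNLLLoss with from_logits=False treats `pred` as a rate directly;
# compute_full=True adds the Stirling approximation of log(target!) for
# targets greater than 1. Assumes a working MXNet installation.
def _example_poisson_nll():
    import mxnet as mx
    loss_fn = PoissonNLLLoss(from_logits=False, compute_full=True)
    pred = mx.nd.array([[2.0, 5.0]])     # predicted Poisson rates
    target = mx.nd.array([[1.0, 4.0]])   # observed counts
    return loss_fn(pred, target)         # mean loss over all elements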
class CosineEmbeddingLoss(Loss):
r"""For a target label 1 or -1, vectors input1 and input2, the function computes the cosine distance
between the vectors. This can be interpreted as how similar/dissimilar two input vectors are.
.. math::
L = \sum_i \begin{cases} 1 - {cos\_sim({input1}_i, {input2}_i)} & \text{ if } {label}_i = 1\\
{cos\_sim({input1}_i, {input2}_i)} & \text{ if } {label}_i = -1 \end{cases}\\
        cos\_sim({input1}_i, {input2}_i) = \frac{{input1}_i \cdot {input2}_i}{\Vert {input1}_i \Vert \, \Vert {input2}_i \Vert}
`input1`, `input2` can have arbitrary shape as long as they have the same number of elements.
Parameters
----------
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
margin : float
Margin of separation between correct and incorrect pair.
Inputs:
- **input1**: a tensor with arbitrary shape
- **input2**: another tensor with same shape as pred to which input1 is
compared for similarity and loss calculation
- **label**: A 1-D tensor indicating for each pair input1 and input2, target label is 1 or -1
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as input1. For example, if input1 has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
- **loss**: The loss tensor with shape (batch_size,).
"""
def __init__(self, weight=None, batch_axis=0, margin=0, **kwargs):
super(CosineEmbeddingLoss, self).__init__(weight, batch_axis, **kwargs)
self._margin = margin
def hybrid_forward(self, F, input1, input2, label, sample_weight=None):
input1 = _reshape_like(F, input1, input2)
label = label.reshape((-1, 1))
cos_sim = self._cosine_similarity(F, input1, input2)
y_1 = label == 1
y_minus_1 = label == -1
cos_sim_a = (1 - cos_sim) * y_1
if F is ndarray:
z_array = F.array([0])
else:
z_array = F.zeros((1, 1))
cos_sim_b = F.broadcast_maximum(
z_array, y_minus_1 * (cos_sim - self._margin), axis=1)
loss = cos_sim_a + cos_sim_b
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return loss
def _cosine_similarity(self, F, x, y, axis=-1):
# Calculates the cosine similarity between 2 vectors
x_norm = F.norm(x, axis=axis).reshape(-1, 1)
y_norm = F.norm(y, axis=axis).reshape(-1, 1)
x_dot_y = F.sum(x * y, axis=axis).reshape(-1, 1)
if F is ndarray:
eps_arr = F.array([1e-12])
else:
eps_arr = F.full((1, 1), 1e-12)
return (x_dot_y / F.broadcast_maximum(x_norm * y_norm, eps_arr))
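# --- Illustrative usage sketch (not part of the original module) ---
# CosineEmbeddingLoss compares two batches of vectors: label 1 asks for high
# cosine similarity, label -1 penalizes similarity above the margin. Assumes a
# working MXNet installation.
def _example_cosine_embedding_loss():
    import mxnet as mx
    loss_fn = CosineEmbeddingLoss(margin=0.2)
    input1 = mx.nd.array([[1.0, 0.0], [0.0, 1.0]])
    input2 = mx.nd.array([[1.0, 0.1], [1.0, 0.0]])
    label = mx.nd.array([1, -1])   # first pair: similar, second pair: dissimilar
    return loss_fn(input1, input2, label)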
|
|
# Copyright (c) 2014 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
import six
from neutron.agent.common import utils
from neutron.agent.ovsdb import api as ovsdb
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
class Transaction(ovsdb.Transaction):
def __init__(self, context, check_error=False, log_errors=True, opts=None):
self.context = context
self.check_error = check_error
self.log_errors = log_errors
self.opts = ["--timeout=%d" % self.context.vsctl_timeout,
'--oneline', '--format=json']
if opts:
self.opts += opts
self.commands = []
def add(self, command):
self.commands.append(command)
return command
def commit(self):
args = []
for cmd in self.commands:
cmd.result = None
args += cmd.vsctl_args()
res = self.run_vsctl(args)
if res is None:
return
res = res.replace(r'\\', '\\').splitlines()
for i, record in enumerate(res):
self.commands[i].result = record
return [cmd.result for cmd in self.commands]
def run_vsctl(self, args):
full_args = ["ovs-vsctl"] + self.opts + args
try:
# We log our own errors, so never have utils.execute do it
return utils.execute(full_args, run_as_root=True,
log_fail_as_error=False).rstrip()
except Exception:
with excutils.save_and_reraise_exception() as ctxt:
if self.log_errors:
LOG.exception(_LE("Unable to execute %(cmd)s."),
{'cmd': full_args})
if not self.check_error:
ctxt.reraise = False
class BaseCommand(ovsdb.Command):
def __init__(self, context, cmd, opts=None, args=None):
self.context = context
self.cmd = cmd
self.opts = [] if opts is None else opts
self.args = [] if args is None else args
def execute(self, check_error=False, log_errors=True):
with Transaction(self.context, check_error=check_error,
log_errors=log_errors) as txn:
txn.add(self)
return self.result
def vsctl_args(self):
return itertools.chain(('--',), self.opts, (self.cmd,), self.args)
class MultiLineCommand(BaseCommand):
"""Command for ovs-vsctl commands that return multiple lines"""
@property
def result(self):
return self._result
@result.setter
def result(self, raw_result):
self._result = raw_result.split(r'\n') if raw_result else []
class DbCommand(BaseCommand):
def __init__(self, context, cmd, opts=None, args=None, columns=None):
if opts is None:
opts = []
if columns:
opts += ['--columns=%s' % ",".join(columns)]
super(DbCommand, self).__init__(context, cmd, opts, args)
@property
def result(self):
return self._result
@result.setter
def result(self, raw_result):
# If check_error=False, run_vsctl can return None
if not raw_result:
self._result = None
return
try:
json = jsonutils.loads(raw_result)
except (ValueError, TypeError):
# This shouldn't happen, but if it does and we check_errors
# log and raise.
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Could not parse: %s"), raw_result)
headings = json['headings']
data = json['data']
results = []
for record in data:
obj = {}
for pos, heading in enumerate(headings):
obj[heading] = ovsdb.val_to_py(record[pos])
results.append(obj)
self._result = results
class DbGetCommand(DbCommand):
@DbCommand.result.setter
def result(self, val):
# super()'s never worked for setters http://bugs.python.org/issue14965
DbCommand.result.fset(self, val)
# DbCommand will return [{'column': value}] and we just want value.
if self._result:
self._result = list(self._result[0].values())[0]
class BrExistsCommand(DbCommand):
@DbCommand.result.setter
def result(self, val):
self._result = val is not None
def execute(self):
return super(BrExistsCommand, self).execute(check_error=False,
log_errors=False)
class OvsdbVsctl(ovsdb.API):
def transaction(self, check_error=False, log_errors=True, **kwargs):
return Transaction(self.context, check_error, log_errors, **kwargs)
def add_br(self, name, may_exist=True):
opts = ['--may-exist'] if may_exist else None
return BaseCommand(self.context, 'add-br', opts, [name])
def del_br(self, name, if_exists=True):
opts = ['--if-exists'] if if_exists else None
return BaseCommand(self.context, 'del-br', opts, [name])
def br_exists(self, name):
return BrExistsCommand(self.context, 'list', args=['Bridge', name])
def port_to_br(self, name):
return BaseCommand(self.context, 'port-to-br', args=[name])
def iface_to_br(self, name):
return BaseCommand(self.context, 'iface-to-br', args=[name])
def list_br(self):
return MultiLineCommand(self.context, 'list-br')
def br_get_external_id(self, name, field):
return BaseCommand(self.context, 'br-get-external-id',
args=[name, field])
def db_set(self, table, record, *col_values):
args = [table, record]
args += _set_colval_args(*col_values)
return BaseCommand(self.context, 'set', args=args)
def db_clear(self, table, record, column):
return BaseCommand(self.context, 'clear', args=[table, record,
column])
def db_get(self, table, record, column):
# Use the 'list' command as it can return json and 'get' cannot so that
# we can get real return types instead of treating everything as string
# NOTE: openvswitch can return a single atomic value for fields that
# are sets, but only have one value. This makes directly iterating over
# the result of a db_get() call unsafe.
return DbGetCommand(self.context, 'list', args=[table, record],
columns=[column])
def db_list(self, table, records=None, columns=None, if_exists=False):
opts = ['--if-exists'] if if_exists else None
args = [table]
if records:
args += records
return DbCommand(self.context, 'list', opts=opts, args=args,
columns=columns)
def db_find(self, table, *conditions, **kwargs):
columns = kwargs.pop('columns', None)
args = itertools.chain([table],
*[_set_colval_args(c) for c in conditions])
return DbCommand(self.context, 'find', args=args, columns=columns)
def set_controller(self, bridge, controllers):
return BaseCommand(self.context, 'set-controller',
args=[bridge] + list(controllers))
def del_controller(self, bridge):
return BaseCommand(self.context, 'del-controller', args=[bridge])
def get_controller(self, bridge):
return MultiLineCommand(self.context, 'get-controller', args=[bridge])
def set_fail_mode(self, bridge, mode):
return BaseCommand(self.context, 'set-fail-mode', args=[bridge, mode])
def add_port(self, bridge, port, may_exist=True):
opts = ['--may-exist'] if may_exist else None
return BaseCommand(self.context, 'add-port', opts, [bridge, port])
def del_port(self, port, bridge=None, if_exists=True):
opts = ['--if-exists'] if if_exists else None
args = filter(None, [bridge, port])
return BaseCommand(self.context, 'del-port', opts, args)
def list_ports(self, bridge):
return MultiLineCommand(self.context, 'list-ports', args=[bridge])
def _set_colval_args(*col_values):
args = []
# TODO(twilson) This is ugly, but set/find args are very similar except for
# op. Will try to find a better way to default this op to '='
for entry in col_values:
if len(entry) == 2:
col, op, val = entry[0], '=', entry[1]
else:
col, op, val = entry
if isinstance(val, collections.Mapping):
args += ["%s:%s%s%s" % (
col, k, op, ovsdb.py_to_val(v)) for k, v in val.items()]
elif (isinstance(val, collections.Sequence)
and not isinstance(val, six.string_types)):
args.append(
"%s%s%s" % (col, op, ",".join(map(ovsdb.py_to_val, val))))
else:
args.append("%s%s%s" % (col, op, ovsdb.py_to_val(val)))
return args
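# --- Illustrative sketch (not part of the original module) ---
# How _set_colval_args renders column/value pairs for db_set and db_find,
# assuming ovsdb.py_to_val passes plain scalars through unchanged:
#   ('type', 'internal')                -> ['type=internal']
#   ('external_ids', {'iface-id': 'x'}) -> ['external_ids:iface-id=x']
#   ('ofport', '!=', -1)                -> ['ofport!=-1']   (explicit operator)
def _example_set_colval_args():
    return (_set_colval_args(('type', 'internal')),
            _set_colval_args(('external_ids', {'iface-id': 'x'})),
            _set_colval_args(('ofport', '!=', -1)))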
|
|
"""Models for ecommerce"""
from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.db.models import (
CharField,
CASCADE,
DecimalField,
ForeignKey,
IntegerField,
SET_NULL,
PROTECT,
TextField,
Sum,
)
from mitol.common.models import TimestampedModel
from ecommerce.constants import CARD_TYPES
from main.models import AuditableModel, AuditModel
from main.utils import serialize_model_object
from klasses.models import BootcampRun
class Order(AuditableModel, TimestampedModel):
"""
Represents a payment the user has made
"""
FULFILLED = "fulfilled"
FAILED = "failed"
CREATED = "created"
REFUNDED = "refunded"
STATUSES = [CREATED, FULFILLED, FAILED, REFUNDED]
CYBERSOURCE_TYPE = "cybersource"
WIRE_TRANSFER_TYPE = "wiretransfer"
REFUND_TYPE = "refund"
PAYMENT_TYPES = [CYBERSOURCE_TYPE, WIRE_TRANSFER_TYPE, REFUND_TYPE]
user = ForeignKey(settings.AUTH_USER_MODEL, on_delete=CASCADE)
status = CharField(
choices=[(status, status) for status in STATUSES],
default=CREATED,
max_length=30,
)
total_price_paid = DecimalField(decimal_places=2, max_digits=20)
application = ForeignKey(
"applications.BootcampApplication",
on_delete=CASCADE,
null=True,
blank=True,
db_index=True,
related_name="orders",
)
payment_type = CharField(
choices=[(payment_type, payment_type) for payment_type in PAYMENT_TYPES],
default=CYBERSOURCE_TYPE,
max_length=30,
)
def __str__(self):
"""Description for Order"""
return "Order {}, status={} for user={}".format(self.id, self.status, self.user)
@property
def line_description(self):
"""Description of the first line in the Order (usually there should be only one)"""
line = self.line_set.first()
if not line:
return ""
return line.description
@property
def run_title(self):
"""Display title of the bootcamp run that was purchased in this Order"""
bootcamp_run = self.get_bootcamp_run()
if not bootcamp_run:
return ""
return bootcamp_run.display_title
@classmethod
def get_audit_class(cls):
return OrderAudit
def to_dict(self):
"""
Add any Lines to the serialized representation of the Order
"""
data = serialize_model_object(self)
data["lines"] = [serialize_model_object(line) for line in self.line_set.all()]
return data
def get_bootcamp_run(self):
"""
Fetches the bootcamp run that was purchased in this Order
Returns:
BootcampRun: The bootcamp run that was purchased in this order.
"""
line = self.line_set.first()
if not line:
return None
return line.bootcamp_run
class OrderAudit(AuditModel):
"""
Audit model for Order. This also stores information for Line.
"""
order = ForeignKey(Order, null=True, on_delete=SET_NULL)
@classmethod
def get_related_field_name(cls):
return "order"
    def __str__(self):
        """Description for OrderAudit"""
return "Order {}".format(self.id)
class Line(TimestampedModel):
"""
Represents a line item in the order
"""
order = ForeignKey(Order, on_delete=CASCADE)
bootcamp_run = ForeignKey(BootcampRun, null=False, on_delete=PROTECT)
price = DecimalField(decimal_places=2, max_digits=20)
description = TextField()
def __str__(self):
"""Description for Line"""
return "Line for {order}, price={price}, bootcamp_run_id={bootcamp_run_id}, description={description}".format(
order=self.order,
price=self.price,
bootcamp_run_id=self.bootcamp_run_id,
description=self.description,
)
@classmethod
def fulfilled_for_user(cls, user):
"""
Returns the list of lines for fulfilled orders for a specific user
"""
return cls.objects.filter(order__user=user, order__status=Order.FULFILLED)
@classmethod
def for_user_bootcamp_run(cls, user, bootcamp_run):
"""
        Returns all the lines for fulfilled orders associated with the payment of a specific bootcamp run
"""
return (
cls.fulfilled_for_user(user)
.filter(bootcamp_run=bootcamp_run)
.order_by("order__created_on")
)
@classmethod
def total_paid_for_bootcamp_run(cls, user, bootcamp_run):
"""
Returns the total amount paid for a bootcamp run
"""
return cls.for_user_bootcamp_run(user, bootcamp_run).aggregate(
total=Sum("price")
)
class Receipt(TimestampedModel):
"""
The contents of the message from CyberSource about an Order fulfillment or cancellation
"""
order = ForeignKey(Order, null=True, on_delete=CASCADE)
data = JSONField()
@property
def payment_method(self):
"""Try to guess the payment source based on the Cybersource receipt"""
payment_method = self.data.get("req_payment_method")
if payment_method == "card":
card_type = self.data.get("req_card_type")
card_type_description = CARD_TYPES.get(card_type, "")
card_number = self.data.get("req_card_number", "")
return f"{card_type_description} | {card_number}"
elif payment_method == "paypal":
return "PayPal"
def __str__(self):
"""Description of Receipt"""
if self.order:
return "Receipt for order {}".format(self.order.id)
else:
return "Receipt with no attached order"
class WireTransferReceipt(TimestampedModel):
"""
A record of a wire transfer
"""
wire_transfer_id = IntegerField()
order = ForeignKey(Order, null=True, on_delete=CASCADE)
data = JSONField()
def __str__(self):
"""Description of WireTransferReceipt"""
if self.order:
return f"Wire transfer receipt for order {self.order.id}"
else:
return "Wire transfer receipt with no attached order"
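# --- Illustrative sketch (not part of the original module) ---
# How the Line helpers above are typically combined: total_paid_for_bootcamp_run
# returns an aggregate dict keyed by "total" (None when there are no fulfilled
# lines), and for_user_bootcamp_run yields the matching Line rows. The `user`
# and `bootcamp_run` arguments are assumed to be existing model instances.
def _example_amount_paid(user, bootcamp_run):
    total = Line.total_paid_for_bootcamp_run(user, bootcamp_run)["total"] or 0
    descriptions = [line.description for line in Line.for_user_bootcamp_run(user, bootcamp_run)]
    return total, descriptions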
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the classes and utility functions for distance and
cartesian coordinates.
"""
import warnings
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
from .angles import Angle
__all__ = ['Distance']
__doctest_requires__ = {'*': ['scipy']}
class Distance(u.SpecificTypeQuantity):
"""
A one-dimensional distance.
This can be initialized in one of four ways:
* A distance ``value`` (array or float) and a ``unit``
* A `~astropy.units.Quantity` object
* A redshift and (optionally) a cosmology.
* Providing a distance modulus
Parameters
----------
value : scalar or `~astropy.units.Quantity`.
The value of this distance.
unit : `~astropy.units.UnitBase`
The units for this distance, *if* ``value`` is not a
`~astropy.units.Quantity`. Must have dimensions of distance.
z : float
A redshift for this distance. It will be converted to a distance
by computing the luminosity distance for this redshift given the
cosmology specified by ``cosmology``. Must be given as a keyword
argument.
cosmology : ``Cosmology`` or `None`
A cosmology that will be used to compute the distance from ``z``.
If `None`, the current cosmology will be used (see
`astropy.cosmology` for details).
distmod : float or `~astropy.units.Quantity`
The distance modulus for this distance. Note that if ``unit`` is not
provided, a guess will be made at the unit between AU, pc, kpc, and Mpc.
parallax : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
The parallax in angular units.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
order : {'C', 'F', 'A'}, optional
See `~astropy.units.Quantity`.
subok : bool, optional
See `~astropy.units.Quantity`.
ndmin : int, optional
See `~astropy.units.Quantity`.
allow_negative : bool, optional
        Whether to allow negative distances (which are possible in some
cosmologies). Default: ``False``.
Raises
------
`~astropy.units.UnitsError`
If the ``unit`` is not a distance.
ValueError
If value specified is less than 0 and ``allow_negative=False``.
If ``z`` is provided with a ``unit`` or ``cosmology`` is provided
when ``z`` is *not* given, or ``value`` is given as well as ``z``.
Examples
--------
>>> from astropy import units as u
>>> from astropy.cosmology import WMAP5, WMAP7
>>> d1 = Distance(10, u.Mpc)
>>> d2 = Distance(40, unit=u.au)
>>> d3 = Distance(value=5, unit=u.kpc)
>>> d4 = Distance(z=0.23)
>>> d5 = Distance(z=0.23, cosmology=WMAP5)
>>> d6 = Distance(distmod=24.47)
>>> d7 = Distance(Distance(10 * u.Mpc))
>>> d8 = Distance(parallax=21.34*u.mas)
"""
_equivalent_unit = u.m
_include_easy_conversion_members = True
def __new__(cls, value=None, unit=None, z=None, cosmology=None,
distmod=None, parallax=None, dtype=None, copy=True, order=None,
subok=False, ndmin=0, allow_negative=False):
if z is not None:
if value is not None or distmod is not None:
                raise ValueError('Should give only one of `value`, `z` '
'or `distmod` in Distance constructor.')
if cosmology is None:
from astropy.cosmology import default_cosmology
cosmology = default_cosmology.get()
value = cosmology.luminosity_distance(z)
# Continue on to take account of unit and other arguments
# but a copy is already made, so no longer necessary
copy = False
else:
if cosmology is not None:
raise ValueError('A `cosmology` was given but `z` was not '
'provided in Distance constructor')
            value_msg = ('Should give only one of `value`, `z`, `distmod`, or '
'`parallax` in Distance constructor.')
n_not_none = np.sum([x is not None
for x in [value, z, distmod, parallax]])
if n_not_none > 1:
raise ValueError(value_msg)
if distmod is not None:
value = cls._distmod_to_pc(distmod)
if unit is None:
# if the unit is not specified, guess based on the mean of
# the log of the distance
meanlogval = np.log10(value.value).mean()
if meanlogval > 6:
unit = u.Mpc
elif meanlogval > 3:
unit = u.kpc
elif meanlogval < -3: # ~200 AU
unit = u.AU
else:
unit = u.pc
# Continue on to take account of unit and other arguments
# but a copy is already made, so no longer necessary
copy = False
elif parallax is not None:
value = parallax.to_value(u.pc, equivalencies=u.parallax())
unit = u.pc
# Continue on to take account of unit and other arguments
# but a copy is already made, so no longer necessary
copy = False
if np.any(parallax < 0):
if allow_negative:
warnings.warn(
"Negative parallaxes are converted to NaN "
"distances even when `allow_negative=True`, "
"because negative parallaxes cannot be transformed "
"into distances. See discussion in this paper: "
"https://arxiv.org/abs/1507.02105", AstropyWarning)
else:
                    raise ValueError("Some parallaxes are negative, which "
                                     "are not interpretable as distances. "
"See the discussion in this paper: "
"https://arxiv.org/abs/1507.02105 . "
"If you want parallaxes to pass "
"through, with negative parallaxes "
"instead becoming NaN, use the "
"`allow_negative=True` argument.")
elif value is None:
raise ValueError('None of `value`, `z`, `distmod`, or '
'`parallax` were given to Distance '
'constructor')
# now we have arguments like for a Quantity, so let it do the work
distance = super().__new__(
cls, value, unit, dtype=dtype, copy=copy, order=order,
subok=subok, ndmin=ndmin)
# Make sure NaNs don't emit a warning
with np.errstate(invalid='ignore'):
any_negative = np.any(distance.value < 0)
if not allow_negative and any_negative:
raise ValueError("Distance must be >= 0. Use the argument "
"'allow_negative=True' to allow negative values.")
return distance
@property
def z(self):
"""Short for ``self.compute_z()``"""
return self.compute_z()
def compute_z(self, cosmology=None):
"""
The redshift for this distance assuming its physical distance is
a luminosity distance.
Parameters
----------
cosmology : ``Cosmology`` or `None`
The cosmology to assume for this calculation, or `None` to use the
current cosmology (see `astropy.cosmology` for details).
Returns
-------
z : float
The redshift of this distance given the provided ``cosmology``.
"""
if cosmology is None:
from astropy.cosmology import default_cosmology
cosmology = default_cosmology.get()
from astropy.cosmology import z_at_value
return z_at_value(cosmology.luminosity_distance, self, ztol=1.e-10)
@property
def distmod(self):
"""The distance modulus as a `~astropy.units.Quantity`"""
val = 5. * np.log10(self.to_value(u.pc)) - 5.
return u.Quantity(val, u.mag, copy=False)
@classmethod
def _distmod_to_pc(cls, dm):
dm = u.Quantity(dm, u.mag)
return cls(10 ** ((dm.value + 5) / 5.), u.pc, copy=False)
@property
def parallax(self):
"""The parallax angle as an `~astropy.coordinates.Angle` object"""
return Angle(self.to(u.milliarcsecond, u.parallax()))
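# --- Illustrative sketch (not part of the original module) ---
# Round-tripping a Distance through its derived quantities: the distance
# modulus of 10 Mpc rebuilds the same distance (the unit is guessed from the
# modulus), and a 10 mas parallax corresponds to 100 pc. Only uses names
# already imported in this module.
def _example_distance_roundtrip():
    d = Distance(10., u.Mpc)
    dm = d.distmod                       # ~30 mag
    same_d = Distance(distmod=dm)        # unit guessed as Mpc
    p = Distance(parallax=10 * u.mas)    # 100 pc
    return dm, same_d, p.parallax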
|
|
#!/usr/bin/env python
"""
@package mi.core.instrument.test.test_protocol_param_dict
@file mi/core/instrument/test/test_protocol_param_dict.py
@author Steve Foley
@brief Test cases for the base protocol parameter dictionary module
"""
import json
import re
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import InstrumentParameterExpirationException
from mi.core.instrument.protocol_param_dict import Parameter, FunctionParameter, RegexParameter
from mi.core.instrument.protocol_param_dict import ParameterDictKey
from mi.core.instrument.protocol_param_dict import ParameterDictType
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility
from mi.core.instrument.protocol_param_dict import ProtocolParameterDict
from mi.core.instrument.test.test_strings import TestUnitStringsDict
from mi.logging import log
from nose.plugins.attrib import attr
@attr('UNIT', group='mi')
class TestUnitProtocolParameterDict(TestUnitStringsDict):
"""
Test cases for instrument driver class. Functions in this class provide
instrument driver unit tests and provide a tutorial on use of
the driver interface.
"""
__test__ = True
@staticmethod
def pick_byte2(input_val):
""" Get the 2nd byte as an example of something tricky and
arbitrary"""
val = int(input_val) >> 8
val &= 255
return val
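    # Worked example (not part of the original tests): 1205 is 0x04B5, so its
    # second byte is 0x04; pick_byte2(1205) == (1205 >> 8) & 255 == 4, which is
    # why the update(1205) calls below expect "fn_foo" to become 4.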
def setUp(self):
self.param_dict = ProtocolParameterDict()
self.param_dict.add("foo", r'.*foo=(\d+).*',
lambda match: int(match.group(1)),
lambda x: str(x),
direct_access=True,
startup_param=True,
default_value=10,
visibility=ParameterDictVisibility.READ_WRITE)
self.param_dict.add("bar", r'.*bar=(\d+).*',
lambda match: int(match.group(1)),
lambda x: str(x),
direct_access=False,
startup_param=True,
default_value=15,
visibility=ParameterDictVisibility.READ_WRITE)
self.param_dict.add("baz", r'.*baz=(\d+).*',
lambda match: int(match.group(1)),
lambda x: str(x),
direct_access=True,
default_value=20,
visibility=ParameterDictVisibility.DIRECT_ACCESS,
get_timeout=30,
set_timeout=40,
display_name="Baz",
description="The baz parameter",
type=ParameterDictType.INT,
units="nano-bazers",
value_description="Should be an integer between 2 and 2000")
self.param_dict.add("bat", r'.*bat=(\d+).*',
lambda match: int(match.group(1)),
lambda x: str(x),
startup_param=False,
default_value=20,
visibility=ParameterDictVisibility.READ_ONLY,
get_timeout=10,
set_timeout=20,
display_name="Bat",
description="The bat parameter",
type=ParameterDictType.INT,
units="nano-batbit",
value_description="Should be an integer between 1 and 1000")
self.param_dict.add("qux", r'.*qux=(\d+).*',
lambda match: int(match.group(1)),
lambda x: str(x),
startup_param=False,
visibility=ParameterDictVisibility.READ_ONLY)
self.param_dict.add("pho", r'.*qux=(\d+).*',
lambda match: int(match.group(1)),
lambda x: str(x),
startup_param=False,
visibility=ParameterDictVisibility.IMMUTABLE)
self.param_dict.add("dil", r'.*qux=(\d+).*',
lambda match: int(match.group(1)),
lambda x: str(x),
startup_param=False,
visibility=ParameterDictVisibility.IMMUTABLE)
self.param_dict.add("qut", r'.*qut=(\d+).*',
lambda match: int(match.group(1)),
lambda x: str(x),
direct_access=True,
default_value=[10, 100],
visibility=ParameterDictVisibility.DIRECT_ACCESS,
expiration=1,
get_timeout=10,
set_timeout=20,
display_name="Qut",
description="The qut list parameter",
type=ParameterDictType.LIST,
units="nano-qutters",
value_description="Should be a 2-10 element list of integers between 2 and 2000")
self.target_schema = {
"bar": {
"direct_access": False,
"get_timeout": 10,
"set_timeout": 10,
"startup": True,
"value": {
"default": 15
},
"visibility": "READ_WRITE",
"range": None,
},
"bat": {
"description": "The bat parameter",
"direct_access": False,
"display_name": "Bat",
"get_timeout": 10,
"set_timeout": 20,
"startup": False,
"value": {
"default": 20,
"description": "Should be an integer between 1 and 1000",
"type": "int",
"units": "nano-batbit"
},
"visibility": "READ_ONLY",
"range": None,
},
"baz": {
"description": "The baz parameter",
"direct_access": True,
"display_name": "Baz",
"get_timeout": 30,
"set_timeout": 40,
"startup": False,
"value": {
"default": 20,
"description": "Should be an integer between 2 and 2000",
"type": "int",
"units": "nano-bazers"
},
"visibility": "DIRECT_ACCESS",
"range": None,
},
"dil": {
"direct_access": False,
"get_timeout": 10,
"set_timeout": 10,
"startup": False,
"value": {},
"visibility": "IMMUTABLE",
"range": None,
},
"foo": {
"direct_access": True,
"get_timeout": 10,
"set_timeout": 10,
"startup": True,
"value": {
"default": 10
},
"visibility": "READ_WRITE",
"range": None,
},
"pho": {
"direct_access": False,
"get_timeout": 10,
"set_timeout": 10,
"startup": False,
"value": {},
"visibility": "IMMUTABLE",
"range": None,
},
"qut": {
"description": "The qut list parameter",
"direct_access": True,
"display_name": "Qut",
"get_timeout": 10,
"set_timeout": 20,
"startup": False,
"value": {
"default": [
10,
100
],
"description": "Should be a 2-10 element list of integers between 2 and 2000",
"type": "list",
"units": "nano-qutters"
},
"visibility": "DIRECT_ACCESS",
"range": None,
},
"qux": {
"direct_access": False,
"get_timeout": 10,
"set_timeout": 10,
"startup": False,
"value": {},
"visibility": "READ_ONLY",
"range": None,
}
}
self.test_yaml = '''
parameters: {
qut: {
description: "QutFileDesc",
units: "QutFileUnits",
value_description: "QutFileValueDesc",
type: "QutFileType",
display_name: "QutDisplay"
},
extra_param: {
description: "ExtraFileDesc",
units: "ExtraFileUnits",
value_description: "ExtraFileValueDesc",
type: "ExtraFileType"
}
}
commands: {
dummy: stuff
}
'''
def test_get_direct_access_list(self):
"""
Test to see we can get a list of direct access parameters
"""
result = self.param_dict.get_direct_access_list()
self.assertTrue(isinstance(result, list))
self.assertEquals(len(result), 3)
self.assert_("foo" in result)
self.assert_("baz" in result)
self.assert_("qut" in result)
def test_get_startup_list(self):
"""
        Test to see we can get a list of startup parameters
"""
result = self.param_dict.get_startup_list()
self.assertTrue(isinstance(result, list))
self.assertEquals(len(result), 2)
self.assert_("foo" in result)
self.assert_("bar" in result)
def test_set_default(self):
"""
Test setting a default value
"""
result = self.param_dict.get_config()
self.assertEquals(result["foo"], None)
self.param_dict.set_default("foo")
self.assertEquals(self.param_dict.get("foo"), 10)
self.param_dict.update("foo=1000")
self.assertEquals(self.param_dict.get("foo"), 1000)
self.param_dict.set_default("foo")
self.assertEquals(self.param_dict.get("foo"), 10)
self.assertRaises(ValueError, self.param_dict.set_default, "qux")
def test_update_many(self):
"""
Test updating of multiple variables from the same input
"""
sample_input = """
foo=100
bar=200, baz=300
"""
self.assertNotEquals(self.param_dict.get("foo"), 100)
self.assertNotEquals(self.param_dict.get("bar"), 200)
self.assertNotEquals(self.param_dict.get("baz"), 300)
result = self.param_dict.update_many(sample_input)
log.debug("result: %s", result)
self.assertEquals(result["foo"], True)
self.assertEquals(result["bar"], True)
self.assertEquals(result["baz"], True)
self.assertEquals(self.param_dict.get("foo"), 100)
self.assertEquals(self.param_dict.get("bar"), 200)
self.assertEquals(self.param_dict.get("baz"), 300)
def test_update_specific_values(self):
"""
test to verify we can limit update to a specific
set of parameters
"""
sample_input = "foo=100, bar=200"
# First verify we can set both
self.assertNotEquals(self.param_dict.get("foo"), 100)
self.assertNotEquals(self.param_dict.get("bar"), 200)
self.assertTrue(self.param_dict.update(sample_input))
self.assertEquals(self.param_dict.get("foo"), 100)
self.assertEquals(self.param_dict.get("bar"), 200)
# Now let's only have it update 1 parameter with a name
sample_input = "foo=200, bar=300"
self.assertTrue(self.param_dict.update(sample_input, target_params="foo"))
self.assertEquals(self.param_dict.get("foo"), 200)
self.assertEquals(self.param_dict.get("bar"), 200)
# Now let's only have it update 1 parameter using a list
sample_input = "foo=300, bar=400"
self.assertTrue(self.param_dict.update(sample_input, target_params=["foo"]))
self.assertEquals(self.param_dict.get("foo"), 300)
self.assertEquals(self.param_dict.get("bar"), 200)
# Test our exceptions
with self.assertRaises(KeyError):
self.param_dict.update(sample_input, "key_does_not_exist")
with self.assertRaises(InstrumentParameterException):
self.param_dict.update(sample_input, {'bad': "key_does_not_exist"})
def test_visibility_list(self):
lst = self.param_dict.get_visibility_list(ParameterDictVisibility.READ_WRITE)
lst.sort()
self.assertEquals(lst, ["bar", "foo"])
lst = self.param_dict.get_visibility_list(ParameterDictVisibility.DIRECT_ACCESS)
lst.sort()
self.assertEquals(lst, ["baz", "qut"])
lst = self.param_dict.get_visibility_list(ParameterDictVisibility.READ_ONLY)
lst.sort()
self.assertEquals(lst, ["bat", "qux"])
lst = self.param_dict.get_visibility_list(ParameterDictVisibility.IMMUTABLE)
lst.sort()
self.assertEquals(lst, ["dil", "pho"])
def test_function_values(self):
"""
Make sure we can add and update values with functions instead of patterns
"""
self.param_dict.add_parameter(
FunctionParameter("fn_foo",
self.pick_byte2,
lambda x: str(x),
direct_access=True,
startup_param=True,
value=1,
visibility=ParameterDictVisibility.READ_WRITE)
)
self.param_dict.add_parameter(
FunctionParameter("fn_bar",
lambda x: bool(x & 2), # bit map example
lambda x: str(x),
direct_access=True,
startup_param=True,
value=False,
visibility=ParameterDictVisibility.READ_WRITE)
)
# check defaults just to be safe
val = self.param_dict.get("fn_foo")
self.assertEqual(val, 1)
val = self.param_dict.get("fn_bar")
self.assertEqual(val, False)
self.param_dict.update(1005) # just change first in list
val = self.param_dict.get("fn_foo")
self.assertEqual(val, 3)
val = self.param_dict.get("fn_bar")
self.assertEqual(val, False)
# fn_bar does not get updated here
result = self.param_dict.update_many(1205)
self.assertEqual(result['fn_foo'], True)
self.assertEqual(len(result), 1)
val = self.param_dict.get("fn_foo")
self.assertEqual(val, 4)
val = self.param_dict.get("fn_bar")
self.assertEqual(val, False)
# both are updated now
result = self.param_dict.update_many(6)
self.assertEqual(result['fn_foo'], True)
self.assertEqual(result['fn_bar'], True)
self.assertEqual(len(result), 2)
val = self.param_dict.get("fn_foo")
self.assertEqual(val, 0)
val = self.param_dict.get("fn_bar")
self.assertEqual(val, True)
def test_mixed_pdv_types(self):
""" Verify we can add different types of PDVs in one container """
self.param_dict.add_parameter(
FunctionParameter("fn_foo",
self.pick_byte2,
lambda x: str(x),
direct_access=True,
startup_param=True,
value=1,
visibility=ParameterDictVisibility.READ_WRITE)
)
self.param_dict.add_parameter(
RegexParameter("foo", r'.*foo=(\d+).*',
lambda match: int(match.group(1)),
lambda x: str(x),
direct_access=True,
startup_param=True,
value=10,
visibility=ParameterDictVisibility.READ_WRITE)
)
self.param_dict.add("bar", r'.*bar=(\d+).*',
lambda match: int(match.group(1)),
lambda x: str(x),
direct_access=False,
startup_param=True,
value=15,
visibility=ParameterDictVisibility.READ_WRITE)
self.assertEqual(self.param_dict.get("fn_foo"), 1)
self.assertEqual(self.param_dict.get("foo"), 10)
self.assertEqual(self.param_dict.get("bar"), 15)
def test_base_update(self):
pdv = Parameter("foo",
lambda x: str(x),
value=12)
self.assertEqual(pdv.get_value(), 12)
result = pdv.update(1)
self.assertEqual(result, True)
self.assertEqual(pdv.get_value(), 1)
        # It's a base class...monkey see, monkey do
result = pdv.update("foo=1")
self.assertEqual(result, True)
self.assertEqual(pdv.get_value(), "foo=1")
def test_regex_val(self):
pdv = RegexParameter("foo",
r'.*foo=(\d+).*',
lambda match: int(match.group(1)),
lambda x: str(x),
value=12)
self.assertEqual(pdv.get_value(), 12)
result = pdv.update(1)
self.assertEqual(result, False)
self.assertEqual(pdv.get_value(), 12)
result = pdv.update("foo=1")
self.assertEqual(result, True)
self.assertEqual(pdv.get_value(), 1)
def test_function_val(self):
pdv = FunctionParameter("foo",
self.pick_byte2,
lambda x: str(x),
value=12)
self.assertEqual(pdv.get_value(), 12)
self.assertRaises(TypeError, pdv.update(1))
result = pdv.update("1205")
self.assertEqual(pdv.get_value(), 4)
self.assertEqual(result, True)
def test_set_init_value(self):
result = self.param_dict.get("foo")
self.assertEqual(result, None)
self.param_dict.set_init_value("foo", 42)
result = self.param_dict.get_init_value("foo")
self.assertEqual(result, 42)
def test_schema_generation(self):
self.maxDiff = None
result = self.param_dict.generate_dict()
json_result = json.dumps(result, indent=4, sort_keys=True)
log.debug("Expected: %s", self.target_schema)
log.debug("Result: %s", json_result)
self.assertEqual(result, self.target_schema)
def test_empty_schema(self):
self.param_dict = ProtocolParameterDict()
result = self.param_dict.generate_dict()
self.assertEqual(result, {})
def test_bad_descriptions(self):
self.param_dict._param_dict["foo"].description = None
self.param_dict._param_dict["foo"].value = None
self.assertRaises(InstrumentParameterException,
self.param_dict.get_init_value, "foo")
self.assertRaises(InstrumentParameterException,
self.param_dict.get_default_value, "foo")
self.assertRaises(InstrumentParameterException,
self.param_dict.set_default, "foo")
self.assertRaises(InstrumentParameterException,
self.param_dict.get_init_value, "foo")
self.assertRaises(InstrumentParameterException,
self.param_dict.get_menu_path_read, "foo")
self.assertRaises(InstrumentParameterException,
self.param_dict.get_submenu_read, "foo")
self.assertRaises(InstrumentParameterException,
self.param_dict.get_menu_path_write, "foo")
self.assertRaises(InstrumentParameterException,
self.param_dict.get_submenu_write, "foo")
self.assertRaises(InstrumentParameterException,
self.param_dict.format, "foo", 1)
self.assertRaises(InstrumentParameterException,
self.param_dict.get_direct_access_list)
self.assertRaises(InstrumentParameterException,
self.param_dict.is_startup_param, "foo")
def test_set(self):
"""
Test a simple set of the parameter. Make sure the right values get
called and the correct exceptions are raised.
"""
new_param = FunctionParameter("foo",
self.pick_byte2,
lambda x: str(x),
direct_access=True,
startup_param=True,
value=1000,
visibility=ParameterDictVisibility.READ_WRITE)
self.assertEquals(new_param.get_value(), 1000)
self.assertEquals(self.param_dict.get("foo"), None)
# overwrites existing param
self.param_dict.add_parameter(new_param)
self.assertEquals(self.param_dict.get("foo"), 1000)
self.param_dict.set_value("foo", 2000)
self.assertEquals(self.param_dict.get("foo"), 2000)
def test_invalid_type(self):
self.assertRaises(InstrumentParameterException,
FunctionParameter,
"fn_bar",
lambda x: bool(x & 2), # bit map example
lambda x: str(x),
direct_access=True,
startup_param=True,
value=False,
type="bad_type",
visibility=ParameterDictVisibility.READ_WRITE)
def test_get(self):
"""
test getting values with expiration
"""
# from mi.core.exceptions import InstrumentParameterExpirationException
pd = ProtocolParameterDict()
# No expiration, should work just fine
pd.add('noexp', r'', None, None, expiration=None)
pd.add('zeroexp', r'', None, None, expiration=0)
pd.add('lateexp', r'', None, None, expiration=2)
###
# Set and get with no expire
###
pd.set_value('noexp', 1)
self.assertEqual(pd.get('noexp'), 1)
###
# Set and get with a 0 expire
###
basetime = pd.get_current_timestamp()
pd.set_value('zeroexp', 2)
# We should fail because we are calculating exp against current time
with self.assertRaises(InstrumentParameterExpirationException):
pd.get('zeroexp')
# Should succeed because exp is calculated using basetime
self.assertEqual(pd.get('zeroexp', basetime), 2)
###
# Set and get with a delayed expire
###
basetime = pd.get_current_timestamp()
futuretime = pd.get_current_timestamp(3)
self.assertGreater(futuretime - basetime, 3)
pd.set_value('lateexp', 2)
# Success because data is not expired
self.assertEqual(pd.get('lateexp', basetime), 2)
# Fail because data is expired (simulated three seconds from now)
with self.assertRaises(InstrumentParameterExpirationException):
pd.get('lateexp', futuretime)
def test_regex_flags(self):
pdv = RegexParameter("foo",
r'.+foo=(\d+).+',
lambda match: int(match.group(1)),
lambda x: str(x),
regex_flags=re.DOTALL,
value=12)
# Assert something good with dotall update()
self.assertTrue(pdv)
pdv.update("\n\nfoo=1212\n\n")
self.assertEqual(pdv.get_value(), 1212)
# negative test with no regex_flags
pdv = RegexParameter("foo",
r'.+foo=(\d+).+',
lambda match: int(match.group(1)),
lambda x: str(x),
value=12)
# Assert something good with dotall update()
self.assertTrue(pdv)
pdv.update("\n\nfoo=1212\n\n")
self.assertEqual(pdv.get_value(), 12)
self.assertRaises(TypeError,
RegexParameter,
"foo",
r'.*foo=(\d+).*',
lambda match: int(match.group(1)),
lambda x: str(x),
regex_flags="bad flag",
value=12)
def test_format_current(self):
self.param_dict.add("test_format", r'.*foo=(\d+).*',
lambda match: int(match.group(1)),
lambda x: x + 5,
value=10)
self.assertEqual(self.param_dict.format("test_format", 20), 25)
self.assertEqual(self.param_dict.format("test_format"), 15)
self.assertRaises(KeyError,
self.param_dict.format, "bad_name")
def _assert_metadata_change(self):
new_dict = self.param_dict.generate_dict()
log.debug("Generated dictionary: %s", new_dict)
self.assertEqual(new_dict["qut"][ParameterDictKey.DESCRIPTION], "QutFileDesc")
self.assertEqual(new_dict["qut"][ParameterDictKey.DISPLAY_NAME], "QutDisplay")
self.assertEqual(new_dict["qut"][ParameterDictKey.VALUE][ParameterDictKey.UNITS], "QutFileUnits")
self.assertEqual(new_dict["qut"][ParameterDictKey.VALUE][ParameterDictKey.DESCRIPTION], "QutFileValueDesc")
self.assertEqual(new_dict["qut"][ParameterDictKey.VALUE][ParameterDictKey.TYPE], "QutFileType")
# Should come from hard code
# self.assertEqual(new_dict["qut"][ParameterDictKey.DISPLAY_NAME], "QutFileName")
# from base hard code
new_dict = self.param_dict.generate_dict()
self.assertEqual(new_dict["baz"][ParameterDictKey.DESCRIPTION],
"The baz parameter")
self.assertEqual(new_dict["baz"][ParameterDictKey.VALUE][ParameterDictKey.UNITS],
"nano-bazers")
self.assertEqual(new_dict["baz"][ParameterDictKey.VALUE][ParameterDictKey.DESCRIPTION],
"Should be an integer between 2 and 2000")
self.assertEqual(new_dict["baz"][ParameterDictKey.VALUE][ParameterDictKey.TYPE],
ParameterDictType.INT)
self.assertEqual(new_dict["baz"][ParameterDictKey.DISPLAY_NAME], "Baz")
self.assertTrue('extra_param' not in new_dict)
|
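# Illustrative sketch (not part of the test module above): a minimal model of the
# expiration check exercised by test_get. The class and exception names here are
# hypothetical; the real API is ProtocolParameterDict.get(name, baseline_timestamp),
# which raises InstrumentParameterExpirationException for stale values.
import time

class ExpiredError(Exception):
    pass

class ExpiringValue(object):
    def __init__(self, value, expiration=None):
        self.value = value
        self.expiration = expiration      # lifetime in seconds, or None for "never expires"
        self.set_time = time.time()       # timestamp of the last set

    def get(self, baseline=None):
        # Callers may supply the timestamp to check against (as test_get does with
        # basetime); otherwise the value is checked against the current time.
        now = time.time() if baseline is None else baseline
        if self.expiration is not None and now >= self.set_time + self.expiration:
            raise ExpiredError("value expired")
        return self.value

# Deterministic walk-through of the zero-expiration case from test_get.
v = ExpiringValue(2, expiration=0)
v.set_time = 100.0                        # pretend the value was stored at t=100
assert v.get(99.0) == 2                   # a baseline captured before the set still reads fine
expired = False
try:
    v.get(100.5)                          # "now" is at/after set time + 0s, so it is stale
except ExpiredError:
    expired = True
assert expired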
|
import sqlite3, uuid, sys, logging, time, os, json, zlib, hashlib, tempfile, multiprocessing
from util import mbtiles_connect, execute_commands_on_tile, process_tile, flip_y, prettify_connect_string
from util_check import check_mbtiles
from multiprocessing import Pool
logger = logging.getLogger(__name__)
def process_tiles(pool, tiles_to_process, con, count, total_tiles, start_time, print_progress, delete_vanished_tiles, known_tile_ids):
tmp_row_list = []
# Execute commands
processed_tiles = pool.map(process_tile, tiles_to_process)
for next_tile in processed_tiles:
tile_data = None
tile_id, tile_file_path, original_size, tile_x, tile_y, tile_z, tile_scale = next_tile['tile_id'], next_tile['filename'], next_tile['size'], next_tile['tile_x'], next_tile['tile_y'], next_tile['tile_z'], next_tile['tile_scale']
if os.path.isfile(tile_file_path):
            tmp_file = open(tile_file_path, "rb")  # tile blobs are binary image data
tile_data = tmp_file.read()
tmp_file.close()
os.remove(tile_file_path)
if tile_data and len(tile_data) > 0:
m = hashlib.md5()
m.update(tile_data)
new_tile_id = m.hexdigest()
known_tile_ids[tile_id] = new_tile_id
con.insert_tile_to_images(new_tile_id, tile_data)
tmp_row_list.append( (tile_z, tile_x, tile_y, tile_scale, new_tile_id, int(time.time())) )
else:
if delete_vanished_tiles:
logger.debug("Deleting vanished tile %s" % (tile_id, ))
con.expire_tile(tile_z, tile_x, tile_y, tile_scale)
else:
logger.error("tile %s vanished!" % (tile_id, ))
count = count + 1
if (count % 100) == 0:
logger.debug("%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
if print_progress:
sys.stdout.write("\r%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
sys.stdout.flush()
if len(tmp_row_list) > 0:
con.insert_tiles_to_map(tmp_row_list)
return count
def merge_mbtiles(mbtiles_file1, mbtiles_file2, **kwargs):
scale = kwargs.get('tile_scale', None)
zoom = kwargs.get('zoom', -1)
min_zoom = kwargs.get('min_zoom', 0)
max_zoom = kwargs.get('max_zoom', 18)
tmp_dir = kwargs.get('tmp_dir', None)
auto_commit = kwargs.get('auto_commit', False)
journal_mode = kwargs.get('journal_mode', 'wal')
synchronous_off = kwargs.get('synchronous_off', False)
min_timestamp = kwargs.get('min_timestamp', 0)
max_timestamp = kwargs.get('max_timestamp', 0)
delete_after_export = kwargs.get('delete_after_export', False)
print_progress = kwargs.get('progress', False)
delete_vanished_tiles = kwargs.get('delete_vanished_tiles', False)
flip_tile_y = kwargs.get('flip_y', False)
debug = kwargs.get('debug', False)
if tmp_dir and not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
if zoom >= 0:
min_zoom = max_zoom = zoom
check_before_merge = kwargs.get('check_before_merge', False)
if check_before_merge and not check_mbtiles(mbtiles_file2, **kwargs):
sys.stderr.write("The pre-merge check on %s failed\n" % (mbtiles_file2))
sys.exit(1)
con1 = mbtiles_connect(mbtiles_file1, auto_commit, journal_mode, synchronous_off, False, False)
con2 = mbtiles_connect(mbtiles_file2, auto_commit, journal_mode, synchronous_off, False, True)
con1.mbtiles_setup()
# if not con1.is_compacted():
# sys.stderr.write('To merge two mbtiles databases, the receiver must already be compacted\n')
# con1.close()
# con2.close()
# sys.exit(1)
if not con2.is_compacted() and (min_timestamp != 0 or max_timestamp != 0):
con1.close()
con2.close()
sys.stderr.write('min-timestamp/max-timestamp can only be used with compacted databases.\n')
sys.exit(1)
zoom_level_string = None
if min_zoom == max_zoom:
zoom_level_string = "zoom level %d" % (min_zoom)
else:
zoom_level_string = "zoom levels %d -> %d" % (min_zoom, max_zoom)
logger.info("Merging %s --> %s (%s)" % (prettify_connect_string(con2.connect_string), prettify_connect_string(con1.connect_string), zoom_level_string))
# Check that the old and new image formats are the same
original_format = new_format = None
try:
original_format = con1.metadata().get('format')
except:
pass
try:
new_format = con2.metadata().get('format')
except:
pass
if new_format == None:
logger.info("No image format found in the sending database, assuming 'png'")
new_format = "png"
if original_format != None and new_format != original_format:
con1.close()
con2.close()
sys.stderr.write('The files to merge must use the same image format (png or jpg)\n')
sys.exit(1)
if original_format == None and new_format != None:
con1.update_metadata("format", new_format)
if new_format == None:
new_format = original_format
count = 0
start_time = time.time()
chunk = 1000
total_tiles = 1
if print_progress or debug:
total_tiles = con2.tiles_count(min_zoom, max_zoom, min_timestamp, max_timestamp, scale)
if total_tiles == 0:
con1.close()
con2.close()
sys.stderr.write('No tiles to merge, exiting...\n')
return
logger.debug("%d tiles to merge" % (total_tiles))
if print_progress:
sys.stdout.write("%d tiles to merge\n" % (total_tiles))
sys.stdout.write("0 tiles merged (0% @ 0 tiles/sec)")
sys.stdout.flush()
# merge and process (--merge --execute)
    if con2.is_compacted() and kwargs.get('command_list'):
default_pool_size = kwargs.get('poolsize', -1)
if default_pool_size < 1:
default_pool_size = None
logger.debug("Using default pool size")
else:
logger.debug("Using pool size = %d" % (default_pool_size))
pool = Pool(default_pool_size)
multiprocessing.log_to_stderr(logger.level)
tiles_to_process = []
known_tile_ids = {}
for t in con2.tiles_with_tile_id(min_zoom, max_zoom, min_timestamp, max_timestamp, scale):
tile_z = t[0]
tile_x = t[1]
tile_y = t[2]
tile_scale = t[3]
tile_data = str(t[4])
tile_id = t[5]
if flip_tile_y:
tile_y = flip_y(tile_z, tile_y)
new_tile_id = known_tile_ids.get(tile_id)
if new_tile_id is None:
tmp_file_fd, tmp_file_name = tempfile.mkstemp(suffix=".%s" % (new_format), prefix="tile_", dir=tmp_dir)
                tmp_file = os.fdopen(tmp_file_fd, "wb")
tmp_file.write(tile_data)
tmp_file.close()
tiles_to_process.append({
'tile_id':tile_id,
'filename':tmp_file_name,
'format':new_format,
'size':len(tile_data),
'command_list':kwargs['command_list'],
'tile_x':tile_x,
'tile_y':tile_y,
'tile_z':tile_z,
'tile_scale':tile_scale
})
else:
con1.insert_tile_to_map(tile_z, tile_x, tile_y, tile_scale, new_tile_id)
count = count + 1
if (count % 100) == 0:
logger.debug("%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
if print_progress:
sys.stdout.write("\r%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
sys.stdout.flush()
if len(tiles_to_process) < chunk:
continue
count = process_tiles(pool, tiles_to_process, con1, count, total_tiles, start_time, print_progress, delete_vanished_tiles, known_tile_ids)
tiles_to_process = []
if len(tiles_to_process) > 0:
count = process_tiles(pool, tiles_to_process, con1, count, total_tiles, start_time, print_progress, delete_vanished_tiles, known_tile_ids)
# merge from a compacted database (--merge)
elif con2.is_compacted():
known_tile_ids = set()
tmp_images_list = []
tmp_row_list = []
tmp_tiles_list = []
for t in con2.tiles_with_tile_id(min_zoom, max_zoom, min_timestamp, max_timestamp, scale):
tile_z = t[0]
tile_x = t[1]
tile_y = t[2]
tile_scale = t[3]
tile_data = str(t[4])
tile_id = t[5]
if flip_tile_y:
tile_y = flip_y(tile_z, tile_y)
if con1.is_compacted():
if tile_id not in known_tile_ids:
tmp_images_list.append( (tile_id, tile_data) )
known_tile_ids.add(tile_id)
tmp_row_list.append( (tile_z, tile_x, tile_y, tile_scale, tile_id, int(time.time())) )
else:
tmp_tiles_list.append( (tile_z, tile_x, tile_y, tile_scale, tile_data, int(time.time())) )
count = count + 1
if (count % 100) == 0:
logger.debug("%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
if print_progress:
sys.stdout.write("\r%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
sys.stdout.flush()
if len(tmp_images_list) > 250:
con1.insert_tiles_to_images(tmp_images_list)
tmp_images_list = []
if len(tmp_row_list) > 250:
con1.insert_tiles_to_map(tmp_row_list)
tmp_row_list = []
if len(tmp_tiles_list) > 250:
con1.insert_tiles(tmp_tiles_list)
tmp_tiles_list = []
# Push the remaining rows to the database
if len(tmp_images_list) > 0:
con1.insert_tiles_to_images(tmp_images_list)
if len(tmp_row_list) > 0:
con1.insert_tiles_to_map(tmp_row_list)
if len(tmp_tiles_list) > 0:
con1.insert_tiles(tmp_tiles_list)
# merge an uncompacted database (--merge)
else:
known_tile_ids = set()
tmp_images_list = []
tmp_row_list = []
tmp_tiles_list = []
for t in con2.tiles(min_zoom, max_zoom, min_timestamp, max_timestamp, scale):
tile_z = t[0]
tile_x = t[1]
tile_y = t[2]
tile_scale = t[3]
tile_data = str(t[4])
if flip_tile_y:
tile_y = flip_y(tile_z, tile_y)
# Execute commands
if kwargs.get('command_list'):
tile_data = execute_commands_on_tile(kwargs['command_list'], new_format, tile_data, tmp_dir)
if con1.is_compacted():
m = hashlib.md5()
m.update(tile_data)
tile_id = m.hexdigest()
if tile_id not in known_tile_ids:
tmp_images_list.append( (tile_id, tile_data) )
known_tile_ids.add(tile_id)
tmp_row_list.append( (tile_z, tile_x, tile_y, tile_scale, tile_id, int(time.time())) )
else:
tmp_tiles_list.append( (tile_z, tile_x, tile_y, tile_scale, tile_data, int(time.time())) )
count = count + 1
if (count % 100) == 0:
logger.debug("%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
if print_progress:
sys.stdout.write("\r%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
sys.stdout.flush()
if len(tmp_images_list) > 250:
con1.insert_tiles_to_images(tmp_images_list)
tmp_images_list = []
if len(tmp_row_list) > 250:
con1.insert_tiles_to_map(tmp_row_list)
tmp_row_list = []
if len(tmp_tiles_list) > 250:
con1.insert_tiles(tmp_tiles_list)
tmp_tiles_list = []
# Push the remaining rows to the database
if len(tmp_images_list) > 0:
con1.insert_tiles_to_images(tmp_images_list)
if len(tmp_row_list) > 0:
con1.insert_tiles_to_map(tmp_row_list)
if len(tmp_tiles_list) > 0:
con1.insert_tiles(tmp_tiles_list)
if print_progress:
sys.stdout.write('\n')
logger.info("%d tiles merged (100.0%% @ %.1f tiles/sec)" % (count, count / (time.time() - start_time)))
if print_progress:
sys.stdout.write("%d tiles merged (100.0%% @ %.1f tiles/sec)\n" % (count, count / (time.time() - start_time)))
sys.stdout.flush()
if delete_after_export:
logger.debug("WARNING: Removing merged tiles from %s" % (mbtiles_file2))
con2.delete_tiles(min_zoom, max_zoom, min_timestamp, max_timestamp, scale)
con2.optimize_database(kwargs.get('skip_analyze', False), kwargs.get('skip_vacuum', False))
con1.close()
con2.close()
|
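# Illustrative sketch (separate from the module above): the de-duplication step used when
# merging into a compacted mbtiles database. Identical tile blobs hash to the same MD5 hex
# digest, so each unique blob is stored once (the "images" side) and every (z, x, y) row
# only references the digest (the "map" side). Real mbtiles schemas carry more columns;
# this shows just the hashing idea behind known_tile_ids.
import hashlib

def dedupe_tiles(tiles):
    """tiles: iterable of (z, x, y, tile_bytes) -> (unique blobs by id, map rows)."""
    images = {}   # tile_id -> tile_bytes, stored once per unique blob
    rows = []     # (z, x, y, tile_id) references into images
    for z, x, y, data in tiles:
        tile_id = hashlib.md5(data).hexdigest()
        if tile_id not in images:
            images[tile_id] = data
        rows.append((z, x, y, tile_id))
    return images, rows

# Two tiles with identical bytes yield one stored blob and two map rows.
images, rows = dedupe_tiles([(0, 0, 0, b"\x89PNG..."), (1, 0, 0, b"\x89PNG...")])
assert len(images) == 1 and len(rows) == 2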
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
TODO: module docs
"""
import sys
import os
import stat
try:
import pgdb
from gppylib.commands.unix import UserId
except ImportError, e:
sys.exit('Error: unable to import module: ' + str(e))
from gppylib import gplog
logger = gplog.get_default_logger()
class ConnectionError(StandardError): pass
class Pgpass():
""" Class for handling .pgpass file.
"""
entries = []
valid_pgpass = True
def __init__(self):
HOME = os.getenv('HOME')
PGPASSFILE = os.getenv('PGPASSFILE', '%s/.pgpass' % HOME)
if not os.path.exists(PGPASSFILE):
return
st_info = os.stat(PGPASSFILE)
mode = str(oct(st_info[stat.ST_MODE] & 0777))
if mode != "0600":
print 'WARNING: password file "%s" has group or world access; permissions should be u=rw (0600) or less' % PGPASSFILE
self.valid_pgpass = False
return
try:
fp = open(PGPASSFILE, 'r')
try:
lineno = 1
for line in fp:
line = line.strip()
if line.startswith('#'):
continue
try:
(hostname, port, database, username, password) = line.strip().split(':')
entry = {'hostname': hostname,
'port': port,
'database': database,
'username': username,
'password': password }
self.entries.append(entry)
except:
print 'Invalid line in .pgpass file. Line number %d' % lineno
lineno += 1
except IOError:
pass
finally:
if fp: fp.close()
except OSError:
pass
def get_password(self, username, hostname, port, database):
for entry in self.entries:
if ((entry['hostname'] == hostname or entry['hostname'] == '*') and
(entry['port'] == str(port) or entry['port'] == '*') and
(entry['database'] == database or entry['database'] == '*') and
(entry['username'] == username or entry['username'] == '*')):
return entry['password']
return None
def pgpass_valid(self):
return self.valid_pgpass
class DbURL:
""" DbURL is used to store all of the data required to get at a PG
or GP database.
"""
pghost='foo'
pgport=5432
pgdb='template1'
pguser='username'
pgpass='pass'
timeout=None
retries=None
def __init__(self,hostname=None,port=0,dbname=None,username=None,password=None,timeout=None,retries=None):
if hostname is None:
self.pghost = os.environ.get('PGHOST', 'localhost')
else:
self.pghost = hostname
        if port == 0:
self.pgport = int(os.environ.get('PGPORT', '5432'))
else:
self.pgport = int(port)
if dbname is None:
self.pgdb = os.environ.get('PGDATABASE', 'template1')
else:
self.pgdb = dbname
if username is None:
self.pguser = os.environ.get('PGUSER', os.environ.get('USER', UserId.local('Get uid')))
if self.pguser is None or self.pguser == '':
raise Exception('Both $PGUSER and $USER env variables are not set!')
else:
self.pguser = username
if password is None:
pgpass = Pgpass()
if pgpass.pgpass_valid():
password = pgpass.get_password(self.pguser, self.pghost, self.pgport, self.pgdb)
if password:
self.pgpass = password
else:
self.pgpass = os.environ.get('PGPASSWORD', None)
else:
self.pgpass = password
if timeout is not None:
self.timeout = int(timeout)
if retries is None:
self.retries = 1
else:
self.retries = int(retries)
def __str__(self):
# MPP-13617
def canonicalize(s):
if ':' not in s: return s
return '[' + s + ']'
return "%s:%d:%s:%s:%s" % \
(canonicalize(self.pghost),self.pgport,self.pgdb,self.pguser,self.pgpass)
def connect(dburl, utility=False, verbose=False, encoding=None, allowSystemTableMods=None, upgrade=False):
if utility:
options = '-c gp_session_role=utility'
else:
options = ''
# MPP-13779, et al
if allowSystemTableMods in ['dml', 'ddl', 'all']:
options += ' -c allow_system_table_mods=' + allowSystemTableMods
elif allowSystemTableMods is not None:
raise Exception('allowSystemTableMods invalid: %s' % allowSystemTableMods)
# gpmigrator needs gpstart to make master connection in maintenance mode
if upgrade:
options += ' -c gp_maintenance_conn=true'
# bypass pgdb.connect() and instead call pgdb._connect_
# to avoid silly issues with : in ipv6 address names and the url string
#
dbbase = dburl.pgdb
dbhost = dburl.pghost
dbport = int(dburl.pgport)
dbopt = options
dbtty = "1"
dbuser = dburl.pguser
dbpasswd = dburl.pgpass
timeout = dburl.timeout
cnx = None
# MPP-14121, use specified connection timeout
#
if timeout is not None:
cstr = "dbname=%s connect_timeout=%s" % (dbbase, timeout)
retries = dburl.retries
else:
cstr = "dbname=%s" % dbbase
retries = 1
(logger.info if timeout is not None else logger.debug)("Connecting to %s" % cstr)
for i in range(retries):
try:
cnx = pgdb._connect_(cstr, dbhost, dbport, dbopt, dbtty, dbuser, dbpasswd)
break
except pgdb.InternalError, e:
if 'timeout expired' in str(e):
logger.warning('Timeout expired connecting to %s, attempt %d/%d' % (dbbase, i+1, retries))
continue
raise
if cnx is None:
raise ConnectionError('Failed to connect to %s' % dbbase)
conn = pgdb.pgdbCnx(cnx)
#by default, libpq will print WARNINGS to stdout
if not verbose:
cursor=conn.cursor()
cursor.execute("SET CLIENT_MIN_MESSAGES='ERROR'")
conn.commit()
cursor.close()
# set client encoding if needed
if encoding:
cursor=conn.cursor()
cursor.execute("SET CLIENT_ENCODING='%s'" % encoding)
conn.commit()
cursor.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
conn.__class__.__enter__, conn.__class__.__exit__ = __enter__, __exit__
return conn
def execSQL(conn,sql):
"""
If necessary, user must invoke conn.commit().
Do *NOT* violate that API here without considering
the existing callers of this function.
"""
cursor=conn.cursor()
cursor.execute(sql)
return cursor
def execSQLForSingletonRow(conn, sql):
"""
Run SQL that returns exactly one row, and return that one row
    TODO: Handle like gppylib.system.configurationImplGpdb.fetchSingleOutputRow().
In the event of the wrong number of rows/columns, some logging would be helpful...
"""
cursor=conn.cursor()
cursor.execute(sql)
if cursor.rowcount != 1 :
raise UnexpectedRowsError(1, cursor.rowcount, sql)
res = cursor.fetchall()[0]
cursor.close()
return res
class UnexpectedRowsError(Exception):
def __init__(self, expected, actual, sql):
self.expected, self.actual, self.sql = expected, actual, sql
Exception.__init__(self, "SQL retrieved %d rows but %d was expected:\n%s" % \
(self.actual, self.expected, self.sql))
def execSQLForSingleton(conn, sql):
"""
Run SQL that returns exactly one row and one column, and return that cell
    TODO: Handle like gppylib.system.configurationImplGpdb.fetchSingleOutputRow().
In the event of the wrong number of rows/columns, some logging would be helpful...
"""
row = execSQLForSingletonRow(conn, sql)
if len(row) > 1:
raise Exception("SQL retrieved %d columns but 1 was expected:\n%s" % \
(len(row), sql))
return row[0]
def executeUpdateOrInsert(conn, sql, expectedRowUpdatesOrInserts):
cursor=conn.cursor()
cursor.execute(sql)
if cursor.rowcount != expectedRowUpdatesOrInserts :
raise Exception("SQL affected %s rows but %s were expected:\n%s" % \
(cursor.rowcount, expectedRowUpdatesOrInserts, sql))
return cursor
|
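# Illustrative sketch (separate from the module above): the .pgpass matching rule that
# Pgpass.get_password implements. Each line is hostname:port:database:username:password,
# a '*' field matches anything, and the first matching entry wins. Escaped colons and
# other corner cases of the real format are ignored here for brevity.
def lookup_pgpass(lines, hostname, port, database, username):
    for line in lines:
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        host_f, port_f, db_f, user_f, password = line.split(':')
        if (host_f in ('*', hostname) and
                port_f in ('*', str(port)) and
                db_f in ('*', database) and
                user_f in ('*', username)):
            return password
    return None

# A wildcard host/port entry supplies the password for any matching connection.
assert lookup_pgpass(['*:*:template1:gpadmin:secret'],
                     'mdw', 5432, 'template1', 'gpadmin') == 'secret'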
|
#!/usr/bin/env python
# encoding: utf-8
import copy
import cadnano.util as util
from cadnano.strand import Strand
from cadnano.cnproxy import ProxyObject, ProxySignal, UndoCommand
from .applycolorcmd import ApplyColorCommand
from .applysequencecmd import ApplySequenceCommand
from .removeoligocmd import RemoveOligoCommand
class Oligo(ProxyObject):
"""
Oligo is a group of Strands that are connected via 5' and/or 3'
    connections. It corresponds to the physical DNA strand, and is thus
    used for tracking and storing properties that are common to a single strand,
such as its color.
Commands that affect Strands (e.g. create, remove, merge, split) are also
responsible for updating the affected Oligos.
"""
def __init__(self, part, color=None):
# self.__class__.__base__.__init__(self, part)
super(Oligo, self).__init__(part)
self._part = part
self._strand5p = None
self._length = 0
self._is_loop = False
self._color = color if color else "#0066cc"
# end def
def __repr__(self):
cls_name = self.__class__.__name__
olg_id = str(id(self))[-4:]
if self._strand5p is not None:
strand_type = "Stap" if self.isStaple() else "Scaf"
vh_num = self._strand5p.strandSet().virtualHelix().number()
idx = self._strand5p.idx5Prime()
else:
strand_type = "None"
vh_num = -1
idx = -1
return "<%s %s>(%s %d[%d])" % (cls_name, olg_id, strand_type, vh_num, idx)
def shallowCopy(self):
olg = Oligo(self._part)
olg._strand5p = self._strand5p
olg._length = self._length
olg._is_loop = self._is_loop
olg._color = self._color
return olg
# end def
def deepCopy(self, part):
olg = Oligo(part)
olg._strand5p = None
olg._length = self._length
olg._is_loop = self._is_loop
olg._color = self._color
return olg
# end def
### SIGNALS ###
oligoIdentityChangedSignal = ProxySignal(ProxyObject, name='oligoIdentityChangedSignal') # new oligo
    oligoAppearanceChangedSignal = ProxySignal(ProxyObject, name='oligoAppearanceChangedSignal') # self
oligoSequenceAddedSignal = ProxySignal(ProxyObject, name='oligoSequenceAddedSignal') # self
oligoSequenceClearedSignal = ProxySignal(ProxyObject, name='oligoSequenceClearedSignal') # self
### SLOTS ###
### ACCESSORS ###
def color(self):
return self._color
# end def
def locString(self):
vh_num = self._strand5p.strandSet().virtualHelix().number()
idx = self._strand5p.idx5Prime()
return "%d[%d]" % (vh_num, idx)
# end def
def part(self):
return self._part
# end def
def strand5p(self):
return self._strand5p
# end def
def setStrand5p(self, strand):
self._strand5p = strand
# end def
def undoStack(self):
return self._part.undoStack()
# end def
### PUBLIC METHODS FOR QUERYING THE MODEL ###
def isLoop(self):
return self._is_loop
def isStaple(self):
if self._strand5p is not None:
return self._strand5p.isStaple()
else:
return False
def length(self):
return self._length
# end def
def sequence(self):
temp = self.strand5p()
if not temp:
return None
if temp.sequence():
return ''.join([Strand.sequence(strand) \
for strand in self.strand5p().generator3pStrand()])
else:
return None
# end def
def sequenceExport(self):
part = self.part()
vh_num5p = self.strand5p().virtualHelix().number()
strand5p = self.strand5p()
idx5p = strand5p.idx5Prime()
seq = ''
if self.isLoop():
# print("A loop exists")
            raise Exception("sequenceExport does not support circular (loop) oligos")
for strand in strand5p.generator3pStrand():
seq = seq + Strand.sequence(strand, for_export=True)
if strand.connection3p() == None: # last strand in the oligo
vh_num3p = strand.virtualHelix().number()
idx3p = strand.idx3Prime()
modseq5p, modseq5p_name = part.getModSequence(strand5p, idx5p, 0)
modseq3p, modseq3p_name = part.getModSequence(strand, idx3p, 1)
seq = modseq5p + seq + modseq3p
output = "%d[%d],%d[%d],%s,%s,%s,%s,%s\n" % \
(vh_num5p, idx5p, vh_num3p, idx3p, seq, len(seq),
self._color, modseq5p_name, modseq3p_name)
return output
# end def
def shouldHighlight(self):
if not self._strand5p:
return False
if self._strand5p.isScaffold():
return False
if self.length() < 18:
return True
if self.length() > 50:
return True
return False
# end def
### PUBLIC METHODS FOR EDITING THE MODEL ###
def remove(self, use_undostack=True):
c = RemoveOligoCommand(self)
        util.execCommandList(self, [c], desc="Remove Oligo", use_undostack=use_undostack)
# end def
def applyColor(self, color, use_undostack=True):
if color == self._color:
return # oligo already has color
c = ApplyColorCommand(self, color)
util.execCommandList(self, [c], desc="Color Oligo", use_undostack=use_undostack)
# end def
def applySequence(self, sequence, use_undostack=True):
c = ApplySequenceCommand(self, sequence)
util.execCommandList(self, [c], desc="Apply Sequence", use_undostack=use_undostack)
# end def
def applySequenceCMD(self, sequence, use_undostack=True):
return ApplySequenceCommand(self, sequence)
# end def
def setLoop(self, bool):
self._is_loop = bool
### PUBLIC SUPPORT METHODS ###
def addToPart(self, part):
self._part = part
self.setParent(part)
part.addOligo(self)
# end def
def destroy(self):
# QObject also emits a destroyed() Signal
self.setParent(None)
self.deleteLater()
# end def
def decrementLength(self, delta):
self.setLength(self._length-delta)
# end def
def incrementLength(self, delta):
self.setLength(self._length+delta)
# end def
def refreshLength(self):
temp = self.strand5p()
if not temp:
return
length = 0
for strand in temp.generator3pStrand():
length += strand.totalLength()
self.setLength(length)
# end def
def removeFromPart(self):
"""
This method merely disconnects the object from the model.
It still lives on in the undoStack until clobbered
Note: don't set self._part = None because we need to continue passing
the same reference around.
"""
self._part.removeOligo(self)
self.setParent(None)
# end def
def setColor(self, color):
self._color = color
# end def
def setLength(self, length):
before = self.shouldHighlight()
self._length = length
if before != self.shouldHighlight():
self.oligoSequenceClearedSignal.emit(self)
self.oligoAppearanceChangedSignal.emit(self)
# end def
def strandMergeUpdate(self, old_strand_low, old_strand_high, new_strand):
"""
This method sets the isLoop status of the oligo and the oligo's
5' strand.
"""
# check loop status
if old_strand_low.oligo() == old_strand_high.oligo():
self._is_loop = True
self._strand5p = new_strand
return
# leave the _strand5p as is?
# end if
# Now get correct 5p end to oligo
if old_strand_low.isDrawn5to3():
if old_strand_low.connection5p() is not None:
self._strand5p = old_strand_low.oligo()._strand5p
else:
self._strand5p = new_strand
else:
if old_strand_high.connection5p() is not None:
self._strand5p = old_strand_high.oligo()._strand5p
else:
self._strand5p = new_strand
# end if
# end def
def strandResized(self, delta):
"""
Called by a strand after resize. Delta is used to update the length,
        which may cause an appearance change.
"""
pass
# end def
def strandSplitUpdate(self, new_strand5p, new_strand3p, oligo3p, old_merged_strand):
"""
If the oligo is a loop, splitting the strand does nothing. If the
oligo isn't a loop, a new oligo must be created and assigned to the
new_strand and everything connected to it downstream.
"""
# if you split it can't be a loop
self._is_loop = False
if old_merged_strand.oligo().isLoop():
self._strand5p = new_strand3p
return
else:
if old_merged_strand.connection5p() == None:
self._strand5p = new_strand5p
else:
self._strand5p = old_merged_strand.oligo()._strand5p
oligo3p._strand5p = new_strand3p
# end else
# end def
# end class
|
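# Illustrative sketch (not cadnano code): the 5'->3' traversal that Oligo.sequence() and
# Oligo.refreshLength() rely on. Each strand knows its 3' neighbour, and walking the chain
# from the 5'-most strand visits every strand of the oligo once (generator3pStrand() in the
# real model). Class and attribute names below are hypothetical; a circular (loop) oligo
# would need an explicit stop condition that this sketch omits.
class MiniStrand(object):
    def __init__(self, sequence):
        self.sequence = sequence
        self.connection3p = None          # next strand toward the 3' end, or None

    def generator3p(self):
        node = self
        while node is not None:
            yield node
            node = node.connection3p

def oligo_sequence(strand5p):
    # Concatenate the per-strand sequences in 5'->3' order, as Oligo.sequence() does.
    return ''.join(s.sequence for s in strand5p.generator3p())

# Three connected strands read off as one continuous sequence from the 5' end.
a, b, c = MiniStrand("ATG"), MiniStrand("CCA"), MiniStrand("TTT")
a.connection3p, b.connection3p = b, c
assert oligo_sequence(a) == "ATGCCATTT"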
|
from __future__ import absolute_import
import email
import logging
import re
import time
import warnings
from collections import namedtuple
from itertools import takewhile
from ..exceptions import (
ConnectTimeoutError,
InvalidHeader,
MaxRetryError,
ProtocolError,
ProxyError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
# Data structure for representing the metadata of requests that result in a retry.
RequestHistory = namedtuple(
"RequestHistory", ["method", "url", "error", "status", "redirect_location"]
)
# TODO: In v2 we can remove this sentinel and metaclass with deprecated options.
_Default = object()
class _RetryMeta(type):
@property
def DEFAULT_METHOD_WHITELIST(cls):
warnings.warn(
"Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_METHODS_ALLOWED' instead",
DeprecationWarning,
)
return cls.DEFAULT_ALLOWED_METHODS
@DEFAULT_METHOD_WHITELIST.setter
def DEFAULT_METHOD_WHITELIST(cls, value):
warnings.warn(
"Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
DeprecationWarning,
)
cls.DEFAULT_ALLOWED_METHODS = value
@property
def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls):
warnings.warn(
"Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
DeprecationWarning,
)
return cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
@DEFAULT_REDIRECT_HEADERS_BLACKLIST.setter
def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls, value):
warnings.warn(
"Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
DeprecationWarning,
)
cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT = value
@six.add_metaclass(_RetryMeta)
class Retry(object):
"""Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
        A redirect is an HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int status:
How many times to retry on bad status codes.
These are retries made on responses, where status code matches
``status_forcelist``.
Set to ``0`` to fail on the first retry of this type.
:param int other:
How many times to retry on other errors.
Other errors are errors that are not connect, read, redirect or status errors.
These errors might be raised after the request was sent to the server, so the
request might have side-effects.
Set to ``0`` to fail on the first retry of this type.
If ``total`` is not set, it's a good idea to set this to 0 to account
for unexpected edge cases and avoid infinite retry loops.
:param iterable allowed_methods:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.
Set to a ``False`` value to retry on any verb.
.. warning::
Previously this parameter was named ``method_whitelist``, that
usage is deprecated in v1.26.0 and will be removed in v2.0.
:param iterable status_forcelist:
A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ``allowed_methods``
and the response status code is in ``status_forcelist``.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ** ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
:param tuple history: The history of the request encountered during
each call to :meth:`~Retry.increment`. The list is in the order
the requests occurred. Each list item is of class :class:`RequestHistory`.
:param bool respect_retry_after_header:
Whether to respect Retry-After header on status codes defined as
:attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
:param iterable remove_headers_on_redirect:
Sequence of headers to remove from the request when a response
indicating a redirect is returned before firing off the redirected
request.
"""
#: Default methods to be used for ``allowed_methods``
DEFAULT_ALLOWED_METHODS = frozenset(
["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
)
    #: Status codes for which a Retry-After header is respected by default
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
#: Default headers to be used for ``remove_headers_on_redirect``
DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Authorization"])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(
self,
total=10,
connect=None,
read=None,
redirect=None,
status=None,
other=None,
allowed_methods=_Default,
status_forcelist=None,
backoff_factor=0,
raise_on_redirect=True,
raise_on_status=True,
history=None,
respect_retry_after_header=True,
remove_headers_on_redirect=_Default,
# TODO: Deprecated, remove in v2.0
method_whitelist=_Default,
):
if method_whitelist is not _Default:
if allowed_methods is not _Default:
raise ValueError(
"Using both 'allowed_methods' and "
"'method_whitelist' together is not allowed. "
"Instead only use 'allowed_methods'"
)
warnings.warn(
"Using 'method_whitelist' with Retry is deprecated and "
"will be removed in v2.0. Use 'allowed_methods' instead",
DeprecationWarning,
stacklevel=2,
)
allowed_methods = method_whitelist
if allowed_methods is _Default:
allowed_methods = self.DEFAULT_ALLOWED_METHODS
if remove_headers_on_redirect is _Default:
remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
self.total = total
self.connect = connect
self.read = read
self.status = status
self.other = other
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.allowed_methods = allowed_methods
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self.raise_on_status = raise_on_status
self.history = history or tuple()
self.respect_retry_after_header = respect_retry_after_header
self.remove_headers_on_redirect = frozenset(
[h.lower() for h in remove_headers_on_redirect]
)
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect,
read=self.read,
redirect=self.redirect,
status=self.status,
other=self.other,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
raise_on_status=self.raise_on_status,
history=self.history,
remove_headers_on_redirect=self.remove_headers_on_redirect,
respect_retry_after_header=self.respect_retry_after_header,
)
# TODO: If already given in **kw we use what's given to us
# If not given we need to figure out what to pass. We decide
# based on whether our class has the 'method_whitelist' property
# and if so we pass the deprecated 'method_whitelist' otherwise
# we use 'allowed_methods'. Remove in v2.0
if "method_whitelist" not in kw and "allowed_methods" not in kw:
if "method_whitelist" in self.__dict__:
warnings.warn(
"Using 'method_whitelist' with Retry is deprecated and "
"will be removed in v2.0. Use 'allowed_methods' instead",
DeprecationWarning,
)
params["method_whitelist"] = self.allowed_methods
else:
params["allowed_methods"] = self.allowed_methods
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r", retries, new_retries)
return new_retries
def get_backoff_time(self):
"""Formula for computing the current backoff
:rtype: float
"""
# We want to consider only the last consecutive errors sequence (Ignore redirects).
consecutive_errors_len = len(
list(
takewhile(lambda x: x.redirect_location is None, reversed(self.history))
)
)
if consecutive_errors_len <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
return min(self.BACKOFF_MAX, backoff_value)
def parse_retry_after(self, retry_after):
# Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
if re.match(r"^\s*[0-9]+\s*$", retry_after):
seconds = int(retry_after)
else:
retry_date_tuple = email.utils.parsedate_tz(retry_after)
if retry_date_tuple is None:
raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
if retry_date_tuple[9] is None: # Python 2
# Assume UTC if no timezone was specified
# On Python2.7, parsedate_tz returns None for a timezone offset
# instead of 0 if no timezone is given, where mktime_tz treats
# a None timezone offset as local time.
retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]
retry_date = email.utils.mktime_tz(retry_date_tuple)
seconds = retry_date - time.time()
if seconds < 0:
seconds = 0
return seconds
def get_retry_after(self, response):
""" Get the value of Retry-After in seconds. """
retry_after = response.getheader("Retry-After")
if retry_after is None:
return None
return self.parse_retry_after(retry_after)
def sleep_for_retry(self, response=None):
retry_after = self.get_retry_after(response)
if retry_after:
time.sleep(retry_after)
return True
return False
def _sleep_backoff(self):
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def sleep(self, response=None):
"""Sleep between retry attempts.
This method will respect a server's ``Retry-After`` response header
and sleep the duration of the time requested. If that is not present, it
will use an exponential backoff. By default, the backoff factor is 0 and
this method will return immediately.
"""
if self.respect_retry_after_header and response:
slept = self.sleep_for_retry(response)
if slept:
return
self._sleep_backoff()
def _is_connection_error(self, err):
"""Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
if isinstance(err, ProxyError):
err = err.original_error
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
"""Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def _is_method_retryable(self, method):
"""Checks if a given HTTP method should be retried upon, depending if
it is included in the allowed_methods
"""
# TODO: For now favor if the Retry implementation sets its own method_whitelist
# property outside of our constructor to avoid breaking custom implementations.
if "method_whitelist" in self.__dict__:
warnings.warn(
"Using 'method_whitelist' with Retry is deprecated and "
"will be removed in v2.0. Use 'allowed_methods' instead",
DeprecationWarning,
)
allowed_methods = self.method_whitelist
else:
allowed_methods = self.allowed_methods
if allowed_methods and method.upper() not in allowed_methods:
return False
return True
def is_retry(self, method, status_code, has_retry_after=False):
"""Is this method/status code retryable? (Based on allowlists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
be retried upon on the presence of the aforementioned header)
"""
if not self._is_method_retryable(method):
return False
if self.status_forcelist and status_code in self.status_forcelist:
return True
return (
self.total
and self.respect_retry_after_header
and has_retry_after
and (status_code in self.RETRY_AFTER_STATUS_CODES)
)
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (
self.total,
self.connect,
self.read,
self.redirect,
self.status,
self.other,
)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(
self,
method=None,
url=None,
response=None,
error=None,
_pool=None,
_stacktrace=None,
):
"""Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
connect = self.connect
read = self.read
redirect = self.redirect
status_count = self.status
other = self.other
cause = "unknown"
status = None
redirect_location = None
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
elif error and self._is_read_error(error):
# Read retry?
if read is False or not self._is_method_retryable(method):
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
elif error:
# Other retry?
if other is not None:
other -= 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = "too many redirects"
redirect_location = response.get_redirect_location()
status = response.status
else:
# Incrementing because of a server error like a 500 in
# status_forcelist and the given method is in the allowed_methods
cause = ResponseError.GENERIC_ERROR
if response and response.status:
if status_count is not None:
status_count -= 1
cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
status = response.status
history = self.history + (
RequestHistory(method, url, error, status, redirect_location),
)
new_retry = self.new(
total=total,
connect=connect,
read=read,
redirect=redirect,
status=status_count,
other=other,
history=history,
)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
return new_retry
def __repr__(self):
return (
"{cls.__name__}(total={self.total}, connect={self.connect}, "
"read={self.read}, redirect={self.redirect}, status={self.status})"
).format(cls=type(self), self=self)
def __getattr__(self, item):
if item == "method_whitelist":
# TODO: Remove this deprecated alias in v2.0
warnings.warn(
"Using 'method_whitelist' with Retry is deprecated and "
"will be removed in v2.0. Use 'allowed_methods' instead",
DeprecationWarning,
)
return self.allowed_methods
try:
return getattr(super(Retry, self), item)
except AttributeError:
return getattr(Retry, item)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
|
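# Illustrative sketch (separate from the module above): the schedule computed by
# Retry.get_backoff_time(). The first retry sleeps 0; after that the sleep is
# backoff_factor * 2**(consecutive_errors - 1), capped at BACKOFF_MAX.
BACKOFF_MAX = 120

def backoff_time(backoff_factor, consecutive_errors):
    if consecutive_errors <= 1:
        return 0
    return min(BACKOFF_MAX, backoff_factor * (2 ** (consecutive_errors - 1)))

# With backoff_factor=0.1 the docstring's 0.0s, 0.2s, 0.4s, ... schedule falls out.
assert [backoff_time(0.1, n) for n in (1, 2, 3, 4)] == [0, 0.2, 0.4, 0.8]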
|
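# Illustrative sketch (companion to the Retry class above): the two Retry-After formats
# handled by Retry.parse_retry_after(). The header carries either a plain number of
# seconds or an HTTP-date; a date in the past clamps to zero. The Python 2 timezone
# workaround from the real implementation is omitted here.
import email.utils
import re
import time

def retry_after_seconds(value, now=None):
    now = time.time() if now is None else now
    if re.match(r"^\s*[0-9]+\s*$", value):
        return int(value)
    parsed = email.utils.parsedate_tz(value)
    if parsed is None:
        raise ValueError("not a valid Retry-After value: %r" % value)
    return max(0, email.utils.mktime_tz(parsed) - now)

assert retry_after_seconds("120") == 120
assert retry_after_seconds("Fri, 01 Jan 1999 00:00:00 GMT") == 0   # date in the past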
#!/usr/bin/env python
import sys
import os
import dropbox
import subprocess
import requests
import json
from dropbox.exceptions import AuthError
import yaml
import pipes
class Config(object):
def __init__(self):
self.homedir = os.path.expanduser("~")
self.our_dir = os.path.join(self.homedir, ".dropblame")
self.config_path = os.path.join(self.our_dir, "config.yml")
self.storage_dir = os.path.join(self.our_dir, "storage")
self.dropbox_dir = None
self.token = None
if not os.path.exists(self.config_path):
print("Creating new config file at ~/.dropblame/config.yml")
if not os.path.exists(self.our_dir):
os.makedirs(self.our_dir)
self.save_config()
self.load_config()
def read_dropbox_dir(self):
while self.dropbox_dir is None:
path = raw_input("Please enter the path to your Dropbox " +
"directory (default: ~/Dropbox): ").strip()
if path == '':
path = "~/Dropbox"
if not os.path.exists(os.path.expanduser(path)):
print("No directory could be found at {0}".format(path))
continue
self.dropbox_dir = os.path.expanduser(path)
def read_token(self):
print("\nTo link this to Dropbox, you will first need to generate " +
"an access token: https://blogs.dropbox.com/developers/2014/"
"05/generate-an-access-token-for-your-own-account/")
while self.token is None:
token = raw_input("Enter your token here: ").strip()
if token == '':
continue
print("Testing your token now...")
dbx = dropbox.Dropbox(token)
try:
dbx.users_get_current_account()
except AuthError:
print("ERROR: Invalid access token. Please try again!")
continue
print("Token looks good, thanks!")
self.token = token
def load_config(self):
data = {}
with open(self.config_path, 'r') as f:
            data = yaml.safe_load(f.read())  # safe_load: the config is plain key/value data
if 'dropbox_dir' in data:
self.dropbox_dir = data['dropbox_dir']
else:
self.read_dropbox_dir()
if 'token' in data:
self.token = data['token']
else:
self.read_token()
self.save_config()
def save_config(self):
data = {}
if self.dropbox_dir is not None:
data['dropbox_dir'] = self.dropbox_dir
if self.token is not None:
data['token'] = self.token
yaml_text = yaml.dump(data, default_flow_style=False)
with open(self.config_path, 'w') as f:
f.write(yaml_text)
def cmd(line, cwd=None):
p = subprocess.Popen(line, shell=True, cwd=cwd, stdout=subprocess.PIPE)
return p.communicate()[0]
# Convert the revision history of a given file into a git repository
def sync_repo(filepath):
basename = os.path.basename(filepath)
relpath = os.path.relpath(os.path.realpath(filepath),
os.path.realpath(config.dropbox_dir))
gitdir = os.path.join(config.storage_dir, relpath)
if not os.path.exists(gitdir):
os.makedirs(gitdir)
revs = [entry.rev for entry in
dbx.files_list_revisions("/"+relpath, limit=100).entries]
revs.reverse()
current_revs = []
if os.path.exists(os.path.join(gitdir, ".git")):
current_revs = cmd("git log --format=%B", gitdir).split()
else:
cmd("git init", gitdir)
# As we find more user ids who contributed to the file, we
# request and cache their info here
userinfo = {}
missing_revs = [rev for rev in revs if rev not in current_revs]
if len(missing_revs) > 0:
print("Found {0} new revisions to download for {1}".
format(len(missing_revs), relpath))
i = 0
for rev in missing_revs:
i += 1
localpath = os.path.join(gitdir, basename)
revpath = "rev:{0}".format(rev)
print("{0}/{1} Fetching revision {2}".
format(i, len(missing_revs), rev))
# Bypass dropbox python package due to missing sharing_info
# https://github.com/dropbox/dropbox-sdk-python/issues/40
r = requests.post(
"https://api.dropboxapi.com/2/files/get_metadata",
headers={'Authorization': "Bearer {0}".format(config.token),
'Content-Type': "application/json"},
data=json.dumps({'path': revpath}))
meta = json.loads(r.text)
author_name = "You"
if 'sharing_info' in meta:
author_id = meta['sharing_info']['modified_by']
if author_id not in userinfo:
userinfo[author_id] = dbx.users_get_account(author_id)
author_name = userinfo[author_id].name.display_name
dbx.files_download_to_file(localpath, revpath)
cmd(("git add -A . && git commit -m {0} --author=\"{1} " +
"<[email protected]>\" --date=\"{2}\"").
format(rev, pipes.quote(author_name), meta['client_modified']),
gitdir)
return gitdir
def print_usage():
usage = """
USAGE
{0} blame /path/to/Dropbox/file
Syncs Dropbox revisions to a git repo and runs git blame. Any additional
arguments will be passed to git blame.
{1} cd /path/to/Dropbox/file
Syncs Dropbox revisions to a git repo and then opens a shell there, if
you want to run diff or other operations. Note that the repo is readonly.
---
The first time you run drop you will be asked for configuration details to
connect to Dropbox, which will be stored in ~/.dropblame/config.yml.
Note that this tool can only go back as far as the Dropbox API will allow,
which is currently 100 revisions.
""".format(os.path.basename(sys.argv[0]),
os.path.basename(sys.argv[0])).strip()
print(usage)
def main():
global config, dbx
config = Config()
dbx = dropbox.Dropbox(config.token)
if len(sys.argv) < 3:
print_usage()
sys.exit(1)
if sys.argv[1] == "help":
print_usage()
sys.exit(0)
if sys.argv[1] not in ["blame", "cd"]:
print_usage()
sys.exit(1)
path = os.path.expanduser(sys.argv[2])
if not os.path.exists(path):
print("cannot access {0}: No such file or directory".
format(sys.argv[2]))
sys.exit(1)
gitdir = sync_repo(path)
if sys.argv[1] == "cd":
p = subprocess.Popen("$SHELL", shell=True, cwd=gitdir)
p.wait()
else:
cmd = ['git', 'blame', os.path.basename(path)] + sys.argv[3:]
p = subprocess.Popen(cmd, cwd=gitdir)
p.wait()
if __name__ == '__main__':
main()
|
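# Illustrative sketch (separate from the script above): the incremental-sync rule inside
# sync_repo(). Because every commit message is exactly one Dropbox rev id, the output of
# `git log --format=%B` doubles as the set of already-imported revisions, and only the
# missing ones need to be downloaded on the next run.
def revisions_to_fetch(dropbox_revs, git_log_output):
    already_imported = set(git_log_output.split())
    return [rev for rev in dropbox_revs if rev not in already_imported]

# Oldest-first revision list against a repo that already holds the first two revisions.
assert revisions_to_fetch(["a1", "b2", "c3"], "b2\n\na1\n") == ["c3"]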
|
import warnings
from io import StringIO
from django.template.base import Lexer, TokenType
from django.utils.regex_helper import _lazy_re_compile
from . import TranslatorCommentWarning, trim_whitespace
TRANSLATOR_COMMENT_MARK = 'Translators'
dot_re = _lazy_re_compile(r'\S')
def blankout(src, char):
"""
Change every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
context_re = _lazy_re_compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = _lazy_re_compile(
# Match the trans/translate 'some text' part.
r"""^\s*trans(?:late)?\s+((?:"[^"]*?")|(?:'[^']*?'))"""
# Match and ignore optional filters
r"""(?:\s*\|\s*[^\s:]+(?::(?:[^\s'":]+|(?:"[^"]*?")|(?:'[^']*?')))?)*"""
# Match the optional context part
r"""(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*"""
)
block_re = _lazy_re_compile(r"""^\s*blocktrans(?:late)?(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
endblock_re = _lazy_re_compile(r"""^\s*endblocktrans(?:late)?$""")
plural_re = _lazy_re_compile(r"""^\s*plural$""")
constant_re = _lazy_re_compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
def templatize(src, origin=None):
"""
Turn a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
out = StringIO('')
message_context = None
intrans = False
inplural = False
trimmed = False
singular = []
plural = []
incomment = False
comment = []
lineno_comment_map = {}
comment_lineno_cache = None
# Adding the u prefix allows gettext to recognize the string (#26093).
raw_prefix = 'u'
def join_tokens(tokens, trim=False):
message = ''.join(tokens)
if trim:
message = trim_whitespace(message)
return message
for t in Lexer(src).tokenize():
if incomment:
if t.token_type == TokenType.BLOCK and t.contents == 'endcomment':
content = ''.join(comment)
translators_comment_start = None
for lineno, line in enumerate(content.splitlines(True)):
if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
translators_comment_start = lineno
for lineno, line in enumerate(content.splitlines(True)):
if translators_comment_start is not None and lineno >= translators_comment_start:
out.write(' # %s' % line)
else:
out.write(' #\n')
incomment = False
comment = []
else:
comment.append(t.contents)
elif intrans:
if t.token_type == TokenType.BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
if message_context:
out.write(' npgettext({p}{!r}, {p}{!r}, {p}{!r},count) '.format(
message_context,
join_tokens(singular, trimmed),
join_tokens(plural, trimmed),
p=raw_prefix,
))
else:
out.write(' ngettext({p}{!r}, {p}{!r}, count) '.format(
join_tokens(singular, trimmed),
join_tokens(plural, trimmed),
p=raw_prefix,
))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
if message_context:
out.write(' pgettext({p}{!r}, {p}{!r}) '.format(
message_context,
join_tokens(singular, trimmed),
p=raw_prefix,
))
else:
out.write(' gettext({p}{!r}) '.format(
join_tokens(singular, trimmed),
p=raw_prefix,
))
for part in singular:
out.write(blankout(part, 'S'))
message_context = None
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
raise SyntaxError(
"Translation blocks must not include other block tags: "
"%s (%sline %d)" % (t.contents, filemsg, t.lineno)
)
elif t.token_type == TokenType.VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TokenType.TEXT:
contents = t.contents.replace('%', '%%')
if inplural:
plural.append(contents)
else:
singular.append(contents)
else:
# Handle comment tokens (`{# ... #}`) plus other constructs on
# the same line:
if comment_lineno_cache is not None:
cur_lineno = t.lineno + t.contents.count('\n')
if comment_lineno_cache == cur_lineno:
if t.token_type != TokenType.COMMENT:
for c in lineno_comment_map[comment_lineno_cache]:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
warn_msg = (
"The translator-targeted comment '%s' "
"(%sline %d) was ignored, because it wasn't "
"the last item on the line."
) % (c, filemsg, comment_lineno_cache)
warnings.warn(warn_msg, TranslatorCommentWarning)
lineno_comment_map[comment_lineno_cache] = []
else:
out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
comment_lineno_cache = None
if t.token_type == TokenType.BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch[1]
if g[0] == '"':
g = g.strip('"')
elif g[0] == "'":
g = g.strip("'")
g = g.replace('%', '%%')
if imatch[2]:
# A context is provided
context_match = context_re.match(imatch[2])
message_context = context_match[1]
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
out.write(' pgettext({p}{!r}, {p}{!r}) '.format(
message_context, g, p=raw_prefix
))
message_context = None
else:
out.write(' gettext({p}{!r}) '.format(g, p=raw_prefix))
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
if bmatch[1]:
# A context is provided
context_match = context_re.match(bmatch[1])
message_context = context_match[1]
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
intrans = True
inplural = False
trimmed = 'trimmed' in t.split_contents()
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(' _(%s) ' % cmatch)
elif t.contents == 'comment':
incomment = True
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TokenType.VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(' _(%s) ' % cmatch[1])
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(' %s ' % p.split(':', 1)[1])
else:
out.write(blankout(p, 'F'))
elif t.token_type == TokenType.COMMENT:
if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
lineno_comment_map.setdefault(t.lineno, []).append(t.contents)
comment_lineno_cache = t.lineno
else:
out.write(blankout(t.contents, 'X'))
return out.getvalue()
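# Rough sketch (not part of the original module) of what templatize() emits.
# For a template fragment such as
#   {% trans "Hello" %}{% blocktrans count n=items %}one{% plural %}{{ n }}{% endblocktrans %}
# the output contains gettext-style calls roughly like
#   " gettext(u'Hello') "  and  " ngettext(u'one', u'%(n)s', count) "
# with all remaining template text blanked out, so xgettext can extract the
# messages as if it were reading Python source.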
|
|
import os, sys, time, shutil, multiprocessing, subprocess
from runner import BrowserCore, path_from_root
from tools.shared import *
def clean_pids(pids):
import signal, errno
def pid_exists(pid):
try:
# NOTE: may just kill the process in Windows
os.kill(pid, 0)
except OSError, e:
return e.errno == errno.EPERM
else:
return True
def kill_pids(pids, sig):
for pid in pids:
if not pid_exists(pid):
break
print '[killing %d]' % pid
try:
os.kill(pid, sig)
print '[kill succeeded]'
except:
print '[kill fail]'
# ask nicely (to try and catch the children)
kill_pids(pids, signal.SIGTERM)
time.sleep(1)
# extreme prejudice, may leave children
kill_pids(pids, signal.SIGKILL)
def make_relay_server(port1, port2):
print >> sys.stderr, 'creating relay server on ports %d,%d' % (port1, port2)
proc = Popen([PYTHON, path_from_root('tests', 'sockets', 'socket_relay.py'), str(port1), str(port2)])
return proc
class WebsockifyServerHarness:
def __init__(self, filename, args, listen_port):
self.pids = []
self.filename = filename
self.listen_port = listen_port
self.target_port = listen_port-1
self.args = args or []
def __enter__(self):
import socket, websockify
# compile the server
# NOTE empty filename support is a hack to support
# the current test_enet
if self.filename:
Popen([CLANG_CC, path_from_root('tests', self.filename), '-o', 'server', '-DSOCKK=%d' % self.target_port] + get_clang_native_args() + self.args).communicate()
process = Popen([os.path.abspath('server')])
self.pids.append(process.pid)
# start the websocket proxy
print >> sys.stderr, 'running websockify on %d, forward to tcp %d' % (self.listen_port, self.target_port)
wsp = websockify.WebSocketProxy(verbose=True, listen_port=self.listen_port, target_host="127.0.0.1", target_port=self.target_port, run_once=True)
self.websockify = multiprocessing.Process(target=wsp.start_server)
self.websockify.start()
self.pids.append(self.websockify.pid)
print '[Websockify on process %s]' % str(self.pids[-2:])
def __exit__(self, *args, **kwargs):
# try to kill the websockify proxy gracefully
if self.websockify.is_alive():
self.websockify.terminate()
self.websockify.join()
# clean up any processes we started
clean_pids(self.pids)
class CompiledServerHarness:
def __init__(self, filename, args, listen_port):
self.pids = []
self.filename = filename
self.listen_port = listen_port
self.args = args or []
def __enter__(self):
# assuming this is only used for WebSocket tests at the moment, validate that
# the ws module is installed
child = Popen(NODE_JS + ['-e', 'require("ws");'])
child.communicate()
assert child.returncode == 0, 'ws module for Node.js not installed. Please run \'npm install\' from %s' % EMSCRIPTEN_ROOT
# compile the server
Popen([PYTHON, EMCC, path_from_root('tests', self.filename), '-o', 'server.js', '-DSOCKK=%d' % self.listen_port] + self.args).communicate()
process = Popen(NODE_JS + ['server.js'])
self.pids.append(process.pid)
def __exit__(self, *args, **kwargs):
# clean up any processes we started
clean_pids(self.pids)
# always run these tests last
# make sure to use different ports in each one because it takes a while for the processes to be cleaned up
# NOTE all datagram tests are temporarily disabled, as
# we can't truly test datagram sockets until we have
# proper listen server support.
def filter_harnesses(harnesses):
# XXX avoid websockify for now due to intermittent errors. see issue #2700
return filter(lambda harness: (harness[0].__class__ if type(harness) is tuple else harness.__class__) is not WebsockifyServerHarness, harnesses)
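# Hedged sketch (not part of the original file) of how the harnesses above are
# used by the tests below: each harness is a context manager that builds and
# starts an echo server, and the browser/node client is then compiled against
# the matching port (49999 here is only a placeholder), e.g.
#   with CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [], 49999):
#     ... build and run the client with '-DSOCKK=49999' ...
# filter_harnesses() strips the WebsockifyServerHarness entries while they
# remain flaky (see issue #2700 above).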
class sockets(BrowserCore):
def test_inet(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
int main() {
printf("*%x,%x,%x,%x,%x,%x*\n", htonl(0xa1b2c3d4), htonl(0xfe3572e0), htonl(0x07abcdf0), htons(0xabcd), ntohl(0x43211234), ntohs(0xbeaf));
in_addr_t i = inet_addr("190.180.10.78");
printf("%x\n", i);
return 0;
}
'''
self.do_run(src, '*d4c3b2a1,e07235fe,f0cdab07,cdab,34122143,afbe*\n4e0ab4be\n')
def test_inet2(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
int main() {
struct in_addr x, x2;
int *y = (int*)&x;
*y = 0x12345678;
printf("%s\n", inet_ntoa(x));
int r = inet_aton(inet_ntoa(x), &x2);
printf("%s\n", inet_ntoa(x2));
return 0;
}
'''
self.do_run(src, '120.86.52.18\n120.86.52.18\n')
def test_inet3(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
int main() {
char dst[64];
struct in_addr x, x2;
int *y = (int*)&x;
*y = 0x12345678;
printf("%s\n", inet_ntop(AF_INET,&x,dst,sizeof dst));
int r = inet_aton(inet_ntoa(x), &x2);
printf("%s\n", inet_ntop(AF_INET,&x2,dst,sizeof dst));
return 0;
}
'''
self.do_run(src, '120.86.52.18\n120.86.52.18\n')
def test_inet4(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2')
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
void test(char *test_addr){
char str[40];
struct in6_addr addr;
unsigned char *p = (unsigned char*)&addr;
int ret;
ret = inet_pton(AF_INET6,test_addr,&addr);
if(ret == -1) return;
if(ret == 0) return;
if(inet_ntop(AF_INET6,&addr,str,sizeof(str)) == NULL ) return;
printf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x - %s\n",
p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9],p[10],p[11],p[12],p[13],p[14],p[15],str);
}
int main(){
test("::");
test("::1");
test("::1.2.3.4");
test("::17.18.19.20");
test("::ffff:1.2.3.4");
test("1::ffff");
test("::255.255.255.255");
test("0:ff00:1::");
test("0:ff::");
test("abcd::");
test("ffff::a");
test("ffff::a:b");
test("ffff::a:b:c");
test("ffff::a:b:c:d");
test("ffff::a:b:c:d:e");
test("::1:2:0:0:0");
test("0:0:1:2:3::");
test("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff");
test("1::255.255.255.255");
//below should fail and not produce results..
test("1.2.3.4");
test("");
test("-");
}
'''
self.do_run(src,
"0000:0000:0000:0000:0000:0000:0000:0000 - ::\n"
"0000:0000:0000:0000:0000:0000:0000:0001 - ::1\n"
"0000:0000:0000:0000:0000:0000:0102:0304 - ::1.2.3.4\n"
"0000:0000:0000:0000:0000:0000:1112:1314 - ::17.18.19.20\n"
"0000:0000:0000:0000:0000:ffff:0102:0304 - ::ffff:1.2.3.4\n"
"0001:0000:0000:0000:0000:0000:0000:ffff - 1::ffff\n"
"0000:0000:0000:0000:0000:0000:ffff:ffff - ::255.255.255.255\n"
"0000:ff00:0001:0000:0000:0000:0000:0000 - 0:ff00:1::\n"
"0000:00ff:0000:0000:0000:0000:0000:0000 - 0:ff::\n"
"abcd:0000:0000:0000:0000:0000:0000:0000 - abcd::\n"
"ffff:0000:0000:0000:0000:0000:0000:000a - ffff::a\n"
"ffff:0000:0000:0000:0000:0000:000a:000b - ffff::a:b\n"
"ffff:0000:0000:0000:0000:000a:000b:000c - ffff::a:b:c\n"
"ffff:0000:0000:0000:000a:000b:000c:000d - ffff::a:b:c:d\n"
"ffff:0000:0000:000a:000b:000c:000d:000e - ffff::a:b:c:d:e\n"
"0000:0000:0000:0001:0002:0000:0000:0000 - ::1:2:0:0:0\n"
"0000:0000:0001:0002:0003:0000:0000:0000 - 0:0:1:2:3::\n"
"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff - ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff\n"
"0001:0000:0000:0000:0000:0000:ffff:ffff - 1::ffff:ffff\n"
)
def test_getaddrinfo(self):
self.emcc_args=[]
self.do_run(open(path_from_root('tests', 'sockets', 'test_getaddrinfo.c')).read(), 'success')
def test_getnameinfo(self):
self.do_run(open(path_from_root('tests', 'sockets', 'test_getnameinfo.c')).read(), 'success')
def test_gethostbyname(self):
self.do_run(open(path_from_root('tests', 'sockets', 'test_gethostbyname.c')).read(), 'success')
def test_getprotobyname(self):
self.do_run(open(path_from_root('tests', 'sockets', 'test_getprotobyname.c')).read(), 'success')
def test_sockets_echo(self):
sockets_include = '-I'+path_from_root('tests', 'sockets')
# Websockify-proxied servers can't run dgram tests
harnesses = [
(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49160), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49161), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49162), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1'], 49163), 0)
]
harnesses = filter_harnesses(harnesses)
for harness, datagram in harnesses:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, sockets_include])
def test_sockets_async_echo(self):
# Run with ./runner.py sockets.test_sockets_async_echo
sockets_include = '-I'+path_from_root('tests', 'sockets')
# Websockify-proxied servers can't run dgram tests
harnesses = [
(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_ASYNC=1'], 49165), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ASYNC=1'], 49166), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1', '-DTEST_ASYNC=1'], 49167), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1', '-DTEST_ASYNC=1'], 49168), 0)
]
#harnesses = filter_harnesses(harnesses)
for harness, datagram in harnesses:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, '-DTEST_ASYNC=1', sockets_include])
# Deliberately attempt a connection on a port that will fail to test the error callback and getsockopt
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=49169', '-DTEST_ASYNC=1', sockets_include])
def test_sockets_echo_bigdata(self):
sockets_include = '-I'+path_from_root('tests', 'sockets')
# generate a large string literal to use as our message
message = ''
for i in range(256*256*2):
message += str(unichr(ord('a') + (i % 26)))
# re-write the client test with this literal (it's too big to pass via command line)
input_filename = path_from_root('tests', 'sockets', 'test_sockets_echo_client.c')
input = open(input_filename).read()
output = input.replace('#define MESSAGE "pingtothepong"', '#define MESSAGE "%s"' % message)
harnesses = [
(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49170), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49171), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49172), 1)
]
harnesses = filter_harnesses(harnesses)
for harness, datagram in harnesses:
with harness:
self.btest(output, expected='0', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram], force_c=True)
def test_sockets_partial(self):
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49180),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49181)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_partial_client.c'), expected='165', args=['-DSOCKK=%d' % harness.listen_port])
def test_sockets_select_server_down(self):
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49190),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49191)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_select_server_down_client.c'), expected='266', args=['-DSOCKK=%d' % harness.listen_port])
def test_sockets_select_server_closes_connection_rw(self):
sockets_include = '-I'+path_from_root('tests', 'sockets')
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49200),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49201)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_select_server_closes_connection_client_rw.c'), expected='266', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port])
def test_enet(self):
try_delete(self.in_dir('enet'))
shutil.copytree(path_from_root('tests', 'enet'), self.in_dir('enet'))
pwd = os.getcwd()
os.chdir(self.in_dir('enet'))
Popen([PYTHON, path_from_root('emconfigure'), './configure']).communicate()
Popen([PYTHON, path_from_root('emmake'), 'make']).communicate()
enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I'+path_from_root('tests', 'enet', 'include')]
os.chdir(pwd)
for harness in [
CompiledServerHarness(os.path.join('sockets', 'test_enet_server.c'), enet, 49210)
]:
with harness:
self.btest(os.path.join('sockets', 'test_enet_client.c'), expected='0', args=enet + ['-DSOCKK=%d' % harness.listen_port])
# This test is no longer in use for WebSockets as we can't truly emulate
# a server in the browser (in the past, there were some hacks to make it
  # somewhat work, but those have been removed). However, with WebRTC it
  # should be possible to resurrect this test.
# def test_enet_in_browser(self):
# try_delete(self.in_dir('enet'))
# shutil.copytree(path_from_root('tests', 'enet'), self.in_dir('enet'))
# pwd = os.getcwd()
# os.chdir(self.in_dir('enet'))
# Popen([PYTHON, path_from_root('emconfigure'), './configure']).communicate()
# Popen([PYTHON, path_from_root('emmake'), 'make']).communicate()
# enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I'+path_from_root('tests', 'enet', 'include')]
# os.chdir(pwd)
# Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_enet_server.c'), '-o', 'server.html', '-DSOCKK=2235'] + enet).communicate()
# with WebsockifyServerHarness('', [], 2235, 2234):
# with WebsockifyServerHarness('', [], 2237, 2236):
# pids = []
# try:
# proc = make_relay_server(2234, 2236)
# pids.append(proc.pid)
# self.btest(os.path.join('sockets', 'test_enet_client.c'), expected='0', args=['-DSOCKK=2237', '-DUSE_IFRAME=1'] + enet)
# finally:
# clean_pids(pids);
def zzztest_webrtc(self): # XXX see src/settings.js, this is disabled pending investigation
host_src = 'webrtc_host.c'
peer_src = 'webrtc_peer.c'
host_outfile = 'host.html'
peer_outfile = 'peer.html'
host_filepath = path_from_root('tests', 'sockets', host_src)
temp_host_filepath = os.path.join(self.get_dir(), os.path.basename(host_src))
with open(host_filepath) as f: host_src = f.read()
with open(temp_host_filepath, 'w') as f: f.write(self.with_report_result(host_src))
peer_filepath = path_from_root('tests', 'sockets', peer_src)
temp_peer_filepath = os.path.join(self.get_dir(), os.path.basename(peer_src))
with open(peer_filepath) as f: peer_src = f.read()
with open(temp_peer_filepath, 'w') as f: f.write(self.with_report_result(peer_src))
open(os.path.join(self.get_dir(), 'host_pre.js'), 'w').write('''
var Module = {
webrtc: {
broker: 'http://localhost:8182',
session: undefined,
onpeer: function(peer, route) {
window.open('http://localhost:8888/peer.html?' + route);
// iframe = document.createElement("IFRAME");
// iframe.setAttribute("src", "http://localhost:8888/peer.html?" + route);
// iframe.style.display = "none";
// document.body.appendChild(iframe);
peer.listen();
},
onconnect: function(peer) {
},
ondisconnect: function(peer) {
},
onerror: function(error) {
console.error(error);
}
},
};
''')
open(os.path.join(self.get_dir(), 'peer_pre.js'), 'w').write('''
var Module = {
webrtc: {
broker: 'http://localhost:8182',
session: window.location.toString().split('?')[1],
onpeer: function(peer, route) {
peer.connect(Module['webrtc']['session']);
},
onconnect: function(peer) {
},
ondisconnect: function(peer) {
// Calling window.close() from this handler hangs my browser, so run it in the next turn
setTimeout(window.close, 0);
},
onerror: function(error) {
console.error(error);
}
}
};
''')
Popen([PYTHON, EMCC, temp_host_filepath, '-o', host_outfile] + ['-s', 'GL_TESTING=1', '--pre-js', 'host_pre.js', '-s', 'SOCKET_WEBRTC=1', '-s', 'SOCKET_DEBUG=1']).communicate()
Popen([PYTHON, EMCC, temp_peer_filepath, '-o', peer_outfile] + ['-s', 'GL_TESTING=1', '--pre-js', 'peer_pre.js', '-s', 'SOCKET_WEBRTC=1', '-s', 'SOCKET_DEBUG=1']).communicate()
# note: you may need to run this manually yourself, if npm is not in the path, or if you need a version that is not in the path
Popen(['npm', 'install', path_from_root('tests', 'sockets', 'p2p')]).communicate()
broker = Popen(NODE_JS + [path_from_root('tests', 'sockets', 'p2p', 'broker', 'p2p-broker.js')])
expected = '1'
self.run_browser(host_outfile, '.', ['/report_result?' + e for e in expected])
    broker.kill()
def test_nodejs_sockets_echo(self):
# This test checks that sockets work when the client code is run in Node.js
# Run with ./runner.py sockets.test_nodejs_sockets_echo
    if NODE_JS not in JS_ENGINES:
return self.skip('node is not present')
sockets_include = '-I'+path_from_root('tests', 'sockets')
harnesses = [
(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59160), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 59162), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 59164), 1)
]
harnesses = filter_harnesses(harnesses)
# Basic test of node client against both a Websockified and compiled echo server.
for harness, datagram in harnesses:
with harness:
Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, '-DREPORT_RESULT=int dummy'], stdout=PIPE, stderr=PIPE).communicate()
out = run_js('client.js', engine=NODE_JS, full_output=True)
self.assertContained('do_msg_read: read 14 bytes', out)
# Test against a Websockified server with compile time configured WebSocket subprotocol. We use a Websockified
    # server because, as long as the subprotocol list contains binary, it will configure itself to accept binary.
# This test also checks that the connect url contains the correct subprotocols.
print "\nTesting compile time WebSocket configuration.\n"
for harness in filter_harnesses([
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59166)
]):
with harness:
Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-s', 'SOCKET_DEBUG=1', '-s', 'WEBSOCKET_SUBPROTOCOL="base64, binary"', '-DSOCKK=59166', '-DREPORT_RESULT=int dummy'], stdout=PIPE, stderr=PIPE).communicate()
out = run_js('client.js', engine=NODE_JS, full_output=True)
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained(['connect: ws://127.0.0.1:59166, base64,binary', 'connect: ws://127.0.0.1:59166/, base64,binary'], out)
# Test against a Websockified server with runtime WebSocket configuration. We specify both url and subprotocol.
# In this test we have *deliberately* used the wrong port '-DSOCKK=12345' to configure the echo_client.c, so
# the connection would fail without us specifying a valid WebSocket URL in the configuration.
print "\nTesting runtime WebSocket configuration.\n"
for harness in filter_harnesses([
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59168)
]):
with harness:
open(os.path.join(self.get_dir(), 'websocket_pre.js'), 'w').write('''
var Module = {
websocket: {
url: 'ws://localhost:59168/testA/testB',
subprotocol: 'text, base64, binary',
}
};
''')
Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '--pre-js', 'websocket_pre.js', '-s', 'SOCKET_DEBUG=1', '-DSOCKK=12345', '-DREPORT_RESULT=int dummy'], stdout=PIPE, stderr=PIPE).communicate()
out = run_js('client.js', engine=NODE_JS, full_output=True)
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained('connect: ws://localhost:59168/testA/testB, text,base64,binary', out)
|
|
# Copyright 2009, Peter A. Bigot
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain a
# copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Classes and global objects related to U{XML Namespaces<http://www.w3.org/TR/2006/REC-xml-names-20060816/index.html>}.
Since namespaces hold all referenceable objects, this module also defines the
infrastructure for resolving named object references, such as schema
components.
"""
import pyxb
import os
import fnmatch
import pyxb.utils.utility
import xml.dom
import logging
_log = logging.getLogger(__name__)
class ExpandedName (pyxb.cscRoot):
"""Represent an U{expanded name
<http://www.w3.org/TR/REC-xml-names/#dt-expname>}, which pairs a
namespace with a local name.
Because a large number of local elements, and most attributes, have no
namespace associated with them, this is optimized for representing names
with an absent namespace. The hash and equality test methods are set so
that a plain string is equivalent to a tuple of C{None} and that string.
Note that absent namespaces can be represented in two ways: with a
namespace of C{None} (the name "has no namespace"), and with a namespace
that is an L{absent namespace <Namespace.CreateAbsentNamespace>} (the name
"has an absent namespace"). Hash code calculations are done so that the
two alternatives produce the same hash; however, comparison is done so
that the two are distinguished. The latter is the intended behavior; the
former should not be counted upon.
This class allows direct lookup of the named object within a category by
using the category name as an accessor function. That is, if the
namespace of the expanded name C{en} has a category 'typeDefinition', then
the following two expressions are equivalent::
en.typeDefinition()
en.namespace().categoryMap('typeDefinition').get(en.localName())
    Instances cache a tuple representation of the name so that they can be
    hashed and used as dictionary keys without concern for pointer
    equivalence.
    """
def namespace (self):
"""The L{Namespace} part of the expanded name."""
return self.__namespace
__namespace = None
def namespaceURI (self):
"""Return the URI of the namespace, or C{None} if the namespace is absent."""
return self.__namespaceURI
__namespaceURI = None
def localName (self):
"""The local part of the expanded name."""
return self.__localName
__localName = None
# Cached tuple representation
__expandedName = None
def validateComponentModel (self):
"""Pass model validation through to namespace part."""
return self.namespace().validateComponentModel()
def uriTuple (self):
"""Return a tuple consisting of the namespace URI and the local name.
This presents the expanded name as base Python types for persistent
storage. Be aware, though, that it will lose the association of the
name with an absent namespace, if that matters to you."""
return ( self.__namespaceURI, self.__localName )
# Treat unrecognized attributes as potential accessor functions
def __getattr__ (self, name):
# Don't try to recognize private names (like __setstate__)
if name.startswith('__'):
return super(ExpandedName, self).__getattr__(name)
if self.namespace() is None:
return lambda: None
# NOTE: This will raise pyxb.NamespaceError if the category does not exist.
category_value = self.namespace().categoryMap(name).get(self.localName())
return lambda : category_value
def createName (self, local_name):
"""Return a new expanded name in the namespace of this name.
@param local_name: The local name portion of an expanded name.
@return: An instance of L{ExpandedName}.
"""
return ExpandedName(self.namespace(), local_name)
def adoptName (self, name):
"""Return the input name, except if the input name has no namespace,
return a name that uses the namespace from this name with the local
name from the input name.
Use this when the XML document has an unqualified name and we're
processing using an absent default namespace.
@warning: Be careful when using a global name to adopt a name from a
local element: if the local element (with no namespace) has the same
localName as but is different from the global element (with a
namespace), this will improperly provide a namespace when one should
not be present. See the comments in
L{pyxb.binding.basis.element.elementForName}.
"""
if not isinstance(name, ExpandedName):
name = ExpandedName(name)
if name.namespace() is None:
name = self.createName(name.localName())
return name
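    # Hedged example (not part of the original source); the URI is purely
    # illustrative. If gn is the expanded name {http://example.com/ns}root,
    # then gn.adoptName('child') yields {http://example.com/ns}child, while
    # adopting a name that already carries a namespace returns it unchanged.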
def __init__ (self, *args, **kw):
"""Create an expanded name.
Expected argument patterns are:
( C{str} ) -- the local name in an absent namespace
( L{ExpandedName} ) -- a copy of the given expanded name
( C{xml.dom.Node} ) -- The name extracted from node.namespaceURI and node.localName
( C{str}, C{str} ) -- the namespace URI and the local name
( L{Namespace}, C{str} ) -- the namespace and the local name
( L{ExpandedName}, C{str}) -- the namespace from the expanded name, and the local name
Wherever C{str} occurs C{unicode} is also permitted.
@keyword fallback_namespace: Optional Namespace instance to use if the
namespace would otherwise be None. This is only used if it is an
absent namespace.
"""
fallback_namespace = kw.get('fallback_namespace')
if 0 == len(args):
raise pyxb.LogicError('Too few arguments to ExpandedName constructor')
if 2 < len(args):
raise pyxb.LogicError('Too many arguments to ExpandedName constructor')
if 2 == len(args):
# Namespace(str, unicode, Namespace) and local name (str, unicode)
( ns, ln ) = args
else:
# Local name (str, unicode) or ExpandedName or Node
assert 1 == len(args)
ln = args[0]
ns = None
if isinstance(ln, basestring):
pass
elif isinstance(ln, tuple) and (2 == len(ln)):
(ns, ln) = ln
elif isinstance(ln, ExpandedName):
ns = ln.namespace()
ln = ln.localName()
elif isinstance(ln, xml.dom.Node):
if not(ln.nodeType in (xml.dom.Node.ELEMENT_NODE, xml.dom.Node.ATTRIBUTE_NODE)):
raise pyxb.LogicError('Cannot create expanded name from non-element DOM node %s' % (ln.nodeType,))
ns = ln.namespaceURI
ln = ln.localName
else:
raise pyxb.LogicError('Unrecognized argument type %s' % (type(ln),))
if (ns is None) and (fallback_namespace is not None):
if fallback_namespace.isAbsentNamespace():
ns = fallback_namespace
if isinstance(ns, (str, unicode)):
ns = NamespaceForURI(ns, create_if_missing=True)
if isinstance(ns, ExpandedName):
ns = ns.namespace()
if (ns is not None) and not isinstance(ns, Namespace):
raise pyxb.LogicError('ExpandedName must include a valid (perhaps absent) namespace, or None.')
self.__namespace = ns
if self.__namespace is not None:
self.__namespaceURI = self.__namespace.uri()
self.__localName = ln
assert self.__localName is not None
self.__expandedName = ( self.__namespace, self.__localName )
self.__uriTuple = ( self.__namespaceURI, self.__localName )
def __str__ (self):
assert self.__localName is not None
if self.__namespaceURI is not None:
return '{%s}%s' % (self.__namespaceURI, self.__localName)
return self.localName()
def __hash__ (self):
if self.__namespaceURI is None:
# Handle both str and unicode hashes
return type(self.__localName).__hash__(self.__localName)
return tuple.__hash__(self.__expandedName)
def __cmp__ (self, other):
if other is None: # None is below everything else
return cmp(1, -1)
if isinstance(other, (str, unicode)):
other = ( None, other )
if not isinstance(other, tuple):
other = other.__uriTuple
if isinstance(other[0], Namespace):
other = ( other[0].uri(), other[1] )
return cmp(self.__uriTuple, other)
def getAttribute (self, dom_node):
"""Return the value of the attribute identified by this name in the given node.
@return: An instance of C{xml.dom.Attr}, or C{None} if the node does
not have an attribute with this name.
"""
if dom_node.hasAttributeNS(self.__namespaceURI, self.__localName):
return dom_node.getAttributeNS(self.__namespaceURI, self.__localName)
return None
def nodeMatches (self, dom_node):
"""Return C{True} iff the dom node expanded name matches this expanded name."""
return (dom_node.localName == self.__localName) and (dom_node.namespaceURI == self.__namespaceURI)
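# Hedged usage sketch (not part of the original source) for the constructor
# patterns documented in ExpandedName.__init__; the URI is an example value:
#   ns = NamespaceForURI('http://example.com/ns', create_if_missing=True)
#   en1 = ExpandedName(ns, 'element')                        # namespace + local name
#   en2 = ExpandedName('http://example.com/ns', 'element')   # URI + local name
#   en3 = ExpandedName('element')                            # absent namespace
#   str(en1)         # '{http://example.com/ns}element'
#   en1.uriTuple()   # ('http://example.com/ns', 'element')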
class NamedObjectMap (dict):
"""An extended dictionary intended to assist with QName resolution.
These dictionaries have an attribute that identifies a category of named
objects within a Namespace; the specifications for various documents
require that certain groups of objects must be unique, while uniqueness is
not required between groups. The dictionary also retains a pointer to the
Namespace instance for which it holds objects."""
def namespace (self):
"""The namespace to which the object map belongs."""
return self.__namespace
__namespace = None
def category (self):
"""The category of objects (e.g., typeDefinition, elementDeclaration)."""
return self.__category
__category = None
def __init__ (self, category, namespace, *args, **kw):
self.__category = category
self.__namespace = namespace
super(NamedObjectMap, self).__init__(*args, **kw)
class _NamespaceCategory_mixin (pyxb.cscRoot):
"""Mix-in that aggregates those aspects of XMLNamespaces that hold
references to categories of named objects.
Arbitrary groups of named objects, each requiring unique names within
themselves, can be saved. Unless configured otherwise, the Namespace
instance is extended with accessors that provide direct access to
individual category maps. The name of the method is the category name
with a suffix of "s"; e.g., if a category "typeDefinition" exists, it can
be accessed from the namespace using the syntax C{ns.typeDefinitions()}.
Note that the returned value from the accessor is a live reference to
the category map; changes made to the map are reflected in the
namespace.
"""
# Map from category strings to NamedObjectMap instances that
# contain the dictionary for that category.
__categoryMap = None
def _reset (self):
"""CSC extension to reset fields of a Namespace.
This one handles category-related data."""
getattr(super(_NamespaceCategory_mixin, self), '_reset', lambda *args, **kw: None)()
self.__categoryMap = { }
def categories (self):
"""The list of individual categories held in this namespace."""
return self.__categoryMap.keys()
def _categoryMap (self):
"""Return the whole map from categories to named objects."""
return self.__categoryMap
def categoryMap (self, category):
"""Map from local names to NamedObjectMap instances for the given category."""
try:
return self.__categoryMap[category]
except KeyError:
raise pyxb.NamespaceError(self, '%s has no category %s' % (self, category))
def __defineCategoryAccessors (self):
"""Define public methods on the Namespace which provide access to
individual NamedObjectMaps based on their category.
"""
for category in self.categories():
accessor_name = category + 's'
setattr(self, accessor_name, lambda _map=self.categoryMap(category): _map)
def configureCategories (self, categories):
"""Ensure there is a map for each of the given categories.
Category configuration
L{activates<archive._NamespaceArchivable_mixin.isActive>} a namespace.
Existing maps are not affected."""
self._activate()
if self.__categoryMap is None:
self.__categoryMap = { }
for category in categories:
if not (category in self.__categoryMap):
self.__categoryMap[category] = NamedObjectMap(category, self)
self.__defineCategoryAccessors()
return self
def addCategoryObject (self, category, local_name, named_object):
"""Allow access to the named_object by looking up the local_name in
the given category.
Raises pyxb.NamespaceUniquenessError if an object with the same name
already exists in the category."""
name_map = self.categoryMap(category)
old_object = name_map.get(local_name)
if (old_object is not None) and (old_object != named_object):
raise pyxb.NamespaceUniquenessError(self, '%s: name %s used for multiple values in %s' % (self, local_name, category))
name_map[local_name] = named_object
return named_object
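    # Hedged sketch (not part of the original source) of the category
    # machinery described in the class docstring; the names are illustrative:
    #   ns.configureCategories(['typeDefinition'])
    #   ns.addCategoryObject('typeDefinition', 'myType', my_type_component)
    #   ns.typeDefinitions()['myType']   # the generated accessor returns the
    #                                    # same live map as ns.categoryMap('typeDefinition')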
def replaceCategoryObject (self, category, local_name, old_object, new_object):
"""Replace the referenced object in the category.
The new object will be added only if the old_object matches the
current entry for local_name in the category."""
name_map = self.categoryMap(category)
if old_object == name_map.get(local_name):
name_map[local_name] = new_object
return name_map[local_name]
def _replaceComponent_csc (self, existing_def, replacement_def):
"""Replace a component definition where present in the category maps.
@note: This is a high-cost operation, as every item in every category
map must be examined to see whether its value field matches
C{existing_def}."""
for (cat, registry) in self.__categoryMap.items():
for (k, v) in registry.items():
if v == existing_def:
_log.info('Replacing value for %s in %s', k, cat)
del registry[k]
if replacement_def is not None:
registry[k] = replacement_def
return getattr(super(_NamespaceCategory_mixin, self), '_replaceComponent_csc', lambda *args, **kw: replacement_def)(existing_def, replacement_def)
# Verify that the namespace category map has no components recorded. This
# is the state that should hold prior to loading a saved namespace; at
    # the moment, we do not support aggregating components defined separately
# into the same namespace. That should be done at the schema level using
# the "include" element.
def __checkCategoriesEmpty (self):
if self.__categoryMap is None:
return True
assert isinstance(self.__categoryMap, dict)
if 0 == len(self.__categoryMap):
return True
for k in self.categories():
if 0 < len(self.categoryMap(k)):
return False
return True
def _namedObjects (self):
objects = set()
for category_map in self.__categoryMap.values():
objects.update(category_map.values())
return objects
def _loadNamedObjects (self, category_map):
"""Add the named objects from the given map into the set held by this namespace.
It is an error to name something which is already present."""
self.configureCategories(category_map.keys())
for category in category_map.keys():
current_map = self.categoryMap(category)
new_map = category_map[category]
for (local_name, component) in new_map.iteritems():
existing_component = current_map.get(local_name)
if existing_component is None:
current_map[local_name] = component
elif existing_component._allowUpdateFromOther(component):
existing_component._updateFromOther(component)
else:
                    raise pyxb.NamespaceError(self, 'Load attempted to override %s %s in %s' % (category, local_name, self.uri()))
self.__defineCategoryAccessors()
def hasSchemaComponents (self):
"""Return C{True} iff schema components have been associated with this namespace.
This only checks whether the corresponding categories have been added,
not whether there are any entries in those categories. It is useful
for identifying namespaces that were incorporated through a
declaration but never actually referenced."""
return 'typeDefinition' in self.__categoryMap
def _associateOrigins (self, module_record):
assert module_record.namespace() == self
module_record.resetCategoryObjects()
self.configureCategories([archive.NamespaceArchive._AnonymousCategory()])
origin_set = module_record.origins()
for (cat, cat_map) in self.__categoryMap.iteritems():
for (n, v) in cat_map.iteritems():
if isinstance(v, archive._ArchivableObject_mixin) and (v._objectOrigin() in origin_set):
v._objectOrigin().addCategoryMember(cat, n, v)
class _ComponentDependency_mixin (pyxb.utils.utility.PrivateTransient_mixin, pyxb.cscRoot):
"""Mix-in for components that can depend on other components."""
__PrivateTransient = set()
# Cached frozenset of components on which this component depends.
__bindingRequires = None
__PrivateTransient.add('bindingRequires')
def _resetClone_csc (self, **kw):
"""CSC extension to reset fields of a component. This one clears
dependency-related data, since the clone will have to revise its
dependencies.
@rtype: C{None}"""
getattr(super(_ComponentDependency_mixin, self), '_resetClone_csc', lambda *_args, **_kw: None)(**kw)
self.__bindingRequires = None
def bindingRequires (self, reset=False, include_lax=False):
"""Return a set of components upon whose bindings this component's
bindings depend.
For example, bindings that are extensions or restrictions depend on
their base types. Complex type definition bindings require that the
types of their attribute declarations be available at the class
definition, and the types of their element declarations in the
postscript.
@keyword include_lax: if C{False} (default), only the requirements of
the class itself are returned. If C{True}, all requirements are
returned.
@rtype: C{set(L{pyxb.xmlschema.structures._SchemaComponent_mixin})}
"""
if reset or (self.__bindingRequires is None):
if isinstance(self, resolution._Resolvable_mixin) and not (self.isResolved()):
raise pyxb.LogicError('Unresolved %s in %s: %s' % (self.__class__.__name__, self._namespaceContext().targetNamespace(), self.name()))
self.__bindingRequires = self._bindingRequires_vx(include_lax)
return self.__bindingRequires
def _bindingRequires_vx (self, include_lax):
"""Placeholder for subclass method that identifies the necessary components.
@note: Override in subclasses.
@return: The component instances on which this component depends
@rtype: C{frozenset}
@raise LogicError: A subclass failed to implement this method
"""
raise pyxb.LogicError('%s does not implement _bindingRequires_vx' % (type(self),))
class _NamespaceComponentAssociation_mixin (pyxb.cscRoot):
"""Mix-in for managing components defined within this namespace.
The component set includes not only top-level named components (such as
those accessible through category maps), but internal anonymous
components, such as those involved in representing the content model of a
complex type definition. We need to be able to get a list of these
components, sorted in dependency order, so that generated bindings do not
attempt to refer to a binding that has not yet been generated."""
# A set containing all components, named or unnamed, that belong to this
# namespace.
__components = None
def _reset (self):
"""CSC extension to reset fields of a Namespace.
This one handles data related to component association with a
namespace."""
getattr(super(_NamespaceComponentAssociation_mixin, self), '_reset', lambda *args, **kw: None)()
self.__components = set()
self.__origins = set()
self.__schemaMap = { }
def _associateComponent (self, component):
"""Record that the responsibility for the component belongs to this namespace."""
self._activate()
assert self.__components is not None
assert isinstance(component, _ComponentDependency_mixin)
assert component not in self.__components
self.__components.add(component)
def _replaceComponent_csc (self, existing_def, replacement_def):
"""Replace a component definition in the set of associated components.
@raise KeyError: C{existing_def} is not in the set of components."""
self.__components.remove(existing_def)
if replacement_def is not None:
self.__components.add(replacement_def)
return getattr(super(_NamespaceComponentAssociation_mixin, self), '_replaceComponent_csc', lambda *args, **kw: replacement_def)(existing_def, replacement_def)
def addSchema (self, schema):
for sr in self.__origins:
if isinstance(sr, archive._SchemaOrigin) and sr.match(schema=schema):
_log.info('Schema at %s already registered in %s', schema.location(), self)
raise pyxb.SchemaUniquenessError(self, schema.location(), sr.schema())
sr = archive._SchemaOrigin(schema=schema)
schema.generationUID().associateObject(sr)
self.__origins.add(sr)
return sr
def lookupSchemaByLocation (self, schema_location):
for sr in self.__origins:
if isinstance(sr, archive._SchemaOrigin) and sr.match(location=schema_location):
return (True, sr.schema())
for mr in self.moduleRecords():
if mr.hasMatchingOrigin(location=schema_location):
return (True, None)
return (False, None)
def schemas (self):
s = set()
for sr in self.__origins:
if isinstance(sr, archive._SchemaOrigin) and (sr.schema() is not None):
s.add(sr.schema())
return s
__origins = None
def components (self):
"""Return a frozenset of all components, named or unnamed, belonging
to this namespace."""
return frozenset(self.__components)
def _releaseNamespaceContexts (self):
for c in self.__components:
c._clearNamespaceContext()
import archive
import resolution
from utility import *
class Namespace (_NamespaceCategory_mixin, resolution._NamespaceResolution_mixin, _NamespaceComponentAssociation_mixin, archive._NamespaceArchivable_mixin):
"""Represents an XML namespace (a URI).
There is at most one L{Namespace} class instance per namespace (URI). The
instance also supports associating arbitrary L{maps<NamedObjectMap>} from
names to objects, in separate categories. The default categories are
configured externally; for example, the
L{Schema<pyxb.xmlschema.structures.Schema>} component defines a category
for each named component in XMLSchema, and the customizing subclass for
WSDL definitions adds categories for the service bindings, messages, etc.
Namespaces can be written to and loaded from pickled files. See
L{NamespaceArchive} for information.
"""
# The URI for the namespace. If the URI is None, this is an absent
# namespace.
__uri = None
# An identifier, unique within a program using PyXB, used to distinguish
# absent namespaces. Currently this value is not accessible to the user,
# and exists solely to provide a unique identifier when printing the
# namespace as a string. The class variable is used as a one-up counter,
# which is assigned to the instance variable when an absent namespace
# instance is created.
__absentNamespaceID = 0
    # A prefix bound to this namespace by the standard. Currently the only
    # known cases are the xml and xmlns namespaces.
__boundPrefix = None
# A prefix set as a preferred prefix, generally by processing a namespace
# declaration.
__prefix = None
# A map from URIs to Namespace instances. Namespaces instances
# must be unique for their URI. See __new__().
__Registry = { }
# A set of all absent namespaces created.
__AbsentNamespaces = set()
# Optional description of the namespace
__description = None
# Indicates whether this namespace is built-in to the system
__isBuiltinNamespace = False
# Indicates whether this namespace is undeclared (available always)
__isUndeclaredNamespace = False
# Indicates whether this namespace was loaded from an archive
__isLoadedNamespace = False
# Archive from which the namespace can be read, or None if no archive
# defines this namespace.
__namespaceArchive = None
# Indicates whether this namespace has been written to an archive
__hasBeenArchived = False
# Holds the module path for builtin modules until we get a ModuleRecord to
# store that in.
__builtinModulePath = None
# A set of options defining how the Python bindings for this namespace
# were generated. Not currently used, since we don't have different
# binding configurations yet.
__bindingConfiguration = None
    # The namespace context used when creating built-in components that belong
# to this namespace. This is used to satisfy the low-level requirement
# that all schema components have a namespace context; normally, that
# context is built dynamically from the schema element.
__initialNamespaceContext = None
# The default_namespace parameter when creating the initial namespace
# context. Only used with built-in namespaces.
__contextDefaultNamespace = None
# The map from prefixes to namespaces as defined by the schema element for
# this namespace. Only used with built-in namespaces.
__contextInScopeNamespaces = None
@classmethod
def _NamespaceForURI (cls, uri):
"""If a Namespace instance for the given URI exists, return it; otherwise return None.
        Note: Absent namespaces are not stored in the registry. If you use
one (e.g., for a schema with no target namespace), don't lose hold of
it."""
assert uri is not None
return cls.__Registry.get(uri, None)
def __getnewargs__ (self):
"""Pickling support.
To ensure that unpickled Namespace instances are unique per
URI, we ensure that the routine that creates unpickled
instances knows what it's supposed to return."""
if self.uri() is None:
raise pyxb.LogicError('Illegal to serialize absent namespaces')
return (self.uri(),)
def __new__ (cls, *args, **kw):
"""Pickling and singleton support.
This ensures that no more than one Namespace instance exists
for any given URI. We could do this up in __init__, but that
doesn't normally get called when unpickling instances; this
does. See also __getnewargs__()."""
(uri,) = args
if not (uri in cls.__Registry):
instance = object.__new__(cls)
# Do this one step of __init__ so we can do checks during unpickling
instance.__uri = uri
instance._reset()
# Absent namespaces are not stored in the registry.
if uri is None:
cls.__AbsentNamespaces.add(instance)
return instance
cls.__Registry[uri] = instance
return cls.__Registry[uri]
@classmethod
def AvailableNamespaces (cls):
"""Return a set of all Namespace instances defined so far."""
return cls.__AbsentNamespaces.union(cls.__Registry.values())
def __init__ (self, uri,
description=None,
builtin_namespace=None,
builtin_module_path=None,
is_undeclared_namespace=False,
is_loaded_namespace=False,
bound_prefix=None,
default_namespace=None,
in_scope_namespaces=None):
"""Create a new Namespace.
The URI must be non-None, and must not already be assigned to
a Namespace instance. See NamespaceForURI().
User-created Namespace instances may also provide a description.
Users should never provide a builtin_namespace parameter.
"""
# New-style superclass invocation
super(Namespace, self).__init__()
self.__contextDefaultNamespace = default_namespace
self.__contextInScopeNamespaces = in_scope_namespaces
# Make sure that we're not trying to do something restricted to
# built-in namespaces
is_builtin_namespace = not (builtin_namespace is None)
if not is_builtin_namespace:
if bound_prefix is not None:
raise pyxb.LogicError('Only permanent Namespaces may have bound prefixes')
# We actually set the uri when this instance was allocated;
# see __new__().
assert self.__uri == uri
self.__boundPrefix = bound_prefix
self.__description = description
self.__isBuiltinNamespace = is_builtin_namespace
self.__builtinNamespaceVariable = builtin_namespace
self.__builtinModulePath = builtin_module_path
self.__isUndeclaredNamespace = is_undeclared_namespace
self.__isLoadedNamespace = is_loaded_namespace
self._reset()
assert (self.__uri is None) or (self.__Registry[self.__uri] == self)
def _reset (self):
assert not self.isActive()
getattr(super(Namespace, self), '_reset', lambda *args, **kw: None)()
self.__initialNamespaceContext = None
def uri (self):
"""Return the URI for the namespace represented by this instance.
If the URI is None, this is an absent namespace, used to hold
declarations not associated with a namespace (e.g., from schema with
no target namespace)."""
return self.__uri
def setPrefix (self, prefix):
if self.__boundPrefix is not None:
if self.__boundPrefix == prefix:
return self
raise pyxb.NamespaceError(self, 'Cannot change the prefix of a bound namespace')
self.__prefix = prefix
return self
def prefix (self):
if self.__boundPrefix:
return self.__boundPrefix
return self.__prefix
def isAbsentNamespace (self):
"""Return True iff this namespace is an absent namespace.
Absent namespaces have no namespace URI; they exist only to
hold components created from schemas with no target
namespace."""
return self.__uri is None
def fallbackNamespace (self):
"""When known to be operating in this namespace, provide the Namespace
instance to be used when names are associated with no namespace."""
if self.isAbsentNamespace():
return self
return None
@classmethod
def CreateAbsentNamespace (cls):
"""Create an absent namespace.
Use this instead of the standard constructor, in case we need
to augment it with a uuid or the like."""
rv = Namespace(None)
rv.__absentNamespaceID = cls.__absentNamespaceID
cls.__absentNamespaceID += 1
return rv
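    # Hedged illustration (not part of the original source) of the per-URI
    # uniqueness enforced by __new__ above; the URI is an example value:
    #   ns1 = Namespace('http://example.com/ns', description='example')
    #   ns2 = NamespaceForURI('http://example.com/ns')
    #   ns1 is ns2            # True: at most one instance per URI
    #   an = Namespace.CreateAbsentNamespace()
    #   an.uri() is None      # True: absent namespaces are never registered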
def _overrideAbsentNamespace (self, uri):
assert self.isAbsentNamespace()
self.__uri = uri
def boundPrefix (self):
"""Return the standard prefix to be used for this namespace.
Only a few namespace prefixes are bound to namespaces: xml and xmlns
are two. In all other cases, this method should return None. The
infrastructure attempts to prevent user creation of Namespace
instances that have bound prefixes."""
return self.__boundPrefix
def isBuiltinNamespace (self):
"""Return True iff this namespace was defined by the infrastructure.
That is the case for all namespaces in the Namespace module."""
return self.__isBuiltinNamespace
def builtinNamespaceRepresentation (self):
assert self.__builtinNamespaceVariable is not None
return 'pyxb.namespace.%s' % (self.__builtinNamespaceVariable,)
def builtinModulePath (self):
if not self.__builtinModulePath:
raise pyxb.LogicError('Namespace has no built-in module: %s' % (self,))
mr = self.lookupModuleRecordByUID(BuiltInObjectUID)
assert mr is not None
assert mr.modulePath() == self.__builtinModulePath
return self.__builtinModulePath
def isUndeclaredNamespace (self):
"""Return True iff this namespace is always available
regardless of whether there is a declaration for it.
This is the case only for the
xml(http://www.w3.org/XML/1998/namespace) and
xmlns(http://www.w3.org/2000/xmlns/) namespaces."""
return self.__isUndeclaredNamespace
def isLoadedNamespace (self):
"""Return C{True} iff this namespace was loaded from a namespace archive."""
return self.__isLoadedNamespace
def hasBeenArchived (self):
"""Return C{True} iff this namespace has been saved to a namespace archive.
See also L{isLoadedNamespace}."""
return self.__hasBeenArchived
def description (self, description=None):
"""Get, or set, a textual description of the namespace."""
if description is not None:
self.__description = description
return self.__description
def nodeIsNamed (self, node, *local_names):
return (node.namespaceURI == self.uri()) and (node.localName in local_names)
def createExpandedName (self, local_name):
return ExpandedName(self, local_name)
def __getstate__ (self):
"""Support pickling.
Well, no, not really. Because namespace instances must be unique, we
represent them as their URI, and that's done by __getnewargs__
above. All the interesting information is in the ModuleRecords."""
return {}
def _defineBuiltins_ox (self, structures_module):
pass
__definedBuiltins = False
def _defineBuiltins (self, structures_module):
assert self.isBuiltinNamespace()
if not self.__definedBuiltins:
mr = self.lookupModuleRecordByUID(BuiltInObjectUID, create_if_missing=True, module_path=self.__builtinModulePath)
self._defineBuiltins_ox(structures_module)
self.__definedBuiltins = True
mr.markIncorporated()
return self
def _loadComponentsFromArchives (self, structures_module):
"""Attempts to load the named objects held in this namespace.
The base class implementation looks at the set of available archived
namespaces, and if one contains this namespace unserializes its named
object maps.
Sub-classes may choose to look elsewhere, if this version fails or
before attempting it.
There is no guarantee that any particular category of named object has
been located when this returns. Caller must check.
"""
for mr in self.moduleRecords():
if mr.isLoadable():
if mr.isPublic():
_log.info('Load %s from %s', mr, mr.archive())
try:
mr.archive().readNamespaces()
except pyxb.NamespaceArchiveError, e:
_log.exception("%s", str(e))
else:
_log.info('Ignoring private module %s in validation', mr)
self._activate()
__didValidation = False
__inValidation = False
def validateComponentModel (self, structures_module=None):
"""Ensure this namespace is ready for use.
If the namespace does not have a map of named objects, the system will
attempt to load one.
"""
if not self.__didValidation:
# assert not self.__inValidation, 'Nested validation of %s' % (self.uri(),)
if structures_module is None:
import pyxb.xmlschema.structures as structures_module
if self.isBuiltinNamespace():
self._defineBuiltins(structures_module)
try:
self.__inValidation = True
self._loadComponentsFromArchives(structures_module)
self.__didValidation = True
finally:
self.__inValidation = False
return True
def _replaceComponent (self, existing_def, replacement_def):
"""Replace the existing definition with another.
This is used in a situation where building the component model
resulted in a new component instance being created and registered, but
for which an existing component is to be preferred. An example is
when parsing the schema for XMLSchema itself: the built-in datatype
components should be retained instead of the simple type definition
components dynamically created from the schema.
By providing the value C{None} as the replacement definition, this can
also be used to remove components.
@note: Invoking this requires scans of every item in every category
map in the namespace.
@return: C{replacement_def}
"""
# We need to do replacements in the category map handler, the
# resolver, and the component associator.
return self._replaceComponent_csc(existing_def, replacement_def)
def initialNamespaceContext (self):
"""Obtain the namespace context to be used when creating components in this namespace.
Usually applies only to built-in namespaces, but is also used in the
        autotests when creating a namespace without an xs:schema element.
Note that we must create the instance dynamically, since the
information that goes into it has cross-dependencies that can't be
resolved until this module has been completely loaded."""
if self.__initialNamespaceContext is None:
isn = { }
if self.__contextInScopeNamespaces is not None:
for (k, v) in self.__contextInScopeNamespaces.items():
isn[k] = self.__identifyNamespace(v)
kw = { 'target_namespace' : self
, 'default_namespace' : self.__identifyNamespace(self.__contextDefaultNamespace)
, 'in_scope_namespaces' : isn }
self.__initialNamespaceContext = resolution.NamespaceContext(None, **kw)
return self.__initialNamespaceContext
def __identifyNamespace (self, nsval):
"""Identify the specified namespace, which should be a built-in.
Normally we can just use a reference to the Namespace module instance,
but when creating those instances we sometimes need to refer to ones
for which the instance has not yet been created. In that case, we use
the name of the instance, and resolve the namespace when we need to
create the initial context."""
if nsval is None:
return self
if isinstance(nsval, (str, unicode)):
nsval = globals().get(nsval)
if isinstance(nsval, Namespace):
return nsval
raise pyxb.LogicError('Cannot identify namespace from %s' % (nsval,))
def __str__ (self):
if self.__uri is None:
return 'AbsentNamespace%d' % (self.__absentNamespaceID,)
assert self.__uri is not None
if self.__boundPrefix is not None:
rv = '%s=%s' % (self.__boundPrefix, self.__uri)
else:
rv = self.__uri
return rv
from builtin import *
resolution.NamespaceContext._AddTargetNamespaceAttribute(XMLSchema.createExpandedName('schema'), ExpandedName('targetNamespace'))
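# Hedged usage sketch (illustrative only; assumes this module is importable as
# pyxb.namespace and that the built-in XMLSchema namespace object is exported
# by the `builtin` import above):
#
#   import pyxb.namespace
#   ns = pyxb.namespace.XMLSchema
#   ns.validateComponentModel()          # loads archived components on first use
#   ctx = ns.initialNamespaceContext()   # context for creating components in ns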
## Local Variables:
## fill-column:78
## End:
|
|
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from __future__ import print_function
import sys, os, tempfile, time
from collections import OrderedDict
from .api_base import BuildResult, Progress, UnknownJobException, ApiInvocationMixin
from .speed import Speed
from .rest_api_wrapper import ResourceNotFound, RequestsRestApi
major_version = sys.version_info.major
jenkins_cli_jar = 'jenkins-cli.jar'
hudson_cli_jar = 'hudson-cli.jar'
_superseded = -1
_dequeued = -2
_ct_url_enc = {'Content-Type': 'application/x-www-form-urlencoded'}
def _result_and_progress(build_dct):
result = build_dct['result']
progress = Progress.RUNNING if result is None else Progress.IDLE
result = BuildResult.UNKNOWN if result is None else BuildResult[result]
return (result, progress)
class Jenkins(Speed):
"""Optimized minimal set of methods needed for jenkinsflow to access Jenkins jobs.
Args:
direct_uri (str): Should be a non-proxied uri if possible (e.g. http://localhost:<port> if flow job is running on master)
The public URI will be retrieved from Jenkins and used in output.
job_prefix_filter (str): Jobs with names that don't start with this string will be skipped when polling Jenkins.
If you are using Hudson and have many jobs, it might be a good idea to enable Team support and create a job-runner user,
which only has access to the jobs in the flow that it is executing. That way the job list will be filtered serverside.
username (str): Name of user authorized to execute all jobs in flow.
password (str): Password of user.
invocation_class (class): Defaults to `Invocation`.
"""
def __init__(self, direct_uri, job_prefix_filter=None, username=None, password=None, invocation_class=None, rest_access_provider=RequestsRestApi):
if username or password:
if not (username and password):
raise Exception("You must specify both username and password or neither")
self.rest_api = rest_access_provider(direct_uri, username, password)
self.direct_uri = direct_uri
self.username = username
self.password = password
self.invocation_class = invocation_class or Invocation
self.job_prefix_filter = job_prefix_filter
self._public_uri = None
self.jobs = None
self.queue_items = {}
self.is_jenkins = None
def get_content(self, url, **params):
return self.rest_api.get_content(url, **params)
def get_json(self, url="", **params):
return self.rest_api.get_json(url, **params)
def post(self, url, payload=None, headers=None, **params):
return self.rest_api.post(url, payload, headers, **params)
def headers(self):
return self.rest_api.headers()
@property
def public_uri(self):
if not self._public_uri:
query = "primaryView[url]"
dct = self.get_json(tree=query)
self._public_uri = dct['primaryView']['url'].rstrip('/')
return self._public_uri
def _public_job_url(self, job_name):
return self.public_uri + '/job/' + job_name
def poll(self):
# Determine whether we are talking to Jenkins or Hudson
if self.is_jenkins is None:
# TODO: A lot of Nonsense here because Hudson does not respond reliably
for _ in (1, 2, 3):
try:
head_response = self.headers()
if head_response.get("X-Jenkins"):
self.is_jenkins = True
break
if head_response.get("X-Hudson"):
self.is_jenkins = False
break
except Exception as ex:
head_response = "HEAD request to " + repr(self.direct_uri) + " failed:" + str(ex)
time.sleep(0.1)
else:
raise Exception("Not connected to Jenkins or Hudson (expected X-Jenkins or X-Hudson header, got: " + repr(head_response))
query = "jobs[name,lastBuild[number,result],queueItem[why],actions[parameterDefinitions[name,type]]],primaryView[url]"
dct = self.get_json(tree=query)
self._public_uri = dct['primaryView']['url'].rstrip('/')
self.jobs = {}
for job_dct in dct.get('jobs') or []:
job_name = str(job_dct['name'])
if self.job_prefix_filter and not job_name.startswith(self.job_prefix_filter):
continue
self.jobs[job_name] = ApiJob(self, job_dct, job_name)
def quick_poll(self):
query = "jobs[name,lastBuild[number,result],queueItem[why]]"
dct = self.get_json(tree=query)
for job_dct in dct.get('jobs') or []:
job_name = str(job_dct['name'])
if self.job_prefix_filter and not job_name.startswith(self.job_prefix_filter):
continue
job = self.jobs.get(job_name)
if job:
job.dct = job_dct
continue
# A new job was created while flow was running, get the remaining properties
try:
query = "lastBuild[number,result],queueItem[why],actions[parameterDefinitions[name,type]]"
job_dct = self.get_json("/job/" + job_name, tree=query)
job = ApiJob(self, job_dct, job_name)
self.jobs[job_name] = job
except ResourceNotFound: # pragma: no cover
# Ignore this, the job came and went
pass
def queue_poll(self):
query = "items[task[name],id]"
dct = self.get_json("/queue", tree=query)
queue_items = {}
for qi_dct in dct.get('items') or []:
job_name = str(qi_dct['task']['name'])
if self.job_prefix_filter and not job_name.startswith(self.job_prefix_filter):
continue
queue_items.setdefault(job_name, []).append(qi_dct['id'])
self.queue_items = queue_items
def get_job(self, name):
try:
return self.jobs[name]
except KeyError:
raise UnknownJobException(self._public_job_url(name))
def create_job(self, job_name, config_xml):
self.post('/createItem', name=job_name,
headers={'Content-Type': 'application/xml;charset=utf-8'},
payload=config_xml if major_version < 3 else config_xml.encode('utf-8'))
def delete_job(self, job_name):
try:
self.post('/job/' + job_name + '/doDelete')
except ResourceNotFound as ex:
# TODO: Check error
raise UnknownJobException(self._public_job_url(job_name), ex)
def set_build_description(self, job_name, build_number, description, replace=False, separator='\n'):
"""Utility to set/append build description
Args:
job_name (str): Name of the Jenkins job
build_number (int): The build number for which to set the description
description (str): The description to set on the build
replace (bool): If True, replace existing description, if any, instead of appending to it
separator (str): A separator to insert between any existing description and the new :py:obj:`description` if :py:obj:`replace` is False.
"""
self.poll()
job_path = "/job/" + job_name
build_url = job_path + '/' + str(build_number)
try:
if not replace:
dct = self.get_json(build_url, tree="description")
existing_description = dct['description']
if existing_description:
description = existing_description + separator + description
self.post(build_url + '/submitDescription', headers=_ct_url_enc, payload={'description': description})
except ResourceNotFound as ex:
raise Exception("Build not found " + repr(build_url), ex)
def _download_cli(self, cli_jar):
public_uri = self.public_uri.rstrip('/') + '/'
direct_uri = self.direct_uri.rstrip('/') + '/'
path = 'jnlpJars/' + cli_jar
public_cli_url = public_uri + path
if direct_uri != public_uri:
download_cli_url = direct_uri + path
print("INFO: Downloading cli: '{public_cli_url}' (using direct url: '{direct_cli_url}')".format(
public_cli_url=public_cli_url, direct_cli_url=download_cli_url))
else:
download_cli_url = public_cli_url
print("INFO: Downloading cli: '{download_cli_url}'".format(download_cli_url=download_cli_url))
with open(cli_jar, 'w' if major_version < 3 else 'w+b') as ff:
ff.write(self.get_content('/' + path))
print("INFO: Download finished:", repr(cli_jar))
def set_build_result(self, result, java='java', cli_call=False):
"""Change the result of a Jenkins job.
Note: set_build_result can only be done from within the job, not after the job has finished.
Note: Only available if URL is set in `Jenkins <http://jenkins-ci.org/>`_ system configuration.
This command uses the Jenkins `cli` to change the result. It requires a java executable to run the cli.
Note: In some versions of Jenkins the `cli` is broken, it has no manifest file! This is the case for
e.g. 1.625.1, the version installed on Fedora 23 using `dnf` at the time of Fedora 23 release.
Args:
result (str): The result to set. Should probably be 'unstable'
java (str): Alternative `java` executable. Use this if you don't wish to use the java in the PATH.
"""
print("INFO: Setting job result to", repr(result))
cli_jar = jenkins_cli_jar if self.is_jenkins else hudson_cli_jar
if major_version < 3:
import subprocess32 as subprocess
else:
import subprocess
def set_res():
command = [java, '-jar', cli_jar, '-s', self.direct_uri, 'set-build-result', result]
if self.username:
fname = None
try:
fhandle, fname = tempfile.mkstemp()
fhandle = os.fdopen(fhandle, 'w')
fhandle.write(self.password)
fhandle.close()
subprocess.check_call(command + ['--username', self.username, '--password-file', fname])
finally:
try:
os.remove(fname)
fhandle.close()
except IOError: # pragma: no cover
pass
else:
subprocess.check_call(command)
try:
# If cli_jar is already present attempt to use it
set_res()
except subprocess.CalledProcessError:
# We failed for some reason, try again with updated cli_jar
self._download_cli(cli_jar)
set_res()
class ApiJob(object):
def __init__(self, jenkins, dct, name):
self.jenkins = jenkins
self.dct = dct.copy()
self.name = name
self.public_uri = self.jenkins._public_job_url(self.name) # pylint: disable=protected-access
actions = self.dct.get('actions') or []
self._path = "/job/" + self.name
for action in actions:
if action is None:
continue
if action.get('parameterDefinitions'):
self._build_trigger_path = self._path + "/buildWithParameters"
break
else:
self._build_trigger_path = self._path + "/build"
self.old_build_number = None
self._invocations = OrderedDict()
self.queued_why = None
def invoke(self, securitytoken, build_params, cause, description):
try:
if cause:
build_params = build_params or {}
build_params['cause'] = cause
headers = _ct_url_enc if build_params else None
params = {}
if securitytoken:
params['token'] = securitytoken
response = self.jenkins.post(self._build_trigger_path, headers=headers, payload=build_params, **params)
except ResourceNotFound as ex:
raise UnknownJobException(self.jenkins._public_job_url(self.name), ex) # pylint: disable=protected-access
location = response.headers['location'][len(self.jenkins.direct_uri):-1]
old_inv = self._invocations.get(location)
if old_inv:
old_inv.build_number = _superseded
inv = self.jenkins.invocation_class(self, location, description)
self._invocations[location] = inv
return inv
def poll(self):
for invocation in self._invocations.values():
if not invocation.build_number:
# Hudson does not return queue item from invoke, instead it returns the job URL :(
query = "executable[number],why" if self.jenkins.is_jenkins else "queueItem[why],lastBuild[number]"
dct = self.jenkins.get_json(invocation.queued_item_path, tree=query)
if self.jenkins.is_jenkins:
executable = dct.get('executable')
if executable:
invocation.build_number = executable['number']
invocation.queued_why = None
invocation.set_description()
else:
invocation.queued_why = dct['why']
# If we still have invocations in the queue, wait until next poll to query again
break
else: # Hudson
# Note, this is not guaranteed to be correct in case of simultaneously running flows!
# Should handle multiple invocations in same flow
qi = dct.get('queueItem')
if qi:
invocation.queued_why = qi['why']
last_build = dct.get('lastBuild')
if last_build:
last_build_number = last_build['number']
if last_build_number > self.old_build_number:
invocation.build_number = last_build['number']
self.old_build_number = invocation.build_number
invocation.set_description()
else:
break
def job_status(self):
"""Result, progress and latest buildnumber info for the JOB, NOT the invocation
Return (result, progress_info, latest_build_number) (str, str, int or None):
If there is no finished build, result will be BuildResult.UNKNOWN and latest_build_number will be None
"""
progress = None
qi = self.dct['queueItem']
if qi:
progress = Progress.QUEUED
self.queued_why = qi['why']
dct = self.dct.get('lastBuild')
if dct:
self.old_build_number = dct['number']
result, latest_progress = _result_and_progress(dct)
return (result, progress or latest_progress, self.old_build_number)
return (BuildResult.UNKNOWN, progress or Progress.IDLE, None)
def stop_all(self):
# First remove pending builds from queue
queue_item_ids = self.jenkins.queue_items.get(self.name) or []
for qid in queue_item_ids:
try:
self.jenkins.post('/queue/cancelItem', id=repr(qid))
except ResourceNotFound:
# Job is no longer queued, so just ignore
# NOTE: bug https://issues.jenkins-ci.org/browse/JENKINS-21311 also brings us here!
pass
# Abort running builds
query = "builds[number,result]"
dct = self.jenkins.get_json("/job/" + self.name, tree=query)
for build in dct['builds']:
_result, progress = _result_and_progress(build)
if progress != Progress.IDLE:
build_number = build['number']
try:
self.jenkins.post(self._path + '/' + repr(build_number) + '/stop')
except ResourceNotFound: # pragma: no cover
# Build was deleted, just ignore
pass
def update_config(self, config_xml):
self.jenkins.post("/job/" + self.name + "/config.xml",
headers={'Content-Type': 'application/xml;charset=utf-8'},
payload=config_xml if major_version < 3 else config_xml.encode('utf-8'))
def __repr__(self):
return str(dict(name=self.name, dct=self.dct))
class Invocation(ApiInvocationMixin):
def __init__(self, job, queued_item_path, description):
self.job = job
self.queued_item_path = queued_item_path
self.description = description
self.build_number = None
self.queued_why = None
def __repr__(self):
return 'Invocation: ' + repr(self.queued_item_path) + ' ' + repr(self.build_number) + ' ' + repr(self.queued_why)
def status(self):
"""Result and Progress info for the invocation
Return (result, progress_info) (str, str):
If the build has not started or has not finished running, result will be BuildResult.UNKNOWN
"""
if self.build_number is None:
return (BuildResult.UNKNOWN, Progress.QUEUED)
if self.build_number == _superseded:
return (BuildResult.SUPERSEDED, Progress.IDLE)
if self.build_number == _dequeued:
return (BuildResult.DEQUEUED, Progress.IDLE)
# It seems that even after the executor has been assigned a number in the queue item, the lastBuild might not yet exist
dct = self.job.dct.get('lastBuild')
last_number = dct['number'] if dct else None
if last_number is None:
return (BuildResult.UNKNOWN, Progress.QUEUED)
if last_number == self.build_number:
return _result_and_progress(dct)
if last_number < self.build_number:
# TODO: Why does this happen?
pass # pragma: no cover
# Latest build is not ours, get the correct build
query = "builds[number,result]"
dct = self.job.jenkins.get_json("/job/" + self.job.name, tree=query)
for build in dct['builds']:
if build['number'] == self.build_number:
return _result_and_progress(build)
raise Exception("Build deleted while flow running? This may happen if you invoke more builds than the job is configured to keep. " + repr(self))
def set_description(self):
"""Sets the build description"""
if not self.description:
return
build_url = self.job._path + '/' + repr(self.build_number)
try:
self.job.jenkins.post(build_url + '/submitDescription', headers=_ct_url_enc, payload={'description': self.description})
except ResourceNotFound as ex:
raise Exception("Build deleted while flow running? " + repr(build_url), ex)
def stop(self, dequeue):
try:
if self.build_number is not None and self.build_number >= 0 and not dequeue:
# Job has started
self.job.jenkins.post(self.job._path + '/' + repr(self.build_number) + '/stop')
return
if self.build_number is None and dequeue:
# Job is queued
qid = self.queued_item_path.strip('/').split('/')[2]
self.job.jenkins.post('/queue/cancelItem', id=qid)
self.build_number = _dequeued
except ResourceNotFound as ex: # pragma: no cover
# Job is no longer queued or running, except that it may have just changed from queued to running
# We leave it up to the flow logic to handle that
# NOTE: bug https://issues.jenkins-ci.org/browse/JENKINS-21311 also brings us here!
pass
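# Minimal self-test sketch, not part of the library: it assumes a Jenkins instance is
# reachable on localhost:8080 and that a job named 'demo-job' exists; adjust both to taste.
if __name__ == '__main__':
    _api = Jenkins('http://localhost:8080')
    _api.poll()
    _demo_job = _api.get_job('demo-job')
    print("Job status (result, progress, last build number):", _demo_job.job_status())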
|
|
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = '''
Test TLS server certificate verification options. Exercise conf_remap.
'''
# need Curl
Test.SkipUnless(
Condition.HasProgram("curl", "Curl need to be installed on system for this test to work")
)
# Define default ATS
ts = Test.MakeATSProcess("ts", select_ports=False)
server_foo = Test.MakeOriginServer("server_foo", ssl=True, options = {"--key": "{0}/signed-foo.key".format(Test.RunDirectory), "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory)})
server_bar = Test.MakeOriginServer("server_bar", ssl=True, options = {"--key": "{0}/signed-bar.key".format(Test.RunDirectory), "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory)})
server = Test.MakeOriginServer("server", ssl=True)
dns = Test.MakeDNServer("dns")
request_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: foo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
request_bad_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: bad_foo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
request_bar_header = {"headers": "GET / HTTP/1.1\r\nHost: bar.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
request_bad_bar_header = {"headers": "GET / HTTP/1.1\r\nHost: bad_bar.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server_foo.addResponse("sessionlog.json", request_foo_header, response_header)
server_foo.addResponse("sessionlog.json", request_bad_foo_header, response_header)
server_bar.addResponse("sessionlog.json", request_bar_header, response_header)
server_bar.addResponse("sessionlog.json", request_bad_bar_header, response_header)
# add ssl materials like key, certificates for the server
ts.addSSLfile("ssl/signed-foo.pem")
ts.addSSLfile("ssl/signed-foo.key")
ts.addSSLfile("ssl/signed-bar.pem")
ts.addSSLfile("ssl/signed-bar.key")
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.addSSLfile("ssl/signer.pem")
ts.addSSLfile("ssl/signer.key")
ts.Variables.ssl_port = 4443
ts.Disk.remap_config.AddLine(
'map http://foo.com/basic https://foo.com:{0}'.format(server_foo.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map http://foo.com/override https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format(server_foo.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map http://bar.com/basic https://bar.com:{0}'.format(server_foo.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map http://bar.com/overridedisabled https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=DISABLED'.format(server_foo.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map http://bar.com/overridesignature https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=SIGNATURE @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format(server_foo.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map http://bar.com/overrideenforced https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format(server_foo.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /basic https://127.0.0.1:{0}'.format(server.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /overrideenforce https://127.0.0.1:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format(server.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /overridename https://127.0.0.1:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME'.format(server.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /snipolicyfooremap https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=remap'.format(server_bar.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /snipolicyfoohost https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=host'.format(server_bar.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /snipolicybarremap https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=remap'.format(server_bar.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /snipolicybarhost https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=host'.format(server_bar.Variables.SSL_Port))
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
# Case 1, global config policy=permissive properties=signature
# override for foo.com policy=enforced properties=all
ts.Disk.records_config.update({
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
# enable ssl port
'proxy.config.http.server_ports': '{0} {1}:proto=http2;http:ssl'.format(ts.Variables.port, ts.Variables.ssl_port),
'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
# set global policy
'proxy.config.ssl.client.verify.server' : 2,
'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.CA.cert.filename': 'signer.pem',
'proxy.config.url_remap.pristine_host_hdr': 1,
'proxy.config.exec_thread.autoconfig.scale': 1.0,
'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port),
'proxy.config.dns.resolv_conf': 'NULL'
})
dns.addRecords(records={"foo.com.": ["127.0.0.1"]})
dns.addRecords(records={"bar.com.": ["127.0.0.1"]})
# Should succeed without message
tr = Test.AddTestRun("default-permissive-success")
tr.Setup.Copy("ssl/signed-foo.key")
tr.Setup.Copy("ssl/signed-foo.pem")
tr.Setup.Copy("ssl/signed-bar.key")
tr.Setup.Copy("ssl/signed-bar.pem")
tr.Processes.Default.Command = 'curl -k -H \"host: foo.com\" http://127.0.0.1:{0}/basic'.format(ts.Variables.port)
tr.ReturnCode = 0
tr.Processes.Default.StartBefore(dns)
tr.Processes.Default.StartBefore(server_foo)
tr.Processes.Default.StartBefore(server_bar)
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.port))
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
# Should succeed. No message
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr2 = Test.AddTestRun("default-permissive-fail")
tr2.Processes.Default.Command = "curl -k -H \"host: bar.com\" http://127.0.0.1:{0}/basic".format(ts.Variables.port)
tr2.ReturnCode = 0
tr2.StillRunningAfter = server
tr2.StillRunningAfter = ts
# Should succeed, but there will be a message in the log about the name mismatch
tr2.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr2 = Test.AddTestRun("default-permissive-fail2")
tr2.Processes.Default.Command = "curl -k -H \"host: random.com\" http://127.0.0.1:{0}/basic".format(ts.Variables.port)
tr2.ReturnCode = 0
tr2.StillRunningAfter = server
tr2.StillRunningAfter = ts
# Should succeed, but there will be a message in the log about the signature
tr2.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr3 = Test.AddTestRun("override-foo")
tr3.Processes.Default.Command = "curl -k -H \"host: foo.com\" http://127.0.0.1:{0}/override".format(ts.Variables.port)
tr3.ReturnCode = 0
tr3.StillRunningAfter = server
tr3.StillRunningAfter = ts
# Should succeed. No error messages
tr3.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr4 = Test.AddTestRun("override-bar-disabled")
tr4.Processes.Default.Command = "curl -k -H \"host: bad_bar.com\" http://127.0.0.1:{0}/overridedisabled".format(ts.Variables.port)
tr4.ReturnCode = 0
tr4.StillRunningAfter = server
tr4.StillRunningAfter = ts
# Succeed. No error messages
tr4.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr5 = Test.AddTestRun("override-bar-signature-enforced")
tr5.Processes.Default.Command = "curl -k -H \"host: bar.com\" http://127.0.0.1:{0}/overridesignature".format(ts.Variables.port)
tr5.ReturnCode = 0
tr5.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr5.StillRunningAfter = server
tr5.StillRunningAfter = ts
tr6 = Test.AddTestRun("override-bar-enforced")
tr6.Processes.Default.Command = "curl -k -H \"host: bar.com\" http://127.0.0.1:{0}/overrideenforced".format(ts.Variables.port)
tr6.ReturnCode = 0
# Should fail
tr6.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Curl attempt should have failed")
tr6.StillRunningAfter = server
tr6.StillRunningAfter = ts
# Should succeed
tr = Test.AddTestRun("foo-to-bar-sni-policy-remap")
tr.Processes.Default.Command = "curl -k -H \"host: foo.com\" http://127.0.0.1:{0}/snipolicybarremap".format(ts.Variables.port)
tr.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could not connect", "Curl attempt should succeed")
# Should fail
tr = Test.AddTestRun("foo-to-bar-sni-policy-host")
tr.Processes.Default.Command = "curl -k -H \"host: foo.com\" http://127.0.0.1:{0}/snipolicybarhost".format(ts.Variables.port)
tr.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could not connect", "Curl attempt should fail")
# Should fail
tr = Test.AddTestRun("bar-to-foo-sni-policy-remap")
tr.Processes.Default.Command = "curl -k -H \"host: bar.com\" http://127.0.0.1:{0}/snipolicyfooremap".format(ts.Variables.port)
tr.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could not connect", "Curl attempt should fail")
# Should succeed
tr = Test.AddTestRun("bar-to-foo-sni-policy-host")
tr.Processes.Default.Command = "curl -k -H \"host: bar.com\" http://127.0.0.1:{0}/snipolicyfoohost".format(ts.Variables.port)
tr.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could not connect", "Curl attempt should succeed")
# Overriding the built-in ERROR check since we expect some cases to fail
# checks on random.com should fail with message only
ts.Disk.diags_log.Content = Testers.ContainsExpression("WARNING: Core server certificate verification failed for \(random.com\). Action=Continue Error=self signed certificate server=127.0.0.1\(127.0.0.1\) depth=0", "Warning for self signed certificate")
# permissive failure for bar.com
ts.Disk.diags_log.Content += Testers.ContainsExpression("WARNING: SNI \(bar.com\) not in certificate. Action=Continue server=bar.com\(127.0.0.1\)", "Warning on missing name for bar.com")
# name check failure for random.com
ts.Disk.diags_log.Content += Testers.ContainsExpression("WARNING: SNI \(random.com\) not in certificate. Action=Continue server=127.0.0.1\(127.0.0.1\)", "Warning on missing name for randome.com")
# name check failure for bar.com
ts.Disk.diags_log.Content += Testers.ContainsExpression("WARNING: SNI \(bar.com\) not in certificate. Action=Terminate server=bar.com\(127.0.0.1\)", "Failure on missing name for bar.com")
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import MetaData, String, Table
import datetime
meta = MetaData()
resources = [
'instances',
'cores',
'volumes',
'gigabytes',
'floating_ips',
'metadata_items',
]
def old_style_quotas_table(name):
return Table(name, meta,
Column('id', Integer(), primary_key=True),
Column('created_at', DateTime(),
default=datetime.datetime.utcnow),
Column('updated_at', DateTime(),
onupdate=datetime.datetime.utcnow),
Column('deleted_at', DateTime()),
Column('deleted', Boolean(), default=False),
Column('project_id',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False)),
Column('instances', Integer()),
Column('cores', Integer()),
Column('volumes', Integer()),
Column('gigabytes', Integer()),
Column('floating_ips', Integer()),
Column('metadata_items', Integer()),
)
def new_style_quotas_table(name):
return Table(name, meta,
Column('id', Integer(), primary_key=True),
Column('created_at', DateTime(),
default=datetime.datetime.utcnow),
Column('updated_at', DateTime(),
onupdate=datetime.datetime.utcnow),
Column('deleted_at', DateTime()),
Column('deleted', Boolean(), default=False),
Column('project_id',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False)),
Column('resource',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=False),
Column('hard_limit', Integer(), nullable=True),
)
def existing_quotas_table(migrate_engine):
return Table('quotas', meta, autoload=True, autoload_with=migrate_engine)
def _assert_no_duplicate_project_ids(quotas):
project_ids = set()
message = ('There are multiple active quotas for project "%s" '
'(among others, possibly). '
'Please resolve all ambiguous quotas before '
'reattempting the migration.')
for quota in quotas:
assert quota.project_id not in project_ids, message % quota.project_id
project_ids.add(quota.project_id)
def assert_old_quotas_have_no_active_duplicates(migrate_engine, quotas):
"""Ensure that there are no duplicate non-deleted quota entries."""
select = quotas.select().where(quotas.c.deleted == False)
results = migrate_engine.execute(select)
_assert_no_duplicate_project_ids(list(results))
def assert_new_quotas_have_no_active_duplicates(migrate_engine, quotas):
"""Ensure that there are no duplicate non-deleted quota entries."""
for resource in resources:
select = quotas.select().\
where(quotas.c.deleted == False).\
where(quotas.c.resource == resource)
results = migrate_engine.execute(select)
_assert_no_duplicate_project_ids(list(results))
def convert_forward(migrate_engine, old_quotas, new_quotas):
quotas = list(migrate_engine.execute(old_quotas.select()))
for quota in quotas:
for resource in resources:
hard_limit = getattr(quota, resource)
if hard_limit is None:
continue
insert = new_quotas.insert().values(
created_at=quota.created_at,
updated_at=quota.updated_at,
deleted_at=quota.deleted_at,
deleted=quota.deleted,
project_id=quota.project_id,
resource=resource,
hard_limit=hard_limit)
migrate_engine.execute(insert)
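# Illustrative example (hypothetical data) of the forward conversion:
#   old-style row:  project_id='p1', instances=10, cores=20, volumes=None, ...
#   new-style rows: ('p1', 'instances', 10) and ('p1', 'cores', 20);
#   columns whose value is None produce no row at all.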
def earliest(date1, date2):
if date1 is None and date2 is None:
return None
if date1 is None:
return date2
if date2 is None:
return date1
if date1 < date2:
return date1
return date2
def latest(date1, date2):
if date1 is None and date2 is None:
return None
if date1 is None:
return date2
if date2 is None:
return date1
if date1 > date2:
return date1
return date2
def convert_backward(migrate_engine, old_quotas, new_quotas):
quotas = {}
for quota in migrate_engine.execute(new_quotas.select()):
if (quota.resource not in resources
or quota.hard_limit is None or quota.deleted):
continue
if quota.project_id not in quotas:
quotas[quota.project_id] = {
'project_id': quota.project_id,
'created_at': quota.created_at,
'updated_at': quota.updated_at,
quota.resource: quota.hard_limit
}
else:
quotas[quota.project_id]['created_at'] = earliest(
quota.created_at, quotas[quota.project_id]['created_at'])
quotas[quota.project_id]['updated_at'] = latest(
quota.updated_at, quotas[quota.project_id]['updated_at'])
quotas[quota.project_id][quota.resource] = quota.hard_limit
for quota in quotas.itervalues():
insert = old_quotas.insert().values(**quota)
migrate_engine.execute(insert)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
old_quotas = existing_quotas_table(migrate_engine)
assert_old_quotas_have_no_active_duplicates(migrate_engine, old_quotas)
new_quotas = new_style_quotas_table('quotas_new')
new_quotas.create()
convert_forward(migrate_engine, old_quotas, new_quotas)
old_quotas.drop()
new_quotas.rename('quotas')
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta.bind = migrate_engine
new_quotas = existing_quotas_table(migrate_engine)
assert_new_quotas_have_no_active_duplicates(migrate_engine, new_quotas)
old_quotas = old_style_quotas_table('quotas_old')
old_quotas.create()
convert_backward(migrate_engine, old_quotas, new_quotas)
new_quotas.drop()
old_quotas.rename('quotas')
|
|
__all__=(
'Ean13BarcodeWidget','isEanString',
'Ean8BarcodeWidget', 'UPCA', 'Ean5BarcodeWidget', 'ISBNBarcodeWidget',
)
from reportlab.graphics.shapes import Group, String, Rect
from reportlab.lib import colors
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.lib.validators import isNumber, isColor, isString, Validator, isBoolean, NoneOr
from reportlab.lib.attrmap import *
from reportlab.graphics.charts.areas import PlotArea
from reportlab.lib.units import mm
from reportlab.lib.utils import asNative
#work out a list of manufacturer codes....
_eanNumberSystems = [
('00-13', 'USA & Canada'),
('20-29', 'In-Store Functions'),
('30-37', 'France'),
('40-44', 'Germany'),
('45', 'Japan (also 49)'),
('46', 'Russian Federation'),
('471', 'Taiwan'),
('474', 'Estonia'),
('475', 'Latvia'),
('477', 'Lithuania'),
('479', 'Sri Lanka'),
('480', 'Philippines'),
('482', 'Ukraine'),
('484', 'Moldova'),
('485', 'Armenia'),
('486', 'Georgia'),
('487', 'Kazakhstan'),
('489', 'Hong Kong'),
('49', 'Japan (JAN-13)'),
('50', 'United Kingdom'),
('520', 'Greece'),
('528', 'Lebanon'),
('529', 'Cyprus'),
('531', 'Macedonia'),
('535', 'Malta'),
('539', 'Ireland'),
('54', 'Belgium & Luxembourg'),
('560', 'Portugal'),
('569', 'Iceland'),
('57', 'Denmark'),
('590', 'Poland'),
('594', 'Romania'),
('599', 'Hungary'),
('600-601', 'South Africa'),
('609', 'Mauritius'),
('611', 'Morocco'),
('613', 'Algeria'),
('619', 'Tunisia'),
('622', 'Egypt'),
('625', 'Jordan'),
('626', 'Iran'),
('64', 'Finland'),
('690-692', 'China'),
('70', 'Norway'),
('729', 'Israel'),
('73', 'Sweden'),
('740', 'Guatemala'),
('741', 'El Salvador'),
('742', 'Honduras'),
('743', 'Nicaragua'),
('744', 'Costa Rica'),
('746', 'Dominican Republic'),
('750', 'Mexico'),
('759', 'Venezuela'),
('76', 'Switzerland'),
('770', 'Colombia'),
('773', 'Uruguay'),
('775', 'Peru'),
('777', 'Bolivia'),
('779', 'Argentina'),
('780', 'Chile'),
('784', 'Paraguay'),
('785', 'Peru'),
('786', 'Ecuador'),
('789', 'Brazil'),
('80-83', 'Italy'),
('84', 'Spain'),
('850', 'Cuba'),
('858', 'Slovakia'),
('859', 'Czech Republic'),
('860', 'Yugoslavia'),
('869', 'Turkey'),
('87', 'Netherlands'),
('880', 'South Korea'),
('885', 'Thailand'),
('888', 'Singapore'),
('890', 'India'),
('893', 'Vietnam'),
('899', 'Indonesia'),
('90-91', 'Austria'),
('93', 'Australia'),
('94', 'New Zealand'),
('955', 'Malaysia'),
('977', 'International Standard Serial Number for Periodicals (ISSN)'),
('978', 'International Standard Book Numbering (ISBN)'),
('979', 'International Standard Music Number (ISMN)'),
('980', 'Refund receipts'),
('981-982', 'Common Currency Coupons'),
('99', 'Coupons')
]
manufacturerCodes = {}
for (k, v) in _eanNumberSystems:
words = k.split('-')
if len(words)==2:
fromCode = int(words[0])
toCode = int(words[1])
for code in range(fromCode, toCode+1):
manufacturerCodes[code] = v
else:
manufacturerCodes[int(k)] = v
def nDigits(n):
class _ndigits(Validator):
def test(self,x):
return type(x) is str and len(x)<=n and len([c for c in x if c in "0123456789"])==n
return _ndigits()
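# For example, nDigits(12) yields a validator whose test() accepts only a string of
# exactly twelve decimal digits:
#   nDigits(12).test('123456789012')  -> True
#   nDigits(12).test('12345678901A')  -> False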
class Ean13BarcodeWidget(PlotArea):
codeName = "EAN13"
_attrMap = AttrMap(BASE=PlotArea,
value = AttrMapValue(nDigits(12), desc='the number'),
fontName = AttrMapValue(isString, desc='fontName'),
fontSize = AttrMapValue(isNumber, desc='font size'),
x = AttrMapValue(isNumber, desc='x-coord'),
y = AttrMapValue(isNumber, desc='y-coord'),
barFillColor = AttrMapValue(isColor, desc='bar color'),
barHeight = AttrMapValue(isNumber, desc='Height of bars.'),
barWidth = AttrMapValue(isNumber, desc='Width of bars.'),
barStrokeWidth = AttrMapValue(isNumber, desc='Width of bar borders.'),
barStrokeColor = AttrMapValue(isColor, desc='Color of bar borders.'),
textColor = AttrMapValue(isColor, desc='human readable text color'),
humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
quiet = AttrMapValue(isBoolean, desc='if quiet zone to be used'),
lquiet = AttrMapValue(isBoolean, desc='left quiet zone length'),
rquiet = AttrMapValue(isBoolean, desc='right quiet zone length'),
)
_digits=12
_start_right = 7 #for ean-13 left = [0:7] right=[7:13]
_nbars = 113
barHeight = 25.93*mm #millimeters
barWidth = (37.29/_nbars)*mm
humanReadable = 1
_0csw = 1
_1csw = 3
#Left Hand Digits.
_left = ( ("0001101", "0011001", "0010011", "0111101",
"0100011", "0110001", "0101111", "0111011",
"0110111", "0001011",
), #odd left hand digits
("0100111", "0110011", "0011011", "0100001",
"0011101", "0111001", "0000101", "0010001",
"0001001", "0010111"), #even left hand digits
)
_right = ("1110010", "1100110", "1101100", "1000010",
"1011100", "1001110", "1010000", "1000100",
"1001000", "1110100")
quiet = 1
rquiet = lquiet = None
_tail = "101"
_sep = "01010"
_lhconvert={
"0": (0,0,0,0,0,0),
"1": (0,0,1,0,1,1),
"2": (0,0,1,1,0,1),
"3": (0,0,1,1,1,0),
"4": (0,1,0,0,1,1),
"5": (0,1,1,0,0,1),
"6": (0,1,1,1,0,0),
"7": (0,1,0,1,0,1),
"8": (0,1,0,1,1,0),
"9": (0,1,1,0,1,0)
}
fontSize = 8 #points
fontName = 'Helvetica'
textColor = barFillColor = colors.black
barStrokeColor = None
barStrokeWidth = 0
x = 0
y = 0
def __init__(self,value='123456789012',**kw):
value = str(value) if isinstance(value,int) else asNative(value)
self.value=max(self._digits-len(value),0)*'0'+value[:self._digits]
for k, v in kw.items():
setattr(self, k, v)
width = property(lambda self: self.barWidth*(self._nbars-18+self._calc_quiet(self.lquiet)+self._calc_quiet(self.rquiet)))
def wrap(self,aW,aH):
return self.width,self.barHeight
def _encode_left(self,s,a):
cp = self._lhconvert[s[0]] #convert the left hand numbers
_left = self._left
z = ord('0')
for i,c in enumerate(s[1:self._start_right]):
a(_left[cp[i]][ord(c)-z])
def _short_bar(self,i):
i += 9 - self._lquiet
return self.humanReadable and ((12<i<55) or (57<i<101))
def _calc_quiet(self,v):
if self.quiet:
if v is None:
v = 9
else:
x = float(max(v,0))/self.barWidth
v = int(x)
if v-x>0: v += 1
else:
v = 0
return v
def draw(self):
g = Group()
gAdd = g.add
barWidth = self.barWidth
width = self.width
barHeight = self.barHeight
x = self.x
y = self.y
gAdd(Rect(x,y,width,barHeight,fillColor=None,strokeColor=None,strokeWidth=0))
s = self.value+self._checkdigit(self.value)
self._lquiet = lquiet = self._calc_quiet(self.lquiet)
rquiet = self._calc_quiet(self.rquiet)
b = [lquiet*'0',self._tail] #the signal string
a = b.append
self._encode_left(s,a)
a(self._sep)
z = ord('0')
_right = self._right
for c in s[self._start_right:]:
a(_right[ord(c)-z])
a(self._tail)
a(rquiet*'0')
fontSize = self.fontSize
barFillColor = self.barFillColor
barStrokeWidth = self.barStrokeWidth
barStrokeColor = self.barStrokeColor
fth = fontSize*1.2
b = ''.join(b)
lrect = None
for i,c in enumerate(b):
if c=="1":
dh = self._short_bar(i) and fth or 0
yh = y+dh
if lrect and lrect.y==yh:
lrect.width += barWidth
else:
lrect = Rect(x,yh,barWidth,barHeight-dh,fillColor=barFillColor,strokeWidth=barStrokeWidth,strokeColor=barStrokeColor)
gAdd(lrect)
else:
lrect = None
x += barWidth
if self.humanReadable: self._add_human_readable(s,gAdd)
return g
def _add_human_readable(self,s,gAdd):
barWidth = self.barWidth
fontSize = self.fontSize
textColor = self.textColor
fontName = self.fontName
fth = fontSize*1.2
# draw the num below the line.
c = s[0]
w = stringWidth(c,fontName,fontSize)
x = self.x+barWidth*(self._lquiet-8)
y = self.y + 0.2*fth
gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor))
x = self.x + (33-9+self._lquiet)*barWidth
c = s[1:7]
gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
x += 47*barWidth
c = s[7:]
gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
def _checkdigit(cls,num):
z = ord('0')
iSum = cls._0csw*sum([(ord(x)-z) for x in num[::2]]) \
+ cls._1csw*sum([(ord(x)-z) for x in num[1::2]])
return chr(z+((10-(iSum%10))%10))
_checkdigit=classmethod(_checkdigit)
class Ean8BarcodeWidget(Ean13BarcodeWidget):
codeName = "EAN8"
_attrMap = AttrMap(BASE=Ean13BarcodeWidget,
value = AttrMapValue(nDigits(7), desc='the number'),
)
_start_right = 4 #for ean-8 left = [0:4] right=[4:8]
_nbars = 85
_digits=7
_0csw = 3
_1csw = 1
def _encode_left(self,s,a):
cp = self._lhconvert[s[0]] #convert the left hand numbers
_left = self._left[0]
z = ord('0')
for i,c in enumerate(s[0:self._start_right]):
a(_left[ord(c)-z])
def _short_bar(self,i):
i += 9 - self._lquiet
return self.humanReadable and ((12<i<41) or (43<i<73))
def _add_human_readable(self,s,gAdd):
barWidth = self.barWidth
fontSize = self.fontSize
textColor = self.textColor
fontName = self.fontName
fth = fontSize*1.2
# draw the num below the line.
y = self.y + 0.2*fth
x = (26.5-9+self._lquiet)*barWidth
c = s[0:4]
gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
x = (59.5-9+self._lquiet)*barWidth
c = s[4:]
gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
class UPCA(Ean13BarcodeWidget):
codeName = "UPCA"
_attrMap = AttrMap(BASE=Ean13BarcodeWidget,
value = AttrMapValue(nDigits(11), desc='the number'),
)
_start_right = 6
_digits = 11
_0csw = 3
_1csw = 1
_nbars = 1+7*11+2*3+5
#these methods contributed by Kyle Macfarlane
#https://bitbucket.org/kylemacfarlane/
def _encode_left(self,s,a):
cp = self._lhconvert[s[0]] #convert the left hand numbers
_left = self._left[0]
z = ord('0')
for i,c in enumerate(s[0:self._start_right]):
a(_left[ord(c)-z])
def _short_bar(self,i):
i += 9 - self._lquiet
return self.humanReadable and ((18<i<55) or (57<i<93))
def _add_human_readable(self,s,gAdd):
barWidth = self.barWidth
fontSize = self.fontSize
textColor = self.textColor
fontName = self.fontName
fth = fontSize*1.2
# draw the num below the line.
c = s[0]
w = stringWidth(c,fontName,fontSize)
x = self.x+barWidth*(self._lquiet-8)
y = self.y + 0.2*fth
gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor))
x = self.x + (38-9+self._lquiet)*barWidth
c = s[1:6]
gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
x += 36*barWidth
c = s[6:11]
gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
x += 32*barWidth
c = s[11]
gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor))
class Ean5BarcodeWidget(Ean13BarcodeWidget):
"""
EAN-5 barcodes can print the human-readable price; to enable this, set:
price=True
"""
codeName = "EAN5"
_attrMap = AttrMap(BASE=Ean13BarcodeWidget,
price=AttrMapValue(isBoolean,
desc='whether to display the price or not'),
value=AttrMapValue(nDigits(5), desc='the number'),
)
_nbars = 48
_digits = 5
_sep = '01'
_tail = '01011'
_0csw = 3
_1csw = 9
_lhconvert = {
"0": (1, 1, 0, 0, 0),
"1": (1, 0, 1, 0, 0),
"2": (1, 0, 0, 1, 0),
"3": (1, 0, 0, 0, 1),
"4": (0, 1, 1, 0, 0),
"5": (0, 0, 1, 1, 0),
"6": (0, 0, 0, 1, 1),
"7": (0, 1, 0, 1, 0),
"8": (0, 1, 0, 0, 1),
"9": (0, 0, 1, 0, 1)
}
def _checkdigit(cls, num):
z = ord('0')
iSum = cls._0csw * sum([(ord(x) - z) for x in num[::2]]) \
+ cls._1csw * sum([(ord(x) - z) for x in num[1::2]])
return chr(z + iSum % 10)
def _encode_left(self, s, a):
check = self._checkdigit(s)
cp = self._lhconvert[check]
_left = self._left
_sep = self._sep
z = ord('0')
full_code = []
for i, c in enumerate(s):
full_code.append(_left[cp[i]][ord(c) - z])
a(_sep.join(full_code))
def _short_bar(self, i):
i += 9 - self._lquiet
return self.humanReadable and ((12 < i < 41) or (43 < i < 73))
def _add_human_readable(self, s, gAdd):
barWidth = self.barWidth
fontSize = self.fontSize
textColor = self.textColor
fontName = self.fontName
fth = fontSize * 1.2
# draw the num below the line.
y = self.y + 0.2 * fth
x = self.x + (self._nbars + self._lquiet * 2) * barWidth / 2
gAdd(String(x, y, s, fontName=fontName, fontSize=fontSize,
fillColor=textColor, textAnchor='middle'))
price = getattr(self,'price',None)
if price:
price = None
if s[0] in '3456':
price = '$'
elif s[0] in '01':
price = '\xc2\xa3'
if price is None:
return
price += s[1:3] + '.' + s[3:5]
y += self.barHeight
gAdd(String(x, y, price, fontName=fontName, fontSize=fontSize,
fillColor=textColor, textAnchor='middle'))
def draw(self):
g = Group()
gAdd = g.add
barWidth = self.barWidth
width = self.width
barHeight = self.barHeight
x = self.x
y = self.y
gAdd(Rect(x, y, width, barHeight, fillColor=None, strokeColor=None,
strokeWidth=0))
s = self.value
self._lquiet = lquiet = self._calc_quiet(self.lquiet)
rquiet = self._calc_quiet(self.rquiet)
b = [lquiet * '0' + self._tail] # the signal string
a = b.append
self._encode_left(s, a)
a(rquiet * '0')
fontSize = self.fontSize
barFillColor = self.barFillColor
barStrokeWidth = self.barStrokeWidth
barStrokeColor = self.barStrokeColor
fth = fontSize * 1.2
b = ''.join(b)
lrect = None
for i, c in enumerate(b):
if c == "1":
dh = fth
yh = y + dh
if lrect and lrect.y == yh:
lrect.width += barWidth
else:
lrect = Rect(x, yh, barWidth, barHeight - dh,
fillColor=barFillColor,
strokeWidth=barStrokeWidth,
strokeColor=barStrokeColor)
gAdd(lrect)
else:
lrect = None
x += barWidth
if self.humanReadable:
self._add_human_readable(s, gAdd)
return g
class ISBNBarcodeWidget(Ean13BarcodeWidget):
"""
ISBN Barcodes optionally print the EAN-5 supplemental price
barcode (with the price in dollars or pounds). Set price to a string
that follows the EAN-5 for ISBN spec:
leading digit 0, 1 = GBP
3 = AUD
4 = NZD
5 = USD
6 = CAD
next 4 digits = price between 00.00 and 99.98, e.g.:
price='52499' # $24.99 USD
"""
codeName = 'ISBN'
_attrMap = AttrMap(BASE=Ean13BarcodeWidget,
price=AttrMapValue(
NoneOr(nDigits(5)),
desc='None or the price to display'),
)
def draw(self):
g = Ean13BarcodeWidget.draw(self)
price = getattr(self,'price',None)
if not price:
return g
bounds = g.getBounds()
x = bounds[2]
pricecode = Ean5BarcodeWidget(x=x, value=price, price=True,
humanReadable=True,
barHeight=self.barHeight, quiet=self.quiet)
g.add(pricecode)
return g
def _add_human_readable(self, s, gAdd):
Ean13BarcodeWidget._add_human_readable(self,s, gAdd)
barWidth = self.barWidth
barHeight = self.barHeight
fontSize = self.fontSize
textColor = self.textColor
fontName = self.fontName
fth = fontSize * 1.2
y = self.y + 0.2 * fth + barHeight
x = self._lquiet * barWidth
isbn = 'ISBN '
segments = [s[0:3], s[3:4], s[4:9], s[9:12], s[12]]
isbn += '-'.join(segments)
gAdd(String(x, y, isbn, fontName=fontName, fontSize=fontSize,
fillColor=textColor))
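# Minimal rendering sketch (not part of the library): assumes the standard reportlab
# Drawing/renderPDF APIs; the value and output file name below are only examples.
if __name__ == '__main__':
    from reportlab.graphics.shapes import Drawing
    from reportlab.graphics import renderPDF
    drawing = Drawing(90*mm, 40*mm)
    drawing.add(Ean13BarcodeWidget(value='123456789012', x=5*mm, y=5*mm))
    renderPDF.drawToFile(drawing, 'ean13_demo.pdf')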
|
|
# Copyright 2015 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import Callable
from unittest.mock import Mock
from tests.base import SpaceTest
from .base import (LibModelTest, ModelObjectTest, StateMixinTest,
EqualityMixinTest)
from lib.error import ObjectNotFound
from lib.model import building
from lib import model
class TestLibModelBuilding(LibModelTest):
def setUp(self):
self.expected_exports = [
building.Mine, building.SolarPowerPlant, building.ALL_BUILDINGS,
building.get_building, building.get_all_building_names,
building.get_all_building_abbr]
class TestBuildingModule(SpaceTest):
def setUp(self):
self.expected_building_classes = ['Mine', 'SolarPowerPlant']
self.expected_building_names = ['Mine', 'Solar Power Plant']
self.expected_building_abbrs = ['Mn', 'SPP']
def _get_building_subclass_count(self):
building_subclass_count = 0
for attr in dir(building):
obj = getattr(building, attr, None)
if not isinstance(obj, type):
continue
if obj and obj.__base__ is building.Building:
building_subclass_count += 1
return building_subclass_count
def test_expected_vars(self):
"""Try to catch developer ommissions"""
subclass_count = self._get_building_subclass_count()
self.assertEqual(subclass_count, len(self.expected_building_classes))
self.assertEqual(subclass_count, len(self.expected_building_names))
self.assertEqual(subclass_count, len(self.expected_building_abbrs))
def test_all_buildings_list(self):
self.assertEqual(len(self.expected_building_classes),
len(building.ALL_BUILDINGS))
all_building_names = [bld.__name__ for bld in building.ALL_BUILDINGS]
for bldng in self.expected_building_classes:
self.assertIn(bldng, all_building_names)
def test_get_all_building_names(self):
all_building_names = building.get_all_building_names()
self.assertEqual(set(all_building_names),
set(self.expected_building_names))
def test_get_all_building_abbr(self):
all_building_abbr = building.get_all_building_abbr()
self.assertEqual(set(all_building_abbr),
set(self.expected_building_abbrs))
def test_get_building_not_found(self):
self.assertRaises(ObjectNotFound, building.get_building, 'flabber')
def test_get_building_type_no_level(self):
for building_type in building.ALL_BUILDINGS:
test_val = building.get_building(building_type)
self.assertEqual(building_type, test_val)
def test_get_building_name_no_level(self):
for building_type in building.ALL_BUILDINGS:
test_val = building.get_building(building_type.name)
self.assertEqual(building_type, test_val)
def test_get_building_abbr_no_level(self):
for building_type in building.ALL_BUILDINGS:
test_val = building.get_building(building_type.abbr)
self.assertEqual(building_type, test_val)
def test_get_building_type(self):
level = 1
for building_type in building.ALL_BUILDINGS:
test_val = building.get_building(building_type, level=level)
self.assertIsInstance(test_val, building_type)
def test_get_building_name(self):
level = 1
for building_type in building.ALL_BUILDINGS:
test_val = building.get_building(building_type.name, level=level)
self.assertIsInstance(test_val, building_type)
def test_get_building_abbr(self):
level = 1
for building_type in building.ALL_BUILDINGS:
test_val = building.get_building(building_type.abbr, level=level)
self.assertIsInstance(test_val, building_type)
def test_are_requirements_met_resources(self):
site = Mock(spec=model.Planet)
bld = building.Mine
site.resources = model.Resources()
self.assertFalse(bld.are_requirements_met(site, level=1))
site.resources.ore = 11
self.assertTrue(bld.are_requirements_met(site))
def test_are_requirements_met_buildings(self):
self.skipTest('NI: Need to have buildings with building '
'requirements to test.')
def test_are_requirements_met_research(self):
self.skipTest('NI: Need to have buildings with research '
'requirements to test.')
class TestBuildingRequirements(ModelObjectTest, StateMixinTest):
def get_new_instance(self):
return building.BuildingRequirements()
def get_tst_state(self):
return (model.Resources(ore=3, metal=11), {'Mine': 3}, {})
def setUp(self):
self.object = self.get_new_instance()
self.expected_attrs = {'resources': model.Resources,
'research': dict,
'buildings': dict}
self.classname_in_repr = True
self.expected_state = (model.Resources, dict, dict)
def test_repr(self):
super().test_repr()
rep = repr(self.object)
self.assertTrue(rep.startswith('BuildingRequirements('))
self.assertTrue(rep.endswith(')'))
self.assertEqual(0, rep.count('\n'))
self.assert_attrs_in_string(rep)
def test_str(self):
str(self.object)
super().test_str()
class TestBuildingBaseClass(ModelObjectTest, EqualityMixinTest):
def get_new_instance(self, level=None):
return building.Building(level=level)
def setUp(self):
self.max_level = 1000 # max level to consider for these tests
self.sun_energy = 1 # must return int > 0
self.level = random.randint(0, self.max_level)
self.expected_state = (int, Callable)
self.expected_attrs = {'level': int, 'modifier': model.Resources,
'requirements': building.BuildingRequirements,
'name': str, 'abbr': str,
"under_construction": bool}
self.expected_modifier_type = model.Resources
self.expected_requirements_type = building.BuildingRequirements
self.object = self.get_new_instance()
self.classname_in_repr = True
def filter_presentation_attrs(self):
self.expected_attrs.pop('name')
self.expected_attrs.pop('abbr')
def test_str(self):
self.filter_presentation_attrs()
super().test_str()
def test_repr(self):
self.filter_presentation_attrs()
super().test_repr()
def get_equal_tst_values(self):
self.object = self.get_new_instance(self.level)
return self.get_new_instance(self.level)
def get_non_equal_tst_values(self):
self.object = self.get_new_instance(self.level)
return self.get_new_instance(self.level+1)
def test_constructor(self):
no_arg = self.get_new_instance()
self.assertEqual(1, no_arg.level)
arg = self.get_new_instance(level=self.level)
self.assertEqual(self.level, arg.level)
def test_modifier(self):
self.assertIsInstance(self.object.modifier,
self.expected_modifier_type)
def test_electricity(self):
self.assertGreaterEqual(self.object.electricity(self.sun_energy), 0)
def test_requirements(self):
self.assertIsInstance(self.object.requirements,
self.expected_requirements_type)
def test_compare(self):
"""
Although value is based on level, modifier and electricity values
the modifier and electricity values are calculated based on level.
"""
test_obj = self.get_non_equal_tst_values()
test_val = self.object._compare(test_obj)
assert_a, _ = self.get_equality_assert_methods()
assert_a(test_val < 0)
test_val = test_obj._compare(self.object)
assert_a(test_val > 0)
class TestMine(TestBuildingBaseClass):
def get_new_instance(self, level=None):
return building.Mine(level=level)
def setUp(self):
super().setUp()
self.disable_prediction = False
self.negative_equality_logic = False
def test_electricity(self):
self.assertLessEqual(self.object.electricity(self.sun_energy), 0)
def predict_avg(self):
low = self.get_new_instance(self.level)
high = self.get_new_instance(self.level+1)
retval = ((high.level - low.level) +
(high.modifier.trade_value - low.modifier.trade_value)) / 2.0
return retval
def get_non_equal_tst_values(self):
if not self.disable_prediction:
avg = self.predict_avg()
self.negative_equality_logic = ((avg < 0) is True)
return super().get_non_equal_tst_values()
def test_not_equal_eq_ne(self):
self.disable_prediction = True
super().test_not_equal_eq_ne()
def test_compare(self):
self.disable_prediction = False
super().test_compare()
class TestSolarPowerPlant(TestBuildingBaseClass):
def get_new_instance(self, level=None):
return building.SolarPowerPlant(level=level)
def test_electricity(self):
for level in range(1000):
self.level = level
test_spp = self.get_non_equal_tst_values()
self.assertLessEqual(self.object.electricity(self.sun_energy),
test_spp.electricity(self.sun_energy))
|
|
import sublime, sublime_plugin, sys, os, shutil, re, subprocess
from configuration import ConfigurationReader
class ProjectMakerCommand(sublime_plugin.WindowCommand):
def run(self):
settings = sublime.load_settings("yeoman.sublime-settings")
self.non_parsed = settings.get("non_parsed")
self.plugin_path = os.path.join(sublime.packages_path(), "SublimeYeoman")
self.templates_path = os.path.join(self.plugin_path, "Templates")
self.template_names = []
self.choose_template()
def choose_template(self):
files = self.get_templates()
for file_name in files:
if os.path.isdir(os.path.join(self.templates_path, file_name)):
self.template_names.append(file_name)
self.window.show_quick_panel(self.template_names, self.on_template_chosen)
def get_templates(self):
files = os.listdir(self.templates_path)
files = [(f.lower(), f) for f in files]
return [f[1] for f in sorted(files)]
def on_template_chosen(self, index):
self.chosen_template_name = self.template_names[index]
self.chosen_template_path = os.path.join(self.templates_path, self.chosen_template_name)
self.get_project_path()
def get_project_path(self):
self.project_name = "My" + self.chosen_template_name + "Project"
if sublime.platform() == "windows":
default_project_path = os.path.expanduser("~\\My Documents\\" + self.project_name)
else:
default_project_path = os.path.expanduser("~/Documents/" + self.project_name)
self.window.show_input_panel("Project Location:", default_project_path, self.on_project_path, None, None)
def get_sublime_path(self):
if sublime.platform() == 'osx':
return '/Applications/Sublime Text 2.app/Contents/SharedSupport/bin/subl'
if sublime.platform() == 'linux':
return open('/proc/self/cmdline').read().split(chr(0))[0]
return sys.executable
def load_app(self,args):
args.insert(0, self.get_sublime_path())
return subprocess.Popen(args)
def on_project_path(self, path):
self.project_path = path
self.project_name = os.path.basename(self.project_path)
if os.path.exists(self.project_path):
sublime.error_message("Something already exists at " + self.project_path)
else:
self.create_project()
def create_project(self):
self.copy_project()
        self.get_tokens(self.project_path)
self.get_token_values()
def copy_project(self):
shutil.copytree(self.chosen_template_path, self.project_path)
def get_tokens(self, path):
self.tokens = []
self.tokenized_files = []
self.tokenized_titles = []
self.get_tokens_from_path(path)
def get_tokens_from_path(self, path):
files = os.listdir(path)
for file_name in files:
            ext = os.path.splitext(file_name)[1]
if ext in self.non_parsed:
continue
file_path = os.path.join(path, file_name)
self.get_token_from_file_name(path, file_name)
if os.path.isdir(file_path):
self.get_tokens_from_path(file_path)
else:
self.get_tokens_from_file(file_path)
def get_token_from_file_name(self, path, file_name):
dot_index = file_name.find(".")
if file_name[0:1] == "_" and file_name[dot_index-1:dot_index] == "_":
file_path = os.path.join(path, file_name)
self.tokenized_titles.append(file_path)
token = file_name[1:dot_index-1]
if not token in self.tokens:
self.tokens.append(token)
def get_tokens_from_file(self, file_path):
file_ref = open(file_path, "rU")
content = file_ref.read()
file_ref.close()
        # match ${token} placeholders; keep this in sync with the slice below
        # and with the replacement pattern in replace_tokens_in_file()
        r = re.compile(r"\${[^}]*}")
matches = r.findall(content)
if len(matches) > 0:
self.tokenized_files.append(file_path)
for match in matches:
token = match[2:-1]
if not token in self.tokens:
self.tokens.append(token)
def get_token_values(self):
self.token_values = []
self.token_index = 0
self.get_next_token_value()
def get_next_token_value(self):
# are there any tokens left?
if self.token_index < len(self.tokens):
token = self.tokens[self.token_index]
# built-in values (may need to extract these):
if token == "project_path":
self.token_values.append((token, self.project_path))
self.token_index += 1
self.get_next_token_value()
elif token == "project_name":
self.token_values.append((token, self.project_name))
self.token_index += 1
self.get_next_token_value()
# custom token. get value from user:
else:
self.window.show_input_panel("Value for token \"" + token + "\"", "", self.on_token_value, None, None)
else:
# all done. do replacements
self.customize_project()
def on_token_value(self, token_value):
        self.token_values.append((self.tokens[self.token_index], token_value))
self.token_index += 1
self.get_next_token_value()
def customize_project(self):
index_path = self.project_path+"/app/index.html"
self.replace_tokens()
self.rename_files()
self.find_project_file()
self.read_configuration()
self.load_app(['-a',self.project_path])
if os.path.exists(index_path):
self.window.open_file(index_path)
def replace_tokens(self):
for file_path in self.tokenized_files:
self.replace_tokens_in_file(file_path)
def replace_tokens_in_file(self, file_path):
file_ref = open(file_path, "rU")
template = file_ref.read()
file_ref.close()
for token, value in self.token_values:
r = re.compile(r"\${" + token + "}")
template = r.sub(value, template)
file_ref = open(file_path, "w")
file_ref.write(template)
file_ref.close()
def rename_files(self):
for file_path in self.tokenized_titles:
for token, value in self.token_values:
# we do NOT want to use a full path for a single file name!
if token != "project_path":
r = re.compile(r"_" + token + "_")
if r.search(file_path):
os.rename(file_path, r.sub(value, file_path))
break
def find_project_file(self):
files = os.listdir(self.project_path)
r = re.compile(r".*\.sublime-project")
self.project_file = None
for file_name in files:
if r.search(file_name):
self.project_file = os.path.join(self.project_path, file_name)
        if self.project_file is None:
self.create_project_file()
def create_project_file(self):
file_name = self.project_name + ".sublime-project"
self.project_file = os.path.join(self.project_path, file_name)
file_ref = open(self.project_file, "w")
file_ref.write(("{\n"
" \"folders\":\n"
" [\n"
" {\n"
" \"path\": \".\"\n"
" }\n"
" ]\n"
"}\n"));
file_ref.close()
def read_configuration(self):
config_file = os.path.join(self.chosen_template_path, 'config.json')
if os.path.exists(config_file):
ConfigurationReader().read(config_file, self.project_path)
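# Illustrative sketch (not part of the plugin): how the ${token} substitution
# implemented above behaves on a template string. "project_name" and
# "project_path" are the two built-in tokens the command fills in
# automatically; the sample values below are made up for the example.
if __name__ == "__main__":
    sample = "Welcome to ${project_name}, created at ${project_path}."
    token_values = [("project_name", "MyWebProject"),
                    ("project_path", "/home/user/Documents/MyWebProject")]
    for token, value in token_values:
        sample = re.sub(r"\$\{" + token + r"\}", value, sample)
    print(sample)
    # -> Welcome to MyWebProject, created at /home/user/Documents/MyWebProject.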
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing volumes.
"""
import json
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django import http
from django.template.defaultfilters import slugify # noqa
from django.utils.decorators import method_decorator
from django.utils import encoding
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import cache_control
from django.views.decorators.cache import never_cache
from django.views import generic
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard import exceptions as dashboard_exception
from openstack_dashboard.usage import quotas
from openstack_dashboard.utils import filters
from openstack_dashboard.dashboards.project.volumes \
.volumes import forms as project_forms
from openstack_dashboard.dashboards.project.volumes \
.volumes import tables as project_tables
from openstack_dashboard.dashboards.project.volumes \
.volumes import tabs as project_tabs
class DetailView(tabs.TabView):
tab_group_class = project_tabs.VolumeDetailTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ volume.name|default:volume.id }}"
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
volume = self.get_data()
table = project_tables.VolumesTable(self.request)
context["volume"] = volume
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(volume)
choices = project_tables.VolumesTableBase.STATUS_DISPLAY_CHOICES
volume.status_label = filters.get_display_label(choices, volume.status)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
snapshots = cinder.volume_snapshot_list(
self.request, search_opts={'volume_id': volume.id})
if snapshots:
setattr(volume, 'has_snapshot', True)
for att in volume.attachments:
att['instance'] = api.nova.server_get(self.request,
att['server_id'])
except Exception:
redirect = self.get_redirect_url()
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=redirect)
return volume
def get_redirect_url(self):
return reverse('horizon:project:volumes:index')
def get_tabs(self, request, *args, **kwargs):
volume = self.get_data()
return self.tab_group_class(request, volume=volume, **kwargs)
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateForm
modal_header = _("Create Volume")
template_name = 'project/volumes/volumes/create.html'
submit_label = _("Create Volume")
submit_url = reverse_lazy("horizon:project:volumes:volumes:create")
success_url = reverse_lazy('horizon:project:volumes:volumes_tab')
page_title = _("Create a Volume")
def get_initial(self):
initial = super(CreateView, self).get_initial()
self.default_vol_type = None
try:
self.default_vol_type = cinder.volume_type_default(self.request)
initial['type'] = self.default_vol_type.name
except dashboard_exception.NOT_FOUND:
pass
return initial
def get_context_data(self, **kwargs):
context = super(CreateView, self).get_context_data(**kwargs)
try:
context['usages'] = quotas.tenant_limit_usages(self.request)
context['volume_types'] = self._get_volume_types()
except Exception:
exceptions.handle(self.request)
return context
def _get_volume_types(self):
volume_types = []
try:
volume_types = cinder.volume_type_list(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume type list.'))
# check if we have default volume type so we can present the
# description of no volume type differently
no_type_description = None
if self.default_vol_type is None:
message = \
_("If \"No volume type\" is selected, the volume will be "
"created without a volume type.")
no_type_description = encoding.force_text(message)
type_descriptions = [{'name': '',
'description': no_type_description}] + \
[{'name': type.name,
'description': getattr(type, "description", "")}
for type in volume_types]
return json.dumps(type_descriptions)
class ExtendView(forms.ModalFormView):
form_class = project_forms.ExtendForm
modal_header = _("Extend Volume")
template_name = 'project/volumes/volumes/extend.html'
submit_label = _("Extend Volume")
submit_url = "horizon:project:volumes:volumes:extend"
success_url = reverse_lazy("horizon:project:volumes:index")
page_title = _("Extend Volume")
def get_object(self):
if not hasattr(self, "_object"):
volume_id = self.kwargs['volume_id']
try:
self._object = cinder.volume_get(self.request, volume_id)
except Exception:
self._object = None
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
return self._object
def get_context_data(self, **kwargs):
context = super(ExtendView, self).get_context_data(**kwargs)
context['volume'] = self.get_object()
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
try:
usages = quotas.tenant_limit_usages(self.request)
usages['gigabytesUsed'] = (usages['gigabytesUsed']
- context['volume'].size)
context['usages'] = usages
except Exception:
exceptions.handle(self.request)
return context
def get_initial(self):
volume = self.get_object()
return {'id': self.kwargs['volume_id'],
'name': volume.name,
'orig_size': volume.size}
class CreateSnapshotView(forms.ModalFormView):
form_class = project_forms.CreateSnapshotForm
modal_header = _("Create Volume Snapshot")
template_name = 'project/volumes/volumes/create_snapshot.html'
submit_url = "horizon:project:volumes:volumes:create_snapshot"
success_url = reverse_lazy('horizon:project:volumes:snapshots_tab')
page_title = _("Create a Volume Snapshot")
def get_context_data(self, **kwargs):
context = super(CreateSnapshotView, self).get_context_data(**kwargs)
context['volume_id'] = self.kwargs['volume_id']
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
try:
volume = cinder.volume_get(self.request, context['volume_id'])
if (volume.status == 'in-use'):
context['attached'] = True
context['form'].set_warning(_("This volume is currently "
"attached to an instance. "
"In some cases, creating a "
"snapshot from an attached "
"volume can result in a "
"corrupted snapshot."))
context['usages'] = quotas.tenant_limit_usages(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
return context
def get_initial(self):
return {'volume_id': self.kwargs["volume_id"]}
class UploadToImageView(forms.ModalFormView):
form_class = project_forms.UploadToImageForm
modal_header = _("Upload Volume to Image")
template_name = 'project/volumes/volumes/upload_to_image.html'
submit_label = _("Upload")
submit_url = "horizon:project:volumes:volumes:upload_to_image"
success_url = reverse_lazy("horizon:project:volumes:index")
page_title = _("Upload Volume to Image")
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
error_message = _(
'Unable to retrieve volume information for volume: "%s"') \
% volume_id
exceptions.handle(self.request,
error_message,
redirect=self.success_url)
return volume
def get_context_data(self, **kwargs):
context = super(UploadToImageView, self).get_context_data(**kwargs)
context['volume'] = self.get_data()
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
volume = self.get_data()
return {'id': self.kwargs['volume_id'],
'name': volume.name,
'status': volume.status}
class CreateTransferView(forms.ModalFormView):
form_class = project_forms.CreateTransferForm
template_name = 'project/volumes/volumes/create_transfer.html'
success_url = reverse_lazy('horizon:project:volumes:volumes_tab')
modal_id = "create_volume_transfer_modal"
modal_header = _("Create Volume Transfer")
submit_label = _("Create Volume Transfer")
submit_url = "horizon:project:volumes:volumes:create_transfer"
page_title = _("Create a Volume Transfer")
def get_context_data(self, *args, **kwargs):
context = super(CreateTransferView, self).get_context_data(**kwargs)
volume_id = self.kwargs['volume_id']
context['volume_id'] = volume_id
context['submit_url'] = reverse(self.submit_url, args=[volume_id])
return context
def get_initial(self):
return {'volume_id': self.kwargs["volume_id"]}
class AcceptTransferView(forms.ModalFormView):
form_class = project_forms.AcceptTransferForm
template_name = 'project/volumes/volumes/accept_transfer.html'
success_url = reverse_lazy('horizon:project:volumes:volumes_tab')
modal_id = "accept_volume_transfer_modal"
modal_header = _("Accept Volume Transfer")
submit_label = _("Accept Volume Transfer")
submit_url = reverse_lazy(
"horizon:project:volumes:volumes:accept_transfer")
page_title = _("Accept Volume Transfer")
class ShowTransferView(forms.ModalFormView):
form_class = project_forms.ShowTransferForm
template_name = 'project/volumes/volumes/show_transfer.html'
success_url = reverse_lazy('horizon:project:volumes:volumes_tab')
modal_id = "show_volume_transfer_modal"
modal_header = _("Volume Transfer")
submit_url = "horizon:project:volumes:volumes:show_transfer"
cancel_label = _("Close")
download_label = _("Download transfer credentials")
page_title = _("Volume Transfer Details")
def get_object(self):
try:
return self._object
except AttributeError:
transfer_id = self.kwargs['transfer_id']
try:
self._object = cinder.transfer_get(self.request, transfer_id)
return self._object
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume transfer.'))
def get_context_data(self, **kwargs):
context = super(ShowTransferView, self).get_context_data(**kwargs)
context['transfer_id'] = self.kwargs['transfer_id']
context['auth_key'] = self.kwargs['auth_key']
context['submit_url'] = reverse(self.submit_url, args=[
context['transfer_id'], context['auth_key']])
context['download_label'] = self.download_label
context['download_url'] = reverse(
'horizon:project:volumes:volumes:download_transfer_creds',
args=[context['transfer_id'], context['auth_key']]
)
return context
def get_initial(self):
transfer = self.get_object()
return {'id': transfer.id,
'name': transfer.name,
'auth_key': self.kwargs['auth_key']}
class UpdateView(forms.ModalFormView):
form_class = project_forms.UpdateForm
modal_header = _("Edit Volume")
modal_id = "update_volume_modal"
template_name = 'project/volumes/volumes/update.html'
submit_url = "horizon:project:volumes:volumes:update"
success_url = reverse_lazy("horizon:project:volumes:index")
page_title = _("Edit Volume")
def get_object(self):
if not hasattr(self, "_object"):
vol_id = self.kwargs['volume_id']
try:
self._object = cinder.volume_get(self.request, vol_id)
except Exception:
msg = _('Unable to retrieve volume.')
url = reverse('horizon:project:volumes:index')
exceptions.handle(self.request, msg, redirect=url)
return self._object
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context['volume'] = self.get_object()
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
volume = self.get_object()
return {'volume_id': self.kwargs["volume_id"],
'name': volume.name,
'description': volume.description,
'bootable': volume.is_bootable}
class EditAttachmentsView(tables.DataTableView, forms.ModalFormView):
table_class = project_tables.AttachmentsTable
form_class = project_forms.AttachForm
form_id = "attach_volume_form"
modal_header = _("Manage Volume Attachments")
modal_id = "attach_volume_modal"
template_name = 'project/volumes/volumes/attach.html'
submit_url = "horizon:project:volumes:volumes:attach"
success_url = reverse_lazy("horizon:project:volumes:index")
page_title = _("Manage Volume Attachments")
@memoized.memoized_method
def get_object(self):
volume_id = self.kwargs['volume_id']
try:
return cinder.volume_get(self.request, volume_id)
except Exception:
self._object = None
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
def get_data(self):
attachments = []
volume = self.get_object()
if volume is not None:
for att in volume.attachments:
att['volume_name'] = getattr(volume, 'name', att['device'])
attachments.append(att)
return attachments
def get_initial(self):
try:
instances, has_more = api.nova.server_list(self.request)
except Exception:
instances = []
exceptions.handle(self.request,
_("Unable to retrieve attachment information."))
return {'volume': self.get_object(),
'instances': instances}
@memoized.memoized_method
def get_form(self, **kwargs):
form_class = kwargs.get('form_class', self.get_form_class())
return super(EditAttachmentsView, self).get_form(form_class)
def get_context_data(self, **kwargs):
context = super(EditAttachmentsView, self).get_context_data(**kwargs)
context['form'] = self.get_form()
volume = self.get_object()
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
if volume and volume.status == 'available':
context['show_attach'] = True
else:
context['show_attach'] = False
context['volume'] = volume
if self.request.is_ajax():
context['hide'] = True
return context
def get(self, request, *args, **kwargs):
# Table action handling
handled = self.construct_tables()
if handled:
return handled
return self.render_to_response(self.get_context_data(**kwargs))
def post(self, request, *args, **kwargs):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.get(request, *args, **kwargs)
class RetypeView(forms.ModalFormView):
form_class = project_forms.RetypeForm
modal_id = "retype_volume_modal"
modal_header = _("Change Volume Type")
template_name = 'project/volumes/volumes/retype.html'
submit_label = _("Change Volume Type")
submit_url = "horizon:project:volumes:volumes:retype"
success_url = reverse_lazy("horizon:project:volumes:index")
page_title = _("Change Volume Type")
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
error_message = _(
'Unable to retrieve volume information for volume: "%s"') \
% volume_id
exceptions.handle(self.request,
error_message,
redirect=self.success_url)
return volume
def get_context_data(self, **kwargs):
context = super(RetypeView, self).get_context_data(**kwargs)
context['volume'] = self.get_data()
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
volume = self.get_data()
return {'id': self.kwargs['volume_id'],
'name': volume.name,
'volume_type': volume.volume_type}
class EncryptionDetailView(generic.TemplateView):
template_name = 'project/volumes/volumes/encryption_detail.html'
page_title = _("Volume Encryption Details: {{ volume.name }}")
def get_context_data(self, **kwargs):
context = super(EncryptionDetailView, self).get_context_data(**kwargs)
volume = self.get_volume_data()
context["encryption_metadata"] = self.get_encryption_data()
context["volume"] = volume
context["page_title"] = _("Volume Encryption Details: "
"%(volume_name)s") % {'volume_name':
volume.name}
return context
@memoized.memoized_method
def get_encryption_data(self):
try:
volume_id = self.kwargs['volume_id']
self._encryption_metadata = \
cinder.volume_get_encryption_metadata(self.request,
volume_id)
except Exception:
redirect = self.get_redirect_url()
exceptions.handle(self.request,
_('Unable to retrieve volume encryption '
'details.'),
redirect=redirect)
return self._encryption_metadata
@memoized.memoized_method
def get_volume_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
redirect = self.get_redirect_url()
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=redirect)
return volume
def get_redirect_url(self):
return reverse('horizon:project:volumes:index')
class DownloadTransferCreds(generic.View):
# TODO(Itxaka): Remove cache_control in django >= 1.9
# https://code.djangoproject.com/ticket/13008
@method_decorator(cache_control(max_age=0, no_cache=True,
no_store=True, must_revalidate=True))
@method_decorator(never_cache)
def get(self, request, transfer_id, auth_key):
try:
transfer = cinder.transfer_get(self.request, transfer_id)
except Exception:
transfer = None
response = http.HttpResponse(content_type='application/text')
response['Content-Disposition'] = \
'attachment; filename=%s.txt' % slugify(transfer_id)
response.write('%s: %s\n%s: %s\n%s: %s' % (
_("Transfer name"),
getattr(transfer, 'name', ''),
_("Transfer ID"),
transfer_id,
_("Authorization Key"),
auth_key))
response['Content-Length'] = str(len(response.content))
return response
|
|
import datetime
import re
try:
import Image
except ImportError:
from PIL import Image
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.contrib.sites.models import Site
import oembed
from oembed.providers import DjangoProvider, DjangoDateBasedProvider, DjangoProviderOptions
from oembed.consumer import OEmbedConsumer
from oembed.constants import OEMBED_ALLOWED_SIZES
from oembed.tests.models import Blog
from oembed.tests.oembed_providers import BlogProvider
from oembed.tests.tests.base import BaseOEmbedTestCase
class ProviderTestCase(BaseOEmbedTestCase):
def test_resource_options(self):
self.assertTrue(isinstance(BlogProvider._meta, DjangoProviderOptions))
ops = BlogProvider._meta
self.assertEqual(ops.model, Blog)
self.assertEqual(ops.date_field, 'pub_date')
self.assertEqual(ops.fields_to_match, {'entry_slug': 'slug'})
self.assertEqual(ops.named_view, 'test_blog_detail')
def test_meta_queryset_behavior(self):
provider = BlogProvider()
obj = provider.get_object('http://example.com/testapp/blog/2010/may/01/entry-1/')
blog_obj = Blog.objects.get(slug='entry-1')
self.assertEqual(obj, blog_obj)
new_obj = Blog.objects.create(
title='new entry',
author='new author',
pub_date=datetime.datetime(2010, 1, 1),
)
oembed_obj = provider.get_object('http://example.com/testapp/blog/2010/jan/01/new-entry/')
self.assertEqual(new_obj, oembed_obj)
def test_resource_object(self):
provider = BlogProvider()
resource = provider.request_resource('http://example.com/testapp/blog/2010/may/01/entry-1/')
blog_obj = Blog.objects.get(slug='entry-1')
self.assertEqual(blog_obj, resource.content_object)
def test_django_provider(self):
resource = oembed.site.embed(self.category_url)
category_data = resource.get_data()
# provider data is pulled from the sites table
self.assertEqual(category_data['provider_url'], 'http://example.com')
self.assertEqual(category_data['provider_name'], 'example.com')
# resource data is pulled from the provider
self.assertEqual(category_data['type'], 'photo')
self.assertEqual(category_data['title'], 'Category 1')
max_width, max_height = max(OEMBED_ALLOWED_SIZES)
# image data
self.assertTrue(category_data['width'] <= max_width)
self.assertTrue(category_data['height'] <= max_height)
w, h = category_data['width'], category_data['height']
image_name = 'images/test_image1_%sx%s.jpg' % (w, h)
self.assertEqual(category_data['url'], 'http://example.com/media/%s' % image_name)
# just double check to be sure it got saved here
self.assertTrue(image_name in self.storage._files)
img_buf = StringIO(self.storage._files[image_name])
img = Image.open(img_buf)
img_width, img_height = img.size
self.assertTrue(img_width == w or img_height == h)
tw, th = category_data['thumbnail_width'], category_data['thumbnail_height']
thumbnail_name = 'images/test_image1_%sx%s.jpg' % (tw, th)
self.assertEqual(category_data['thumbnail_url'], 'http://example.com/media/%s' % thumbnail_name)
self.assertTrue(thumbnail_name in self.storage._files)
img_buf = StringIO(self.storage._files[thumbnail_name])
img = Image.open(img_buf)
img_width, img_height = img.size
self.assertTrue(img_width == tw or img_height == th)
def test_django_provider_image_sizing(self):
resource = oembed.site.embed(self.category_url, maxwidth=450)
category_data = resource.get_data()
# provider data is pulled from the sites table
self.assertEqual(category_data['width'], 400)
w, h = category_data['width'], category_data['height']
self.assertEqual(category_data['url'], 'http://example.com/media/images/test_image1_%sx%s.jpg' % (w, h))
# specify both
resource = oembed.site.embed(self.category_url, maxwidth=450, maxheight=200)
category_data = resource.get_data()
self.assertEqual(category_data['height'], 200)
w, h = category_data['width'], category_data['height']
self.assertEqual(category_data['url'], 'http://example.com/media/images/test_image1_%sx%s.jpg' % (w, h))
def test_django_provider_url_match(self):
# even though the sites table has example.com having no www., the regex
# constructed should be able to correctly match the url below
resource = oembed.site.embed('http://www.example.com/testapp/category/2/')
category_data = resource.get_data()
self.assertEqual(category_data['title'], 'Category 2')
# try a https
resource = oembed.site.embed('https://www.example.com/testapp/category/2/')
category_data = resource.get_data()
self.assertEqual(category_data['title'], 'Category 2')
def test_django_datebased_provider(self):
resource = oembed.site.embed(self.blog_url)
blog_data = resource.get_data()
# provider data is pulled from the sites table
self.assertEqual(blog_data['provider_url'], 'http://example.com')
self.assertEqual(blog_data['provider_name'], 'example.com')
# resource data
self.assertEqual(blog_data['type'], 'link')
self.assertEqual(blog_data['title'], 'Entry 1')
self.assertEqual(blog_data['url'], 'http://example.com/testapp/blog/2010/may/01/entry-1/')
self.assertEqual(blog_data['author_name'], 'Charles')
def test_django_rich_provider(self):
resource = oembed.site.embed(self.rich_url)
rich_data = resource.get_data()
max_width, max_height = max(OEMBED_ALLOWED_SIZES)
# image data
self.assertTrue(rich_data['width'] <= max_width)
self.assertTrue(rich_data['height'] <= max_height)
self.assertEqual(rich_data['title'], 'Rich One')
self.assertEqual(rich_data['html'], '<h1>Rich One</h1><p>This is rich one<br />Awesome!</p>\n')
def test_meta_inheritance(self):
class BaseTestProvider(DjangoProvider):
class Meta:
abstract = True
test_attr = 'basetestprovider'
image_processor = 'someimageprocessor'
class BaseDateBasedProvider(BaseTestProvider, DjangoDateBasedProvider):
class Meta:
abstract = True
test_attr = 'basedatebasedprovider'
class BlogProviderMixin(DjangoProvider):
class Meta:
abstract = True
year_part = 'blog_year'
month_part = 'blog_month'
day_part = 'blog_day'
class BaseBlogProvider(BaseDateBasedProvider):
resource_type = 'rich'
class Meta:
abstract = True
model = Blog
test_attr = 'baseblogprovider'
class SomeBlogProvider(BaseBlogProvider):
class Meta:
named_view = 'test_blog_detail'
fields_to_match = {'blog_id': 'id'}
test_attr = 'someblogprovider'
class MixinBlogProvider(BlogProviderMixin, BaseBlogProvider):
class Meta:
named_view = 'test_blog_detail'
fields_to_match = {'blog_id': 'id'}
test_attr = 'mixinblogprovider'
ops = BaseTestProvider._meta
self.assertTrue(ops.abstract)
self.assertEqual(ops.test_attr, 'basetestprovider')
self.assertEqual(ops.image_processor, 'someimageprocessor')
ops = BaseDateBasedProvider._meta
self.assertTrue(ops.abstract)
self.assertEqual(ops.test_attr, 'basedatebasedprovider')
self.assertEqual(ops.image_processor, 'someimageprocessor')
ops = BaseBlogProvider._meta
self.assertTrue(ops.abstract)
self.assertEqual(ops.test_attr, 'baseblogprovider')
self.assertEqual(ops.image_processor, 'someimageprocessor')
self.assertEqual(ops.model, Blog)
ops = SomeBlogProvider._meta
self.assertFalse(ops.abstract)
self.assertEqual(ops.test_attr, 'someblogprovider')
self.assertEqual(ops.image_processor, 'someimageprocessor')
self.assertEqual(ops.model, Blog)
self.assertEqual(ops.fields_to_match, {'blog_id': 'id'})
ops = MixinBlogProvider._meta
self.assertFalse(ops.abstract)
self.assertEqual(ops.test_attr, 'mixinblogprovider')
self.assertEqual(ops.image_processor, 'someimageprocessor')
self.assertEqual(ops.model, Blog)
self.assertEqual(ops.fields_to_match, {'blog_id': 'id'})
self.assertEqual(ops.year_part, 'blog_year')
self.assertEqual(ops.month_part, 'blog_month')
self.assertEqual(ops.day_part, 'blog_day')
|
|
import re
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas._testing as tm
from pandas.tests.plotting.common import (
TestPlotBase,
_check_plot_works,
)
@pytest.fixture
def hist_df():
np.random.seed(0)
df = DataFrame(np.random.randn(30, 2), columns=["A", "B"])
df["C"] = np.random.choice(["a", "b", "c"], 30)
df["D"] = np.random.choice(["a", "b", "c"], 30)
return df
@td.skip_if_no_mpl
class TestHistWithBy(TestPlotBase):
@pytest.mark.slow
@pytest.mark.parametrize(
"by, column, titles, legends",
[
("C", "A", ["a", "b", "c"], [["A"]] * 3),
("C", ["A", "B"], ["a", "b", "c"], [["A", "B"]] * 3),
("C", None, ["a", "b", "c"], [["A", "B"]] * 3),
(
["C", "D"],
"A",
[
"(a, a)",
"(a, b)",
"(a, c)",
"(b, a)",
"(b, b)",
"(b, c)",
"(c, a)",
"(c, b)",
"(c, c)",
],
[["A"]] * 9,
),
(
["C", "D"],
["A", "B"],
[
"(a, a)",
"(a, b)",
"(a, c)",
"(b, a)",
"(b, b)",
"(b, c)",
"(c, a)",
"(c, b)",
"(c, c)",
],
[["A", "B"]] * 9,
),
(
["C", "D"],
None,
[
"(a, a)",
"(a, b)",
"(a, c)",
"(b, a)",
"(b, b)",
"(b, c)",
"(c, a)",
"(c, b)",
"(c, c)",
],
[["A", "B"]] * 9,
),
],
)
def test_hist_plot_by_argument(self, by, column, titles, legends, hist_df):
# GH 15079
axes = _check_plot_works(hist_df.plot.hist, column=column, by=by)
result_titles = [ax.get_title() for ax in axes]
result_legends = [
[legend.get_text() for legend in ax.get_legend().texts] for ax in axes
]
assert result_legends == legends
assert result_titles == titles
@pytest.mark.parametrize(
"by, column, titles, legends",
[
(0, "A", ["a", "b", "c"], [["A"]] * 3),
(0, None, ["a", "b", "c"], [["A", "B"]] * 3),
(
[0, "D"],
"A",
[
"(a, a)",
"(a, b)",
"(a, c)",
"(b, a)",
"(b, b)",
"(b, c)",
"(c, a)",
"(c, b)",
"(c, c)",
],
[["A"]] * 9,
),
],
)
def test_hist_plot_by_0(self, by, column, titles, legends, hist_df):
# GH 15079
df = hist_df.copy()
df = df.rename(columns={"C": 0})
axes = _check_plot_works(df.plot.hist, column=column, by=by)
result_titles = [ax.get_title() for ax in axes]
result_legends = [
[legend.get_text() for legend in ax.get_legend().texts] for ax in axes
]
assert result_legends == legends
assert result_titles == titles
@pytest.mark.parametrize(
"by, column",
[
([], ["A"]),
([], ["A", "B"]),
((), None),
((), ["A", "B"]),
],
)
def test_hist_plot_empty_list_string_tuple_by(self, by, column, hist_df):
# GH 15079
msg = "No group keys passed"
with pytest.raises(ValueError, match=msg):
_check_plot_works(hist_df.plot.hist, column=column, by=by)
@pytest.mark.slow
@pytest.mark.parametrize(
"by, column, layout, axes_num",
[
(["C"], "A", (2, 2), 3),
("C", "A", (2, 2), 3),
(["C"], ["A"], (1, 3), 3),
("C", None, (3, 1), 3),
("C", ["A", "B"], (3, 1), 3),
(["C", "D"], "A", (9, 1), 9),
(["C", "D"], "A", (3, 3), 9),
(["C", "D"], ["A"], (5, 2), 9),
(["C", "D"], ["A", "B"], (9, 1), 9),
(["C", "D"], None, (9, 1), 9),
(["C", "D"], ["A", "B"], (5, 2), 9),
],
)
def test_hist_plot_layout_with_by(self, by, column, layout, axes_num, hist_df):
# GH 15079
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
hist_df.plot.hist, column=column, by=by, layout=layout
)
self._check_axes_shape(axes, axes_num=axes_num, layout=layout)
@pytest.mark.parametrize(
"msg, by, layout",
[
("larger than required size", ["C", "D"], (1, 1)),
(re.escape("Layout must be a tuple of (rows, columns)"), "C", (1,)),
("At least one dimension of layout must be positive", "C", (-1, -1)),
],
)
def test_hist_plot_invalid_layout_with_by_raises(self, msg, by, layout, hist_df):
# GH 15079, test if error is raised when invalid layout is given
with pytest.raises(ValueError, match=msg):
hist_df.plot.hist(column=["A", "B"], by=by, layout=layout)
@pytest.mark.slow
def test_axis_share_x_with_by(self, hist_df):
# GH 15079
ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharex=True)
# share x
assert self.get_x_axis(ax1).joined(ax1, ax2)
assert self.get_x_axis(ax2).joined(ax1, ax2)
assert self.get_x_axis(ax3).joined(ax1, ax3)
assert self.get_x_axis(ax3).joined(ax2, ax3)
# don't share y
assert not self.get_y_axis(ax1).joined(ax1, ax2)
assert not self.get_y_axis(ax2).joined(ax1, ax2)
assert not self.get_y_axis(ax3).joined(ax1, ax3)
assert not self.get_y_axis(ax3).joined(ax2, ax3)
@pytest.mark.slow
def test_axis_share_y_with_by(self, hist_df):
# GH 15079
ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharey=True)
# share y
assert self.get_y_axis(ax1).joined(ax1, ax2)
assert self.get_y_axis(ax2).joined(ax1, ax2)
assert self.get_y_axis(ax3).joined(ax1, ax3)
assert self.get_y_axis(ax3).joined(ax2, ax3)
# don't share x
assert not self.get_x_axis(ax1).joined(ax1, ax2)
assert not self.get_x_axis(ax2).joined(ax1, ax2)
assert not self.get_x_axis(ax3).joined(ax1, ax3)
assert not self.get_x_axis(ax3).joined(ax2, ax3)
@pytest.mark.parametrize("figsize", [(12, 8), (20, 10)])
def test_figure_shape_hist_with_by(self, figsize, hist_df):
# GH 15079
axes = hist_df.plot.hist(column="A", by="C", figsize=figsize)
self._check_axes_shape(axes, axes_num=3, figsize=figsize)
@td.skip_if_no_mpl
class TestBoxWithBy(TestPlotBase):
@pytest.mark.parametrize(
"by, column, titles, xticklabels",
[
("C", "A", ["A"], [["a", "b", "c"]]),
(
["C", "D"],
"A",
["A"],
[
[
"(a, a)",
"(a, b)",
"(a, c)",
"(b, a)",
"(b, b)",
"(b, c)",
"(c, a)",
"(c, b)",
"(c, c)",
]
],
),
("C", ["A", "B"], ["A", "B"], [["a", "b", "c"]] * 2),
(
["C", "D"],
["A", "B"],
["A", "B"],
[
[
"(a, a)",
"(a, b)",
"(a, c)",
"(b, a)",
"(b, b)",
"(b, c)",
"(c, a)",
"(c, b)",
"(c, c)",
]
]
* 2,
),
(["C"], None, ["A", "B"], [["a", "b", "c"]] * 2),
],
)
def test_box_plot_by_argument(self, by, column, titles, xticklabels, hist_df):
# GH 15079
axes = _check_plot_works(hist_df.plot.box, column=column, by=by)
result_titles = [ax.get_title() for ax in axes]
result_xticklabels = [
[label.get_text() for label in ax.get_xticklabels()] for ax in axes
]
assert result_xticklabels == xticklabels
assert result_titles == titles
@pytest.mark.parametrize(
"by, column, titles, xticklabels",
[
(0, "A", ["A"], [["a", "b", "c"]]),
(
[0, "D"],
"A",
["A"],
[
[
"(a, a)",
"(a, b)",
"(a, c)",
"(b, a)",
"(b, b)",
"(b, c)",
"(c, a)",
"(c, b)",
"(c, c)",
]
],
),
(0, None, ["A", "B"], [["a", "b", "c"]] * 2),
],
)
def test_box_plot_by_0(self, by, column, titles, xticklabels, hist_df):
# GH 15079
df = hist_df.copy()
df = df.rename(columns={"C": 0})
axes = _check_plot_works(df.plot.box, column=column, by=by)
result_titles = [ax.get_title() for ax in axes]
result_xticklabels = [
[label.get_text() for label in ax.get_xticklabels()] for ax in axes
]
assert result_xticklabels == xticklabels
assert result_titles == titles
@pytest.mark.parametrize(
"by, column",
[
([], ["A"]),
((), "A"),
([], None),
((), ["A", "B"]),
],
)
def test_box_plot_with_none_empty_list_by(self, by, column, hist_df):
# GH 15079
msg = "No group keys passed"
with pytest.raises(ValueError, match=msg):
_check_plot_works(hist_df.plot.box, column=column, by=by)
@pytest.mark.slow
@pytest.mark.parametrize(
"by, column, layout, axes_num",
[
(["C"], "A", (1, 1), 1),
("C", "A", (1, 1), 1),
("C", None, (2, 1), 2),
("C", ["A", "B"], (1, 2), 2),
(["C", "D"], "A", (1, 1), 1),
(["C", "D"], None, (1, 2), 2),
],
)
def test_box_plot_layout_with_by(self, by, column, layout, axes_num, hist_df):
# GH 15079
axes = _check_plot_works(hist_df.plot.box, column=column, by=by, layout=layout)
self._check_axes_shape(axes, axes_num=axes_num, layout=layout)
@pytest.mark.parametrize(
"msg, by, layout",
[
("larger than required size", ["C", "D"], (1, 1)),
(re.escape("Layout must be a tuple of (rows, columns)"), "C", (1,)),
("At least one dimension of layout must be positive", "C", (-1, -1)),
],
)
def test_box_plot_invalid_layout_with_by_raises(self, msg, by, layout, hist_df):
# GH 15079, test if error is raised when invalid layout is given
with pytest.raises(ValueError, match=msg):
hist_df.plot.box(column=["A", "B"], by=by, layout=layout)
@pytest.mark.parametrize("figsize", [(12, 8), (20, 10)])
    def test_figure_shape_box_with_by(self, figsize, hist_df):
# GH 15079
axes = hist_df.plot.box(column="A", by="C", figsize=figsize)
self._check_axes_shape(axes, axes_num=1, figsize=figsize)
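# Minimal usage sketch of the ``by`` argument exercised by the tests above
# (illustrative data; requires a matplotlib backend):
#
#     >>> import numpy as np
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({"A": np.random.randn(30),
#     ...                    "C": np.random.choice(list("abc"), 30)})
#     >>> axes = df.plot.hist(column="A", by="C", layout=(3, 1), sharex=True)
#     >>> [ax.get_title() for ax in axes]
#     ['a', 'b', 'c']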
|
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Callable, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union
import inspect
import os
from .item import Item, ItemCallbackType
from ..enums import ButtonStyle, ComponentType
from ..partial_emoji import PartialEmoji, _EmojiTag
from ..components import Button as ButtonComponent
__all__ = (
'Button',
'button',
)
if TYPE_CHECKING:
from .view import View
from ..emoji import Emoji
B = TypeVar('B', bound='Button')
V = TypeVar('V', bound='View', covariant=True)
class Button(Item[V]):
"""Represents a UI button.
.. versionadded:: 2.0
Parameters
------------
style: :class:`discord.ButtonStyle`
The style of the button.
custom_id: Optional[:class:`str`]
The ID of the button that gets received during an interaction.
If this button is for a URL, it does not have a custom ID.
url: Optional[:class:`str`]
The URL this button sends you to.
disabled: :class:`bool`
Whether the button is disabled or not.
label: Optional[:class:`str`]
The label of the button, if any.
emoji: Optional[Union[:class:`.PartialEmoji`, :class:`.Emoji`, :class:`str`]]
The emoji of the button, if available.
row: Optional[:class:`int`]
The relative row this button belongs to. A Discord component can only have 5
rows. By default, items are arranged automatically into those 5 rows. If you'd
like to control the relative positioning of the row then passing an index is advised.
For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
ordering. The row number must be between 0 and 4 (i.e. zero indexed).
"""
__item_repr_attributes__: Tuple[str, ...] = (
'style',
'url',
'disabled',
'label',
'emoji',
'row',
)
def __init__(
self,
*,
style: ButtonStyle = ButtonStyle.secondary,
label: Optional[str] = None,
disabled: bool = False,
custom_id: Optional[str] = None,
url: Optional[str] = None,
emoji: Optional[Union[str, Emoji, PartialEmoji]] = None,
row: Optional[int] = None,
):
super().__init__()
if custom_id is not None and url is not None:
raise TypeError('cannot mix both url and custom_id with Button')
self._provided_custom_id = custom_id is not None
if url is None and custom_id is None:
custom_id = os.urandom(16).hex()
if url is not None:
style = ButtonStyle.link
if emoji is not None:
if isinstance(emoji, str):
emoji = PartialEmoji.from_str(emoji)
elif isinstance(emoji, _EmojiTag):
emoji = emoji._to_partial()
else:
raise TypeError(f'expected emoji to be str, Emoji, or PartialEmoji not {emoji.__class__}')
self._underlying = ButtonComponent._raw_construct(
type=ComponentType.button,
custom_id=custom_id,
url=url,
disabled=disabled,
label=label,
style=style,
emoji=emoji,
)
self.row = row
@property
def style(self) -> ButtonStyle:
""":class:`discord.ButtonStyle`: The style of the button."""
return self._underlying.style
@style.setter
def style(self, value: ButtonStyle):
self._underlying.style = value
@property
def custom_id(self) -> Optional[str]:
"""Optional[:class:`str`]: The ID of the button that gets received during an interaction.
If this button is for a URL, it does not have a custom ID.
"""
return self._underlying.custom_id
@custom_id.setter
def custom_id(self, value: Optional[str]):
if value is not None and not isinstance(value, str):
raise TypeError('custom_id must be None or str')
self._underlying.custom_id = value
@property
def url(self) -> Optional[str]:
"""Optional[:class:`str`]: The URL this button sends you to."""
return self._underlying.url
@url.setter
def url(self, value: Optional[str]):
if value is not None and not isinstance(value, str):
raise TypeError('url must be None or str')
self._underlying.url = value
@property
def disabled(self) -> bool:
""":class:`bool`: Whether the button is disabled or not."""
return self._underlying.disabled
@disabled.setter
def disabled(self, value: bool):
self._underlying.disabled = bool(value)
@property
def label(self) -> Optional[str]:
"""Optional[:class:`str`]: The label of the button, if available."""
return self._underlying.label
@label.setter
def label(self, value: Optional[str]):
self._underlying.label = str(value) if value is not None else value
@property
def emoji(self) -> Optional[PartialEmoji]:
"""Optional[:class:`.PartialEmoji`]: The emoji of the button, if available."""
return self._underlying.emoji
@emoji.setter
def emoji(self, value: Optional[Union[str, Emoji, PartialEmoji]]): # type: ignore
if value is not None:
if isinstance(value, str):
self._underlying.emoji = PartialEmoji.from_str(value)
elif isinstance(value, _EmojiTag):
self._underlying.emoji = value._to_partial()
else:
raise TypeError(f'expected str, Emoji, or PartialEmoji, received {value.__class__} instead')
else:
self._underlying.emoji = None
@classmethod
def from_component(cls: Type[B], button: ButtonComponent) -> B:
return cls(
style=button.style,
label=button.label,
disabled=button.disabled,
custom_id=button.custom_id,
url=button.url,
emoji=button.emoji,
row=None,
)
@property
def type(self) -> ComponentType:
return self._underlying.type
def to_component_dict(self):
return self._underlying.to_dict()
def is_dispatchable(self) -> bool:
return self.custom_id is not None
def is_persistent(self) -> bool:
if self.style is ButtonStyle.link:
return self.url is not None
return super().is_persistent()
def refresh_component(self, button: ButtonComponent) -> None:
self._underlying = button
def button(
*,
label: Optional[str] = None,
custom_id: Optional[str] = None,
disabled: bool = False,
style: ButtonStyle = ButtonStyle.secondary,
emoji: Optional[Union[str, Emoji, PartialEmoji]] = None,
row: Optional[int] = None,
) -> Callable[[ItemCallbackType], ItemCallbackType]:
"""A decorator that attaches a button to a component.
The function being decorated should have three parameters, ``self`` representing
the :class:`discord.ui.View`, the :class:`discord.ui.Button` being pressed and
the :class:`discord.Interaction` you receive.
.. note::
Buttons with a URL cannot be created with this function.
Consider creating a :class:`Button` manually instead.
This is because buttons with a URL do not have a callback
associated with them since Discord does not do any processing
with it.
Parameters
------------
label: Optional[:class:`str`]
The label of the button, if any.
custom_id: Optional[:class:`str`]
The ID of the button that gets received during an interaction.
It is recommended not to set this parameter to prevent conflicts.
style: :class:`.ButtonStyle`
The style of the button. Defaults to :attr:`.ButtonStyle.grey`.
disabled: :class:`bool`
Whether the button is disabled or not. Defaults to ``False``.
emoji: Optional[Union[:class:`str`, :class:`.Emoji`, :class:`.PartialEmoji`]]
The emoji of the button. This can be in string form or a :class:`.PartialEmoji`
or a full :class:`.Emoji`.
row: Optional[:class:`int`]
The relative row this button belongs to. A Discord component can only have 5
rows. By default, items are arranged automatically into those 5 rows. If you'd
like to control the relative positioning of the row then passing an index is advised.
For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
ordering. The row number must be between 0 and 4 (i.e. zero indexed).
"""
def decorator(func: ItemCallbackType) -> ItemCallbackType:
if not inspect.iscoroutinefunction(func):
raise TypeError('button function must be a coroutine function')
func.__discord_ui_model_type__ = Button
func.__discord_ui_model_kwargs__ = {
'style': style,
'custom_id': custom_id,
'url': None,
'disabled': disabled,
'label': label,
'emoji': emoji,
'row': row,
}
return func
return decorator
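# Usage sketch (illustrative; assumes a configured bot elsewhere). The view
# and callback names below are made up. Note the callback parameter order
# documented above: the view (self), the pressed button, then the interaction.
#
#     import discord
#
#     class Confirm(discord.ui.View):
#         @discord.ui.button(label='Confirm', style=discord.ButtonStyle.green)
#         async def confirm(self, button: discord.ui.Button, interaction: discord.Interaction):
#             await interaction.response.send_message('Confirmed!', ephemeral=True)
#
#     # Attach the view when sending a message, e.g.:
#     #     await channel.send('Proceed?', view=Confirm())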
|
|
"""
Component to make instant statistics about your history.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.history_stats/
"""
import datetime
import logging
import math
import voluptuous as vol
import homeassistant.components.history as history
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_ENTITY_ID, CONF_STATE, CONF_TYPE,
EVENT_HOMEASSISTANT_START)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_state_change
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'history_stats'
DEPENDENCIES = ['history']
CONF_START = 'start'
CONF_END = 'end'
CONF_DURATION = 'duration'
CONF_PERIOD_KEYS = [CONF_START, CONF_END, CONF_DURATION]
CONF_TYPE_TIME = 'time'
CONF_TYPE_RATIO = 'ratio'
CONF_TYPE_COUNT = 'count'
CONF_TYPE_KEYS = [CONF_TYPE_TIME, CONF_TYPE_RATIO, CONF_TYPE_COUNT]
DEFAULT_NAME = 'unnamed statistics'
UNITS = {
CONF_TYPE_TIME: 'h',
CONF_TYPE_RATIO: '%',
CONF_TYPE_COUNT: ''
}
ICON = 'mdi:chart-line'
ATTR_VALUE = 'value'
def exactly_two_period_keys(conf):
"""Ensure exactly 2 of CONF_PERIOD_KEYS are provided."""
provided = 0
for param in CONF_PERIOD_KEYS:
if param in conf and conf[param] is not None:
provided += 1
if provided != 2:
raise vol.Invalid('You must provide exactly 2 of the following:'
' start, end, duration')
return conf
PLATFORM_SCHEMA = vol.All(PLATFORM_SCHEMA.extend({
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_STATE): cv.string,
vol.Optional(CONF_START, default=None): cv.template,
vol.Optional(CONF_END, default=None): cv.template,
vol.Optional(CONF_DURATION, default=None): cv.time_period,
vol.Optional(CONF_TYPE, default=CONF_TYPE_TIME): vol.In(CONF_TYPE_KEYS),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}), exactly_two_period_keys)
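# Example configuration.yaml entry (sketch; the entity and templates are
# illustrative). Exactly two of ``start``, ``end`` and ``duration`` must be
# supplied, as enforced by ``exactly_two_period_keys`` above:
#
#     sensor:
#       - platform: history_stats
#         name: Lamp on today
#         entity_id: light.my_lamp
#         state: 'on'
#         type: time
#         start: '{{ now().replace(hour=0, minute=0, second=0) }}'
#         end: '{{ now() }}'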
# noinspection PyUnusedLocal
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the History Stats sensor."""
entity_id = config.get(CONF_ENTITY_ID)
entity_state = config.get(CONF_STATE)
start = config.get(CONF_START)
end = config.get(CONF_END)
duration = config.get(CONF_DURATION)
sensor_type = config.get(CONF_TYPE)
name = config.get(CONF_NAME)
for template in [start, end]:
if template is not None:
template.hass = hass
add_devices([HistoryStatsSensor(hass, entity_id, entity_state, start, end,
duration, sensor_type, name)])
return True
class HistoryStatsSensor(Entity):
"""Representation of a HistoryStats sensor."""
def __init__(
self, hass, entity_id, entity_state, start, end, duration,
sensor_type, name):
"""Initialize the HistoryStats sensor."""
self._hass = hass
self._entity_id = entity_id
self._entity_state = entity_state
self._duration = duration
self._start = start
self._end = end
self._type = sensor_type
self._name = name
self._unit_of_measurement = UNITS[sensor_type]
self._period = (datetime.datetime.now(), datetime.datetime.now())
self.value = 0
self.count = 0
def force_refresh(*args):
"""Force the component to refresh."""
self.schedule_update_ha_state(True)
# Update value when home assistant starts
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, force_refresh)
# Update value when tracked entity changes its state
track_state_change(hass, entity_id, force_refresh)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
if self._type == CONF_TYPE_TIME:
return round(self.value, 2)
if self._type == CONF_TYPE_RATIO:
return HistoryStatsHelper.pretty_ratio(self.value, self._period)
if self._type == CONF_TYPE_COUNT:
return self.count
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
hsh = HistoryStatsHelper
return {
ATTR_VALUE: hsh.pretty_duration(self.value),
}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
def update(self):
"""Get the latest data and updates the states."""
# Get previous values of start and end
p_start, p_end = self._period
# Parse templates
self.update_period()
start, end = self._period
# Convert times to UTC
start = dt_util.as_utc(start)
end = dt_util.as_utc(end)
p_start = dt_util.as_utc(p_start)
p_end = dt_util.as_utc(p_end)
now = datetime.datetime.now()
# Compute integer timestamps
start_timestamp = math.floor(dt_util.as_timestamp(start))
end_timestamp = math.floor(dt_util.as_timestamp(end))
p_start_timestamp = math.floor(dt_util.as_timestamp(p_start))
p_end_timestamp = math.floor(dt_util.as_timestamp(p_end))
now_timestamp = math.floor(dt_util.as_timestamp(now))
# If period has not changed and current time after the period end...
if start_timestamp == p_start_timestamp and \
end_timestamp == p_end_timestamp and \
end_timestamp <= now_timestamp:
# Don't compute anything as the value cannot have changed
return
# Get history between start and end
history_list = history.state_changes_during_period(
self.hass, start, end, str(self._entity_id))
if self._entity_id not in history_list.keys():
return
# Get the first state
last_state = history.get_state(self.hass, start, self._entity_id)
last_state = (last_state is not None and
last_state == self._entity_state)
last_time = start_timestamp
elapsed = 0
count = 0
# Make calculations
for item in history_list.get(self._entity_id):
current_state = item.state == self._entity_state
current_time = item.last_changed.timestamp()
if last_state:
elapsed += current_time - last_time
if current_state and not last_state:
count += 1
last_state = current_state
last_time = current_time
# Count time elapsed between last history state and end of measure
if last_state:
measure_end = min(end_timestamp, now_timestamp)
elapsed += measure_end - last_time
# Save value in hours
self.value = elapsed / 3600
# Save counter
self.count = count
def update_period(self):
"""Parse the templates and store a datetime tuple in _period."""
start = None
end = None
# Parse start
if self._start is not None:
try:
start_rendered = self._start.render()
except (TemplateError, TypeError) as ex:
HistoryStatsHelper.handle_template_exception(ex, 'start')
return
start = dt_util.parse_datetime(start_rendered)
if start is None:
try:
start = dt_util.as_local(dt_util.utc_from_timestamp(
math.floor(float(start_rendered))))
except ValueError:
                    _LOGGER.error("Parsing error: start must be a datetime "
                                  "or a timestamp")
return
# Parse end
if self._end is not None:
try:
end_rendered = self._end.render()
except (TemplateError, TypeError) as ex:
HistoryStatsHelper.handle_template_exception(ex, 'end')
return
end = dt_util.parse_datetime(end_rendered)
if end is None:
try:
end = dt_util.as_local(dt_util.utc_from_timestamp(
math.floor(float(end_rendered))))
except ValueError:
_LOGGER.error("Parsing error: end must be a datetime "
"or a timestamp")
return
# Calculate start or end using the duration
if start is None:
start = end - self._duration
if end is None:
end = start + self._duration
self._period = start, end
class HistoryStatsHelper:
"""Static methods to make the HistoryStatsSensor code lighter."""
@staticmethod
def pretty_duration(hours):
"""Format a duration in days, hours, minutes, seconds."""
seconds = int(3600 * hours)
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
if days > 0:
return '%dd %dh %dm' % (days, hours, minutes)
elif hours > 0:
return '%dh %dm' % (hours, minutes)
return '%dm' % minutes
@staticmethod
def pretty_ratio(value, period):
"""Format the ratio of value / period duration."""
if len(period) != 2 or period[0] == period[1]:
return 0.0
ratio = 100 * 3600 * value / (period[1] - period[0]).total_seconds()
return round(ratio, 1)
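# Worked examples for the two helpers above (illustrative only; t0 is any
# datetime and timedelta comes from the stdlib):
#   pretty_duration(26.5) -> '1d 2h 30m' (95400 s split into days/hours/minutes)
#   pretty_ratio(1.0, (t0, t0 + timedelta(hours=4))) -> 25.0, i.e. one hour
#   of tracked state inside a four-hour window is a 25.0 % ratio.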
@staticmethod
def handle_template_exception(ex, field):
"""Log an error nicely if the template cannot be interpreted."""
if ex.args and ex.args[0].startswith(
"UndefinedError: 'None' has no attribute"):
# Common during HA startup - so just a warning
_LOGGER.warning(ex)
return
_LOGGER.error("Error parsing template for field %s", field)
_LOGGER.error(ex)
|
|
#!/usr/bin/env python
"""
RS 2013/07/09: Light Curve Models from Arnett 1980, 1982
"""
# ----------------------------------------------------------------------------
# Dependencies
# ----------------------------------------------------------------------------
import sys
import numpy as np
import cPickle as pickle
from os.path import dirname
from scipy.integrate import quad
from sklearn import gaussian_process
# ----------------------------------------------------------------------------
# Package globals
# ----------------------------------------------------------------------------
# e-folding decay times of 56Ni and 56Co, in days
tNi, tCo = 8.8, 111.3
# decay constants of 56Ni and 56Co in days^-1
lNi, lCo = 1.0/tNi, 1.0/tCo
# decay energies in MeV
QNiG, QCoG, QCoE = 1.75, 3.61, 0.12
# Working out the normalization constant for epsilon:
# We have energy released in MeV day^-1 (atom of 56Ni)^-1, where we really
# want it expressed in erg s^-1 (solar masses of 56Ni)^-1. So:
# 2e+30 kg/(56 AMU * 1.67e-27 kg) = 2.14e+55 Ni atoms / Msol
# 1 MeV/day = 1.85e-11 erg/s
# thus: 1.85e-11 (erg/s)/(MeV/day) * 2.14e+55 atoms/Msol
epsilon_norm = 3.96e+44
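# Sanity check on the constant above: 1.85e-11 * 2.14e+55 ~= 3.96e+44, so
# epsilon() below comes out in erg s^-1 per solar mass of freshly synthesised 56Ni.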
# Pickle file for standard package Gaussian Process light curve interpolator
_datadir = dirname(__file__)
# ----------------------------------------------------------------------------
# Function definitions
# ----------------------------------------------------------------------------
def epsilon(t, tg):
"""Energy *deposited* via radioactive decay from Nadyozhin 1994
Calculates luminosity of radioactive decay in erg s^-1 (MNi/Msol)^-1,
as a function of time t in days since explosion. In other words,
multiply this by the nickel mass in solar masses to get the luminosity.
t: time since explosion in days
tg: fiducial time till optical depth = 1 to 56Co gamma rays,
in days after explosion (Jeffery 1999 calls this t_0)
"""
# cast time as numpy array
t = np.atleast_1d(t)
# Calculate optical depth to 56Co gamma rays. We assume that positrons
# remain fully trapped during the phases of interest, but to be rigorous
# we should probably include a positron optical depth as well.
tau = (tg/t)**2
# then input to the parametric form from Stritzinger & Leibundgut 2006
return (lNi*np.exp(-lNi*t)*QNiG
+ (lCo*(lNi/(lNi-lCo))*(np.exp(-lCo*t) - np.exp(-lNi*t))
* (QCoE + QCoG*(1-np.exp(-tau))))) * epsilon_norm
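# Rough scale check (pure arithmetic on the constants above): at t -> 0 the Co
# terms vanish, so epsilon ~ lNi * QNiG * epsilon_norm
# ~ (1/8.8) * 1.75 * 3.96e+44 ~ 7.9e+43 erg s^-1 (Msol of 56Ni)^-1.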
def Lambda(t, y):
"""Original Arnett 1982 dimensionless bolometric light curve expression
Calculates the bolometric light curve due to radioactive decay of 56Ni,
assuming no other energy input.
t: time since explosion in days
y: Arnett 1982 light curve width parameter (typical 0.7 < y < 1.4)
Returns the dimensionless light curve shape function.
"""
tm = 2*tNi*y
a, x = [ ], np.atleast_1d(t/tm)
ig = lambda z: 2*z * np.exp(-2*z*y + z**2)
for xi in x.ravel(): a.append(np.exp(-xi**2) * quad(ig, 0, xi)[0])
return np.array(a)
def A82LC_Co(t, y, tg):
"""Modified Arnett law adding Co decay
This version is generalized to include a source term with 56Co.
Done with reference to Dado & Dar's shameless rip-off, wherein
t_r (D&D's LC width) = 0.707 * tau_m (A82's LC width). We work with a
time axis with units of days rather than dimensionless time, to keep from
confusing the Gaussian processes (and the user!).
t: time since explosion in days
y: Arnett 1982 light curve width parameter (typical 0.7 < y < 1.4)
tg: fiducial time till optical depth = 1 to 56Co gamma rays,
in days after explosion (Jeffery 1999 calls this t_0)
Returns light curve normalized to 1.0 Msol of 56Ni.
"""
tm = 2*tNi*y # Arnett 1982 calls this tau_m
a, x = [ ], np.atleast_1d(t/tm)
ig = lambda xp: np.exp(xp**2) * 2*xp * epsilon(tm*xp, tg)
for xi in x.ravel(): a.append(np.exp(-xi**2) * quad(ig, 0, xi)[0])
return np.array(a)
def A82LC_CoR0(t, y, w, tg):
"""Modified Arnett law adding Co decay and finite-size effects
This version is generalized to include a source term with 56Co as well
as effects of a non-zero initial size. It therefore includes P*dV effects
from energy advected as the supernova shock breaks out through the outer
layers of the star. Applicable for SNe Ib/c, or for double-degenerate
"cold merger" SNe Ia with a substantial C+O envelope.
t: time since explosion in days
y: Arnett 1982 light curve width parameter (typical 0.7 < y < 1.4)
w: Arnett 1982 finite size effect parameter (expect w < 0.2)
tg: fiducial time till optical depth = 1 to 56Co gamma rays,
in days after explosion (Jeffery 1999 calls this t_0)
Includes *only* the trapped radioactive decay luminosity per unit 56Ni.
Initial thermal energy from the explosion is done below.
"""
tm = 2*tNi*y # Arnett 1982 calls this tau_m
a, x = [ ], np.atleast_1d(t/tm)
# Below: u = w*x + x**2, du = (w + 2*x)*dx
ig = lambda xp: np.exp((w+xp)*xp) * (w+2*xp) * epsilon(tm*xp, tg)
for xi in x.ravel():
a.append(np.exp(-(w+xi)*xi) * quad(ig, 0, xi)[0])
return np.array(a)
def A82LC_EthR0(t, y, w):
"""Diffusion of initial thermal shock energy through envelope
Since this piece can be calculated completely analytically, it makes
little sense to bind it up with the light curve calculation which needs
at least some quadrature. Thus this can be done on the fly and doesn't
need to be represented by a Gaussian process.
t: time since explosion in days
y: Arnett 1982 light curve width parameter (typical 0.7 < y < 1.4)
w: Arnett 1982 finite size effect parameter (expect w < 0.2)
Returns luminosity per unit initial thermal energy, that is, multiply
the below by Eth0 to get the full light curve.
"""
tm = 2*tNi*y # Arnett 1982 calls this tau_m
td = tm/(w + 1e-10) # Arnett 1982 calls this tau_0
a, x = [ ], np.atleast_1d(t/tm)
# Below: u = w*x + x**2
return np.exp(-(w+x)*x) / (td * 86400.0)
def A82LC_full(t, y, w, tg, MNi, Eth0):
"""Full Arnett 1982 LC directly evaluated, including 56Co and finite R0"""
return MNi * A82LC_CoR0(t, y, w, tg) + Eth0 * A82LC_EthR0(t, y, w)
def tau_h(R0, vsc):
"""Arnett 1982 expansion timescale, in days
R0: initial radius in cm
vsc: scaling velocity in cm/s
"""
return (R0/vsc) / 86400.0
def tau_0(R0, kappa, M, beta=13.7):
"""Arnett 1982 diffusion timescale, in days
R0: initial radius in cm
kappa: approximate opacity in cm^2/g
M: ejected mass in g
beta: dimensionless form factor, roughly 13.7 (Arnett 1980, 1982)
"""
return (kappa*M/(3e+10*beta*R0)) / 86400.0
def tau_m(vsc, kappa, M, beta=13.7):
"""Arnett 1982 light curve width timescale, in days
vsc: scaling velocity in cm/s
kappa: approximate opacity in cm^2/g
M: ejected mass in g
beta: dimensionless form factor, roughly 13.7 (Arnett 1980, 1982)
"""
R0 = 1e+6 # cm; not really important
return np.sqrt(2 * tau_h(R0, vsc) * tau_0(R0, kappa, M, beta=beta))
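# Illustrative numbers (assumed, not taken from this module): kappa ~ 0.1 cm^2/g,
# M ~ 1.4 Msol ~ 2.8e+33 g, vsc ~ 1e+9 cm/s give
# tau_m = sqrt(2*kappa*M/(beta*3e+10*vsc))/86400 ~ 13.5 days; R0 cancels out,
# which is why the placeholder value above is "not really important".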
# ----------------------------------------------------------------------------
# Class definitions
# ----------------------------------------------------------------------------
def A82LC_regr(x):
"""Mean function basis for Gaussian Process regression"""
x = np.asarray(x, dtype=np.float)
n_eval = x.shape[0]
# Extract the relevant degrees of freedom
AA = np.array
t, tg = AA([x[:,0]]).T, AA([x[:,3]]).T
fNi = t**2 * np.exp(-t/(2*tNi))
fCo = t**2 / (1 + (t/(tg + 1e-3))**4)
f = np.hstack([fNi, fCo])
return f
class A82LC_gp(object):
"""Class to encapsulate GP interpolation of light curves"""
def __init__(self, pklfname):
try:
self._load_gp(pklfname)
except:
self._setup_gp()
self._save_gp(pklfname)
def _setup_gp(self):
"""Set up a Gaussian process interpolator
This sets up a GP regression interpolator to evaluate the radioactive
part of the finite-size thing.
"""
# Set up the grid
t = np.array([0, 1, 2] + range(5, 120, 5) + [118, 119, 120],
dtype=np.float)
y = np.arange(0.5, 1.51, 0.25)
w = np.arange(0.0, 0.51, 0.25)
tg = np.arange(15.0, 75.1, 15.0)
# ok, let's go
X, L = [ ], [ ]
for yi in y:
for tgi in tg:
for wi in w:
print "setup: ", yi, wi, tgi
sys.stdout.flush()
Lc = A82LC_CoR0(t, yi, wi, tgi)
for ti, Lci in zip(t, Lc):
X.append([ti, yi, wi, tgi])
L.append(Lci)
# Okay, we've set up the inputs. Now make stuff happen.
print "initial thing set up with", len(L), "points"
print "fitting GP"
sys.stdout.flush()
ll0 = np.array([1.0, 1.0, 1.0, 5.0])
llL, llU = 0.01*ll0, 100.0*ll0
thetaL, theta0, thetaU = 0.5/llU**2, 0.5/ll0**2, 0.5/llL**2
self.gp = gaussian_process.GaussianProcess(
theta0=theta0, thetaL=thetaL, thetaU=thetaU,
verbose=True, nugget=1e-10, storage_mode='light',
corr='squared_exponential', regr=A82LC_regr)
self.gp.fit(X, L)
# print "GP fit done, theta =", self.gp.theta_
sys.stdout.flush()
def _save_gp(self, pklfname):
"""Saves GP to a pickle file"""
with open(pklfname, 'w') as pklfile:
pickle.dump(self.gp, pklfile)
def _load_gp(self, pklfname):
"""Loads GP from a pickle file"""
with open(pklfname) as pklfile:
self.gp = pickle.load(pklfile)
def __call__(self, t, pars):
"""Evaluates the light curve, given the parameters"""
# Unpack parameters
t = np.atleast_1d(t).ravel()
y, w, tg, MNi, Eth0 = pars
# Evaluate the radioactive part via Gaussian process
X = np.atleast_2d([(ti, y, w, tg) for ti in t])
lc_Co = self.gp.predict(X)
# Evaluate the trapped thermal energy
lc_Eth = A82LC_EthR0(t, y, w)
# Return the full light curve
return MNi * lc_Co + Eth0 * lc_Eth
# Package global standard Gaussian Process light curve interpolator
stdLCgp = A82LC_gp(_datadir + "/a82lc_gp_4d.pkl")
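# Minimal usage sketch (illustrative only; the GP pickle handling above is the
# expensive part, so this exercises just the direct analytic evaluation):
if __name__ == "__main__":
    t_demo = np.array([5.0, 10.0, 20.0, 40.0])
    # 1 Msol of 56Ni, no finite-size/thermal term, gamma-ray trapping time 40 d
    print A82LC_full(t_demo, y=1.0, w=0.0, tg=40.0, MNi=1.0, Eth0=0.0)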
|
|
__all__ = ['TestPost', 'TestDraft', 'TestTag', 'TestPostOperator']
import time
import unittest
from datetime import datetime
from ..helpers import TaoblogTestCase
from taoblog.models.post import Post, PostText, PostOperator, Tag, Draft
from taoblog.models import ModelError
class TestTag(TaoblogTestCase):
def setUp(self):
self.db_setup()
def tearDown(self):
self.db_teardown()
def test_name(self):
self.assertRaises(ModelError, Tag, 'hello world')
self.assertRaises(ModelError, Tag, 'hello\tworld')
self.assertRaises(ModelError, Tag, 'hello\nworld')
self.assertRaises(ModelError, Tag, 'hello\rworld')
self.assertRaises(ModelError, Tag, 'hello+world')
class TestDraft(TaoblogTestCase):
def setUp(self):
self.db_setup()
def tearDown(self):
self.db_teardown()
def test_autoupdate(self):
draft = Draft(title='title', text='text')
self.session.add(draft)
self.session.commit()
self.assertIsNotNone(draft.saved_at)
old_date = draft.saved_at
draft.title = 'new title'
time.sleep(1)
self.session.commit()
self.assertTrue(draft.saved_at>old_date)
class TestPost(TaoblogTestCase):
def setUp(self):
self.db_setup()
def tearDown(self):
self.db_teardown()
def test_slug(self):
post = Post(title='hello world', text='world',
slug='hello-world', author_id=1)
self.assertEqual(post.slug, 'hello-world')
# invalid slug
def _set_slug(slug):
post.slug = slug
self.assertRaises(ModelError, _set_slug, 'this contains spaces')
self.assertRaises(ModelError, _set_slug, 'this-contains-newline\n')
self.assertRaises(ModelError, _set_slug, 'this-contains-newline\r')
self.assertRaises(ModelError, _set_slug, 'this-contains/slash')
self.assertRaises(ModelError, _set_slug, 'this-contains-?')
self.assertRaises(ModelError, _set_slug, '')
self.assertRaises(ModelError, _set_slug, ' ')
self.assertRaises(ModelError, _set_slug, '\t')
self.assertEqual(post.permalink, '%d/%d/%s' % (datetime.utcnow().year,
datetime.utcnow().month,
post.slug))
def test_date(self):
post = Post(title='hello world', text='world',
slug='hello-world', author_id=1)
self.assertIsNone(post.created_at)
self.assertIsNone(post.updated_at)
self.session.add(post)
self.session.commit()
self.assertIsNotNone(post.created_at)
self.assertIsNone(post.updated_at)
post.text = 'new world'
self.session.commit()
self.assertIsNotNone(post.updated_at)
def test_tag(self):
clojure = Post(title='clojure lisp', text='',
slug='clojure-lisp', author_id=1)
scheme = Post(title='scheme lisp', text='',
slug='scheme-lisp', author_id=1)
# post not added to session, raise error
self.assertRaises(RuntimeError, clojure.add_tags, ['clojure'])
self.assertRaises(RuntimeError, clojure.remove_tags, ['clojure'])
self.assertRaises(RuntimeError, clojure.set_tags, ['clojure'])
self.assertRaises(RuntimeError, clojure.clear_tags)
self.session.add(clojure)
self.session.add(scheme)
# add tags
# post tags
# clojure: Clojure, LISP
# scheme: Scheme, LISP
self.assertEqual(clojure.add_tags(['Clojure'])[0].name, 'Clojure')
self.assertEqual(clojure.add_tags(['LISP'])[0].name, 'LISP')
self.assertEqual(set(clojure.tags), {'Clojure', 'LISP'})
self.assertEqual(scheme.add_tags(['Scheme'])[0].name, 'Scheme')
self.assertEqual(scheme.add_tags(['SCHEME']), []) # no new tag added
self.assertEqual(scheme.add_tags(['scheme']), []) # no new tag added
self.assertEqual(scheme.add_tags(['lisp'])[0].name, 'LISP')
self.assertEqual(set(scheme.tags), {'Scheme', 'LISP'})
self.assertEqual(set(clojure.tags), {'Clojure', 'LISP'})
# remove tags
scheme.remove_tags(['SCHEME'])
self.assertIsNone(self.session.query(Tag).filter_by(name='Scheme').first())
scheme.remove_tags(['lisp'])
self.assertEqual(self.session.query(Tag).filter_by(name='LISP').first().name, 'LISP')
self.assertEqual(scheme.tags, [])
# clear tags
clojure.clear_tags()
self.assertEqual(clojure.tags, [])
self.assertIsNone(self.session.query(Tag).filter_by(name='Clojure').first())
self.assertIsNone(self.session.query(Tag).first())
scheme.set_tags(['SCHEME', 'LISP', 'Scheme', 'Lisp'])
self.assertEqual(set(tag.name for tag in self.session.query(Tag).all()), {'SCHEME', 'LISP'})
self.assertEqual(scheme.set_tags(['scheme', 'lisp', 'scheme', 'lisp']), ([], [])) # add none, remove none
def test_content(self):
post = Post(title='hello world', text='world',
slug='hello-world', author_id=1)
self.assertEqual(post.content, '<p>%s</p>\n' % post.text)
post.text = 'new world'
self.assertEqual(post.content, '<p>%s</p>\n' % post.text)
def test_query(self):
post = Post(title='a title', text='the first post',
slug='a-title', author_id=1)
self.session.add(post)
self.session.commit()
result = self.session.query(Post).filter_by(title='a title').one()
self.assertEqual(result.title, post.title)
post = Post(title='a title', text='the second post',
slug='a-title', author_id=1)
self.session.add(post)
self.session.commit()
result = self.session.query(Post).join(PostText)\
.filter(PostText.text=='the second post').one()
self.assertEqual(result.text, post.text)
class TestPostOperator(TaoblogTestCase):
def setUp(self):
self.db_setup()
def tearDown(self):
self.db_teardown()
def test_create_post(self):
post = Post(title='hello', text='world',
slug='hello', author_id=1)
op = PostOperator(self.session)
op.create_post(post)
self.assertEqual(op.get_post(post.id), post)
# same slug is not allowed
another_post = Post(title='hello', text='world',
slug='hello', author_id=1)
self.assertRaises(ModelError, op.create_post, another_post)
def test_get_posts(self):
op = PostOperator(self.session)
# create post
post = Post(title='hello', text='world',
slug='hello-world', author_id=1)
op.create_post(post)
self.assertEqual(op.get_post(post.id), post)
# get public posts
haskell = Post(title='haskell-2012', text='world3',
slug='hehe', author_id=1)
haskell.created_at = datetime(year=2012, month=4, day=29)
op.create_post(haskell)
haskell.add_tags(['haskell', 'fp'])
scheme = Post(title='scheme-2010', text='world2',
slug='haha', author_id=1)
scheme.created_at = datetime(year=2010, month=1, day=16)
op.create_post(scheme)
scheme.add_tags(['scheme', 'fp'])
clojure = Post(title='clojure-2009', text='world1',
slug='haha', author_id=1)
clojure.created_at = datetime(year=2009, month=12, day=13)
op.create_post(clojure)
clojure.add_tags(['clojure', 'fp'])
posts, more = op.get_public_posts()
self.assertEqual(4, len(posts))
self.assertEqual(posts, [post, haskell, scheme, clojure])
self.assertFalse(more) # no more
self.assertEqual(set([str(tag) for tag in op.get_public_tags()]),
{'clojure', 'fp', 'scheme', 'haskell'})
op.trash_post(post)
posts, more = op.get_public_posts()
self.assertEqual(posts, [haskell, scheme, clojure])
self.assertFalse(more)
# scheme will be removed from public tags
op.trash_post(scheme)
self.assertEqual(set([tag.name for tag in op.get_public_tags()]),
{'clojure', 'fp', 'haskell'})
self.assertEqual(set([str(tag) for tag in op.get_trash_tags()]),
{'scheme', 'fp'})
if __name__ == '__main__':
unittest.main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
import tvm
from .. import defop, AllTypes, RealTypes
from .. import assign_by_req, reduce_axes
def compute_add(dtype, ndim):
A = tvm.te.placeholder([tvm.te.size_var() for _ in range(ndim)], name='A', dtype=dtype)
B = tvm.te.placeholder([tvm.te.size_var() for _ in range(ndim)], name='B', dtype=dtype)
C = tvm.te.compute([tvm.te.size_var() for _ in range(ndim)],
lambda *index: A[index] + B[index], name='C')
s = tvm.te.create_schedule(C.op)
return s, A, B, C
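# Design note (a reading of the code above, not documented behaviour): C is
# built over fresh size_var() shapes rather than reusing A's, and the defops
# below pass auto_broadcast=True, so one compiled kernel is meant to serve
# broadcastable pairs of 5-d inputs.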
@defop(name="vadd", target="cpu", auto_broadcast=True,
dtype=AllTypes, ndim=[5])
def vadd(dtype, ndim):
s, A, B, C = compute_add(dtype, ndim)
axes = [axis for axis in C.op.axis]
fused = s[C].fuse(*axes)
s[C].parallel(fused)
return s, [A, B, C]
@defop(name="cuda_vadd", target="cuda", auto_broadcast=True,
dtype=["float32", "float64"], ndim=[5])
def vadd_gpu(dtype, ndim):
s, A, B, C = compute_add(dtype, ndim)
s = tvm.te.create_schedule(C.op)
axes = [axis for axis in C.op.axis]
fused = s[C].fuse(*axes)
bx, tx = s[C].split(fused, factor=64)
s[C].bind(bx, tvm.te.thread_axis("blockIdx.x"))
s[C].bind(tx, tvm.te.thread_axis("threadIdx.x"))
return s, [A, B, C]
def compute_backward_vadd(dtype, ndim, reduce1st, req):
# The backward of broadcast op is basically a reduction on broadcast axes.
# We label the reduce axes as 1 and other axes as 0, and they form a bit string.
# Each bit string corresponds to a kernel, so the number of kernels is as many as `2^n`.
# To reduce it, the bit string is compressed by combining consecutive 0s or 1s.
# In this way, the number of bit strings (the number of kernels) is reduced to `2 * n`.
# The compressed bit string is stored in `axes`, and `reduce1st` represents the first bit
# of the compressed bit string. Credit to @junrushao1994 and @yzhliu.
axes = ([reduce1st, 1 - reduce1st] * ndim)[:ndim]
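# e.g. ndim=5, reduce1st=1 -> axes == [1, 0, 1, 0, 1], i.e. dimensions 0, 2 and
# 4 are reduced; reduce1st=0 gives the complementary pattern [0, 1, 0, 1, 0].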
X = tvm.te.placeholder([tvm.te.size_var() for _ in range(ndim)], name='X', dtype=dtype)
reducer = tvm.te.comm_reducer(lambda x, y: x + y,
lambda t: tvm.tir.const(0, dtype=t), name="sum")
ret = reduce_axes(X, axes, reducer)
in_grad_a, in_grad = assign_by_req(ret, req)
s = tvm.te.create_schedule(in_grad.op)
return s, X, in_grad_a, in_grad, [ret, in_grad]
@defop(name="backward_vadd", target="cpu", dtype=AllTypes,
ndim=[5], reduce1st=[0, 1],
req=["kWriteTo", "kAddTo"], attrs=["reduce1st", "req"])
def backward_vadd(dtype, ndim, reduce1st, req):
s, X, in_grad_a, in_grad, c_list = compute_backward_vadd(dtype, ndim, reduce1st, req)
for t in c_list:
axes = [axis for axis in t.op.axis]
fused = s[t].fuse(*axes)
s[t].parallel(fused)
return s, [X, in_grad_a, in_grad]
@defop(name="cuda_backward_vadd", target="gpu", dtype=["float32", "float64"],
ndim=[5], reduce1st=[0, 1],
req=["kWriteTo", "kAddTo"], attrs=["reduce1st", "req"])
def backward_vadd_gpu(dtype, ndim, reduce1st, req):
s, X, in_grad_a, in_grad, c_list = compute_backward_vadd(dtype, ndim, reduce1st, req)
num_thread = 64
for t in c_list:
block_x = tvm.te.thread_axis("blockIdx.x")
thread_x = tvm.te.thread_axis("threadIdx.x")
axes = [axis for axis in t.op.axis]
fused = s[t].fuse(*axes)
bx, tx = s[t].split(fused, factor=num_thread)
s[t].bind(bx, block_x)
s[t].bind(tx, thread_x)
return s, [X, in_grad_a, in_grad]
def compute_degandrad(dtype, ndim, n):
A = tvm.te.placeholder([tvm.te.size_var() for _ in range(ndim)], name='A', dtype=dtype)
import math
if n == 0:
B = tvm.te.compute([tvm.te.size_var() for _ in range(ndim)],
lambda *index: A[index] * tvm.tir.const(math.pi, dtype) / tvm.tir.const(180, dtype), name='B')
else:
B = tvm.te.compute([tvm.te.size_var() for _ in range(ndim)],
lambda *index: A[index] / tvm.tir.const(math.pi, dtype) * tvm.tir.const(180, dtype), name='B')
s = tvm.te.create_schedule(B.op)
return s, A, B
@defop(name="deg2rad", target="cpu", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)))
def deg2rad(dtype, ndim):
s, A, B = compute_degandrad(dtype, ndim, 0)
axes = [axis for axis in B.op.axis]
fused = s[B].fuse(*axes)
s[B].parallel(fused)
return s, [A, B]
@defop(name="rad2deg", target="cpu", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)))
def rad2deg(dtype, ndim):
s, A, B = compute_degandrad(dtype, ndim, 1)
axes = [axis for axis in B.op.axis]
fused = s[B].fuse(*axes)
s[B].parallel(fused)
return s, [A, B]
@defop(name="cuda_deg2rad", target="cuda", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)))
def deg2rad_gpu(dtype, ndim):
s, A, B = compute_degandrad(dtype, ndim, 0)
s = tvm.te.create_schedule(B.op)
axes = [axis for axis in B.op.axis]
fused = s[B].fuse(*axes)
bx, tx = s[B].split(fused, factor=64)
s[B].bind(bx, tvm.te.thread_axis("blockIdx.x"))
s[B].bind(tx, tvm.te.thread_axis("threadIdx.x"))
return s, [A, B]
@defop(name="cuda_rad2deg", target="cuda", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)))
def rad2deg_gpu(dtype, ndim):
s, A, B = compute_degandrad(dtype, ndim, 1)
s = tvm.te.create_schedule(B.op)
axes = [axis for axis in B.op.axis]
fused = s[B].fuse(*axes)
bx, tx = s[B].split(fused, factor=64)
s[B].bind(bx, tvm.te.thread_axis("blockIdx.x"))
s[B].bind(tx, tvm.te.thread_axis("threadIdx.x"))
return s, [A, B]
def compute_backward_degandrad(dtype, ndim, req, n):
ishape = [tvm.te.size_var() for _ in range(ndim)]
in_grad_tmp = tvm.te.placeholder(ishape, name='in_grad_tmp', dtype=dtype)
in_grad = tvm.te.placeholder(ishape, name='in_grad', dtype=dtype)
out_grad = tvm.te.placeholder(ishape, name='out_grad', dtype=dtype)
import math
if n == 0:
ret = tvm.te.compute(ishape, lambda *index: out_grad[index] * tvm.tir.const(math.pi, dtype) / tvm.tir.const(180, dtype))
else:
ret = tvm.te.compute(ishape, lambda *index: out_grad[index] / tvm.tir.const(math.pi, dtype) * tvm.tir.const(180, dtype))
if (req == "kAddTo"):
in_grad = tvm.te.compute(ishape, lambda *index: in_grad_tmp[index] + ret[index])
else:
in_grad = tvm.te.compute(ishape, lambda *index: ret[index])
s = tvm.te.create_schedule(in_grad.op)
return s, out_grad, in_grad_tmp, in_grad, [ret, in_grad]
@defop(name="backward_deg2rad", target="cpu", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)), req=["kWriteTo", "kAddTo"],
attrs=["req"])
def backward_deg2rad(dtype, ndim, req):
s, out_grad, in_grad_tmp, in_grad, c_list = compute_backward_degandrad(dtype, ndim, req, 0)
for t in c_list:
axes = [axis for axis in t.op.axis]
fused = s[t].fuse(*axes)
s[t].parallel(fused)
return s, [out_grad, in_grad, in_grad_tmp]
@defop(name="backward_rad2deg", target="cpu", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)), req=["kWriteTo", "kAddTo"],
attrs=["req"])
def backward_rad2deg(dtype, ndim, req):
s, out_grad, in_grad_tmp, in_grad, c_list = compute_backward_degandrad(dtype, ndim, req, 1)
for t in c_list:
axes = [axis for axis in t.op.axis]
fused = s[t].fuse(*axes)
s[t].parallel(fused)
return s, [out_grad, in_grad, in_grad_tmp]
@defop(name="cuda_backward_deg2rad", target="gpu", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)), req=["kWriteTo", "kAddTo"],
attrs=["req"])
def cuda_backward_deg2rad(dtype, ndim, req):
s, out_grad, in_grad_tmp, in_grad, c_list = compute_backward_degandrad(dtype, ndim, req, 0)
num_thread = 64
for t in c_list:
block_x = tvm.te.thread_axis("blockIdx.x")
thread_x = tvm.te.thread_axis("threadIdx.x")
axes = [axis for axis in t.op.axis]
fused = s[t].fuse(*axes)
bx, tx = s[t].split(fused, factor=num_thread)
s[t].bind(bx, block_x)
s[t].bind(tx, thread_x)
return s, [out_grad, in_grad, in_grad_tmp]
@defop(name="cuda_backward_rad2deg", target="gpu", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)), req=["kWriteTo", "kAddTo"],
attrs=["req"])
def cuda_backward_rad2deg(dtype, ndim, req):
s, out_grad, in_grad_tmp, in_grad, c_list = compute_backward_degandrad(dtype, ndim, req, 1)
num_thread = 64
for t in c_list:
block_x = tvm.te.thread_axis("blockIdx.x")
thread_x = tvm.te.thread_axis("threadIdx.x")
axes = [axis for axis in t.op.axis]
fused = s[t].fuse(*axes)
bx, tx = s[t].split(fused, factor=num_thread)
s[t].bind(bx, block_x)
s[t].bind(tx, thread_x)
return s, [out_grad, in_grad, in_grad_tmp]
|
|
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import functools
import mock
import six
import glance.common.exception as exc
from glance.common.glare import declarative
import glance.common.glare.definitions as defs
from glance.common.glare import serialization
import glance.tests.utils as test_utils
BASE = declarative.get_declarative_base()
class TestDeclarativeProperties(test_utils.BaseTestCase):
def test_artifact_type_properties(self):
class SomeTypeWithNoExplicitName(BASE):
some_attr = declarative.AttributeDefinition()
class InheritedType(SomeTypeWithNoExplicitName):
__type_version__ = '1.0'
__type_name__ = 'ExplicitName'
__type_description__ = 'Type description'
__type_display_name__ = 'EXPLICIT_NAME'
__endpoint__ = 'some_endpoint'
some_attr = declarative.AttributeDefinition(display_name='NAME')
base_type = SomeTypeWithNoExplicitName
base_instance = SomeTypeWithNoExplicitName()
self.assertIsNotNone(base_type.metadata)
self.assertIsNotNone(base_instance.metadata)
self.assertEqual(base_type.metadata, base_instance.metadata)
self.assertEqual("SomeTypeWithNoExplicitName",
base_type.metadata.type_name)
self.assertEqual("SomeTypeWithNoExplicitName",
base_type.metadata.type_display_name)
self.assertEqual("1.0", base_type.metadata.type_version)
self.assertIsNone(base_type.metadata.type_description)
self.assertEqual('sometypewithnoexplicitname',
base_type.metadata.endpoint)
self.assertIsNone(base_instance.some_attr)
self.assertIsNotNone(base_type.some_attr)
self.assertEqual(base_type.some_attr,
base_instance.metadata.attributes.all['some_attr'])
self.assertEqual('some_attr', base_type.some_attr.name)
self.assertEqual('some_attr', base_type.some_attr.display_name)
self.assertIsNone(base_type.some_attr.description)
derived_type = InheritedType
derived_instance = InheritedType()
self.assertIsNotNone(derived_type.metadata)
self.assertIsNotNone(derived_instance.metadata)
self.assertEqual(derived_type.metadata, derived_instance.metadata)
self.assertEqual('ExplicitName', derived_type.metadata.type_name)
self.assertEqual('EXPLICIT_NAME',
derived_type.metadata.type_display_name)
self.assertEqual('1.0', derived_type.metadata.type_version)
self.assertEqual('Type description',
derived_type.metadata.type_description)
self.assertEqual('some_endpoint', derived_type.metadata.endpoint)
self.assertIsNone(derived_instance.some_attr)
self.assertIsNotNone(derived_type.some_attr)
self.assertEqual(derived_type.some_attr,
derived_instance.metadata.attributes.all['some_attr'])
self.assertEqual('some_attr', derived_type.some_attr.name)
self.assertEqual('NAME', derived_type.some_attr.display_name)
def test_wrong_type_definition(self):
def declare_wrong_type_version():
class WrongType(BASE):
__type_version__ = 'abc' # not a semver
return WrongType
def declare_wrong_type_name():
class WrongType(BASE):
__type_name__ = 'a' * 256 # too long
return WrongType
self.assertRaises(exc.InvalidArtifactTypeDefinition,
declare_wrong_type_version)
self.assertRaises(exc.InvalidArtifactTypeDefinition,
declare_wrong_type_name)
def test_base_declarative_attributes(self):
class TestType(BASE):
defaulted = declarative.PropertyDefinition(default=42)
read_only = declarative.PropertyDefinition(readonly=True)
required_attr = declarative.PropertyDefinition(required=True)
e = self.assertRaises(exc.InvalidArtifactPropertyValue, TestType)
self.assertEqual('required_attr', e.name)
self.assertIsNone(e.value)
tt = TestType(required_attr="universe")
self.assertEqual('universe', tt.required_attr)
self.assertEqual(42, tt.defaulted)
self.assertIsNone(tt.read_only)
tt = TestType(required_attr="universe", defaulted=0, read_only="Hello")
self.assertEqual(0, tt.defaulted)
self.assertEqual("Hello", tt.read_only)
tt.defaulted = 5
self.assertEqual(5, tt.defaulted)
tt.required_attr = 'Foo'
self.assertEqual('Foo', tt.required_attr)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt,
'read_only', 'some_val')
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt,
'required_attr', None)
# no type checks in base AttributeDefinition
o = object()
tt.required_attr = o
self.assertEqual(o, tt.required_attr)
def test_generic_property(self):
class TestType(BASE):
simple_prop = declarative.PropertyDefinition()
immutable_internal = declarative.PropertyDefinition(mutable=False,
internal=True)
prop_with_allowed = declarative.PropertyDefinition(
allowed_values=["Foo", True, 42])
class DerivedType(TestType):
prop_with_allowed = declarative.PropertyDefinition(
allowed_values=["Foo", True, 42], required=True, default=42)
tt = TestType()
self.assertEqual(True,
tt.metadata.attributes.all['simple_prop'].mutable)
self.assertEqual(False,
tt.metadata.attributes.all['simple_prop'].internal)
self.assertEqual(False,
tt.metadata.attributes.all[
'immutable_internal'].mutable)
self.assertEqual(True,
tt.metadata.attributes.all[
'immutable_internal'].internal)
self.assertIsNone(tt.prop_with_allowed)
tt = TestType(prop_with_allowed=42)
self.assertEqual(42, tt.prop_with_allowed)
tt = TestType(prop_with_allowed=True)
self.assertEqual(True, tt.prop_with_allowed)
tt = TestType(prop_with_allowed='Foo')
self.assertEqual('Foo', tt.prop_with_allowed)
tt.prop_with_allowed = 42
self.assertEqual(42, tt.prop_with_allowed)
tt.prop_with_allowed = 'Foo'
self.assertEqual('Foo', tt.prop_with_allowed)
tt.prop_with_allowed = True
self.assertEqual(True, tt.prop_with_allowed)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr,
tt, 'prop_with_allowed', 'bar')
# ensure that wrong assignment didn't change the value
self.assertEqual(True, tt.prop_with_allowed)
self.assertRaises(exc.InvalidArtifactPropertyValue, TestType,
prop_with_allowed=False)
dt = DerivedType()
self.assertEqual(42, dt.prop_with_allowed)
def test_default_violates_allowed(self):
def declare_wrong_type():
class WrongType(BASE):
prop = declarative.PropertyDefinition(
allowed_values=['foo', 'bar'],
default='baz')
return WrongType
self.assertRaises(exc.InvalidArtifactTypePropertyDefinition,
declare_wrong_type)
def test_string_property(self):
class TestType(BASE):
simple = defs.String()
with_length = defs.String(max_length=10, min_length=5)
with_pattern = defs.String(pattern='^\\d+$', default='42')
tt = TestType()
tt.simple = 'foo'
self.assertEqual('foo', tt.simple)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr,
tt, 'simple', 42)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr,
tt, 'simple', 'x' * 256)
self.assertRaises(exc.InvalidArtifactPropertyValue, TestType,
simple='x' * 256)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr,
tt, 'with_length', 'x' * 11)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr,
tt, 'with_length', 'x' * 4)
tt.simple = 'x' * 5
self.assertEqual('x' * 5, tt.simple)
tt.simple = 'x' * 10
self.assertEqual('x' * 10, tt.simple)
self.assertEqual("42", tt.with_pattern)
tt.with_pattern = '0'
self.assertEqual('0', tt.with_pattern)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt,
'with_pattern', 'abc')
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt,
'with_pattern', '.123.')
def test_binary_object_mutable(self):
def declare_blob(mutable):
class BLOB(BASE):
prop = defs.BinaryObject(mutable=mutable)
return BLOB
blob = declare_blob(False)()
self.assertFalse(blob.metadata.attributes.all['prop'].mutable)
self.assertRaises(exc.InvalidArtifactTypePropertyDefinition,
functools.partial(declare_blob, True))
def test_default_and_allowed_violates_string_constrains(self):
def declare_wrong_default():
class WrongType(BASE):
prop = defs.String(min_length=4, default='foo')
return WrongType
def declare_wrong_allowed():
class WrongType(BASE):
prop = defs.String(min_length=4, allowed_values=['foo', 'bar'])
return WrongType
self.assertRaises(exc.InvalidArtifactTypePropertyDefinition,
declare_wrong_default)
self.assertRaises(exc.InvalidArtifactTypePropertyDefinition,
declare_wrong_allowed)
def test_integer_property(self):
class TestType(BASE):
simple = defs.Integer()
constrained = defs.Integer(min_value=10, max_value=50)
tt = TestType()
self.assertIsNone(tt.simple)
self.assertIsNone(tt.constrained)
tt.simple = 0
tt.constrained = 10
self.assertEqual(0, tt.simple)
self.assertEqual(10, tt.constrained)
tt.constrained = 50
self.assertEqual(50, tt.constrained)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt,
'constrained', 1)
self.assertEqual(50, tt.constrained)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt,
'constrained', 51)
self.assertEqual(50, tt.constrained)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt,
'simple', '11')
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt,
'simple', 10.5)
def test_default_and_allowed_violates_int_constrains(self):
def declare_wrong_default():
class WrongType(BASE):
prop = defs.Integer(min_value=4, default=1)
return WrongType
def declare_wrong_allowed():
class WrongType(BASE):
prop = defs.Integer(min_value=4, max_value=10,
allowed_values=[1, 15])
return WrongType
self.assertRaises(exc.InvalidArtifactTypePropertyDefinition,
declare_wrong_default)
self.assertRaises(exc.InvalidArtifactTypePropertyDefinition,
declare_wrong_allowed)
def test_numeric_values(self):
class TestType(BASE):
simple = defs.Numeric()
constrained = defs.Numeric(min_value=3.14, max_value=4.1)
tt = TestType(simple=0.1, constrained=4)
self.assertEqual(0.1, tt.simple)
self.assertEqual(4.0, tt.constrained)
tt.simple = 1
self.assertEqual(1, tt.simple)
tt.constrained = 3.14
self.assertEqual(3.14, tt.constrained)
tt.constrained = 4.1
self.assertEqual(4.1, tt.constrained)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt,
'simple', 'qwerty')
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt,
'constrained', 3)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt,
'constrained', 5)
def test_default_and_allowed_violates_numeric_constrains(self):
def declare_wrong_default():
class WrongType(BASE):
prop = defs.Numeric(min_value=4.0, default=1.1)
return WrongType
def declare_wrong_allowed():
class WrongType(BASE):
prop = defs.Numeric(min_value=4.0, max_value=10.0,
allowed_values=[1.0, 15.5])
return WrongType
self.assertRaises(exc.InvalidArtifactTypePropertyDefinition,
declare_wrong_default)
self.assertRaises(exc.InvalidArtifactTypePropertyDefinition,
declare_wrong_allowed)
def test_same_item_type_array(self):
class TestType(BASE):
simple = defs.Array()
unique = defs.Array(unique=True)
simple_with_allowed_values = defs.Array(
defs.String(allowed_values=["Foo", "Bar"]))
defaulted = defs.Array(defs.Boolean(), default=[True, False])
constrained = defs.Array(item_type=defs.Numeric(min_value=0),
min_size=3, max_size=5, unique=True)
tt = TestType(simple=[])
self.assertEqual([], tt.simple)
tt.simple.append("Foo")
self.assertEqual(["Foo"], tt.simple)
tt.simple.append("Foo")
self.assertEqual(["Foo", "Foo"], tt.simple)
self.assertEqual(2, len(tt.simple))
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.simple.append,
42)
tt.simple.pop(1)
self.assertEqual(["Foo"], tt.simple)
del tt.simple[0]
self.assertEqual(0, len(tt.simple))
tt.simple_with_allowed_values = ["Foo"]
tt.simple_with_allowed_values.insert(0, "Bar")
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.simple_with_allowed_values.append, "Baz")
self.assertEqual([True, False], tt.defaulted)
tt.defaulted.pop()
self.assertEqual([True], tt.defaulted)
tt2 = TestType()
self.assertEqual([True, False], tt2.defaulted)
self.assertIsNone(tt.constrained)
tt.constrained = [10, 5, 4]
self.assertEqual([10, 5, 4], tt.constrained)
tt.constrained[1] = 15
self.assertEqual([10, 15, 4], tt.constrained)
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.constrained.__setitem__, 1, -5)
self.assertEqual([10, 15, 4], tt.constrained)
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.constrained.remove, 15)
self.assertEqual([10, 15, 4], tt.constrained)
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.constrained.__delitem__, 1)
self.assertEqual([10, 15, 4], tt.constrained)
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.constrained.append, 15)
self.assertEqual([10, 15, 4], tt.constrained)
tt.unique = []
tt.unique.append("foo")
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.unique.append,
"foo")
def test_tuple_style_array(self):
class TestType(BASE):
address = defs.Array(
item_type=[defs.String(20), defs.Integer(min_value=1),
defs.Boolean()])
tt = TestType(address=["Hope Street", 1234, True])
self.assertEqual("Hope Street", tt.address[0])
self.assertEqual(1234, tt.address[1])
self.assertEqual(True, tt.address[2])
# On Python 3, sort() fails because int (1) and string ("20") are not
# comparable
if six.PY2:
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.address.sort)
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.address.pop, 0)
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.address.pop, 1)
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.address.pop)
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.address.append,
"Foo")
def test_same_item_type_dict(self):
class TestType(BASE):
simple_props = defs.Dict()
constrained_props = defs.Dict(
properties=defs.Integer(min_value=1, allowed_values=[1, 2]),
min_properties=2,
max_properties=3)
tt = TestType()
self.assertIsNone(tt.simple_props)
self.assertIsNone(tt.constrained_props)
tt.simple_props = {}
self.assertEqual({}, tt.simple_props)
tt.simple_props["foo"] = "bar"
self.assertEqual({"foo": "bar"}, tt.simple_props)
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.simple_props.__setitem__, 42, "foo")
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.simple_props.setdefault, "bar", 42)
tt.constrained_props = {"foo": 1, "bar": 2}
self.assertEqual({"foo": 1, "bar": 2}, tt.constrained_props)
tt.constrained_props["baz"] = 1
self.assertEqual({"foo": 1, "bar": 2, "baz": 1}, tt.constrained_props)
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.constrained_props.__setitem__, "foo", 3)
self.assertEqual(1, tt.constrained_props["foo"])
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.constrained_props.__setitem__, "qux", 2)
tt.constrained_props.pop("foo")
self.assertEqual({"bar": 2, "baz": 1}, tt.constrained_props)
tt.constrained_props['qux'] = 2
self.assertEqual({"qux": 2, "bar": 2, "baz": 1}, tt.constrained_props)
tt.constrained_props.popitem()
dict_copy = tt.constrained_props.copy()
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.constrained_props.popitem)
self.assertEqual(dict_copy, tt.constrained_props)
def test_composite_dict(self):
class TestType(BASE):
props = defs.Dict(properties={"foo": defs.String(),
"bar": defs.Boolean()})
fixed = defs.Dict(properties={"name": defs.String(min_length=2),
"age": defs.Integer(min_value=0,
max_value=99)})
tt = TestType()
tt.props = {"foo": "FOO", "bar": False}
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.props.__setitem__, "bar", 123)
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.props.__setitem__, "extra", "value")
tt.fixed = {"name": "Alex", "age": 42}
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.fixed.__setitem__, "age", 120)
def test_immutables(self):
class TestType(BASE):
activated = defs.Boolean(required=True, default=False)
name = defs.String(mutable=False)
def __is_mutable__(self):
return not self.activated
tt = TestType()
self.assertEqual(False, tt.activated)
self.assertIsNone(tt.name)
tt.name = "Foo"
self.assertEqual("Foo", tt.name)
tt.activated = True
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr,
tt, "name", "Bar")
self.assertEqual("Foo", tt.name)
tt.activated = False
tt.name = "Bar"
self.assertEqual("Bar", tt.name)
def test_readonly_array_dict(self):
class TestType(BASE):
arr = defs.Array(readonly=True)
dict = defs.Dict(readonly=True)
tt = TestType(arr=["Foo", "Bar"], dict={"qux": "baz"})
self.assertEqual(["Foo", "Bar"], tt.arr)
self.assertEqual({"qux": "baz"}, tt.dict)
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.append,
"Baz")
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.insert,
0, "Baz")
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.__setitem__,
0, "Baz")
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.remove,
"Foo")
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.pop)
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.dict.pop,
"qux")
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.dict.__setitem__, "qux", "foo")
def test_mutable_array_dict(self):
class TestType(BASE):
arr = defs.Array(mutable=False)
dict = defs.Dict(mutable=False)
activated = defs.Boolean()
def __is_mutable__(self):
return not self.activated
tt = TestType()
tt.arr = []
tt.dict = {}
tt.arr.append("Foo")
tt.arr.insert(0, "Bar")
tt.dict["baz"] = "qux"
tt.activated = True
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.append,
"Baz")
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.insert,
0, "Baz")
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.__setitem__,
0, "Baz")
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.remove,
"Foo")
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.pop)
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.dict.pop,
"qux")
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.dict.__setitem__, "qux", "foo")
def test_readonly_as_write_once(self):
class TestType(BASE):
prop = defs.String(readonly=True)
arr = defs.Array(readonly=True)
tt = TestType()
self.assertIsNone(tt.prop)
tt.prop = "Foo"
self.assertEqual("Foo", tt.prop)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt,
"prop", "bar")
tt2 = TestType()
self.assertIsNone(tt2.prop)
tt2.prop = None
self.assertIsNone(tt2.prop)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt2,
"prop", None)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt2,
"prop", "foo")
self.assertIsNone(tt.arr)
tt.arr = ["foo", "bar"]
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.append,
'baz')
self.assertIsNone(tt2.arr)
tt2.arr = None
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.append,
'baz')
class TestArtifactType(test_utils.BaseTestCase):
def test_create_artifact(self):
a = defs.ArtifactType(**get_artifact_fixture())
self.assertIsNotNone(a)
self.assertEqual("123", a.id)
self.assertEqual("ArtifactType", a.type_name)
self.assertEqual("1.0", a.type_version)
self.assertEqual("11.2", a.version)
self.assertEqual("Foo", a.name)
self.assertEqual("private", a.visibility)
self.assertEqual("creating", a.state)
self.assertEqual("my_tenant", a.owner)
self.assertEqual(a.created_at, a.updated_at)
self.assertIsNone(a.description)
self.assertIsNone(a.published_at)
self.assertIsNone(a.deleted_at)
self.assertIsNone(a.description)
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, "id",
"foo")
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a,
"state", "active")
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a,
"owner", "some other")
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a,
"created_at", datetime.datetime.now())
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a,
"deleted_at", datetime.datetime.now())
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a,
"updated_at", datetime.datetime.now())
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a,
"published_at", datetime.datetime.now())
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a,
"visibility", "wrong")
def test_dependency_prop(self):
class DerivedType(defs.ArtifactType):
depends_on_any = defs.ArtifactReference()
depends_on_self = defs.ArtifactReference(type_name='DerivedType')
depends_on_self_version = defs.ArtifactReference(
type_name='DerivedType',
type_version='1.0')
class DerivedTypeV11(DerivedType):
__type_name__ = 'DerivedType'
__type_version__ = '1.1'
depends_on_self_version = defs.ArtifactReference(
type_name='DerivedType',
type_version='1.1')
d1 = DerivedType(**get_artifact_fixture())
d2 = DerivedTypeV11(**get_artifact_fixture())
a = defs.ArtifactType(**get_artifact_fixture())
d1.depends_on_any = a
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, d1,
'depends_on_self', a)
d1.depends_on_self = d2
d2.depends_on_self = d1
d1.depends_on_self_version = d1
d2.depends_on_self_version = d2
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, d1,
'depends_on_self_version', d2)
def test_dependency_list(self):
class FooType(defs.ArtifactType):
pass
class BarType(defs.ArtifactType):
pass
class TestType(defs.ArtifactType):
depends_on = defs.ArtifactReferenceList()
depends_on_self_or_foo = defs.ArtifactReferenceList(
references=defs.ArtifactReference(['FooType', 'TestType']))
a = defs.ArtifactType(**get_artifact_fixture(id="1"))
a_copy = defs.ArtifactType(**get_artifact_fixture(id="1"))
b = defs.ArtifactType(**get_artifact_fixture(id="2"))
tt = TestType(**get_artifact_fixture(id="3"))
foo = FooType(**get_artifact_fixture(id='4'))
bar = BarType(**get_artifact_fixture(id='4'))
tt.depends_on.append(a)
tt.depends_on.append(b)
self.assertEqual([a, b], tt.depends_on)
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.depends_on.append, a)
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.depends_on.append, a_copy)
tt.depends_on_self_or_foo.append(tt)
tt.depends_on_self_or_foo.append(foo)
self.assertRaises(exc.InvalidArtifactPropertyValue,
tt.depends_on_self_or_foo.append, bar)
self.assertEqual([tt, foo], tt.depends_on_self_or_foo)
def test_blob(self):
class TestType(defs.ArtifactType):
image_file = defs.BinaryObject(max_file_size=201054,
min_locations=1,
max_locations=5)
screen_shots = defs.BinaryObjectList(
objects=defs.BinaryObject(min_file_size=100), min_count=1)
tt = TestType(**get_artifact_fixture())
blob = defs.Blob()
blob.size = 1024
blob.locations.append("file://some.file.path")
tt.image_file = blob
self.assertEqual(1024, tt.image_file.size)
self.assertEqual(["file://some.file.path"], tt.image_file.locations)
def test_pre_publish_blob_validation(self):
class TestType(defs.ArtifactType):
required_blob = defs.BinaryObject(required=True)
optional_blob = defs.BinaryObject()
tt = TestType(**get_artifact_fixture())
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.__pre_publish__)
tt.required_blob = defs.Blob(size=0)
tt.__pre_publish__()
def test_pre_publish_dependency_validation(self):
class TestType(defs.ArtifactType):
required_dependency = defs.ArtifactReference(required=True)
optional_dependency = defs.ArtifactReference()
tt = TestType(**get_artifact_fixture())
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.__pre_publish__)
tt.required_dependency = defs.ArtifactType(**get_artifact_fixture())
tt.__pre_publish__()
def test_default_value_of_immutable_field_in_active_state(self):
class TestType(defs.ArtifactType):
foo = defs.String(default='Bar', mutable=False)
tt = TestType(**get_artifact_fixture(state='active'))
self.assertEqual('Bar', tt.foo)
class SerTestType(defs.ArtifactType):
some_string = defs.String()
some_text = defs.Text()
some_version = defs.SemVerString()
some_int = defs.Integer()
some_numeric = defs.Numeric()
some_bool = defs.Boolean()
some_array = defs.Array()
another_array = defs.Array(
item_type=[defs.Integer(), defs.Numeric(), defs.Boolean()])
some_dict = defs.Dict()
another_dict = defs.Dict(
properties={'foo': defs.Integer(), 'bar': defs.Boolean()})
some_ref = defs.ArtifactReference()
some_ref_list = defs.ArtifactReferenceList()
some_blob = defs.BinaryObject()
some_blob_list = defs.BinaryObjectList()
class TestSerialization(test_utils.BaseTestCase):
def test_serialization_to_db(self):
ref1 = defs.ArtifactType(**get_artifact_fixture(id="1"))
ref2 = defs.ArtifactType(**get_artifact_fixture(id="2"))
ref3 = defs.ArtifactType(**get_artifact_fixture(id="3"))
blob1 = defs.Blob(size=100, locations=['http://example.com/blob1'],
item_key='some_key', checksum='abc')
blob2 = defs.Blob(size=200, locations=['http://example.com/blob2'],
item_key='another_key', checksum='fff')
blob3 = defs.Blob(size=300, locations=['http://example.com/blob3'],
item_key='third_key', checksum='123')
fixture = get_artifact_fixture()
tt = SerTestType(**fixture)
tt.some_string = 'bar'
tt.some_text = 'bazz'
tt.some_version = '11.22.33-beta'
tt.some_int = 50
tt.some_numeric = 10.341
tt.some_bool = True
tt.some_array = ['q', 'w', 'e', 'r', 't', 'y']
tt.another_array = [1, 1.2, False]
tt.some_dict = {'foobar': "FOOBAR", 'baz': "QUX"}
tt.another_dict = {'foo': 1, 'bar': True}
tt.some_ref = ref1
tt.some_ref_list = [ref2, ref3]
tt.some_blob = blob1
tt.some_blob_list = [blob2, blob3]
results = serialization.serialize_for_db(tt)
expected = fixture
expected['type_name'] = 'SerTestType'
expected['type_version'] = '1.0'
expected['properties'] = {
'some_string': {
'type': 'string',
'value': 'bar'
},
'some_text': {
'type': 'text',
'value': 'bazz'
},
'some_version': {
'type': 'string',
'value': '11.22.33-beta'
},
'some_int': {
'type': 'int',
'value': 50
},
'some_numeric': {
'type': 'numeric',
'value': 10.341
},
'some_bool': {
'type': 'bool',
'value': True
},
'some_array': {
'type': 'array',
'value': [
{
'type': 'string',
'value': 'q'
},
{
'type': 'string',
'value': 'w'
},
{
'type': 'string',
'value': 'e'
},
{
'type': 'string',
'value': 'r'
},
{
'type': 'string',
'value': 't'
},
{
'type': 'string',
'value': 'y'
}
]
},
'another_array': {
'type': 'array',
'value': [
{
'type': 'int',
'value': 1
},
{
'type': 'numeric',
'value': 1.2
},
{
'type': 'bool',
'value': False
}
]
},
'some_dict.foobar': {
'type': 'string',
'value': 'FOOBAR'
},
'some_dict.baz': {
'type': 'string',
'value': 'QUX'
},
'another_dict.foo': {
'type': 'int',
'value': 1
},
'another_dict.bar': {
'type': 'bool',
'value': True
}
}
expected['dependencies'] = {
'some_ref': ['1'],
'some_ref_list': ['2', '3']
}
expected['blobs'] = {
'some_blob': [
{
'size': 100,
'checksum': 'abc',
'item_key': 'some_key',
'locations': ['http://example.com/blob1']
}],
'some_blob_list': [
{
'size': 200,
'checksum': 'fff',
'item_key': 'another_key',
'locations': ['http://example.com/blob2']
},
{
'size': 300,
'checksum': '123',
'item_key': 'third_key',
'locations': ['http://example.com/blob3']
}
]
}
self.assertEqual(expected, results)
def test_deserialize_from_db(self):
ts = datetime.datetime.now()
db_dict = {
"type_name": 'SerTestType',
"type_version": '1.0',
"id": "123",
"version": "11.2",
"description": None,
"name": "Foo",
"visibility": "private",
"state": "creating",
"owner": "my_tenant",
"created_at": ts,
"updated_at": ts,
"deleted_at": None,
"published_at": None,
"tags": ["test", "fixture"],
"properties": {
'some_string': {
'type': 'string',
'value': 'bar'
},
'some_text': {
'type': 'text',
'value': 'bazz'
},
'some_version': {
'type': 'string',
'value': '11.22.33-beta'
},
'some_int': {
'type': 'int',
'value': 50
},
'some_numeric': {
'type': 'numeric',
'value': 10.341
},
'some_bool': {
'type': 'bool',
'value': True
},
'some_array': {
'type': 'array',
'value': [
{
'type': 'string',
'value': 'q'
},
{
'type': 'string',
'value': 'w'
},
{
'type': 'string',
'value': 'e'
},
{
'type': 'string',
'value': 'r'
},
{
'type': 'string',
'value': 't'
},
{
'type': 'string',
'value': 'y'
}
]
},
'another_array': {
'type': 'array',
'value': [
{
'type': 'int',
'value': 1
},
{
'type': 'numeric',
'value': 1.2
},
{
'type': 'bool',
'value': False
}
]
},
'some_dict.foobar': {
'type': 'string',
'value': 'FOOBAR'
},
'some_dict.baz': {
'type': 'string',
'value': 'QUX'
},
'another_dict.foo': {
'type': 'int',
'value': 1
},
'another_dict.bar': {
'type': 'bool',
'value': True
}
},
'blobs': {
'some_blob': [
{
'size': 100,
'checksum': 'abc',
'item_key': 'some_key',
'locations': ['http://example.com/blob1']
}],
'some_blob_list': [
{
'size': 200,
'checksum': 'fff',
'item_key': 'another_key',
'locations': ['http://example.com/blob2']
},
{
'size': 300,
'checksum': '123',
'item_key': 'third_key',
'locations': ['http://example.com/blob3']
}
]
},
'dependencies': {
'some_ref': [
{
"type_name": 'ArtifactType',
"type_version": '1.0',
"id": "1",
"version": "11.2",
"description": None,
"name": "Foo",
"visibility": "private",
"state": "creating",
"owner": "my_tenant",
"created_at": ts,
"updated_at": ts,
"deleted_at": None,
"published_at": None,
"tags": ["test", "fixture"],
"properties": {},
"blobs": {},
"dependencies": {}
}
],
'some_ref_list': [
{
"type_name": 'ArtifactType',
"type_version": '1.0',
"id": "2",
"version": "11.2",
"description": None,
"name": "Foo",
"visibility": "private",
"state": "creating",
"owner": "my_tenant",
"created_at": ts,
"updated_at": ts,
"deleted_at": None,
"published_at": None,
"tags": ["test", "fixture"],
"properties": {},
"blobs": {},
"dependencies": {}
},
{
"type_name": 'ArtifactType',
"type_version": '1.0',
"id": "3",
"version": "11.2",
"description": None,
"name": "Foo",
"visibility": "private",
"state": "creating",
"owner": "my_tenant",
"created_at": ts,
"updated_at": ts,
"deleted_at": None,
"published_at": None,
"tags": ["test", "fixture"],
"properties": {},
"blobs": {},
"dependencies": {}
}
]
}
}
plugins_dict = {'SerTestType': [SerTestType],
'ArtifactType': [defs.ArtifactType]}
def _retrieve_plugin(name, version):
return next((p for p in plugins_dict.get(name, [])
if version and p.version == version),
plugins_dict.get(name, [None])[0])
plugins = mock.Mock()
plugins.get_class_by_typename = _retrieve_plugin
art = serialization.deserialize_from_db(db_dict, plugins)
self.assertEqual('123', art.id)
self.assertEqual('11.2', art.version)
self.assertIsNone(art.description)
self.assertEqual('Foo', art.name)
self.assertEqual('private', art.visibility)
def get_artifact_fixture(**kwargs):
ts = datetime.datetime.now()
fixture = {
"id": "123",
"version": "11.2",
"description": None,
"name": "Foo",
"visibility": "private",
"state": "creating",
"owner": "my_tenant",
"created_at": ts,
"updated_at": ts,
"deleted_at": None,
"published_at": None,
"tags": ["test", "fixture"]
}
fixture.update(kwargs)
return fixture
|
|
"""
Utilities module for generic PySAL functionality, mainly centered on
translating queries into numpy arrays or PySAL weights objects
"""
import numpy as np
import pysal as ps
def construct_neighbor_query(w_type, query_vals):
"""Return query (a string) used for finding neighbors
@param w_type text: type of neighbors to calculate ('knn' or 'queen')
@param query_vals dict: values used to construct the query
"""
if w_type.lower() == 'knn':
return knn(query_vals)
else:
return queen(query_vals)
# Build weight object
def get_weight(query_res, w_type='knn', num_ngbrs=5):
"""
Construct PySAL weight from return value of query
@param query_res dict-like: query results with attributes and neighbors
"""
# if w_type.lower() == 'knn':
# row_normed_weights = [1.0 / float(num_ngbrs)] * num_ngbrs
# weights = {x['id']: row_normed_weights for x in query_res}
# else:
# weights = {x['id']: [1.0 / len(x['neighbors'])] * len(x['neighbors'])
# if len(x['neighbors']) > 0
# else [] for x in query_res}
neighbors = {x['id']: x['neighbors'] for x in query_res}
print 'len of neighbors: %d' % len(neighbors)
built_weight = ps.W(neighbors)
built_weight.transform = 'r'
return built_weight
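# Minimal sketch (mock rows, not a real query result): get_weight only needs
# dict-like rows exposing 'id' and 'neighbors'; with transform 'r' the weights
# of each row are normalised to sum to 1, e.g.
# mock_rows = [{'id': 1, 'neighbors': [2, 3]},
#              {'id': 2, 'neighbors': [1, 3]},
#              {'id': 3, 'neighbors': [1, 2]}]
# w = get_weight(mock_rows)  # w.weights[1] == [0.5, 0.5]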
def query_attr_select(params):
"""
Create portion of SELECT statement for attributes involved in query.
Defaults to order in the params
@param params: dict of information used in query (column names,
table name, etc.)
Example:
OrderedDict([('numerator', 'price'),
('denominator', 'sq_meters'),
('subquery', 'SELECT * FROM interesting_data')])
Output:
"i.\"price\"::numeric As attr1, " \
"i.\"sq_meters\"::numeric As attr2, "
"""
attr_string = ""
template = "i.\"%(col)s\"::numeric As attr%(alias_num)s, "
if 'time_cols' in params:
# if markov analysis
attrs = params['time_cols']
for idx, val in enumerate(attrs):
attr_string += template % {"col": val, "alias_num": idx + 1}
else:
# if moran's analysis
attrs = [k for k in params
if k not in ('id_col', 'geom_col', 'subquery', 'num_ngbrs')]
for idx, val in enumerate(attrs):
attr_string += template % {"col": params[val],
"alias_num": idx + 1}
return attr_string
def query_attr_where(params):
"""
Construct where conditions when building neighbors query
Create portion of WHERE clauses for weeding out NULL-valued geometries
Input: dict of params:
{'subquery': ...,
'numerator': 'data1',
'denominator': 'data2',
'': ...}
Output:
'idx_replace."data1" IS NOT NULL AND idx_replace."data2" IS NOT NULL'
Input:
{'subquery': ...,
'time_cols': ['time1', 'time2', 'time3'],
'etc': ...}
Output: 'idx_replace."time1" IS NOT NULL AND idx_replace."time2" IS NOT
NULL AND idx_replace."time3" IS NOT NULL'
"""
attr_string = []
template = "idx_replace.\"%s\" IS NOT NULL"
if 'time_cols' in params:
# markov where clauses
attrs = params['time_cols']
# add values to template
for attr in attrs:
attr_string.append(template % attr)
else:
# moran where clauses
# get keys
attrs = [k for k in params
if k not in ('id_col', 'geom_col', 'subquery', 'num_ngbrs')]
# add values to template
for attr in attrs:
attr_string.append(template % params[attr])
if 'denominator' in attrs:
attr_string.append(
"idx_replace.\"%s\" <> 0" % params['denominator'])
out = " AND ".join(attr_string)
return out
def knn(params):
"""SQL query for k-nearest neighbors.
@param vars: dict of values to fill template
"""
attr_select = query_attr_select(params)
attr_where = query_attr_where(params)
replacements = {"attr_select": attr_select,
"attr_where_i": attr_where.replace("idx_replace", "i"),
"attr_where_j": attr_where.replace("idx_replace", "j")}
query = "SELECT " \
"i.\"{id_col}\" As id, " \
"%(attr_select)s" \
"(SELECT ARRAY(SELECT j.\"{id_col}\" " \
"FROM ({subquery}) As j " \
"WHERE " \
"i.\"{id_col}\" <> j.\"{id_col}\" AND " \
"%(attr_where_j)s " \
"ORDER BY " \
"j.\"{geom_col}\" <-> i.\"{geom_col}\" ASC " \
"LIMIT {num_ngbrs})" \
") As neighbors " \
"FROM ({subquery}) As i " \
"WHERE " \
"%(attr_where_i)s " \
"ORDER BY i.\"{id_col}\" ASC;" % replacements
return query.format(**params)
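# Hypothetical parameter dict (column and table names are illustrative only),
# mirroring the docstrings above; knn() substitutes {id_col}, {geom_col},
# {subquery} and {num_ngbrs} into the SQL template:
# from collections import OrderedDict
# params = OrderedDict([('id_col', 'cartodb_id'),
#                       ('numerator', 'price'),
#                       ('denominator', 'sq_meters'),
#                       ('subquery', 'SELECT * FROM interesting_data'),
#                       ('geom_col', 'the_geom'),
#                       ('num_ngbrs', 5)])
# sql = knn(params)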
# SQL query for finding queens neighbors (all contiguous polygons)
def queen(params):
"""SQL query for queen neighbors.
@param params dict: information to fill query
"""
attr_select = query_attr_select(params)
attr_where = query_attr_where(params)
replacements = {"attr_select": attr_select,
"attr_where_i": attr_where.replace("idx_replace", "i"),
"attr_where_j": attr_where.replace("idx_replace", "j")}
query = "SELECT " \
"i.\"{id_col}\" As id, " \
"%(attr_select)s" \
"(SELECT ARRAY(SELECT j.\"{id_col}\" " \
"FROM ({subquery}) As j " \
"WHERE i.\"{id_col}\" <> j.\"{id_col}\" AND " \
"ST_Touches(i.\"{geom_col}\", j.\"{geom_col}\") AND " \
"%(attr_where_j)s)" \
") As neighbors " \
"FROM ({subquery}) As i " \
"WHERE " \
"%(attr_where_i)s " \
"ORDER BY i.\"{id_col}\" ASC;" % replacements
return query.format(**params)
# to add more weight methods open a ticket or pull request
def get_attributes(query_res, attr_num=1):
"""
@param query_res: query results with attributes and neighbors
@param attr_num: attribute number (1, 2, ...)
"""
return np.array([x['attr' + str(attr_num)] for x in query_res],
dtype=np.float)
def empty_zipped_array(num_nones):
"""
prepare return values for cases of empty weights objects (no neighbors)
Input:
@param num_nones int: number of columns (e.g., 4)
Output:
[(None, None, None, None)]
"""
return [tuple([None] * num_nones)]
|
|
"""
Copyright (c) 2020, Battelle Memorial Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
This material was prepared as an account of work sponsored by an agency of the
United States Government. Neither the United States Government nor the United
States Department of Energy, nor Battelle, nor any of their employees, nor any
jurisdiction or organization that has cooperated in the development of these
materials, makes any warranty, express or implied, or assumes any legal
liability or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed, or
represents that its use would not infringe privately owned rights.
Reference herein to any specific commercial product, process, or service by
trade name, trademark, manufacturer, or otherwise does not necessarily
constitute or imply its endorsement, recommendation, or favoring by the
United States Government or any agency thereof, or Battelle Memorial Institute.
The views and opinions of authors expressed herein do not necessarily state or
reflect those of the United States Government or any agency thereof.
PACIFIC NORTHWEST NATIONAL LABORATORY
operated by
BATTELLE
for the
UNITED STATES DEPARTMENT OF ENERGY
under Contract DE-AC05-76RL01830
"""
import logging
from datetime import timedelta as td
from volttron.platform.agent.math_utils import mean
from volttron.platform.agent.utils import setup_logging
from .. import constants
setup_logging()
_log = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)-8s %(message)s",
datefmt="%m-%d-%y %H:%M:%S")
class EconCorrectlyOn(object):
"""Air-side HVAC economizer diagnostic for AHU/RTU systems.
EconCorrectlyOn uses metered data from a BAS or controller to diagnose
if an AHU/RTU is economizing when it should.
"""
def __init__(self):
# Initialize data arrays
self.oat_values = []
self.rat_values = []
self.mat_values = []
self.fan_spd_values = []
self.oad_values = []
self.timestamp = []
self.analysis_name = ""
# Initialize not_cooling and not_economizing flags
self.not_cooling = []
self.not_economizing = []
self.open_damper_threshold = None
self.oaf_economizing_threshold = None
self.minimum_damper_setpoint = None
self.data_window = None
self.no_required_data = None
self.cfm = None
self.eer = None
self.results_publish = None
self.max_dx_time = None
self.not_economizing_dict = None
self.not_cooling_dict = None
self.inconsistent_date = None
self.insufficient_data = None
# Application result messages
self.alg_result_messages = [
"Conditions are favorable for economizing but the the OAD is frequently below 100%.",
"No problems detected.",
"Conditions are favorable for economizing and OAD is 100% but the OAF is too low."
]
def set_class_values(self, analysis_name, results_publish, data_window, no_required_data, minimum_damper_setpoint, open_damper_threshold, cfm, eer):
"""Set the values needed for doing the diagnostics
analysis_name: string
data_window: datetime time delta
no_required_data: integer
minimum_damper_setpoint: float
open_damper_threshold: float
cfm: float
eer: float
No return
"""
self.results_publish = results_publish
self.open_damper_threshold = open_damper_threshold
self.oaf_economizing_threshold = {
"low": open_damper_threshold - 30.0,
"normal": open_damper_threshold - 20.0,
"high": open_damper_threshold - 10.0
}
self.open_damper_threshold = {
"low": open_damper_threshold - 10.0,
"normal": open_damper_threshold,
"high": open_damper_threshold + 10.0
}
self.minimum_damper_setpoint = minimum_damper_setpoint
self.data_window = data_window
self.analysis_name = analysis_name
self.no_required_data = no_required_data
self.cfm = cfm
self.eer = eer
self.max_dx_time = td(minutes=60) if td(minutes=60) > data_window else data_window * 3 / 2
self.not_economizing_dict = {key: 15.0 for key in self.oaf_economizing_threshold}
self.not_cooling_dict = {key: 14.0 for key in self.oaf_economizing_threshold}
self.insufficient_data = {key: 13.2 for key in self.oaf_economizing_threshold}
self.inconsistent_date = {key: 13.2 for key in self.oaf_economizing_threshold}
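# Worked example (illustrative value, not from any agent configuration): with
# open_damper_threshold = 80.0 the two sensitivity dictionaries become
# oaf_economizing_threshold = {"low": 50.0, "normal": 60.0, "high": 70.0} and
# open_damper_threshold = {"low": 70.0, "normal": 80.0, "high": 90.0}, so the
# "low" sensitivity tolerates the largest shortfall in outdoor-air fraction.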
def run_diagnostic(self, current_time):
if self.timestamp:
elapsed_time = self.timestamp[-1] - self.timestamp[0]
else:
elapsed_time = td(minutes=0)
if self.economizer_conditions(current_time):
return
if len(self.timestamp) >= self.no_required_data:
if elapsed_time > self.max_dx_time:
_log.info(constants.table_log_format(self.analysis_name, self.timestamp[-1], (
constants.ECON2 + constants.DX + ":" + str(self.inconsistent_date))))
self.results_publish.append(constants.table_publish_format(self.analysis_name, self.timestamp[-1],
(constants.ECON2 + constants.DX),
self.inconsistent_date))
self.clear_data()
return
self.not_economizing_when_needed()
else:
self.results_publish.append(constants.table_publish_format(self.analysis_name, current_time,
(constants.ECON2 + constants.DX),
self.insufficient_data))
self.clear_data()
def economizer_on_algorithm(self, cooling_call, oat, rat, mat, oad, econ_condition, cur_time, fan_sp):
"""Perform the Econ Correctly On class algorithm
cooling_call: int
oat: float
rat: float
mat: float
oad: float
econ_condition: float
cur_time: datetime time delta
fan_sp: float
No return
"""
economizing = self.economizing_check(cooling_call, econ_condition, cur_time)
if not economizing:
return
self.oat_values.append(oat)
self.mat_values.append(mat)
self.rat_values.append(rat)
self.oad_values.append(oad)
self.timestamp.append(cur_time)
fan_sp = fan_sp / 100.0 if fan_sp is not None else 1.0
self.fan_spd_values.append(fan_sp)
def economizing_check(self, cooling_call, econ_condition, cur_time):
"""Check conditions to see if should be economizing
cooling_call: int
econ_conditions: float
cur_time: datetime time delta
returns boolean
"""
if not cooling_call:
_log.info("{}: not cooling at {}".format(constants.ECON2, cur_time))
self.not_cooling.append(cur_time)
return False
if not econ_condition:
_log.info("{}: not economizing at {}.".format(constants.ECON2, cur_time))
self.not_economizing.append(cur_time)
return False
return True
def economizer_conditions(self, current_time):
# Report a fault code when the "not cooling" (or "not economizing") samples
# are at least half as numerous as the economizing samples in the window.
if self.not_cooling and len(self.not_cooling) >= len(self.timestamp) * 0.5:
_log.info(constants.table_log_format(self.analysis_name, current_time,
(constants.ECON2 + constants.DX + ":" + str(self.not_cooling_dict))))
self.results_publish.append(
constants.table_publish_format(self.analysis_name,
current_time,
(constants.ECON2 + constants.DX),
self.not_cooling_dict))
self.clear_data()
return True
if self.not_economizing and len(self.not_economizing) >= len(self.timestamp) * 0.5:
_log.info(constants.table_log_format(self.analysis_name, current_time,
(constants.ECON2 + constants.DX + ":" + str(self.not_economizing_dict))))
self.results_publish.append(
constants.table_publish_format(self.analysis_name,
current_time,
(constants.ECON2 + constants.DX),
self.not_economizing_dict))
self.clear_data()
return True
return False
def not_economizing_when_needed(self):
"""If the detected problems(s) are consistent then generate a fault message(s).
No return
"""
oaf = [(m - r) / (o - r) for o, r, m in zip(self.oat_values, self.rat_values, self.mat_values)]
avg_oaf = max(0.0, min(100.0, mean(oaf) * 100.0))
avg_damper_signal = mean(self.oad_values)
diagnostic_msg = {}
energy_impact = {}
thresholds = zip(self.open_damper_threshold.items(), self.oaf_economizing_threshold.items())
for (key, damper_thr), (key2, oaf_thr) in thresholds:
if avg_damper_signal < damper_thr:
msg = "{} - {}: {}".format(constants.ECON2, key, self.alg_result_messages[0])
result = 11.1
energy = self.energy_impact_calculation()
else:
if avg_oaf < oaf_thr:
msg = "{} - {}: {} - OAF={}".format(constants.ECON2, key, self.alg_result_messages[2], avg_oaf)
result = 12.1
energy = self.energy_impact_calculation()
else:
msg = "{} - {}: {}".format(constants.ECON2, key, self.alg_result_messages[1])
result = 10.0
energy = 0.0
_log.info(msg)
diagnostic_msg.update({key: result})
energy_impact.update({key: energy})
_log.info(constants.table_log_format(self.analysis_name, self.timestamp[-1], (constants.ECON2 + constants.DX + ":" + str(diagnostic_msg))))
_log.info(constants.table_log_format(self.analysis_name, self.timestamp[-1], (constants.ECON2 + constants.EI + ":" + str(energy_impact))))
self.results_publish.append(constants.table_publish_format(self.analysis_name, self.timestamp[-1], (constants.ECON2 + constants.DX), diagnostic_msg))
self.results_publish.append(constants.table_publish_format(self.analysis_name, self.timestamp[-1], (constants.ECON2 + constants.EI), energy_impact))
self.clear_data()
def energy_impact_calculation(self):
"""Calculate the impact the temperature values have
returns float
"""
ei = 0.0
energy_calc = [1.08 * spd * self.cfm * (mat - oat) / (1000.0 * self.eer)
for mat, oat, spd in zip(self.mat_values, self.oat_values, self.fan_spd_values)
if (mat - oat) > 0]
if energy_calc:
avg_step = (self.timestamp[-1] - self.timestamp[0]).total_seconds() / 60 if len(self.timestamp) > 1 else 1
dx_time = (len(energy_calc) - 1) * avg_step if len(energy_calc) > 1 else 1.0
ei = (sum(energy_calc) * 60.0) / (len(energy_calc) * dx_time)
ei = round(ei, 2)
return ei
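# Worked example (single sample, illustrative numbers): with fan speed 0.8,
# cfm = 5000, eer = 10, mat = 75 and oat = 65, the energy_calc term is
# 1.08 * 0.8 * 5000 * (75 - 65) / (1000 * 10) = 4.32, and with one sample
# dx_time defaults to 1, giving ei = 4.32 * 60.0 / (1 * 1) = 259.2.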
def clear_data(self):
"""
Reinitialize data arrays.
No return
"""
self.oad_values = []
self.oat_values = []
self.rat_values = []
self.mat_values = []
self.fan_spd_values = []
self.timestamp = []
self.not_economizing = []
self.not_cooling = []
|
|
#!/usr/bin/python
#
# Helper to create an SPDX license file (http://spdx.org)
#
# This must be executed when the dist/ directory is otherwise complete,
# except for the SPDX license, so that the file lists and such contained
# in the SPDX license will be correct.
#
# The utility outputs RDF/XML to specified file:
#
# $ python create_spdx_license.py /tmp/license.spdx
#
# Then, validate with SPDXViewer and SPDXTools:
#
# $ java -jar SPDXViewer.jar /tmp/license.spdx
# $ java -jar spdx-tools-1.2.5-jar-with-dependencies.jar RdfToHtml /tmp/license.spdx /tmp/license.html
#
# Finally, copy to dist:
#
# $ cp /tmp/license.spdx dist/license.spdx
#
# SPDX FAQ indicates there is no standard extension for an SPDX license file
# but '.spdx' is a common practice.
#
# The algorithm to compute a "verification code", implemented in this file,
# can be verified as follows:
#
# # build dist tar.xz, copy to /tmp/duktape-N.N.N.tar.xz
# $ cd /tmp
# $ tar xvfJ duktape-N.N.N.tar.xz
# $ rm duktape-N.N.N/license.spdx # remove file excluded from verification code
# $ java -jar spdx-tools-1.2.5-jar-with-dependencies.jar GenerateVerificationCode /tmp/duktape-N.N.N/
#
# Compare the resulting verification code manually with the one in license.spdx.
#
# Resources:
#
# - http://spdx.org/about-spdx/faqs
# - http://wiki.spdx.org/view/Technical_Team/Best_Practices
#
import os
import sys
import re
import datetime
import sha
import rdflib
from rdflib import URIRef, BNode, Literal, Namespace
RDF = Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
RDFS = Namespace('http://www.w3.org/2000/01/rdf-schema#')
XSD = Namespace('http://www.w3.org/2001/XMLSchema#')
SPDX = Namespace('http://spdx.org/rdf/terms#')
DOAP = Namespace('http://usefulinc.com/ns/doap#')
DUKTAPE = Namespace('http://duktape.org/rdf/terms#')
def checksumFile(g, filename):
f = open(filename, 'rb')
d = f.read()
f.close()
shasum = sha.sha(d).digest().encode('hex').lower()
csum_node = BNode()
g.add((csum_node, RDF.type, SPDX.Checksum))
g.add((csum_node, SPDX.algorithm, SPDX.checksumAlgorithm_sha1))
g.add((csum_node, SPDX.checksumValue, Literal(shasum)))
return csum_node
def computePackageVerification(g, dirname, excluded):
# SPDX 1.2 Section 4.7
# The SPDXTools command "GenerateVerificationCode" can be used to
# check the verification codes created. Note that you must manually
# remove "license.spdx" from the unpacked dist directory before
# computing the verification code.
verify_node = BNode()
hashes = []
for dirpath, dirnames, filenames in os.walk(dirname):
for fn in filenames:
full_fn = os.path.join(dirpath, fn)
f = open(full_fn, 'rb')
d = f.read()
f.close()
if full_fn in excluded:
#print('excluded in verification: ' + full_fn)
continue
#print('included in verification: ' + full_fn)
file_sha1 = sha.sha(d).digest().encode('hex').lower()
hashes.append(file_sha1)
#print(repr(hashes))
hashes.sort()
#print(repr(hashes))
verify_code = sha.sha(''.join(hashes)).digest().encode('hex').lower()
for fn in excluded:
g.add((verify_node, SPDX.packageVerificationCodeExcludedFile, Literal(fn)))
g.add((verify_node, SPDX.packageVerificationCodeValue, Literal(verify_code)))
return verify_node
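# Sketch of the verification-code algorithm (SPDX 1.2 section 4.7, hashes are
# illustrative): SHA-1 each included file, sort the hex digests, then SHA-1
# their concatenation:
# hashes = sorted(['2ef7bd...', 'da39a3...'])
# verify_code = sha.sha(''.join(hashes)).hexdigest()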
def fileType(filename):
ign, ext = os.path.splitext(filename)
if ext in [ '.c', '.h', '.js' ]:
return SPDX.fileType_source
else:
return SPDX.fileType_other
def getDuktapeVersion():
f = open('./src/duktape.h')
re_ver = re.compile(r'^#define\s+DUK_VERSION\s+(\d+)L$')
for line in f:
line = line.strip()
m = re_ver.match(line)
if m is None:
continue
ver = int(m.group(1))
return '%d.%d.%d' % ((ver / 10000) % 100,
(ver / 100) % 100,
ver % 100)
raise Exception('could not figure out Duktape version')
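# Worked example: DUK_VERSION encodes major*10000 + minor*100 + patch, so a
# line "#define DUK_VERSION 10500L" decodes to (10500 / 10000) % 100 = 1,
# (10500 / 100) % 100 = 5 and 10500 % 100 = 0, i.e. '1.5.0'.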
def main():
outfile = sys.argv[1]
if not (os.path.exists('CONTRIBUTING.md') and os.path.exists('tests/ecmascript')):
sys.stderr.write('Invalid CWD, must be in Duktape root with dist/ built')
sys.exit(1)
os.chdir('dist')
if not os.path.exists('Makefile.cmdline'):
sys.stderr.write('Invalid CWD, must be in Duktape root with dist/ built')
sys.exit(1)
duktape_version = getDuktapeVersion()
duktape_pkgname = 'duktape-' + duktape_version + '.tar.xz'
now = datetime.datetime.utcnow()
now = datetime.datetime(now.year, now.month, now.day, now.hour, now.minute, now.second)
creation_date = Literal(now.isoformat() + 'Z', datatype=XSD.dateTime)
duktape_org = Literal('Organization: duktape.org')
mit_license = URIRef('http://spdx.org/licenses/MIT')
duktape_copyright = Literal('Copyright 2013-2015 Duktape authors (see AUTHORS.rst in the Duktape distributable)')
g = rdflib.Graph()
crea_node = BNode()
g.add((crea_node, RDF.type, SPDX.CreationInfo))
g.add((crea_node, RDFS.comment, Literal('')))
g.add((crea_node, SPDX.creator, duktape_org))
g.add((crea_node, SPDX.created, creation_date))
g.add((crea_node, SPDX.licenseListVersion, Literal('1.20'))) # http://spdx.org/licenses/
# 'name' should not include a version number (see best practices)
pkg_node = BNode()
g.add((pkg_node, RDF.type, SPDX.Package))
g.add((pkg_node, SPDX.name, Literal('Duktape')))
g.add((pkg_node, SPDX.versionInfo, Literal(duktape_version)))
g.add((pkg_node, SPDX.packageFileName, Literal(duktape_pkgname)))
g.add((pkg_node, SPDX.supplier, duktape_org))
g.add((pkg_node, SPDX.originator, duktape_org))
g.add((pkg_node, SPDX.downloadLocation, Literal('http://duktape.org/' + duktape_pkgname, datatype=XSD.anyURI)))
g.add((pkg_node, SPDX.homePage, Literal('http://duktape.org/', datatype=XSD.anyURI)))
verify_node = computePackageVerification(g, '.', [ './license.spdx' ])
g.add((pkg_node, SPDX.packageVerificationCode, verify_node))
# SPDX.checksum: omitted because license is inside the package
g.add((pkg_node, SPDX.sourceInfo, Literal('Official duktape.org release built from GitHub repo https://github.com/svaarala/duktape.')))
# NOTE: MIT license alone is sufficient for now, because Duktape, Lua,
# Murmurhash2, and CommonJS (though probably not even relevant for
# licensing) are all MIT.
g.add((pkg_node, SPDX.licenseConcluded, mit_license))
g.add((pkg_node, SPDX.licenseInfoFromFiles, mit_license))
g.add((pkg_node, SPDX.licenseDeclared, mit_license))
g.add((pkg_node, SPDX.licenseComments, Literal('Duktape is copyrighted by its authors and licensed under the MIT license. MurmurHash2 is used internally, it is also under the MIT license. Duktape module loader is based on the CommonJS module loading specification (without sharing any code), CommonJS is under the MIT license.')))
g.add((pkg_node, SPDX.copyrightText, duktape_copyright))
g.add((pkg_node, SPDX.summary, Literal('Duktape Ecmascript interpreter')))
g.add((pkg_node, SPDX.description, Literal('Duktape is an embeddable Javascript engine, with a focus on portability and compact footprint')))
# hasFile properties added separately below
#reviewed_node = BNode()
#g.add((reviewed_node, RDF.type, SPDX.Review))
#g.add((reviewed_node, SPDX.reviewer, XXX))
#g.add((reviewed_node, SPDX.reviewDate, XXX))
#g.add((reviewed_node, RDFS.comment, ''))
spdx_doc = BNode()
g.add((spdx_doc, RDF.type, SPDX.SpdxDocument))
g.add((spdx_doc, SPDX.specVersion, Literal('SPDX-1.2')))
g.add((spdx_doc, SPDX.dataLicense, URIRef('http://spdx.org/licenses/CC0-1.0')))
g.add((spdx_doc, RDFS.comment, Literal('SPDX license for Duktape ' + duktape_version)))
g.add((spdx_doc, SPDX.creationInfo, crea_node))
g.add((spdx_doc, SPDX.describesPackage, pkg_node))
# SPDX.hasExtractedLicensingInfo
# SPDX.reviewed
# SPDX.referencesFile: added below
for dirpath, dirnames, filenames in os.walk('.'):
for fn in filenames:
full_fn = os.path.join(dirpath, fn)
#print('# file: ' + full_fn)
file_node = BNode()
g.add((file_node, RDF.type, SPDX.File))
g.add((file_node, SPDX.fileName, Literal(full_fn)))
g.add((file_node, SPDX.fileType, fileType(full_fn)))
g.add((file_node, SPDX.checksum, checksumFile(g, full_fn)))
# Here we assume that LICENSE.txt provides the actual "in file"
# licensing information, and everything else is implicitly under
# MIT license.
g.add((file_node, SPDX.licenseConcluded, mit_license))
if full_fn == './LICENSE.txt':
g.add((file_node, SPDX.licenseInfoInFile, mit_license))
else:
g.add((file_node, SPDX.licenseInfoInFile, URIRef(SPDX.none)))
# SPDX.licenseComments
g.add((file_node, SPDX.copyrightText, duktape_copyright))
# SPDX.noticeText
# SPDX.artifactOf
# SPDX.fileDependency
# SPDX.fileContributor
# XXX: should referencesFile include all files?
g.add((spdx_doc, SPDX.referencesFile, file_node))
g.add((pkg_node, SPDX.hasFile, file_node))
# Serialize into RDF/XML directly. We could also serialize into
# N-Triples and use external tools (like 'rapper') to get cleaner,
# abbreviated output.
#print('# Duktape SPDX license file (autogenerated)')
#print(g.serialize(format='turtle'))
#print(g.serialize(format='nt'))
f = open(outfile, 'wb')
#f.write(g.serialize(format='rdf/xml'))
f.write(g.serialize(format='xml'))
f.close()
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
# FIXME: This test module randomly passes/fails even if all tests are skipped.
# Something fishy is going on with the Test fixtures. Behavior seen on CI on
# both Linux and Windows
# TODO: Remove delay of class creations. Adding SetUp/TearDown may help
"""Test sub-classing managed types"""
import System
import pytest
from Python.Test import (IInterfaceTest, SubClassTest, EventArgsTest,
FunctionsTest)
from System.Collections.Generic import List
from ._compat import range
def interface_test_class_fixture(subnamespace):
"""Delay creation of class until test starts."""
class InterfaceTestClass(IInterfaceTest):
"""class that implements the test interface"""
__namespace__ = "Python.Test." + subnamespace
def foo(self):
return "InterfaceTestClass"
def bar(self, x, i):
return "/".join([x] * i)
return InterfaceTestClass
def derived_class_fixture(subnamespace):
"""Delay creation of class until test starts."""
class DerivedClass(SubClassTest):
"""class that derives from a class deriving from IInterfaceTest"""
__namespace__ = "Python.Test." + subnamespace
def foo(self):
return "DerivedClass"
def base_foo(self):
return SubClassTest.foo(self)
def super_foo(self):
return super(DerivedClass, self).foo()
def bar(self, x, i):
return "_".join([x] * i)
def return_list(self):
l = List[str]()
l.Add("A")
l.Add("B")
l.Add("C")
return l
return DerivedClass
def derived_event_test_class_fixture(subnamespace):
"""Delay creation of class until test starts."""
class DerivedEventTest(IInterfaceTest):
"""class that implements IInterfaceTest.TestEvent"""
__namespace__ = "Python.Test." + subnamespace
def __init__(self):
self.event_handlers = []
# event handling
def add_TestEvent(self, handler):
self.event_handlers.append(handler)
def remove_TestEvent(self, handler):
self.event_handlers.remove(handler)
def OnTestEvent(self, value):
args = EventArgsTest(value)
for handler in self.event_handlers:
handler(self, args)
return DerivedEventTest
def test_base_class():
"""Test base class managed type"""
ob = SubClassTest()
assert ob.foo() == "foo"
assert FunctionsTest.test_foo(ob) == "foo"
assert ob.bar("bar", 2) == "bar"
assert FunctionsTest.test_bar(ob, "bar", 2) == "bar"
assert ob.not_overriden() == "not_overriden"
assert list(ob.return_list()) == ["a", "b", "c"]
assert list(SubClassTest.test_list(ob)) == ["a", "b", "c"]
def test_interface():
"""Test python classes can derive from C# interfaces"""
InterfaceTestClass = interface_test_class_fixture(test_interface.__name__)
ob = InterfaceTestClass()
assert ob.foo() == "InterfaceTestClass"
assert FunctionsTest.test_foo(ob) == "InterfaceTestClass"
assert ob.bar("bar", 2) == "bar/bar"
assert FunctionsTest.test_bar(ob, "bar", 2) == "bar/bar"
x = FunctionsTest.pass_through(ob)
assert id(x) == id(ob)
def test_derived_class():
"""Test python class derived from managed type"""
DerivedClass = derived_class_fixture(test_derived_class.__name__)
ob = DerivedClass()
assert ob.foo() == "DerivedClass"
assert ob.base_foo() == "foo"
assert ob.super_foo() == "foo"
assert FunctionsTest.test_foo(ob) == "DerivedClass"
assert ob.bar("bar", 2) == "bar_bar"
assert FunctionsTest.test_bar(ob, "bar", 2) == "bar_bar"
assert ob.not_overriden() == "not_overriden"
assert list(ob.return_list()) == ["A", "B", "C"]
assert list(SubClassTest.test_list(ob)) == ["A", "B", "C"]
x = FunctionsTest.pass_through(ob)
assert id(x) == id(ob)
def test_create_instance():
"""Test derived instances can be created from managed code"""
DerivedClass = derived_class_fixture(test_create_instance.__name__)
ob = FunctionsTest.create_instance(DerivedClass)
assert ob.foo() == "DerivedClass"
assert FunctionsTest.test_foo(ob) == "DerivedClass"
assert ob.bar("bar", 2) == "bar_bar"
assert FunctionsTest.test_bar(ob, "bar", 2) == "bar_bar"
assert ob.not_overriden() == "not_overriden"
x = FunctionsTest.pass_through(ob)
assert id(x) == id(ob)
InterfaceTestClass = interface_test_class_fixture(test_create_instance.__name__)
ob2 = FunctionsTest.create_instance(InterfaceTestClass)
assert ob2.foo() == "InterfaceTestClass"
assert FunctionsTest.test_foo(ob2) == "InterfaceTestClass"
assert ob2.bar("bar", 2) == "bar/bar"
assert FunctionsTest.test_bar(ob2, "bar", 2) == "bar/bar"
y = FunctionsTest.pass_through(ob2)
assert id(y) == id(ob2)
def test_events():
class EventHandler(object):
def handler(self, x, args):
self.value = args.value
event_handler = EventHandler()
x = SubClassTest()
x.TestEvent += event_handler.handler
assert FunctionsTest.test_event(x, 1) == 1
assert event_handler.value == 1
InterfaceTestClass = interface_test_class_fixture(test_events.__name__)
i = InterfaceTestClass()
with pytest.raises(System.NotImplementedException):
FunctionsTest.test_event(i, 2)
DerivedEventTest = derived_event_test_class_fixture(test_events.__name__)
d = DerivedEventTest()
d.add_TestEvent(event_handler.handler)
assert FunctionsTest.test_event(d, 3) == 3
assert event_handler.value == 3
assert len(d.event_handlers) == 1
def test_isinstance_check():
a = [str(x) for x in range(0, 1000)]
b = [System.String(x) for x in a]
for x in a:
assert not isinstance(x, System.Object)
assert not isinstance(x, System.String)
for x in b:
assert isinstance(x, System.Object)
assert isinstance(x, System.String)
def test_namespace_and_init():
calls = []
class TestX(System.Object):
__namespace__ = "test_clr_subclass_with_init_args"
def __init__(self, *args, **kwargs):
calls.append((args, kwargs))
t = TestX(1,2,3,foo="bar")
assert len(calls) == 1
assert calls[0][0] == (1,2,3)
assert calls[0][1] == {"foo":"bar"}
def test_namespace_and_argless_init():
calls = []
class TestX(System.Object):
__namespace__ = "test_clr_subclass_without_init_args"
def __init__(self):
calls.append(True)
t = TestX()
assert len(calls) == 1
assert calls[0] == True
def test_namespace_and_no_init():
class TestX(System.Object):
__namespace__ = "test_clr_subclass_without_init"
q = 1
t = TestX()
assert t.q == 1
def test_construction_from_clr():
import clr
calls = []
class TestX(System.Object):
__namespace__ = "test_clr_subclass_init_from_clr"
@clr.clrmethod(None, [int, str])
def __init__(self, i, s):
calls.append((i, s))
# Construct a TestX from Python
t = TestX(1, "foo")
assert len(calls) == 1
assert calls[0][0] == 1
assert calls[0][1] == "foo"
# Reset calls and construct a TestX from CLR
calls = []
tp = t.GetType()
t2 = tp.GetConstructors()[0].Invoke(None)
assert len(calls) == 0
# The object has only been constructed, now it needs to be initialized as well
tp.GetMethod("__init__").Invoke(t2, [1, "foo"])
assert len(calls) == 1
assert calls[0][0] == 1
assert calls[0][1] == "foo"
|
|
import argparse
import json
import logging
import os
import shutil
import subprocess
import tempfile
import requests
import yaml
from collections import OrderedDict
from io import BytesIO
from pathlib import Path
from . import dockerfile
from .constants import LAYERS_HOME, VERSION
from .disco import configure_logging
from .utils import nested_get
from docker import Client as DockerClient
log = logging.getLogger("cake")
def layer_get_metadata(
name,
api="http://layer-cake.io",
apiver="api/v2",
apiendpoint="layers"):
uri = "/".join([api, apiver, apiendpoint, name])
try:
log.debug("Fetching Layer information %s", uri)
result = requests.get(uri)
except:
result = None
if result and result.ok:
result = result.json()
if result.get("repo"):
return result
raise ValueError("Unable to locate layer {} using {}".format(
name, uri))
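# Example (hypothetical layer name): layer_get_metadata("nginx") issues
# GET http://layer-cake.io/api/v2/layers/nginx and expects a JSON document
# whose "repo" key points at a git repository to clone.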
def git(*cmd, **kwargs):
return subprocess.check_call(["git", *cmd], **kwargs)
class Layer:
def __init__(self, metadata):
self.metadata = metadata
self.dir = None
self._config = {}
@classmethod
def from_path(cls, path):
ins = cls({})
ins.dir = path
return ins
@property
def config(self):
if self._config:
return self._config
if not self.dir:
raise OSError("Layer %s has not be fetched")
cfg = Path(self.dir) / "layer.yaml"
if cfg.exists():
data = yaml.load(cfg.open())
if 'layer' not in data:
raise ValueError("%s doesn't appear to be a layer config" % cfg)
self._config = data['layer']
else:
self._config = {}
return self._config
@property
def name(self):
return self.config['name']
def fetch(self, todir, overwrite_target=False):
repo = self.metadata['repo']
name = self.metadata['id']
subpath = self.metadata.get('repopath', '/')
if subpath.startswith("/"):
subpath = subpath[1:]
# pull the repo to a tempdir
# then select any subpath, moving that to the target dir
self.dir = Path(todir) / name
if self.dir.exists():
if overwrite_target:
shutil.rmtree(str(self.dir))
else:
raise OSError(
"Fetch of {} would overwrite {}. Use -f to force".format(
name,
self.dir))
with tempfile.TemporaryDirectory() as td:
d = Path(td)
reponame = repo.split("/")[-1]
if reponame.endswith(".git"):
reponame = reponame[:-4]
target = d / reponame
git("clone", repo, str(target))
if subpath:
target = d / subpath
if not target.exists() or not target.is_dir():
raise OSError(
"Repo subpath {} invalid, unable to continue".format(
subpath))
# XXX: this could fail across certain types of mounts
target.rename(self.dir)
def install(self, layerdir):
installer = self.dir / "install"
shutil.copytree(str(self.dir), str(layerdir / self.name))
if installer.exists():
output = subprocess.check_output(str(installer.resolve()))
log.info("Executed installer for %s", self.name)
log.debug(output.decode("utf-8"))
class Cake:
def __init__(self, options):
self.layer_names = options.layer
self.directory = Path(options.directory)
self.force_overwrite = options.force
self.api_endpoint = options.layer_endpoint.rstrip("/")
self.scan_cakepath()
def fetch_layer(self, name, resolving):
if resolving.get(name):
return resolving[name]
layer = None
if name in self.cake_map:
# Construct and register a layer from the
# directory
layer = Layer.from_path(self.cake_map[name])
elif layer is None:
metadata = layer_get_metadata(name, api=self.api_endpoint)
layer = Layer(metadata)
layer.fetch(self.directory, self.force_overwrite)
# Now create a resolving entry for any layers this includes
for dep in layer.config.get('layers', []):
if dep not in resolving:
resolving[dep] = None
# Each request implies the layer is the dep of a predecessor,
# so move it to the front of the list with the intention
# of installing it before the thing that depends on it
resolving.move_to_end(dep, False)
resolving[name] = layer
return layer
def fetch_all(self):
# This will fill out the resolving map when layers have deps they add
# them to the map and this loop will resolve them keeping the deps in
# proper order.
resolving = OrderedDict([[n, None] for n in self.layer_names])
if not self.directory.exists():
self.directory.mkdir(parents=True)
while not all(resolving.values()):
for name, layer in resolving.items():
if layer is not None:
continue
self.fetch_layer(name, resolving)
self.layers = resolving
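# Ordering illustration (hypothetical layer names): if "app" lists "base" in
# its layer.yaml, fetch_layer("app", resolving) registers "base" and moves it
# to the front of the OrderedDict, so dependencies are installed before the
# layers that require them:
# OrderedDict([('app', None)]) -> OrderedDict([('base', <Layer>), ('app', <Layer>)])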
def scan_cakepath(self):
cake_map = {} # layername -> Path
CAKE_PATH = os.environ.get("CAKE_PATH", "")
CAKE_PATH = [Path(p) for p in CAKE_PATH.split(":") if p]
if CAKE_PATH:
for cake_segment in [p for p in CAKE_PATH if p.exists()]:
# Build a last write wins map of layer to directory information
# we can search for the name of the layer in this path ignoring
# the repo (and the repo subpath, as finding the layers.yaml in
# a nested structure without metadata is too intensive)
p = Path(cake_segment)
for layerdir in p.iterdir():
cfg = layerdir / "layer.yaml"
if layerdir.is_dir() and cfg.exists():
# This appears to be a layer
cfg = yaml.load(cfg.open())
layername = nested_get(cfg, "layer.name")
cake_map[layername] = layerdir
self.cake_map = cake_map
log.debug("Found local Layers %s", sorted(self.cake_map.items()))
def install(self, target_dir):
# There are some implicit rules used during the install
# layer install will copy *.{schema,rules} to layerdir
layerdir = Path(target_dir)
layerdir.mkdir(parents=True, exist_ok=True)
for layer in self.layers.values():
layer.install(layerdir)
def layer_main(options):
"Pull a layer from the api endpoint or from CAKE_PATH"
endpoint = os.environ.get("LAYERCAKE_API")
if endpoint:
options.layer_endpoint = endpoint
cake = Cake(options)
cake.fetch_all()
if options.no_install:
return
cake.install(options.directory)
def bake_main(options):
"""Munge a dockerfile from a cfg
cake:
layers: []
"""
endpoint = os.environ.get("LAYERCAKE_API")
if endpoint:
options.layer_endpoint = endpoint
config = yaml.load(open(options.config))['cake']
df = dockerfile.Dockerfile(options.dockerfile)
if options.layer_endpoint:
df.add("ENV", "LAYERCAKE_API={}".format(options.layer_endpoint))
# In this mode we are adding run cmds for each
# layer in the cfg file (those may pull other layers)
# then we output a new docker file and docker build the
# new container.
last_run = df.last("RUN")
if not options.use_devel:
df.add("RUN", ['pip3', 'install', '--upgrade', 'layer_cake==%s' % VERSION], at=last_run)
else:
# For devel we ask the container to pull git master
df.add("RUN", ['pip3', 'install', '--upgrade',
"https://api.github.com/repos/bcsaller/layercake/tarball/master#layer_cake"],
at=last_run)
for layer_name in config['layers']:
last_run = df.last("RUN")
df.add("RUN", ["cake", "layer", layer_name,
"-d", LAYERS_HOME],
at=last_run)
# we might have an entrypoint
# or a command (or both)
if df.entrypoint:
df.entrypoint = ["disco"] + df.entrypoint['args']
log.debug("Using Dockerfile\n%s", str(df))
if not options.no_build:
client = DockerClient()
f = BytesIO(str(df).encode("utf-8"))
response = client.build(fileobj=f, tag="layercake/disco", decode=True)
for line in response:
if 'errorDetail' in line:
log.critical(line['errorDetail']['message'].strip())
elif 'stream' in line:
log.info(line['stream'].strip())
else:
return df
def search_main(options):
endpoint = os.environ.get("LAYERCAKE_API")
if endpoint:
options.layer_endpoint = endpoint
url = "{}/api/v2/layers/".format(options.layer_endpoint)
query = {"q": options.term}
result = requests.get(url, query)
if not result.ok:
print("Unable to connect to layer endpoint")
return
data = result.json()
if options.format == "json":
print(json.dumps(data, indent=2))
elif options.format == "yaml":
print(yaml.dump(data))
else:
print("{:<10} {:<10} {}".format("Id", "Name", "Descrption"))
for item in data:
print("{id:<10} {name:<10} {summary}".format(**item))
def setup(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--log-level", default=logging.INFO)
parser.set_defaults(func=lambda options: parser.print_help())
parsers = parser.add_subparsers()
layer = parsers.add_parser("layer", help=layer_main.__doc__.split("\n", 1)[0])
layer.add_argument("--layer-endpoint",
help="API endpoint for metadata",
default="http://layer-cake.io")
layer.add_argument("-d", "--directory", default=Path.cwd())
layer.add_argument("-f", "--force", action="store_true",
help=("Force overwrite of existing layers "
"in directory (-d)"))
layer.add_argument("-n", "--no-install", action="store_true",
help=("when set exit after pulling layers, "
"and before the install phase"))
layer.add_argument(
"layer",
nargs="+",
help=("The name of the layer to include, if more "
"than one is provided they will be included in order"))
layer.set_defaults(func=layer_main)
baker = parsers.add_parser("bake", help=bake_main.__doc__.split("\n", 1)[0])
baker.add_argument("-d", "--dockerfile",
help="Dockerfile to process",
)
baker.add_argument("--layer-endpoint",
help="API endpoint for metadata",
default="http://layer-cake.io")
baker.add_argument("-n", "--no-build", action="store_true",
help="Don't build Dockerfile")
baker.add_argument("--use-devel", action="store_true")
baker.add_argument("config",
nargs="?",
default="cake.conf")
baker.set_defaults(func=bake_main)
search = parsers.add_parser("search")
search.add_argument("--layer-endpoint",
help="API endpoint for metadata",
default="http://layer-cake.io")
search.add_argument("-f", "--format", default="text", help="Options text|json|yaml")
search.add_argument("term", nargs="+")
search.set_defaults(func=search_main)
options = parser.parse_args(args)
return options
def main():
options = setup()
configure_logging(options.log_level)
options.func(options)
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 18 16:34:59 2015
@author: casimp
"""
import numpy as np
from numpy.polynomial.chebyshev import chebval
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from pyxe.fitting_tools import pawley_hkl, extract_parameters, array_fit_pawley
from scipy.optimize import curve_fit
def line_extract(X, Y, pnt, theta, res=0.05):
""" Extracts line from 2d position array (according to point/angle).
Args:
X (ndarray): 1d/2d-array of positions
Y (ndarray): 1d/2d-array of positions
pnt (tuple): Define data point (index) else point (0, ) x ndim.
theta (float): Angle (rad) though 2D array
Returns:
tuple: x, y, d - where (x, y) are vector co-ords and d is scalar pos
"""
x_valid = np.logical_and(pnt[0] >= np.min(X), pnt[0] <= np.max(X))
y_valid = np.logical_and(pnt[1] >= np.min(Y), pnt[1] <= np.max(Y))
error = "Specified point (pnt) doesn't lie within data limits."
assert x_valid and y_valid, error
if theta % (np.pi / 2) == 0:
if theta % np.pi != 0:
npnts = 1 + (np.max(Y) - np.min(Y)) // res
y = np.linspace(np.min(Y), np.max(Y), npnts)
x = y * 0 + pnt[0]
d = np.max(y) - np.min(y)
else:
npnts = 1 + (np.max(X) - np.min(X)) // res
x = np.linspace(np.min(X), np.max(X), npnts)
y = x * 0 + pnt[1]
d = np.max(x) - np.min(x)
else:
m = np.tan(theta)
c = pnt[1] - m * pnt[0]
y_lim = [m * np.min(X) + c, m * np.max(X) + c]
y_min = np.min(Y) if min(y_lim) < np.min(Y) else min(y_lim)
y_max = np.max(Y) if max(y_lim) > np.max(Y) else max(y_lim)
x_1, x_2 = (y_min - c) / m, (y_max - c) / m
d = ((x_2 - x_1)**2 + (y_max - y_min)**2)**0.5
npnts = 1 + d // res
y = np.linspace(y_min, y_max, npnts)
x = (y - c) / m
return x, y, np.linspace(0, d, npnts)
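# Minimal sketch (synthetic grid, illustrative values): extract the horizontal
# line through the origin of a 2 x 2 unit grid sampled every 0.1 units.
# X_demo, Y_demo = np.meshgrid(np.linspace(-1, 1, 21), np.linspace(-1, 1, 21))
# x_line, y_line, pos = line_extract(X_demo, Y_demo, (0, 0), theta=0, res=0.1)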
def az90(phi, az_idx):
""" Searches for and returns azithmuthal idx perp. to specified idx.
Args:
phi (ndarray): 1d array of azimuthal angles
idx (int): Azimuthal slice index of chosen slice
Returns:
int: Azimuthal slice index
"""
for i in [-np.pi/2, np.pi/2]:
if phi[az_idx] < -np.pi:
find_ind = np.isclose(phi, np.pi - phi[az_idx] + i)
else:
find_ind = np.isclose(phi, phi[az_idx] + i)
if np.sum(find_ind) == 1:
return int(np.argmax(find_ind))
raise ValueError('No cake segment found perpendicular to given index.',
'Number of cake segments must be divisible by 4.')
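# Example (36 azimuthal segments, illustrative): for phi running from -180 to
# 170 degrees in 10 degree steps, az90(phi, az_idx=0) returns 9, the index of
# the slice at -90 degrees, i.e. the segment perpendicular to phi[0] = -180.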
def meshgrid_res(d1, d2, spatial_resolution):
""" Takes flat data point arrays, re-meshes at a defined spatial resolution.
Args:
d1 (ndarray): Positions (x)
d2 (ndarray): Positions (y)
spatial_resolution (float): Point spacing
Returns:
tuple: Re-meshed 2d arrays (d1, d2)
"""
d1_points = np.ceil((np.max(d1) - np.min(d1)) / spatial_resolution) + 1
d2_points = np.ceil((np.max(d2) - np.min(d2)) / spatial_resolution) + 1
d1_ = np.linspace(np.min(d1), np.max(d1), d1_points)
d2_ = np.linspace(np.min(d2), np.max(d2), d2_points)
return np.meshgrid(d1_, d2_)
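# Worked example: d1 spanning 0..10 and d2 spanning 0..4 with
# spatial_resolution=2 give ceil(10/2) + 1 = 6 and ceil(4/2) + 1 = 3 points,
# so the returned meshgrid arrays both have shape (3, 6).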
def plot_complex(x_raw, y_raw, x, y, z, levels=11, limits=[None, None],
continuous=True, figsize=(10, 10),
ax=False, cbar=True, **kwargs):
""" Plots 2D heat map of stress/strain fields.
Args:
x_raw (ndarray): Data acquisision points
y_raw (ndarray): Data acquisision points
x (ndarray): 2D x-position array (interpolated)
y (ndarray): 2D y-position array (interpolated)
z (ndarray): 2D stress/strain array
levels (int, ndarray): Number of contours to display (or defined levels)
figsize (tuple): Figure size
ax: Supply axis to plot on or (False) create new plot
cbar (bool): Display colour bar
"""
if not ax:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
if limits != [None, None]:
z[z < limits[0]] = limits[0]
z[z > limits[1]] = limits[1]
cf_back = ax.contourf(x, y, z, levels, **kwargs)
# Zero markings
ax.contour(x, y, z, levels=[0], colors=('k',),
linestyles=('--',), linewidths=(3,))
# Continuous background of discrete colours
if continuous:
if not isinstance(levels, int):
lvls_ = np.linspace(np.min(levels), np.max(levels), 192)
ax.contourf(x, y, z, lvls_, **kwargs)
else:
ax.contourf(x, y, z, 192, **kwargs)
# Contours
c = ax.contour(x, y, z, levels, colors='0', alpha=0.625)
# Acquisition points
ax.plot(x_raw, y_raw, '+', color='0.1', alpha=0.75,
markersize=5, linestyle='None')
# Formatting
ax.set_aspect('equal')
ax.autoscale(tight=True)
divider = make_axes_locatable(ax)
if cbar:
cax = divider.append_axes("right", "3%", pad="3%")
cbar = plt.colorbar(cf_back, cax=cax)
cbar.add_lines(c)
return ax
def pawley_plot(q, I, detector, az_idx, ax, q_lim=None, func='gaussian'):
""" Plots q against measured intensity overlaid with Pawley fit.
Includes highlighting of anticipated Bragg peak locations and
difference between measured intensity and Pawley fit.
Args:
q (ndarray): Reciprocal lattice
I (ndarray): Intensity
detector: pyxpb detector instance
az_idx (int): Azimuthal slice index
ax: Axis to apply plot to
"""
background = chebval(q, detector._back[az_idx])
if q_lim is None:
q_lim = [np.min(q), np.max(q)]
p0 = extract_parameters(detector, q_lim, np.nanmax(I))
pawley = pawley_hkl(detector, background, func=func)
coeff, var_mat = curve_fit(pawley, q, I, p0=p0)
I_pawley = pawley(q, *coeff)
# Plot raw data and Pawley fit to data
ax.plot(q, I, 'o', markeredgecolor='0.3', markersize=4,
markerfacecolor='none', label=r'$\mathregular{I_{obs}}$')
ax.plot(q, I_pawley, 'r-', linewidth=0.75,
label=r'$\mathregular{I_{calc}}$')
# Plot Bragg positions - locate relative to max intensity
ymin = -ax.get_ylim()[1] / 10
materials = detector.materials
for idx, mat in enumerate(materials):
offset = (1 + idx) * ymin / 2
for q0 in detector.q0[mat]:
bragg_line = [offset + ymin / 8, offset - ymin / 8]
ax.plot([q0, q0], bragg_line, 'g-', linewidth=2)
# Use error bars to fudge vertical lines in legend
ax.errorbar(0, 0, yerr=1, fmt='none', capsize=0, ecolor='g',
elinewidth=1.5, label=r'Bragg ({})'.format(mat))
# Plot difference between raw and Pawley fit (shifted below Bragg)
I_diff = I - I_pawley
max_diff = np.max(I_diff)
shifted_error = I - I_pawley + (idx + 2) * ymin / 2 - max_diff
ax.plot(q, shifted_error, 'b-', linewidth=0.75,
label=r'$\mathregular{I_{diff}}$')
# Remove ticks below 0 intensity
ylocs = ax.yaxis.get_majorticklocs()
yticks = ax.yaxis.get_major_ticks()
for idx, yloc in enumerate(ylocs):
if yloc < 0:
yticks[idx].set_visible(False)
legend = ax.legend(numpoints=1)
frame = legend.get_frame()
frame.set_facecolor('w')
|
|
#!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: Mark Moll, Ioan Sucan, Luis G. Torres
from sys import argv, exit
from os.path import basename, splitext, exists
import os
import sqlite3
import datetime
plottingEnabled=True
try:
import matplotlib
matplotlib.use('pdf')
from matplotlib import __version__ as matplotlibversion
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
from math import floor
except:
print('Matplotlib or Numpy was not found; disabling plotting capabilities...')
plottingEnabled=False
from optparse import OptionParser, OptionGroup
# Given a text line, split it into tokens (by space) and return the token
# at the desired index. Additionally, test that some expected tokens exist.
# Return None if they do not.
def readLogValue(filevar, desired_token_index, expected_tokens) :
start_pos = filevar.tell()
tokens = filevar.readline().split()
for token_index in expected_tokens:
if not tokens[token_index] == expected_tokens[token_index]:
# undo the read, if we failed to parse.
filevar.seek(start_pos)
return None
return tokens[desired_token_index]
def readOptionalLogValue(filevar, desired_token_index, expected_tokens = {}) :
return readLogValue(filevar, desired_token_index, expected_tokens)
def readRequiredLogValue(name, filevar, desired_token_index, expected_tokens = {}) :
result = readLogValue(filevar, desired_token_index, expected_tokens)
if result == None:
raise Exception("Unable to read " + name)
return result
def ensurePrefix(line, prefix):
if not line.startswith(prefix):
raise Exception("Expected prefix " + prefix + " was not found")
return line
def readOptionalMultilineValue(filevar):
start_pos = filevar.tell()
line = filevar.readline()
if not line.startswith("<<<|"):
filevar.seek(start_pos)
return None
value = ''
line = filevar.readline()
while not line.startswith('|>>>'):
value = value + line
line = filevar.readline()
if line == None:
raise Exception("Expected token |>>> missing")
return value
def readRequiredMultilineValue(filevar):
ensurePrefix(filevar.readline(), "<<<|")
value = ''
line = filevar.readline()
while not line.startswith('|>>>'):
value = value + line
line = filevar.readline()
if line == None:
raise Exception("Expected token |>>> missing")
return value
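# Example of the multi-line block format parsed above (illustrative content):
# <<<|
# CPU: some cpuinfo text
# Memory: 16GB
# |>>>
# Everything between the '<<<|' and '|>>>' marker lines is returned as one string.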
def readBenchmarkLog(dbname, filenames, moveitformat):
"""Parse benchmark log files and store the parsed data in a sqlite3 database."""
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
# create all tables if they don't already exist
c.executescript("""CREATE TABLE IF NOT EXISTS experiments
(id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR(512),
totaltime REAL, timelimit REAL, memorylimit REAL, runcount INTEGER,
version VARCHAR(128), hostname VARCHAR(1024), cpuinfo TEXT,
date DATETIME, seed INTEGER, setup TEXT);
CREATE TABLE IF NOT EXISTS plannerConfigs
(id INTEGER PRIMARY KEY AUTOINCREMENT,
name VARCHAR(512) NOT NULL, settings TEXT);
CREATE TABLE IF NOT EXISTS enums
(name VARCHAR(512), value INTEGER, description TEXT,
PRIMARY KEY (name, value));
CREATE TABLE IF NOT EXISTS runs
(id INTEGER PRIMARY KEY AUTOINCREMENT, experimentid INTEGER, plannerid INTEGER,
FOREIGN KEY (experimentid) REFERENCES experiments(id) ON DELETE CASCADE,
FOREIGN KEY (plannerid) REFERENCES plannerConfigs(id) ON DELETE CASCADE);
CREATE TABLE IF NOT EXISTS progress
(runid INTEGER, time REAL, PRIMARY KEY (runid, time),
FOREIGN KEY (runid) REFERENCES runs(id) ON DELETE CASCADE)""")
for filename in filenames:
print('Processing ' + filename)
logfile = open(filename,'r')
start_pos = logfile.tell()
libname = readOptionalLogValue(logfile, 0, {1 : "version"})
if libname == None:
libname = "OMPL"
logfile.seek(start_pos)
version = readOptionalLogValue(logfile, -1, {1 : "version"})
if version == None:
# set the version number to make Planner Arena happy
version = "0.0.0"
version = ' '.join([libname, version])
expname = readRequiredLogValue("experiment name", logfile, -1, {0 : "Experiment"})
# optional experiment properties
nrexpprops = int(readOptionalLogValue(logfile, 0, {-2: "experiment", -1: "properties"}) or 0)
expprops = {}
for i in range(nrexpprops):
entry = logfile.readline().strip().split('=')
nameAndType = entry[0].split(' ')
expprops[nameAndType[0]] = (entry[1], nameAndType[1])
# adding columns to experiments table
c.execute('PRAGMA table_info(experiments)')
columnNames = [col[1] for col in c.fetchall()]
for name in sorted(expprops.keys()):
# only add column if it doesn't exist
if name not in columnNames:
c.execute('ALTER TABLE experiments ADD %s %s' % (name, expprops[name][1]))
hostname = readRequiredLogValue("hostname", logfile, -1, {0 : "Running"})
date = ' '.join(ensurePrefix(logfile.readline(), "Starting").split()[2:])
if moveitformat:
expsetup = readRequiredLogValue("goal name", logfile, -1, {0: "Goal", 1: "name"})
cpuinfo = None
rseed = 0
timelimit = float(readRequiredLogValue("time limit", logfile, 0, {-3 : "seconds", -2 : "per", -1 : "run"}))
memorylimit = 0
else:
expsetup = readRequiredMultilineValue(logfile)
cpuinfo = readOptionalMultilineValue(logfile)
rseed = int(readRequiredLogValue("random seed", logfile, 0, {-2 : "random", -1 : "seed"}))
timelimit = float(readRequiredLogValue("time limit", logfile, 0, {-3 : "seconds", -2 : "per", -1 : "run"}))
memorylimit = float(readRequiredLogValue("memory limit", logfile, 0, {-3 : "MB", -2 : "per", -1 : "run"}))
nrrunsOrNone = readOptionalLogValue(logfile, 0, {-3 : "runs", -2 : "per", -1 : "planner"})
nrruns = -1
if nrrunsOrNone != None:
nrruns = int(nrrunsOrNone)
totaltime = float(readRequiredLogValue("total time", logfile, 0, {-3 : "collect", -2 : "the", -1 : "data"}))
numEnums = 0
numEnumsOrNone = readOptionalLogValue(logfile, 0, {-2 : "enum"})
if numEnumsOrNone != None:
numEnums = int(numEnumsOrNone)
for i in range(numEnums):
enum = logfile.readline()[:-1].split('|')
c.execute('SELECT * FROM enums WHERE name IS "%s"' % enum[0])
if c.fetchone() == None:
for j in range(len(enum)-1):
c.execute('INSERT INTO enums VALUES (?,?,?)',
(enum[0],j,enum[j+1]))
# Creating entry in experiments table
experimentEntries = [None, expname, totaltime, timelimit, memorylimit, nrruns, version,
hostname, cpuinfo, date, rseed, expsetup]
for name in sorted(expprops.keys()): # sort to ensure correct order
experimentEntries.append(expprops[name][0])
c.execute('INSERT INTO experiments VALUES (' + ','.join('?' for i in experimentEntries) + ')', experimentEntries)
experimentId = c.lastrowid
numPlanners = int(readRequiredLogValue("planner count", logfile, 0, {-1 : "planners"}))
for i in range(numPlanners):
plannerName = logfile.readline()[:-1]
print('Parsing data for ' + plannerName)
# read common data for planner
numCommon = int(logfile.readline().split()[0])
settings = ''
for j in range(numCommon):
settings = settings + logfile.readline() + ';'
# find planner id
c.execute('SELECT id FROM plannerConfigs WHERE (name=? AND settings=?)',
(plannerName, settings,))
p = c.fetchone()
if p==None:
c.execute('INSERT INTO plannerConfigs VALUES (?,?,?)',
(None, plannerName, settings,))
plannerId = c.lastrowid
else:
plannerId = p[0]
# get current column names
c.execute('PRAGMA table_info(runs)')
columnNames = [col[1] for col in c.fetchall()]
# read properties and add columns as necessary
numProperties = int(logfile.readline().split()[0])
propertyNames = ['experimentid', 'plannerid']
for j in range(numProperties):
field = logfile.readline().split()
propertyType = field[-1]
propertyName = '_'.join(field[:-1])
if propertyName not in columnNames:
c.execute('ALTER TABLE runs ADD %s %s' % (propertyName, propertyType))
propertyNames.append(propertyName)
# read measurements
insertFmtStr = 'INSERT INTO runs (' + ','.join(propertyNames) + \
') VALUES (' + ','.join('?'*len(propertyNames)) + ')'
numRuns = int(logfile.readline().split()[0])
runIds = []
for j in range(numRuns):
values = tuple([experimentId, plannerId] + \
[None if len(x) == 0 or x == 'nan' or x == 'inf' else x
for x in logfile.readline().split('; ')[:-1]])
c.execute(insertFmtStr, values)
# extract primary key of each run row so we can reference them
# in the planner progress data table if needed
runIds.append(c.lastrowid)
nextLine = logfile.readline().strip()
# read planner progress data if it's supplied
if nextLine != '.':
# get current column names
c.execute('PRAGMA table_info(progress)')
columnNames = [col[1] for col in c.fetchall()]
# read progress properties and add columns as necessary
numProgressProperties = int(nextLine.split()[0])
progressPropertyNames = ['runid']
for i in range(numProgressProperties):
field = logfile.readline().split()
progressPropertyType = field[-1]
progressPropertyName = "_".join(field[:-1])
if progressPropertyName not in columnNames:
c.execute('ALTER TABLE progress ADD %s %s' %
(progressPropertyName, progressPropertyType))
progressPropertyNames.append(progressPropertyName)
# read progress measurements
insertFmtStr = 'INSERT INTO progress (' + \
','.join(progressPropertyNames) + ') VALUES (' + \
','.join('?'*len(progressPropertyNames)) + ')'
numRuns = int(logfile.readline().split()[0])
for j in range(numRuns):
dataSeries = logfile.readline().split(';')[:-1]
for dataSample in dataSeries:
values = tuple([runIds[j]] + \
[None if len(x) == 0 or x == 'nan' or x == 'inf' else x
for x in dataSample.split(',')[:-1]])
try:
c.execute(insertFmtStr, values)
except sqlite3.IntegrityError:
print('Ignoring duplicate progress data. Consider increasing ompl::tools::Benchmark::Request::timeBetweenUpdates.')
pass
logfile.readline()
logfile.close()
conn.commit()
c.close()
def plotAttribute(cur, planners, attribute, typename):
"""Create a plot for a particular attribute. It will include data for
all planners that have data for this attribute."""
labels = []
measurements = []
nanCounts = []
if typename == 'ENUM':
cur.execute('SELECT description FROM enums where name IS "%s"' % attribute)
descriptions = [ t[0] for t in cur.fetchall() ]
numValues = len(descriptions)
for planner in planners:
cur.execute('SELECT %s FROM runs WHERE plannerid = %s AND %s IS NOT NULL' \
% (attribute, planner[0], attribute))
measurement = [ t[0] for t in cur.fetchall() if t[0] != None ]
if len(measurement) > 0:
cur.execute('SELECT count(*) FROM runs WHERE plannerid = %s AND %s IS NULL' \
% (planner[0], attribute))
nanCounts.append(cur.fetchone()[0])
labels.append(planner[1])
if typename == 'ENUM':
scale = 100. / len(measurement)
measurements.append([measurement.count(i)*scale for i in range(numValues)])
else:
measurements.append(measurement)
if len(measurements)==0:
print('Skipping "%s": no available measurements' % attribute)
return
plt.clf()
ax = plt.gca()
if typename == 'ENUM':
width = .5
measurements = np.transpose(np.vstack(measurements))
colsum = np.sum(measurements, axis=1)
rows = np.where(colsum != 0)[0]
heights = np.zeros((1,measurements.shape[1]))
ind = range(measurements.shape[1])
legend_labels = []
for i in rows:
plt.bar(ind, measurements[i], width, bottom=heights[0],
color=matplotlib.cm.hot(int(floor(i*256/numValues))),
label=descriptions[i])
heights = heights + measurements[i]
xtickNames = plt.xticks([x+width/2. for x in ind], labels, rotation=30)
ax.set_ylabel(attribute.replace('_',' ') + ' (%)')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
props = matplotlib.font_manager.FontProperties()
props.set_size('small')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop = props)
elif typename == 'BOOLEAN':
width = .5
measurementsPercentage = [sum(m) * 100. / len(m) for m in measurements]
ind = range(len(measurements))
plt.bar(ind, measurementsPercentage, width)
xtickNames = plt.xticks([x + width / 2. for x in ind], labels, rotation=30)
ax.set_ylabel(attribute.replace('_',' ') + ' (%)')
else:
if int(matplotlibversion.split('.')[0])<1:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5)
else:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5, bootstrap=1000)
ax.set_ylabel(attribute.replace('_',' '))
xtickNames = plt.setp(ax,xticklabels=labels)
plt.setp(xtickNames, rotation=25)
ax.set_xlabel('Motion planning algorithm')
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
if max(nanCounts)>0:
maxy = max([max(y) for y in measurements])
for i in range(len(labels)):
x = i+width/2 if typename=='BOOLEAN' else i+1
ax.text(x, .95*maxy, str(nanCounts[i]), horizontalalignment='center', size='small')
plt.show()
def plotProgressAttribute(cur, planners, attribute):
"""Plot data for a single planner progress attribute. Will create an
average time-plot with error bars of the attribute over all runs for
each planner."""
import numpy.ma as ma
plt.clf()
ax = plt.gca()
ax.set_xlabel('time (s)')
ax.set_ylabel(attribute.replace('_',' '))
plannerNames = []
for planner in planners:
cur.execute("""SELECT count(progress.%s) FROM progress INNER JOIN runs
ON progress.runid = runs.id AND runs.plannerid=%s
AND progress.%s IS NOT NULL""" \
% (attribute, planner[0], attribute))
if cur.fetchone()[0] > 0:
plannerNames.append(planner[1])
cur.execute("""SELECT DISTINCT progress.runid FROM progress INNER JOIN runs
WHERE progress.runid=runs.id AND runs.plannerid=?""", (planner[0],))
runids = [t[0] for t in cur.fetchall()]
timeTable = []
dataTable = []
for r in runids:
# Select data for given run
cur.execute('SELECT time, %s FROM progress WHERE runid = %s ORDER BY time' % (attribute,r))
(time, data) = zip(*(cur.fetchall()))
timeTable.append(time)
dataTable.append(data)
# It's conceivable that the sampling process may have
# generated more samples for one run than another; in this
# case, truncate all data series to length of shortest
# one.
fewestSamples = min(len(time[:]) for time in timeTable)
times = np.array(timeTable[0][:fewestSamples])
dataArrays = np.array([data[:fewestSamples] for data in dataTable])
filteredData = ma.masked_array(dataArrays, np.equal(dataArrays, None), dtype=float)
means = np.mean(filteredData, axis=0)
stddevs = np.std(filteredData, axis=0, ddof=1)
# plot average with error bars
plt.errorbar(times, means, yerr=2*stddevs, errorevery=max(1, len(times) // 20))
ax.legend(plannerNames)
if len(plannerNames)>0:
plt.show()
else:
plt.clf()
def plotStatistics(dbname, fname):
"""Create a PDF file with box plots for all attributes."""
print("Generating plots...")
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute('SELECT id, name FROM plannerConfigs')
planners = [(t[0],t[1].replace('geometric_','').replace('control_',''))
for t in c.fetchall()]
c.execute('PRAGMA table_info(runs)')
colInfo = c.fetchall()[3:]
pp = PdfPages(fname)
for col in colInfo:
if col[2] == 'BOOLEAN' or col[2] == 'ENUM' or \
col[2] == 'INTEGER' or col[2] == 'REAL':
plotAttribute(c, planners, col[1], col[2])
pp.savefig(plt.gcf())
c.execute('PRAGMA table_info(progress)')
colInfo = c.fetchall()[2:]
for col in colInfo:
plotProgressAttribute(c, planners, col[1])
pp.savefig(plt.gcf())
plt.clf()
pagey = 0.9
pagex = 0.06
c.execute("""SELECT id, name, timelimit, memorylimit FROM experiments""")
experiments = c.fetchall()
for experiment in experiments:
c.execute("""SELECT count(*) FROM runs WHERE runs.experimentid = %d
GROUP BY runs.plannerid""" % experiment[0])
numRuns = [run[0] for run in c.fetchall()]
numRuns = numRuns[0] if len(set(numRuns)) == 1 else ','.join(str(r) for r in numRuns)
plt.figtext(pagex, pagey, 'Experiment "%s"' % experiment[1])
plt.figtext(pagex, pagey-0.05, 'Number of averaged runs: %s' % numRuns)
plt.figtext(pagex, pagey-0.10, "Time limit per run: %g seconds" % experiment[2])
plt.figtext(pagex, pagey-0.15, "Memory limit per run: %g MB" % experiment[3])
plt.show()
pp.savefig(plt.gcf())
pp.close()
def saveAsMysql(dbname, mysqldump):
# See http://stackoverflow.com/questions/1067060/perl-to-python
import re
print("Saving as MySQL dump file...")
conn = sqlite3.connect(dbname)
mysqldump = open(mysqldump,'w')
# make sure all tables are dropped in an order that keeps foreign keys valid
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table'")
table_names = [ str(t[0]) for t in c.fetchall() ]
c.close()
last = ['experiments', 'planner_configs']
for table in table_names:
if table.startswith("sqlite"):
continue
if not table in last:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for table in last:
if table in table_names:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for line in conn.iterdump():
process = False
for nope in ('BEGIN TRANSACTION','COMMIT',
'sqlite_sequence','CREATE UNIQUE INDEX', 'CREATE VIEW'):
if nope in line: break
else:
process = True
if not process: continue
line = re.sub(r"[\n\r\t ]+", " ", line)
m = re.search('CREATE TABLE ([a-zA-Z0-9_]*)(.*)', line)
if m:
name, sub = m.groups()
sub = sub.replace('"','`')
line = '''CREATE TABLE IF NOT EXISTS %(name)s%(sub)s'''
line = line % dict(name=name, sub=sub)
# make sure we use an engine that supports foreign keys
line = line.rstrip("\n\t ;") + " ENGINE = InnoDB;\n"
else:
m = re.search('INSERT INTO "([a-zA-Z0-9_]*)"(.*)', line)
if m:
line = 'INSERT INTO %s%s\n' % m.groups()
line = line.replace('"', r'\"')
line = line.replace('"', "'")
line = re.sub(r"([^'])'t'(.)", "\\1THIS_IS_TRUE\\2", line)
line = line.replace('THIS_IS_TRUE', '1')
line = re.sub(r"([^'])'f'(.)", "\\1THIS_IS_FALSE\\2", line)
line = line.replace('THIS_IS_FALSE', '0')
line = line.replace('AUTOINCREMENT', 'AUTO_INCREMENT')
mysqldump.write(line)
mysqldump.close()
def computeViews(dbname, moveitformat):
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute('PRAGMA table_info(runs)')
if moveitformat:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
# kinodynamic paths cannot be simplified (or at least not easily),
# so simplification_time may not exist as a database column
elif 'simplification_time' in [col[1] for col in c.fetchall()]:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, time + simplification_time AS total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
else:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, time AS total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
s1 = """SELECT plannerid, plannerName, experimentid, AVG(solved) AS avg_solved, AVG(total_time) AS avg_total_time
FROM (%s) GROUP BY plannerid, experimentid""" % s0
s2 = """SELECT plannerid, experimentid, MIN(avg_solved) AS avg_solved, avg_total_time
FROM (%s) GROUP BY plannerName, experimentid ORDER BY avg_solved DESC, avg_total_time ASC""" % s1
c.execute('DROP VIEW IF EXISTS bestPlannerConfigsPerExperiment')
c.execute('CREATE VIEW IF NOT EXISTS bestPlannerConfigsPerExperiment AS %s' % s2)
s1 = """SELECT plannerid, plannerName, AVG(solved) AS avg_solved, AVG(total_time) AS avg_total_time
FROM (%s) GROUP BY plannerid""" % s0
s2 = """SELECT plannerid, MIN(avg_solved) AS avg_solved, avg_total_time
FROM (%s) GROUP BY plannerName ORDER BY avg_solved DESC, avg_total_time ASC""" % s1
c.execute('DROP VIEW IF EXISTS bestPlannerConfigs')
c.execute('CREATE VIEW IF NOT EXISTS bestPlannerConfigs AS %s' % s2)
conn.commit()
c.close()
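# A minimal sketch of how the views created above could be inspected once
# computeViews() has run (the database filename is illustrative):
#
#     import sqlite3
#     conn = sqlite3.connect('benchmark.db')
#     for row in conn.execute('SELECT * FROM bestPlannerConfigs'):
#         print(row)
#     for row in conn.execute('SELECT * FROM bestPlannerConfigsPerExperiment'):
#         print(row)
#     conn.close()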
if __name__ == "__main__":
usage = """%prog [options] [<benchmark.log> ...]"""
parser = OptionParser(usage)
parser.add_option("-d", "--database", dest="dbname", default="benchmark.db",
help="Filename of benchmark database [default: %default]")
parser.add_option("-a", "--append", action="store_true", dest="append", default=False,
help="Append data to database (as opposed to overwriting an existing database)")
parser.add_option("-v", "--view", action="store_true", dest="view", default=False,
help="Compute the views for best planner configurations")
if plottingEnabled:
parser.add_option("-p", "--plot", dest="plot", default=None,
help="Create a PDF of plots")
parser.add_option("-m", "--mysql", dest="mysqldb", default=None,
help="Save SQLite3 database as a MySQL dump file")
parser.add_option("--moveit", action="store_true", dest="moveit", default=False,
help="Log files are produced by MoveIt!")
(options, args) = parser.parse_args()
if not options.append and exists(options.dbname) and len(args) > 0:
os.remove(options.dbname)
if len(args)>0:
readBenchmarkLog(options.dbname, args, options.moveit)
# If we update the database, we recompute the views as well
options.view = True
if options.view:
computeViews(options.dbname, options.moveit)
if options.plot:
plotStatistics(options.dbname, options.plot)
if options.mysqldb:
saveAsMysql(options.dbname, options.mysqldb)
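# Illustrative invocations (the script filename is hypothetical; the options map
# directly to the OptionParser definitions above):
#
#     python benchmark_statistics.py run1.log run2.log -d benchmark.db
#     python benchmark_statistics.py -d benchmark.db -p plots.pdf
#     python benchmark_statistics.py -d benchmark.db -m mysql_dump.sql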
|
|
# This file is part of the ISIS IBEX application.
# Copyright (C) 2012-2016 Science & Technology Facilities Council.
# All rights reserved.
#
# This program is distributed in the hope that it will be useful.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution.
# EXCEPT AS EXPRESSLY SET FORTH IN THE ECLIPSE PUBLIC LICENSE V1.0, THE PROGRAM
# AND ACCOMPANYING MATERIALS ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND. See the Eclipse Public License v1.0 for more details.
#
# You should have received a copy of the Eclipse Public License v1.0
# along with this program; if not, you can obtain a copy from
# https://www.eclipse.org/org/documents/epl-v10.php or
# http://opensource.org/licenses/eclipse-1.0.php
"""
Module for dealing with configuration of the logging.
"""
import os
import re
from datetime import datetime
from ArchiverAccess.logging_period_providers import LoggingPeriodProviderConst, LoggingPeriodProviderPV
from ArchiverAccess.utilities import add_default_field
from server_common.utilities import print_and_log, SEVERITY
DEFAULT_LOG_PATH = os.path.join("C:\\", "logs")
"""Default path where logs should be writen"""
DEFAULT_LOGGING_PERIOD_IN_S = 1
"""If no period is given for the logging then this is the default"""
TIME_DATE_COLUMN_HEADING = "Date/time"
"""Column heading for the date and time column"""
DEFAULT_COLUMN_SEPARATOR = "\t"
"""Default separator between columns in the table"""
class ArchiveAccessConfigBuilder(object):
"""
Configuration builder: a fluent API for creating an archive access configuration step by step.
"""
def __init__(self, on_end_logging_filename_template=None, continuous_logging_filename_template=None,
base_path=DEFAULT_LOG_PATH, default_field="VAL"):
"""
Constructor.
Args:
on_end_logging_filename_template: the filename template to use for the log-on-end file; None means do not
create the file. Placeholders of the form `{xxx}` are replaced, where xxx can be start_time - the start
date and time of the log
continuous_logging_filename_template: the filename template to use for the continuous log file; when None
do not create the file. Curly brackets in the template are replaced (as per python format); possible
values are:
{start_time} - replaced with the start date and time of the log
base_path: the base path into which files should be placed
default_field: the field appended to pvs without a field, e.g. VAL; blank to not add a field
"""
self._default_field = default_field
self._create_logs_from = datetime.now()
if on_end_logging_filename_template is None:
self._on_end_logging_filename_template = None
else:
self._on_end_logging_filename_template = os.path.join(base_path, on_end_logging_filename_template)
if continuous_logging_filename_template is None:
self._continuous_logging_filename_template = None
else:
self._continuous_logging_filename_template = os.path.join(base_path, continuous_logging_filename_template)
self._header_lines = []
self._columns = []
self._trigger_pv = None
self._logging_period_provider = None
def header(self, header_line):
"""
Add a templated line to the file header. Templates are similar to python formatters where the name of the
argument is the pv name and the format can be specified after that using a | to separate it
(in python it is a :). The value at the start of the log will be used.
E.g. 'a line {TE:BLAH:SIMPLE:VALUE1|5.3f}' would return, if the value was 10, the line 'a line 10.000'
Args:
:param header_line: the header template line
:return: self
"""
self._header_lines.append(header_line)
return self
def build(self):
"""
Build a configuration object from arguments
Returns (ArchiveAccessConfig): logging configuration
"""
logging_period_provider = LoggingPeriodProviderConst(DEFAULT_LOGGING_PERIOD_IN_S)
if self._logging_period_provider is not None:
logging_period_provider = self._logging_period_provider
return ArchiveAccessConfig(self._on_end_logging_filename_template,
self._continuous_logging_filename_template,
self._header_lines, self._columns, self._trigger_pv,
logging_period_provider, default_field=self._default_field)
def table_column(self, heading, pv_template):
"""
Add a table column
Args:
heading: heading for the table column
pv_template: pv template
Returns: self
"""
self._columns.append(
{"header": heading,
"pv_template": pv_template})
return self
def trigger_pv(self, pv_name):
"""
PV from which to trigger the creation of a log file
Args:
pv_name: name of the pv to monitor
Returns: self
"""
if self._trigger_pv is not None:
print_and_log("Trigger pv being redefined to {0} from {1}".format(pv_name, self._trigger_pv),
severity=SEVERITY.MAJOR, src="ArchiverAccess")
self._trigger_pv = pv_name
return self
def logging_period_seconds(self, logging_period):
"""
Constant logging period
Args:
logging_period: the logging period
Returns: self
"""
self._set_logging_period_provider(LoggingPeriodProviderConst(logging_period))
return self
def logging_period_pv(self, logging_period_pv):
"""
Set a logging period depending on the value of a pv
Args:
logging_period_pv: pv to use for the logging period
Returns: self
"""
self._set_logging_period_provider(LoggingPeriodProviderPV(logging_period_pv, DEFAULT_LOGGING_PERIOD_IN_S))
return self
def _set_logging_period_provider(self, logging_period_provider):
if self._logging_period_provider is not None:
print_and_log("Logging period being redefined to {0} from {1}".format(
logging_period_provider, self._logging_period_provider), severity=SEVERITY.MAJOR, src="ArchiverAccess")
self._logging_period_provider = logging_period_provider
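# A minimal sketch of how the builder above might be used (PV names, file names
# and the logging period are illustrative only):
#
#     config = (ArchiveAccessConfigBuilder(on_end_logging_filename_template="demo_{start_time}.log")
#               .header("a line {TE:BLAH:SIMPLE:VALUE1|5.3f}")
#               .table_column("Value 1", "{TE:BLAH:SIMPLE:VALUE1|5.3f}")
#               .trigger_pv("TE:BLAH:SIMPLE:LOGGING")
#               .logging_period_seconds(10)
#               .build())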
class ArchiveAccessConfig(object):
"""
A complete valid configuration object for creating a single log file
"""
def __init__(self, on_end_logging_filename_template, continuous_logging_filename_template, header_lines, columns,
trigger_pv, logging_period_provider, default_field="VAL"):
"""
Constructor - this can be built using the builder
Args:
on_end_logging_filename_template: the filename template to use for the log-on-end file; None means do not
create the file. Placeholders of the form `{xxx}` are replaced, where xxx can be start_time - the start
date and time of the log
continuous_logging_filename_template: the filename template to use for the continuous log file; when None
do not create the file. Curly brackets in the template are replaced (as per python format); possible
values are:
{start_time} - replaced with the start date and time of the log
header_lines: header line templates
columns: column definition
trigger_pv: pv on which to trigger a log
logging_period_provider(ArchiverAccess.logging_period_providers.LoggingPeriodProvider):
an object which will supply the logging period
default_field: field appended to PVs without a field
"""
self._column_separator = DEFAULT_COLUMN_SEPARATOR
self._default_field = default_field
self.trigger_pv = add_default_field(trigger_pv, self._default_field)
self.on_end_logging_filename_template = on_end_logging_filename_template
self.continuous_logging_filename_template = continuous_logging_filename_template
self._convert_header(header_lines)
self.column_header_list = [TIME_DATE_COLUMN_HEADING]
self._convert_column_headers(columns)
self._convert_columns(columns)
self.logging_period_provider = logging_period_provider
self.logging_period_provider.set_default_field(self._default_field)
def __repr__(self):
rep = "Logging configuration (pvs as read from the archive)"
rep += " - file (log on end): {0}".format(self.on_end_logging_filename_template)
rep += " - file (continuous): {0}".format(self.continuous_logging_filename_template)
rep += " - trigger pv: {0}".format(self.trigger_pv)
rep += " - trigger pv: {0}".format(self.logging_period_provider)
rep += " - file headers: {0}".format(self.header)
rep += " - pvs in fileheader {0}".format(self.pv_names_in_header)
rep += " - table headers: {0}".format(self.column_header_list)
rep += " - table line: {0}".format(self.table_line)
rep += " - pvs in table line {0}".format(self.pv_names_in_columns)
return rep
def _convert_columns(self, columns):
"""
Convert columns to table line and list of pvs contained
Args:
columns: list of column dictionaries
Returns:
"""
line_in_log_format = self._column_separator.join([str(x["pv_template"]) for x in columns])
pv_names_in_columns = self._generate_pv_list([line_in_log_format])
formatted_columns = self._convert_log_formats_to_python_formats(line_in_log_format, pv_names_in_columns)
self.table_line = "{time}" + self._column_separator + formatted_columns
self.pv_names_in_columns = self._add_all_default_fields(pv_names_in_columns)
def _convert_column_headers(self, columns):
"""
Convert the column headers into the single header line that appears at the top of the table
Args:
columns: columns to be converted
Returns:
"""
self.column_header_list.extend([x["header"] for x in columns])
self.column_headers = self._column_separator.join(self.column_header_list)
def _convert_header(self, header_lines):
"""
Convert the header from lines containing templates to a line and pv names
Args:
header_lines: list of header lines
Returns:
"""
pv_names_in_header = self._generate_pv_list(header_lines)
self.header = []
for line in header_lines:
final_line = self._convert_log_formats_to_python_formats(line, pv_names_in_header)
self.header.append(final_line)
self.pv_names_in_header = self._add_all_default_fields(pv_names_in_header)
def _convert_log_formats_to_python_formats(self, line, pvs):
"""
Convert a log format line to a python format line based on the list of pvs.
The log format is {<pv name>!<converter>|<format>} which converts to {<index>!<converter>:<format>} where
index is the index of <pv name> in the pvs list and converter and format are python string format converter
and format (both are optional). For formatter mini-language see
https://docs.python.org/2/library/string.html#format-specification-mini-language
Args:
line: line to convert
pvs: a list of pvs to index
Returns: converted line
"""
final_line = line
for index, pv in enumerate(pvs):
# find the pv name and replace with argument index which it corresponds to
final_line = re.sub('({)' + pv + '([|!]?[^}]*})', r'\g<1>' + str(index) + r'\g<2>', final_line)
# replace the | with : in the format
final_line = re.sub('({' + str(index) + '!?[^|}]*)\|([^}]*})', r'\1' + ':' + r'\2', final_line)
return final_line
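# Illustrative behaviour of the conversion above (PV names and values are
# hypothetical):
#
#     line = "temp is {TE:BLAH:TEMP|5.3f} at {TE:BLAH:TIME}"
#     pvs = ["TE:BLAH:TEMP", "TE:BLAH:TIME"]
#     self._convert_log_formats_to_python_formats(line, pvs)
#     # -> "temp is {0:5.3f} at {1}"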
def _generate_pv_list(self, lines):
"""
Generate a pv list from a list of lines.
Args:
lines: list of lines containing log format templates
Returns: list of unique pvs in lines
"""
pvs = set()
for line in lines:
for match in re.finditer('{([^}|!]*)[|!]?[^}]*}', line):
pv = match.group(1)
pvs.add(pv)
return list(pvs)
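# For example, the template line "temp is {TE:BLAH:TEMP|5.3f} at {TE:BLAH:TIME}"
# (PV names hypothetical) would yield ["TE:BLAH:TEMP", "TE:BLAH:TIME"]; the order
# is not guaranteed because the names are collected in a set.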
def _add_all_default_fields(self, pv_names):
"""
Add default field to pvs if they don't have fields.
Args:
pv_names: iterable pv names
Returns: names with fields added
"""
return [add_default_field(pv, self._default_field) for pv in pv_names]
|
|
import click
import sys
import json
from inspect import signature
from mlflow.utils import cli_args
from mlflow.deployments import interface
from mlflow.utils.proto_json_utils import NumpyEncoder, _get_jsonable_obj
def _user_args_to_dict(user_list):
# A similar function in mlflow.cli throws an exception on import, hence this local copy
user_dict = {}
for s in user_list:
try:
name, value = s.split("=")
except ValueError as exc:
# not enough values to unpack
raise click.BadOptionUsage(
"config",
"Config options must be a pair and should be"
"provided as ``-C key=value`` or "
"``--config key=value``",
) from exc
if name in user_dict:
raise click.ClickException("Repeated parameter: '{}'".format(name))
user_dict[name] = value
return user_dict
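# A quick illustration of the helper above (keys and values are hypothetical):
#
#     _user_args_to_dict(["instance_type=ml.m5.large", "timeout=60"])
#     # -> {"instance_type": "ml.m5.large", "timeout": "60"}
#     _user_args_to_dict(["timeout=60", "timeout=90"])   # raises ClickException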
installed_targets = [target for target in interface.plugin_store.registry]
if len(installed_targets) > 0:
supported_targets_msg = "Support is currently installed for deployment to: " "{targets}".format(
targets=", ".join(installed_targets)
)
else:
supported_targets_msg = (
"NOTE: you currently do not have support installed for any deployment targets."
)
target_details = click.option(
"--target",
"-t",
required=True,
help="""
Deployment target URI. Run
`mlflow deployments help --target-name <target-name>` for
more details on the supported URI format and config options
for a given target.
{supported_targets_msg}
See all supported deployment targets and installation
instructions at
https://mlflow.org/docs/latest/plugins.html#community-plugins
""".format(
supported_targets_msg=supported_targets_msg
),
)
deployment_name = click.option("--name", "name", required=True, help="Name of the deployment")
parse_custom_arguments = click.option(
"--config",
"-C",
metavar="NAME=VALUE",
multiple=True,
help="Extra target-specific config for the model "
"deployment, of the form -C name=value. See "
"documentation/help for your deployment target for a "
"list of supported config options.",
)
parse_input = click.option(
"--input-path", "-I", required=True, help="Path to input json file for prediction"
)
parse_output = click.option(
"--output-path",
"-O",
help="File to output results to as a JSON file. If not provided, prints output to stdout.",
)
@click.group(
"deployments",
help="""
Deploy MLflow models to custom targets.
Run `mlflow deployments help --target-name <target-name>` for
more details on the supported URI format and config options for a given target.
{supported_targets_msg}
See all supported deployment targets and installation instructions in
https://mlflow.org/docs/latest/plugins.html#community-plugins
You can also write your own plugin for deployment to a custom target. For instructions on
writing and distributing a plugin, see
https://mlflow.org/docs/latest/plugins.html#writing-your-own-mlflow-plugins.
""".format(
supported_targets_msg=supported_targets_msg
),
)
def commands():
"""
Deploy MLflow models to custom targets. Support is currently installed for
the following targets: {targets}. Run `mlflow deployments help --target-name <target-name>` for
more details on the supported URI format and config options for a given target.
To deploy to other targets, you must first install an
appropriate third-party Python plugin. See the list of known community-maintained plugins
at https://mlflow.org/docs/latest/plugins.html#community-plugins.
You can also write your own plugin for deployment to a custom target. For instructions on
writing and distributing a plugin, see
https://mlflow.org/docs/latest/plugins.html#writing-your-own-mlflow-plugins.
"""
@commands.command("create")
@parse_custom_arguments
@deployment_name
@target_details
@cli_args.MODEL_URI
@click.option(
"--flavor",
"-f",
help="Which flavor to be deployed. This will be auto " "inferred if it's not given",
)
def create_deployment(flavor, model_uri, target, name, config):
"""
Deploy the model at ``model_uri`` to the specified target.
Additional plugin-specific arguments may also be passed to this command, via `-C key=value`
"""
config_dict = _user_args_to_dict(config)
client = interface.get_deploy_client(target)
deployment = client.create_deployment(name, model_uri, flavor, config=config_dict)
click.echo("\n{} deployment {} is created".format(deployment["flavor"], deployment["name"]))
@commands.command("update")
@parse_custom_arguments
@deployment_name
@target_details
@click.option(
"--model-uri",
"-m",
default=None,
metavar="URI",
help="URI to the model. A local path, a 'runs:/' URI, or a"
" remote storage URI (e.g., an 's3://' URI). For more information"
" about supported remote URIs for model artifacts, see"
" https://mlflow.org/docs/latest/tracking.html"
"#artifact-stores",
) # optional model_uri
@click.option(
"--flavor",
"-f",
help="Which flavor to be deployed. This will be auto " "inferred if it's not given",
)
def update_deployment(flavor, model_uri, target, name, config):
"""
Update the deployment with the name given at ``--name`` in the specified target.
You can update the URI of the model and/or the flavor of the deployed model (in which case the
model URI must also be specified).
Additional plugin-specific arguments may also be passed to this command, via `-C key=value`.
"""
config_dict = _user_args_to_dict(config)
client = interface.get_deploy_client(target)
ret = client.update_deployment(name, model_uri=model_uri, flavor=flavor, config=config_dict)
click.echo("Deployment {} is updated (with flavor {})".format(name, ret["flavor"]))
@commands.command("delete")
@parse_custom_arguments
@deployment_name
@target_details
def delete_deployment(target, name, config):
"""
Delete the deployment with name given at `--name` from the specified target.
"""
client = interface.get_deploy_client(target)
sig = signature(client.delete_deployment)
if "config" in sig.parameters:
config_dict = _user_args_to_dict(config)
client.delete_deployment(name, config=config_dict)
else:
client.delete_deployment(name)
click.echo("Deployment {} is deleted".format(name))
@commands.command("list")
@target_details
def list_deployment(target):
"""
List the names of all model deployments in the specified target. These names can be used with
the `delete`, `update`, and `get` commands.
"""
client = interface.get_deploy_client(target)
ids = client.list_deployments()
click.echo("List of all deployments:\n{}".format(ids))
@commands.command("get")
@deployment_name
@target_details
def get_deployment(target, name):
"""
Print a detailed description of the deployment with name given at ``--name`` in the specified
target.
"""
client = interface.get_deploy_client(target)
desc = client.get_deployment(name)
for key, val in desc.items():
click.echo("{}: {}".format(key, val))
click.echo("\n")
@commands.command("help")
@target_details
def target_help(target):
"""
Display additional help for a specific deployment target, e.g. info on target-specific config
options and the target's URI format.
"""
click.echo(interface._target_help(target))
@commands.command("run-local")
@parse_custom_arguments
@deployment_name
@target_details
@cli_args.MODEL_URI
@click.option(
"--flavor",
"-f",
help="Which flavor to be deployed. This will be auto " "inferred if it's not given",
)
def run_local(flavor, model_uri, target, name, config):
"""
Deploy the model locally. This has a very similar signature to the ``create`` API
"""
config_dict = _user_args_to_dict(config)
interface.run_local(target, name, model_uri, flavor, config_dict)
def predictions_to_json(raw_predictions, output):
predictions = _get_jsonable_obj(raw_predictions, pandas_orient="records")
json.dump(predictions, output, cls=NumpyEncoder)
@commands.command("predict")
@deployment_name
@target_details
@parse_input
@parse_output
def predict(target, name, input_path, output_path):
"""
Predict the results for the deployed model for the given input(s)
"""
import pandas as pd
df = pd.read_json(input_path)
client = interface.get_deploy_client(target)
result = client.predict(name, df)
if output_path:
with open(output_path, "w") as fp:
predictions_to_json(result, fp)
else:
predictions_to_json(result, sys.stdout)
@commands.command("explain")
@deployment_name
@target_details
@parse_input
@parse_output
def explain(target, name, input_path, output_path):
"""
Generate explanations of model predictions on the specified input for
the deployed model for the given input(s). Explanation output formats vary
by deployment target, and can include details like feature importance for
understanding/debugging predictions. Run `mlflow deployments help` or
consult the documentation for your plugin for details on explanation format.
For information about the input data formats accepted by this function,
see the following documentation:
https://www.mlflow.org/docs/latest/models.html#built-in-deployment-tools
"""
import pandas as pd
df = pd.read_json(input_path)
client = interface.get_deploy_client(target)
result = client.explain(name, df)
if output_path:
with open(output_path, "w") as fp:
predictions_to_json(result, fp)
else:
predictions_to_json(result, sys.stdout)
|
|
"""
This plugin captures logging statements issued during test execution. When an
error or failure occurs, the captured log messages are attached to the running
test in the test.capturedLogging attribute, and displayed with the error or failure
output. It is enabled by default but can be turned off with the option
``--nologcapture``.
You can filter captured logging statements with the ``--logging-filter`` option.
If set, it specifies which logger(s) will be captured; loggers that do not match
will be passed. Example: specifying ``--logging-filter=sqlalchemy,myapp``
will ensure that only statements logged via sqlalchemy.engine, myapp
or myapp.foo.bar logger will be logged.
You can remove other installed logging handlers with the
``--logging-clear-handlers`` option.
"""
import logging
from logging import Handler
import threading
from nose.plugins.base import Plugin
from nose.util import anyp, ln, safe_str
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
log = logging.getLogger(__name__)
class FilterSet(object):
def __init__(self, filter_components):
self.inclusive, self.exclusive = self._partition(filter_components)
# @staticmethod
def _partition(components):
inclusive, exclusive = [], []
for component in components:
if component.startswith('-'):
exclusive.append(component[1:])
else:
inclusive.append(component)
return inclusive, exclusive
_partition = staticmethod(_partition)
def allow(self, record):
"""returns whether this record should be printed"""
if not self:
# nothing to filter
return True
return self._allow(record) and not self._deny(record)
# @staticmethod
def _any_match(matchers, record):
"""return the bool of whether `record` starts with
any item in `matchers`"""
def record_matches_key(key):
return record == key or record.startswith(key + '.')
return anyp(bool, map(record_matches_key, matchers))
_any_match = staticmethod(_any_match)
def _allow(self, record):
if not self.inclusive:
return True
return self._any_match(self.inclusive, record)
def _deny(self, record):
if not self.exclusive:
return False
return self._any_match(self.exclusive, record)
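# A small sketch of FilterSet behaviour (logger names are hypothetical):
#
#     fs = FilterSet(['myapp', '-myapp.noisy'])
#     fs.allow('myapp.views')        # True  - matches an inclusive filter
#     fs.allow('myapp.noisy.debug')  # False - excluded by '-myapp.noisy'
#     fs.allow('requests')           # False - matches no inclusive filter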
class MyMemoryHandler(Handler):
def __init__(self, logformat, logdatefmt, filters):
Handler.__init__(self)
fmt = logging.Formatter(logformat, logdatefmt)
self.setFormatter(fmt)
self.filterset = FilterSet(filters)
self.buffer = []
def emit(self, record):
self.buffer.append(self.format(record))
def flush(self):
pass # do nothing
def truncate(self):
self.buffer = []
def filter(self, record):
if self.filterset.allow(record.name):
return Handler.filter(self, record)
def __getstate__(self):
state = self.__dict__.copy()
del state['lock']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.lock = threading.RLock()
class LogCapture(Plugin):
"""
Log capture plugin. Enabled by default. Disable with --nologcapture.
This plugin captures logging statements issued during test execution,
appending any output captured to the error or failure output,
should the test fail or raise an error.
"""
enabled = True
env_opt = 'NOSE_NOLOGCAPTURE'
name = 'logcapture'
score = 500
logformat = '%(name)s: %(levelname)s: %(message)s'
logdatefmt = None
clear = False
filters = ['-nose']
def options(self, parser, env):
"""Register commandline options.
"""
parser.add_option(
"--nologcapture", action="store_false",
default=not env.get(self.env_opt), dest="logcapture",
help="Disable logging capture plugin. "
"Logging configuration will be left intact."
" [NOSE_NOLOGCAPTURE]")
parser.add_option(
"--logging-format", action="store", dest="logcapture_format",
default=env.get('NOSE_LOGFORMAT') or self.logformat,
metavar="FORMAT",
help="Specify custom format to print statements. "
"Uses the same format as used by standard logging handlers."
" [NOSE_LOGFORMAT]")
parser.add_option(
"--logging-datefmt", action="store", dest="logcapture_datefmt",
default=env.get('NOSE_LOGDATEFMT') or self.logdatefmt,
metavar="FORMAT",
help="Specify custom date/time format to print statements. "
"Uses the same format as used by standard logging handlers."
" [NOSE_LOGDATEFMT]")
parser.add_option(
"--logging-filter", action="store", dest="logcapture_filters",
default=env.get('NOSE_LOGFILTER'),
metavar="FILTER",
help="Specify which statements to filter in/out. "
"By default, everything is captured. If the output is too"
" verbose,\nuse this option to filter out needless output.\n"
"Example: filter=foo will capture statements issued ONLY to\n"
" foo or foo.what.ever.sub but not foobar or other logger.\n"
"Specify multiple loggers with comma: filter=foo,bar,baz.\n"
"If any logger name is prefixed with a minus, eg filter=-foo,\n"
"it will be excluded rather than included. Default: "
"exclude logging messages from nose itself (-nose)."
" [NOSE_LOGFILTER]\n")
parser.add_option(
"--logging-clear-handlers", action="store_true",
default=False, dest="logcapture_clear",
help="Clear all other logging handlers")
parser.add_option(
"--logging-level", action="store",
default='NOTSET', dest="logcapture_level",
help="Set the log level to capture")
def configure(self, options, conf):
"""Configure plugin.
"""
self.conf = conf
# Disable if explicitly disabled, or if logging is
# configured via logging config file
if not options.logcapture or conf.loggingConfig:
self.enabled = False
self.logformat = options.logcapture_format
self.logdatefmt = options.logcapture_datefmt
self.clear = options.logcapture_clear
self.loglevel = options.logcapture_level
if options.logcapture_filters:
self.filters = options.logcapture_filters.split(',')
def setupLoghandler(self):
# setup our handler with root logger
root_logger = logging.getLogger()
if self.clear:
if hasattr(root_logger, "handlers"):
for handler in root_logger.handlers:
root_logger.removeHandler(handler)
for logger in logging.Logger.manager.loggerDict.values():
if hasattr(logger, "handlers"):
for handler in logger.handlers:
logger.removeHandler(handler)
# make sure there isn't one already
# you can't simply use "if self.handler not in root_logger.handlers"
# since at least in unit tests this doesn't work --
# LogCapture() is instantiated for each test case while root_logger
# is module global
# so we always add new MyMemoryHandler instance
for handler in root_logger.handlers[:]:
if isinstance(handler, MyMemoryHandler):
root_logger.handlers.remove(handler)
root_logger.addHandler(self.handler)
# to make sure everything gets captured
loglevel = getattr(self, "loglevel", "NOTSET")
root_logger.setLevel(getattr(logging, loglevel))
def begin(self):
"""Set up logging handler before test run begins.
"""
self.start()
def start(self):
self.handler = MyMemoryHandler(self.logformat, self.logdatefmt,
self.filters)
self.setupLoghandler()
def end(self):
pass
def beforeTest(self, test):
"""Clear buffers and handlers before test.
"""
self.setupLoghandler()
def afterTest(self, test):
"""Clear buffers after test.
"""
self.handler.truncate()
def formatFailure(self, test, err):
"""Add captured log messages to failure output.
"""
return self.formatError(test, err)
def formatError(self, test, err):
"""Add captured log messages to error output.
"""
# logic flow copied from Capture.formatError
test.capturedLogging = records = self.formatLogRecords()
if not records:
return err
ec, ev, tb = err
return (ec, self.addCaptureToErr(ev, records), tb)
def formatLogRecords(self):
return map(safe_str, self.handler.buffer)
def addCaptureToErr(self, ev, records):
return '\n'.join([safe_str(ev), ln('>> begin captured logging <<')] + \
records + \
[ln('>> end captured logging <<')])
|
|
"""Alexa state report code."""
from __future__ import annotations
import asyncio
from http import HTTPStatus
import json
import logging
import aiohttp
import async_timeout
from homeassistant.const import MATCH_ALL, STATE_ON
from homeassistant.core import HomeAssistant, State, callback
from homeassistant.helpers.significant_change import create_checker
import homeassistant.util.dt as dt_util
from .const import API_CHANGE, DATE_FORMAT, DOMAIN, Cause
from .entities import ENTITY_ADAPTERS, AlexaEntity, generate_alexa_id
from .messages import AlexaResponse
_LOGGER = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 10
async def async_enable_proactive_mode(hass, smart_home_config):
"""Enable the proactive mode.
Proactive mode makes this component report state changes to Alexa.
"""
# Validate we can get access token.
await smart_home_config.async_get_access_token()
@callback
def extra_significant_check(
hass: HomeAssistant,
old_state: str,
old_attrs: dict,
old_extra_arg: dict,
new_state: str,
new_attrs: dict,
new_extra_arg: dict,
):
"""Check if the serialized data has changed."""
return old_extra_arg is not None and old_extra_arg != new_extra_arg
checker = await create_checker(hass, DOMAIN, extra_significant_check)
async def async_entity_state_listener(
changed_entity: str,
old_state: State | None,
new_state: State | None,
):
if not hass.is_running:
return
if not new_state:
return
if new_state.domain not in ENTITY_ADAPTERS:
return
if not smart_home_config.should_expose(changed_entity):
_LOGGER.debug("Not exposing %s because filtered by config", changed_entity)
return
alexa_changed_entity: AlexaEntity = ENTITY_ADAPTERS[new_state.domain](
hass, smart_home_config, new_state
)
# Determine how entity should be reported on
should_report = False
should_doorbell = False
for interface in alexa_changed_entity.interfaces():
if not should_report and interface.properties_proactively_reported():
should_report = True
if interface.name() == "Alexa.DoorbellEventSource":
should_doorbell = True
break
if not should_report and not should_doorbell:
return
if should_doorbell:
if new_state.state == STATE_ON:
await async_send_doorbell_event_message(
hass, smart_home_config, alexa_changed_entity
)
return
alexa_properties = list(alexa_changed_entity.serialize_properties())
if not checker.async_is_significant_change(
new_state, extra_arg=alexa_properties
):
return
await async_send_changereport_message(
hass, smart_home_config, alexa_changed_entity, alexa_properties
)
return hass.helpers.event.async_track_state_change(
MATCH_ALL, async_entity_state_listener
)
async def async_send_changereport_message(
hass, config, alexa_entity, alexa_properties, *, invalidate_access_token=True
):
"""Send a ChangeReport message for an Alexa entity.
https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#report-state-with-changereport-events
"""
token = await config.async_get_access_token()
headers = {"Authorization": f"Bearer {token}"}
endpoint = alexa_entity.alexa_id()
payload = {
API_CHANGE: {
"cause": {"type": Cause.APP_INTERACTION},
"properties": alexa_properties,
}
}
message = AlexaResponse(name="ChangeReport", namespace="Alexa", payload=payload)
message.set_endpoint_full(token, endpoint)
message_serialized = message.serialize()
session = hass.helpers.aiohttp_client.async_get_clientsession()
try:
with async_timeout.timeout(DEFAULT_TIMEOUT):
response = await session.post(
config.endpoint,
headers=headers,
json=message_serialized,
allow_redirects=True,
)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Timeout sending report to Alexa")
return
response_text = await response.text()
_LOGGER.debug("Sent: %s", json.dumps(message_serialized))
_LOGGER.debug("Received (%s): %s", response.status, response_text)
if response.status == HTTPStatus.ACCEPTED:
return
response_json = json.loads(response_text)
if (
response_json["payload"]["code"] == "INVALID_ACCESS_TOKEN_EXCEPTION"
and invalidate_access_token
):
config.async_invalidate_access_token()
return await async_send_changereport_message(
hass, config, alexa_entity, alexa_properties, invalidate_access_token=False
)
_LOGGER.error(
"Error when sending ChangeReport to Alexa: %s: %s",
response_json["payload"]["code"],
response_json["payload"]["description"],
)
async def async_send_add_or_update_message(hass, config, entity_ids):
"""Send an AddOrUpdateReport message for entities.
https://developer.amazon.com/docs/device-apis/alexa-discovery.html#add-or-update-report
"""
token = await config.async_get_access_token()
headers = {"Authorization": f"Bearer {token}"}
endpoints = []
for entity_id in entity_ids:
domain = entity_id.split(".", 1)[0]
if domain not in ENTITY_ADAPTERS:
continue
alexa_entity = ENTITY_ADAPTERS[domain](hass, config, hass.states.get(entity_id))
endpoints.append(alexa_entity.serialize_discovery())
payload = {"endpoints": endpoints, "scope": {"type": "BearerToken", "token": token}}
message = AlexaResponse(
name="AddOrUpdateReport", namespace="Alexa.Discovery", payload=payload
)
message_serialized = message.serialize()
session = hass.helpers.aiohttp_client.async_get_clientsession()
return await session.post(
config.endpoint, headers=headers, json=message_serialized, allow_redirects=True
)
async def async_send_delete_message(hass, config, entity_ids):
"""Send an DeleteReport message for entities.
https://developer.amazon.com/docs/device-apis/alexa-discovery.html#deletereport-event
"""
token = await config.async_get_access_token()
headers = {"Authorization": f"Bearer {token}"}
endpoints = []
for entity_id in entity_ids:
domain = entity_id.split(".", 1)[0]
if domain not in ENTITY_ADAPTERS:
continue
endpoints.append({"endpointId": generate_alexa_id(entity_id)})
payload = {"endpoints": endpoints, "scope": {"type": "BearerToken", "token": token}}
message = AlexaResponse(
name="DeleteReport", namespace="Alexa.Discovery", payload=payload
)
message_serialized = message.serialize()
session = hass.helpers.aiohttp_client.async_get_clientsession()
return await session.post(
config.endpoint, headers=headers, json=message_serialized, allow_redirects=True
)
async def async_send_doorbell_event_message(hass, config, alexa_entity):
"""Send a DoorbellPress event message for an Alexa entity.
https://developer.amazon.com/en-US/docs/alexa/device-apis/alexa-doorbelleventsource.html
"""
token = await config.async_get_access_token()
headers = {"Authorization": f"Bearer {token}"}
endpoint = alexa_entity.alexa_id()
message = AlexaResponse(
name="DoorbellPress",
namespace="Alexa.DoorbellEventSource",
payload={
"cause": {"type": Cause.PHYSICAL_INTERACTION},
"timestamp": dt_util.utcnow().strftime(DATE_FORMAT),
},
)
message.set_endpoint_full(token, endpoint)
message_serialized = message.serialize()
session = hass.helpers.aiohttp_client.async_get_clientsession()
try:
with async_timeout.timeout(DEFAULT_TIMEOUT):
response = await session.post(
config.endpoint,
headers=headers,
json=message_serialized,
allow_redirects=True,
)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Timeout sending report to Alexa")
return
response_text = await response.text()
_LOGGER.debug("Sent: %s", json.dumps(message_serialized))
_LOGGER.debug("Received (%s): %s", response.status, response_text)
if response.status == HTTPStatus.ACCEPTED:
return
response_json = json.loads(response_text)
_LOGGER.error(
"Error when sending DoorbellPress event to Alexa: %s: %s",
response_json["payload"]["code"],
response_json["payload"]["description"],
)
|
|
"""
Support for interacting with and controlling the cmus music player.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.cmus/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE,
SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_PLAY,
SUPPORT_VOLUME_SET, SUPPORT_PLAY_MEDIA, SUPPORT_SEEK, PLATFORM_SCHEMA,
MediaPlayerDevice)
from homeassistant.const import (
STATE_OFF, STATE_PAUSED, STATE_PLAYING, CONF_HOST, CONF_NAME, CONF_PORT,
CONF_PASSWORD)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pycmus==0.1.0']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'cmus'
DEFAULT_PORT = 3000
SUPPORT_CMUS = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_TURN_OFF | \
SUPPORT_TURN_ON | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_PLAY_MEDIA | SUPPORT_SEEK | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Inclusive(CONF_HOST, 'remote'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'remote'): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
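# Example configuration.yaml entry (host/password/name are illustrative; host and
# password must be given together per the Inclusive schema above, or both omitted
# for a local cmus instance):
#
# media_player:
#   - platform: cmus
#     host: 192.168.1.20
#     password: mysecret
#     port: 3000
#     name: bedroom_cmus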
def setup_platform(hass, config, add_devices, discover_info=None):
"""Setup the CMUS platform."""
from pycmus import exceptions
host = config.get(CONF_HOST)
password = config.get(CONF_PASSWORD)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
try:
cmus_remote = CmusDevice(host, password, port, name)
except exceptions.InvalidPassword:
_LOGGER.error("The provided password was rejected by cmus")
return False
add_devices([cmus_remote])
class CmusDevice(MediaPlayerDevice):
"""Representation of a running cmus."""
# pylint: disable=no-member
def __init__(self, server, password, port, name):
"""Initialize the CMUS device."""
from pycmus import remote
if server:
self.cmus = remote.PyCmus(
server=server, password=password, port=port)
auto_name = 'cmus-{}'.format(server)
else:
self.cmus = remote.PyCmus()
auto_name = 'cmus-local'
self._name = name or auto_name
self.status = {}
self.update()
def update(self):
"""Get the latest data and update the state."""
status = self.cmus.get_status_dict()
if not status:
_LOGGER.warning("Recieved no status from cmus")
else:
self.status = status
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the media state."""
if 'status' not in self.status:
self.update()
if self.status['status'] == 'playing':
return STATE_PLAYING
elif self.status['status'] == 'paused':
return STATE_PAUSED
else:
return STATE_OFF
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self.status.get('file')
@property
def media_content_type(self):
"""Content type of the current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self.status.get('duration')
@property
def media_title(self):
"""Title of current playing media."""
return self.status['tag'].get('title')
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self.status['tag'].get('artist')
@property
def media_track(self):
"""Track number of current playing media, music track only."""
return self.status['tag'].get('tracknumber')
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self.status['tag'].get('album')
@property
def media_album_artist(self):
"""Album artist of current playing media, music track only."""
return self.status['tag'].get('albumartist')
@property
def volume_level(self):
"""Return the volume level."""
left = self.status['set'].get('vol_left')[0]
right = self.status['set'].get('vol_right')[0]
if left != right:
volume = float(left + right) / 2
else:
volume = left
return int(volume)/100
@property
def supported_media_commands(self):
"""Flag of media commands that are supported."""
return SUPPORT_CMUS
def turn_off(self):
"""Service to send the CMUS the command to stop playing."""
self.cmus.player_stop()
def turn_on(self):
"""Service to send the CMUS the command to start playing."""
self.cmus.player_play()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self.cmus.set_volume(int(volume * 100))
def volume_up(self):
"""Function to send CMUS the command for volume up."""
left = self.status['set'].get('vol_left')
right = self.status['set'].get('vol_right')
if left != right:
current_volume = float(left + right) / 2
else:
current_volume = left
if current_volume <= 100:
self.cmus.set_volume(int(current_volume) + 5)
def volume_down(self):
"""Function to send CMUS the command for volume down."""
left = self.status['set'].get('vol_left')
right = self.status['set'].get('vol_right')
if left != right:
current_volume = float(left + right) / 2
else:
current_volume = left
if current_volume <= 100:
self.cmus.set_volume(int(current_volume) - 5)
def play_media(self, media_type, media_id, **kwargs):
"""Send the play command."""
if media_type in [MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST]:
self.cmus.player_play_file(media_id)
else:
_LOGGER.error(
"Invalid media type %s. Only %s and %s are supported",
media_type, MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST)
def media_pause(self):
"""Send the pause command."""
self.cmus.player_pause()
def media_next_track(self):
"""Send next track command."""
self.cmus.player_next()
def media_previous_track(self):
"""Send next track command."""
self.cmus.player_prev()
def media_seek(self, position):
"""Send seek command."""
self.cmus.seek(position)
def media_play(self):
"""Send the play command."""
self.cmus.player_play()
def media_stop(self):
"""Send the stop command."""
self.cmus.player_stop()
|
|
import json
from mock import patch
from django.test import TestCase, RequestFactory, Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Group
from django.contrib.messages.storage.fallback import FallbackStorage
from django.db import transaction
from django.utils.http import int_to_base36
from mezzanine.utils.email import default_token_generator
from rest_framework import status
from hs_core import hydroshare
from hs_core.views import create_user_group, update_user_group, share_group_with_user, unshare_group_with_user, \
make_group_membership_request, act_on_group_membership_request, share_resource_with_group, \
unshare_resource_with_group, delete_user_group, restore_user_group
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.models import PrivilegeCodes
class TestGroup(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(TestGroup, self).setUp()
patcher_email_send_call = patch('hs_core.views.send_action_to_take_email')
patcher_email_send_call.start()
self.addCleanup(patcher_email_send_call.stop)
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.username = 'john'
self.password = 'jhmypassword'
self.john = hydroshare.create_account(
'[email protected]',
username=self.username,
first_name='John',
last_name='Clarson',
superuser=False,
password=self.password,
groups=[]
)
self.mike = hydroshare.create_account(
'[email protected]',
username='mike',
first_name='Mike',
last_name='Jensen',
superuser=False,
groups=[]
)
# create a resource for sharing with group
self.resource = hydroshare.create_resource(resource_type='GenericResource',
owner=self.john,
title='Test Resource',
metadata=[]
)
self.factory = RequestFactory()
def test_create_group(self):
# TODO: test with picture file upload for the group
url = reverse('create_user_group')
# test passing privacy_level = 'public'
grp_data = {'name': 'Test Group-1', 'description': 'This is a cool group-1', 'privacy_level': 'public'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
response = create_user_group(request)
new_group = Group.objects.filter(name='Test Group-1').first()
self.assertNotEqual(new_group, None)
self.assertEqual(new_group.gaccess.description, 'This is a cool group-1')
self.assertEqual(new_group.gaccess.public, True)
self.assertEqual(new_group.gaccess.discoverable, True)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], reverse('group', args=[new_group.id]))
# test passing privacy_level = 'private'
grp_data = {'name': 'Test Group-2', 'description': 'This is a cool group-2', 'privacy_level': 'private'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
create_user_group(request)
new_group = Group.objects.filter(name='Test Group-2').first()
self.assertNotEqual(new_group, None)
self.assertEqual(new_group.gaccess.description, 'This is a cool group-2')
self.assertEqual(new_group.gaccess.public, False)
self.assertEqual(new_group.gaccess.discoverable, False)
# test passing privacy_level = 'discoverable'
grp_data = {'name': 'Test Group-3', 'description': 'This is a cool group-3', 'privacy_level': 'discoverable'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
create_user_group(request)
new_group = Group.objects.filter(name='Test Group-3').first()
self.assertNotEqual(new_group, None)
self.assertEqual(new_group.gaccess.description, 'This is a cool group-3')
self.assertEqual(new_group.gaccess.public, False)
self.assertEqual(new_group.gaccess.discoverable, True)
def test_group_create_failures(self):
# test that post data for 'name' and 'description' are required
# for creating a group. Also post data must have a key 'privacy_level'
# with one of these values ('public', 'private', 'discoverable'). Duplicate group names are
# not allowed
# at this point there should be only one group
self.assertEqual(Group.objects.count(), 1)
url = reverse('create_user_group')
# test 'name' is required
grp_data = {'description': 'This is a cool group', 'privacy_level': 'public'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
response = create_user_group(request)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
new_group = Group.objects.filter(gaccess__description='This is a cool group').first()
self.assertEqual(new_group, None)
# at this point there should be only one group
self.assertEqual(Group.objects.count(), 1)
# test 'description' is required
grp_data = {'name': 'Test Group', 'privacy_level': 'public'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
response = create_user_group(request)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
new_group = Group.objects.filter(name='Test Group').first()
self.assertEqual(new_group, None)
# at this point there should be only one group
self.assertEqual(Group.objects.count(), 1)
# test 'privacy_level' is required
grp_data = {'name': 'Test Group', 'description': 'This is a cool group'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
response = create_user_group(request)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
new_group = Group.objects.filter(name='Test Group').first()
self.assertEqual(new_group, None)
# at this point there should be only one group
self.assertEqual(Group.objects.count(), 1)
# test 'privacy_level' should have one of these values (public, private, discoverable)
grp_data = {'name': 'Test Group', 'description': 'This is a cool group', 'privacy_level': 'some-level'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
response = create_user_group(request)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
new_group = Group.objects.filter(name='Test Group').first()
self.assertEqual(new_group, None)
# at this point there should be only one group
self.assertEqual(Group.objects.count(), 1)
# test that duplicate group names are not allowed
grp_data = {'name': 'Test Group', 'description': 'This is a cool group', 'privacy_level': 'private'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
response = create_user_group(request)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
# at this point there should be 2 groups
self.assertEqual(Group.objects.count(), 2)
# create a group with duplicate name
grp_data = {'name': 'Test Group', 'description': 'This is a very cool group', 'privacy_level': 'private'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
# run this inside an atomic block: the duplicate group name raises a database error,
# and without a savepoint the outer test transaction is left broken, so the last
# query in this test would fail
with transaction.atomic():
response = create_user_group(request)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
# at this point there should still be 2 groups
self.assertEqual(Group.objects.count(), 2)
def test_update_group(self):
# TODO: test with picture file upload for the group
# first create a group to test updating group
url = reverse('create_user_group')
grp_data = {'name': 'Test Group-1', 'description': 'This is a cool group-1',
'purpose': 'This group has no purpose', 'privacy_level': 'public'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
response = create_user_group(request)
new_group = Group.objects.filter(name='Test Group-1').first()
self.assertEqual(new_group.gaccess.active, True)
self.assertNotEqual(new_group, None)
self.assertEqual(new_group.gaccess.description, 'This is a cool group-1')
self.assertEqual(new_group.gaccess.purpose, 'This group has no purpose')
self.assertEqual(new_group.gaccess.public, True)
self.assertEqual(new_group.gaccess.discoverable, True)
self.assertEqual(new_group.gaccess.active, True)
self.assertEqual(new_group.gaccess.shareable, True)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], reverse('group', args=[new_group.id]))
# now test updating new_group
url_params = {'group_id': new_group.id}
url = reverse('update_user_group', kwargs=url_params)
# update name, description, purpose
grp_data = {'name': 'Test Group-2', 'description': 'This is a cool group-2',
'purpose': 'This group now has purpose', 'privacy_level': 'public'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
response = update_user_group(request, group_id=new_group.id)
new_group = Group.objects.filter(name='Test Group-2').first()
self.assertNotEqual(new_group, None)
self.assertEqual(new_group.gaccess.description, 'This is a cool group-2')
self.assertEqual(new_group.gaccess.purpose, 'This group now has purpose')
self.assertEqual(new_group.gaccess.public, True)
self.assertEqual(new_group.gaccess.discoverable, True)
self.assertEqual(new_group.gaccess.active, True)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
# update group to remove purpose
grp_data = {'name': 'Test Group-2', 'description': 'This is a cool group-2',
'privacy_level': 'public'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
response = update_user_group(request, group_id=new_group.id)
new_group = Group.objects.filter(name='Test Group-2').first()
self.assertNotEqual(new_group, None)
self.assertEqual(new_group.gaccess.description, 'This is a cool group-2')
self.assertEqual(new_group.gaccess.purpose, '')
self.assertEqual(new_group.gaccess.public, True)
self.assertEqual(new_group.gaccess.discoverable, True)
self.assertEqual(new_group.gaccess.active, True)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
# update privacy_level (set to private) - this sets public to false and discoverable to false
grp_data = {'name': 'Test Group-2', 'description': 'This is a cool group-2',
'privacy_level': 'private'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
response = update_user_group(request, group_id=new_group.id)
new_group = Group.objects.filter(name='Test Group-2').first()
self.assertNotEqual(new_group, None)
self.assertEqual(new_group.gaccess.description, 'This is a cool group-2')
self.assertEqual(new_group.gaccess.purpose, '')
self.assertEqual(new_group.gaccess.public, False)
self.assertEqual(new_group.gaccess.discoverable, False)
self.assertEqual(new_group.gaccess.active, True)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
# update privacy_level (set to public) - this sets public to true and discoverable to true
grp_data = {'name': 'Test Group-2', 'description': 'This is a cool group-2',
'privacy_level': 'public'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
response = update_user_group(request, group_id=new_group.id)
new_group = Group.objects.filter(name='Test Group-2').first()
self.assertNotEqual(new_group, None)
self.assertEqual(new_group.gaccess.description, 'This is a cool group-2')
self.assertEqual(new_group.gaccess.purpose, '')
self.assertEqual(new_group.gaccess.public, True)
self.assertEqual(new_group.gaccess.discoverable, True)
self.assertEqual(new_group.gaccess.active, True)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
# update privacy_level (set to discoverable) - this should set discoverable to
# true and public to false
grp_data = {'name': 'Test Group-2', 'description': 'This is a cool group-2',
'privacy_level': 'discoverable'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
response = update_user_group(request, group_id=new_group.id)
new_group = Group.objects.filter(name='Test Group-2').first()
self.assertNotEqual(new_group, None)
self.assertEqual(new_group.gaccess.description, 'This is a cool group-2')
self.assertEqual(new_group.gaccess.purpose, '')
self.assertEqual(new_group.gaccess.public, False)
self.assertEqual(new_group.gaccess.discoverable, True)
self.assertEqual(new_group.gaccess.active, True)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
def test_delete_restore_group(self):
# test a group can be deleted or restored
# first create a group to test deleting and restoring it
url = reverse('create_user_group')
grp_data = {'name': 'Test Group-1', 'description': 'This is a cool group-1',
'purpose': 'This group has no purpose', 'privacy_level': 'public'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
create_user_group(request)
new_group = Group.objects.filter(name='Test Group-1').first()
self.assertEqual(new_group.gaccess.active, True)
post_data = {'group_id': new_group.id}
url = reverse('delete_user_group', kwargs=post_data)
request = self.factory.post(url, data=post_data)
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
response = delete_user_group(request, group_id=new_group.id)
new_group = Group.objects.filter(name='Test Group-1').first()
self.assertEqual(new_group.gaccess.active, False)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
# test undeleting the group
url = reverse('restore_user_group', kwargs=post_data)
request = self.factory.post(url, data=post_data)
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
response = restore_user_group(request, group_id=new_group.id)
new_group = Group.objects.filter(name='Test Group-1').first()
self.assertEqual(new_group.gaccess.active, True)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
def test_group_update_failure(self):
# test that post data for 'name', 'description', and 'privacy_level' are required
# when updating a group
# first create a group to test updating group
url = reverse('create_user_group')
grp_data = {'name': 'Test Group-1', 'description': 'This is a cool group-1',
'purpose': 'This group has purpose', 'privacy_level': 'public'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
response = create_user_group(request)
new_group = Group.objects.filter(name='Test Group-1').first()
# now test updating new_group
url_params = {'group_id': new_group.id}
url = reverse('update_user_group', kwargs=url_params)
# test name is required -> update should fail
grp_data = {'description': 'This is a cool group-2', 'purpose': 'This group has purpose'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
response = update_user_group(request, group_id=new_group.id)
# the description has not changed, which proves the update failed
self.assertNotEqual(new_group.gaccess.description, 'This is a cool group-2')
self.assertEqual(new_group.gaccess.description, 'This is a cool group-1')
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
# test description is required -> update should fail
grp_data = {'name': 'Test Group-2', 'purpose': 'This group has purpose',
'privacy_level': 'public'}
request = self.factory.post(url, data=grp_data)
self._update_failure(new_group, request)
# test privacy_level is required -> update should fail
grp_data = {'name': 'Test Group-2', 'description': 'This is a cool group',
'purpose': 'This group has purpose'}
request = self.factory.post(url, data=grp_data)
self._update_failure(new_group, request)
# test that trying to update the group with a duplicate name ('Hydroshare Author') should fail
grp_data = {'name': 'Hydroshare Author', 'description': 'This is a cool group-1',
'purpose': 'This group has purpose'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
response = update_user_group(request, group_id=new_group.id)
# the name has not changed, which proves the update failed
self.assertEqual(Group.objects.filter(name='Hydroshare Author').count(), 1)
updated_group = Group.objects.filter(name='Hydroshare Author').first()
self.assertNotEqual(updated_group.id, new_group.id)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
def test_share_group_with_user(self):
# create a group to share
new_group = self._create_group()
# check mike is not a member of the group
self.assertNotIn(self.mike, new_group.gaccess.members)
# John to share 'Test Group' with user Mike with 'view' privilege
self._share_group_with_user(new_group, 'view')
# John to share 'Test Group' with user Mike with 'edit' privilege
self._share_group_with_user(new_group, 'edit')
# John shares 'Test Group' with user Mike with 'edit' privilege again (re-sharing must not fail)
self._share_group_with_user(new_group, 'edit')
# John to share 'Test Group' with user Mike with 'owner' privilege
self._share_group_with_user(new_group, 'owner')
def test_share_group_with_user_invalid_privilege(self):
# a group can be shared with a user with one of these privileges: view, edit, or owner
# create a group to share
new_group = self._create_group()
# John to share 'Test Group' with user Mike with invalid privilege
url_params = {'group_id': new_group.id, 'user_id': self.mike.id, 'privilege': "badprivilege"}
url = reverse('share_group_with_user', kwargs=url_params)
request = self.factory.post(url)
request.META['HTTP_REFERER'] = "/some_url/"
self._set_request_message_attributes(request)
request.user = self.john
response = share_group_with_user(request, group_id=new_group.id, user_id=self.mike.id, privilege="badprivilege")
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
# check mike is not a member of the group
self.assertNotIn(self.mike, new_group.gaccess.members)
def test_unshare_group_with_user(self):
# create a group to share
new_group = self._create_group()
# John to share 'Test Group' with user Mike with 'view' privilege
self._share_group_with_user(new_group, 'view')
# check mike is a member of the group
self.assertIn(self.mike, new_group.gaccess.members)
# unshare test group with mike
url_params = {'group_id': new_group.id, 'user_id': self.mike.id}
url = reverse('unshare_group_with_user', kwargs=url_params)
request = self.factory.post(url)
request.META['HTTP_REFERER'] = "/some_url/"
self._set_request_message_attributes(request)
request.user = self.john
response = unshare_group_with_user(request, group_id=new_group.id, user_id=self.mike.id)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
# check mike is not a member of the group
self.assertNotIn(self.mike, new_group.gaccess.members)
def test_share_resource_with_group(self):
# create a group to share with a resource
new_group = self._create_group()
# let group owner john share resource with view privilege
response = self._share_resource_with_group(group=new_group, privilege='view')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_content = json.loads(response.content)
self.assertEqual(response_content['status'], 'success')
self.assertIn(self.resource, new_group.gaccess.view_resources)
# share resource with group with edit privilege
# first unshare resource with group
self.john.uaccess.unshare_resource_with_group(self.resource, new_group)
self.assertNotIn(self.resource, new_group.gaccess.view_resources)
response = self._share_resource_with_group(group=new_group, privilege='edit')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_content = json.loads(response.content)
self.assertEqual(response_content['status'], 'success')
self.assertIn(self.resource, new_group.gaccess.edit_resources)
# test a group can't have owner privilege over a resource
# first unshare resource with group
self.john.uaccess.unshare_resource_with_group(self.resource, new_group)
self.assertNotIn(self.resource, new_group.gaccess.view_resources)
response = self._share_resource_with_group(group=new_group, privilege='owner')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
response_content = json.loads(response.content)
self.assertEqual(response_content['status'], 'error')
self.assertNotIn(self.resource, new_group.gaccess.view_resources)
def test_unshare_resource_with_group(self):
# create a group to share/unshare with a resource
new_group = self._create_group()
# first share the resource with the group
self.john.uaccess.share_resource_with_group(self.resource, new_group, PrivilegeCodes.VIEW)
self.assertIn(self.resource, new_group.gaccess.view_resources)
# now unshare the resource with the group
url_params = {'shortkey': self.resource.short_id, 'group_id': new_group.id}
url = reverse('unshare_resource_with_group', kwargs=url_params)
request = self.factory.post(url)
request.user = self.john
response = unshare_resource_with_group(request, shortkey=self.resource.short_id,
group_id=new_group.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_content = json.loads(response.content)
self.assertEqual(response_content['status'], 'success')
self.assertNotIn(self.resource, new_group.gaccess.view_resources)
# test that a group member (non-owner) unsharing a resource with a group
# returns a response status of 'error' and the resource remains shared
# first make mike a member of the group
self.john.uaccess.share_group_with_user(new_group, self.mike, PrivilegeCodes.VIEW)
self.assertIn(new_group, self.mike.uaccess.view_groups)
# let john share the resource with group
self.john.uaccess.share_resource_with_group(self.resource, new_group, PrivilegeCodes.VIEW)
self.assertIn(self.resource, new_group.gaccess.view_resources)
# let mike unshare the resource with group
request.user = self.mike
response = unshare_resource_with_group(request, shortkey=self.resource.short_id,
group_id=new_group.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_content = json.loads(response.content)
self.assertEqual(response_content['status'], 'error')
self.assertIn(self.resource, new_group.gaccess.view_resources)
def test_make_group_membership_request(self):
# test that user can make request to join a group
# create a group
new_group = self._create_group()
# now there should be no GroupMembershipRequest associated with Mike
self.assertEqual(self.mike.uaccess.group_membership_requests.count(), 0)
# test that user mike can make a request to join the new_group
url_params = {'group_id': new_group.id}
url = reverse('make_group_membership_request', kwargs=url_params)
request = self.factory.post(url)
self._set_request_message_attributes(request)
request.META['HTTP_REFERER'] = "/some_url/"
request.user = self.mike
response = make_group_membership_request(request, group_id=new_group.id)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
# now there should be one GroupMembershipRequest associated with Mike
self.assertEqual(self.mike.uaccess.group_membership_requests.count(), 1)
# test that a user making a request more than once for the same group should fail
response = make_group_membership_request(request, group_id=new_group.id)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
# there should be still one GroupMembershipRequest associated with Mike
self.assertEqual(self.mike.uaccess.group_membership_requests.count(), 1)
def test_make_group_membership_invitation(self):
# test group owner inviting a user to join a group
# create a group
new_group = self._create_group()
# there should be no GroupMembershipRequest associated with John
self.assertEqual(self.john.uaccess.group_membership_requests.count(), 0)
# test that group owner john can invite mike to join the new_group
url_params = {'group_id': new_group.id, 'user_id': self.mike.id}
url = reverse('make_group_membership_request', kwargs=url_params)
request = self.factory.post(url)
self._set_request_message_attributes(request)
request.META['HTTP_REFERER'] = "/some_url/"
request.user = self.john
response = make_group_membership_request(request, group_id=new_group.id, user_id=self.mike.id)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
# now there should be one GroupMembershipRequest associated with John
self.assertEqual(self.john.uaccess.group_membership_requests.count(), 1)
# test that a group owner inviting the same user to the same group more than once should fail
response = make_group_membership_request(request, group_id=new_group.id, user_id=self.mike.id)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
# there should be still one GroupMembershipRequest associated with John
self.assertEqual(self.john.uaccess.group_membership_requests.count(), 1)
def test_act_on_group_membership_request(self):
# test group owner accepting/declining a request from a user to join a group
# let user mike make a request
# create a group
new_group = self._create_group()
# let user mike make a request to join the new_group
membership_request = self._generate_user_request_to_join_group(new_group)
# test john can accept the request
# check mike is not a member of the group yet
self.assertNotIn(self.mike, new_group.gaccess.members)
# john accepts mike's request
self._owner_act_on_request(membership_request, 'accept')
# check mike is now a member of the group
self.assertIn(self.mike, new_group.gaccess.members)
# test the owner declining the user's request
# remove mike from group
self.john.uaccess.unshare_group_with_user(new_group, self.mike)
# check mike is no longer a member of the group
self.assertNotIn(self.mike, new_group.gaccess.members)
# let mike again make a request
membership_request = self._generate_user_request_to_join_group(new_group)
# let john decline mike's request
self._owner_act_on_request(membership_request, 'decline')
# check mike is not a member of the group
self.assertNotIn(self.mike, new_group.gaccess.members)
def test_act_on_group_membership_invitation(self):
# test user invited to join a group can accept/decline the invitation
# create a group
new_group = self._create_group()
# let john invite mike
membership_request = self._generate_owner_invitation_to_join_group(new_group)
# check mike is not a member of the group yet
self.assertNotIn(self.mike, new_group.gaccess.members)
# test mike becomes a member of the group after accepting john's invitation
self._user_act_on_invitation(membership_request, 'accept')
# check mike is now a member of the group
self.assertIn(self.mike, new_group.gaccess.members)
# test mike can decline invitation to join a group
# remove mike from group
self.john.uaccess.unshare_group_with_user(new_group, self.mike)
# check mike is no longer a member of the group
self.assertNotIn(self.mike, new_group.gaccess.members)
# let john invite mike again
membership_request = self._generate_owner_invitation_to_join_group(new_group)
# let mike decline john's invitation
self._user_act_on_invitation(membership_request, 'decline')
# check mike is still not a member of the group
self.assertNotIn(self.mike, new_group.gaccess.members)
def test_group_membership_acceptance_via_email_link(self):
# here we are testing the group_membership view function, which is invoked
# when the user clicks the link provided in the email
# create a group
new_group = self._create_group()
# test the user accepting the group owner's invitation
# check mike is not yet a member of the group
self.assertNotIn(self.mike, new_group.gaccess.members)
# let john invite mike to join group
membership_request = self.john.uaccess.create_group_membership_request(new_group, self.mike)
# create the link that mike should find in his email
uidb36 = int_to_base36(self.mike.id)
token = default_token_generator.make_token(self.mike)
url_params = {"uidb36": uidb36, "token": token, "membership_request_id": membership_request.id}
url = reverse('group_membership', kwargs=url_params)
# due to session requirement of the view being tested, using the Client class
client = Client()
# let mike click the link in the email
response = client.get(url)
redirect_url = '/group/{}/'.format(new_group.id)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertTrue(response['Location'].endswith(redirect_url))
# check mike is now a member of the group
self.assertIn(self.mike, new_group.gaccess.members)
# test the group owner (john) accepting user mike's request to join a group
# remove mike from group
self.john.uaccess.unshare_group_with_user(new_group, self.mike)
# check mike is no longer a member of the group
self.assertNotIn(self.mike, new_group.gaccess.members)
# let mike make a request to join group
membership_request = self.mike.uaccess.create_group_membership_request(new_group)
# create the link that john should find in his email
uidb36 = int_to_base36(self.john.id)
token = default_token_generator.make_token(self.john)
url_params = {"uidb36": uidb36, "token": token, "membership_request_id": membership_request.id}
url = reverse('group_membership', kwargs=url_params)
# let john click the link
response = client.get(url)
redirect_url = '/group/{}/'.format(new_group.id)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertTrue(response['Location'].endswith(redirect_url))
# check mike is now a member of the group
self.assertIn(self.mike, new_group.gaccess.members)
def _update_failure(self, group, request):
group_name = group.name
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
response = update_user_group(request, group_id=group.id)
# the name has not changed, which proves the update failed
updated_group = Group.objects.filter(name='Test Group-2').first()
self.assertEqual(updated_group, None)
original_group = Group.objects.filter(name=group_name).first()
self.assertNotEqual(original_group, None)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
def _share_resource_with_group(self, group, privilege):
url_params = {'shortkey': self.resource.short_id, 'privilege': privilege, 'group_id': group.id}
url = reverse('share_resource_with_group', kwargs=url_params)
request = self.factory.post(url)
self._set_request_message_attributes(request)
request.user = self.john
response = share_resource_with_group(request, shortkey=self.resource.short_id, privilege=privilege,
group_id=group.id)
return response
def _owner_act_on_request(self, membership_request, action):
url_params = {'membership_request_id': membership_request.id, 'action': action}
url = reverse('act_on_group_membership_request', kwargs=url_params)
request = self.factory.post(url)
self._set_request_message_attributes(request)
request.user = self.john
request.META['HTTP_REFERER'] = "/some_url/"
response = act_on_group_membership_request(request, membership_request_id=membership_request.id,
action=action)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
def _user_act_on_invitation(self, membership_request, action):
url_params = {'membership_request_id': membership_request.id, 'action': action}
url = reverse('act_on_group_membership_request', kwargs=url_params)
request = self.factory.post(url)
self._set_request_message_attributes(request)
request.user = self.mike
request.META['HTTP_REFERER'] = "/some_url/"
response = act_on_group_membership_request(request, membership_request_id=membership_request.id,
action=action)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
def _generate_user_request_to_join_group(self, group):
url_params = {'group_id': group.id}
url = reverse('make_group_membership_request', kwargs=url_params)
request = self.factory.post(url)
self._set_request_message_attributes(request)
request.META['HTTP_REFERER'] = "/some_url/"
request.user = self.mike
make_group_membership_request(request, group_id=group.id)
membership_request = self.mike.uaccess.group_membership_requests.first()
return membership_request
def _generate_owner_invitation_to_join_group(self, group):
url_params = {'group_id': group.id, 'user_id': self.mike.id}
url = reverse('make_group_membership_request', kwargs=url_params)
request = self.factory.post(url)
self._set_request_message_attributes(request)
request.META['HTTP_REFERER'] = "/some_url/"
request.user = self.john
make_group_membership_request(request, group_id=group.id, user_id=self.mike.id)
membership_request = self.john.uaccess.group_membership_requests.first()
return membership_request
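# Helper: create a public group named 'Test Group' as john and return it.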
def _create_group(self):
url = reverse('create_user_group')
# test passing privacy_level = 'public'
grp_data = {'name': 'Test Group', 'description': 'This is a cool group', 'privacy_level': 'public'}
request = self.factory.post(url, data=grp_data)
self._set_request_message_attributes(request)
request.user = self.john
create_user_group(request)
new_group = Group.objects.filter(name='Test Group').first()
return new_group
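# Helper: john shares the given group with mike at the specified privilege, then the
# expected membership and privilege are asserted.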
def _share_group_with_user(self, group, privilege):
url_params = {'group_id': group.id, 'user_id': self.mike.id, 'privilege': privilege}
url = reverse('share_group_with_user', kwargs=url_params)
request = self.factory.post(url)
request.META['HTTP_REFERER'] = "/some_url/"
self._set_request_message_attributes(request)
request.user = self.john
response = share_group_with_user(request, group_id=group.id, user_id=self.mike.id, privilege=privilege)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
# check mike is a member of the group
self.assertIn(self.mike, group.gaccess.members)
# check mike has the specified privilege over the group
if privilege == 'view':
self.assertIn(self.mike, group.gaccess.get_users_with_explicit_access(PrivilegeCodes.VIEW))
elif privilege == 'edit':
self.assertIn(self.mike, group.gaccess.get_users_with_explicit_access(PrivilegeCodes.CHANGE))
else:
self.assertIn(self.mike, group.gaccess.get_users_with_explicit_access(PrivilegeCodes.OWNER))
def _set_request_message_attributes(self, request):
# the following lines prevent errors in unit tests, since the views being tested use the
# messaging middleware
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
|
|
import StringIO
import csv
import datetime
import json
import logging
import shutil
from stat import S_IRUSR
import pytest
from retrying import retry
from subprocess import Popen
import mazerunner
import os
from mazerunner.api_client import Service, AlertPolicy, Decoy, Breadcrumb, \
DeploymentGroup, Endpoint, CIDRMapping, BackgroundTask, AuditLogLine, ISO_TIME_FORMAT
from mazerunner.exceptions import ValidationError, ServerError, BadParamError, \
InvalidInstallMethodError
from utils import TimeoutException, wait_until
CLEAR_SYSTEM_ERROR_MESSAGE = 'System must be clean before running this test. Use the '\
'--initial_clean flag to do this automatically'
ENDPOINT_IP_PARAM = 'endpoint_ip'
ENDPOINT_USERNAME_PARAM = 'endpoint_username'
ENDPOINT_PASSWORD_PARAM = 'endpoint_password'
CODE_EXECUTION_ALERT_TYPE = 'code'
FORENSIC_DATA_ALERT_TYPE = 'forensic_puller'
MAZERUNNER_IP_ADDRESS_PARAM = 'ip_address'
API_ID_PARAM = 'id'
API_SECRET_PARAM = 'secret'
MAZERUNNER_CERTIFICATE_PATH_PARAM = 'mazerunner_certificate_path'
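# Entity ids expected to exist on a clean system, keyed by collection model class;
# here only deployment group 1 is expected to pre-exist.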
ENTITIES_CONFIGURATION = {
Decoy: [],
Service: [],
Breadcrumb: [],
DeploymentGroup: [1],
Endpoint: [],
CIDRMapping: [],
BackgroundTask: []
}
TEST_DEPLOYMENTS_FILE_PATH = os.path.join(os.path.dirname(__file__), 'test_deployments/dep.zip')
TEST_DEPLOYMENTS_FOLDER_PATH = os.path.dirname(TEST_DEPLOYMENTS_FILE_PATH)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("APITest")
logger.setLevel(logging.INFO)
class AlertNotFoundError(RuntimeError):
pass
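# Remove and recreate the local folder used for downloaded deployment packages.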
def _clear_deployment_path():
if os.path.exists(TEST_DEPLOYMENTS_FOLDER_PATH):
shutil.rmtree(TEST_DEPLOYMENTS_FOLDER_PATH)
os.makedirs(TEST_DEPLOYMENTS_FOLDER_PATH)
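# Possible decoy machine_status values reported by the API.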
class MachineStatus(object):
NOT_SEEN = "not_seen"
ACTIVE = "active"
INACTIVE = "inactive"
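# Base class for the API tests: loads connection parameters from the --json_credentials file,
# connects to MazeRunner, verifies the system is clean before each test, and removes any
# entities created during the test afterwards.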
# noinspection PyMethodMayBeStatic,PyAttributeOutsideInit
class APITest(object):
runslow = pytest.mark.skipif(not pytest.config.getoption('--runslow'),
reason='--runslow not activated')
lab_dependent = pytest.mark.skipif(not pytest.config.getoption('--lab_dependent'),
reason='--lab_dependent not activated')
def _assert_clean_system(self):
for entity_collection in self.disposable_entities:
existing_ids = {entity.id for entity in entity_collection}
expected_ids = set(ENTITIES_CONFIGURATION[entity_collection.MODEL_CLASS])
assert existing_ids == expected_ids, CLEAR_SYSTEM_ERROR_MESSAGE
assert len(self.background_tasks) == 0, CLEAR_SYSTEM_ERROR_MESSAGE
def _configure_entities_groups(self):
self.decoys = self.client.decoys
self.services = self.client.services
self.breadcrumbs = self.client.breadcrumbs
self.deployment_groups = self.client.deployment_groups
self.alerts = self.client.alerts
self.alert_policies = self.client.alert_policies
self.cidr_mappings = self.client.cidr_mappings
self.endpoints = self.client.endpoints
self.background_tasks = self.client.background_tasks
self.audit_log = self.client.audit_log
self.disposable_entities = [
self.decoys,
self.services,
self.breadcrumbs,
self.deployment_groups,
self.endpoints,
self.cidr_mappings
]
def setup_method(self, method):
logger.debug("setup_method called")
with open(pytest.config.option.json_credentials, 'rb') as file_reader:
json_dict = json.load(file_reader)
self.lab_endpoint_ip = json_dict.get(ENDPOINT_IP_PARAM)
self.lab_endpoint_user = json_dict.get(ENDPOINT_USERNAME_PARAM)
self.lab_endpoint_password = json_dict.get(ENDPOINT_PASSWORD_PARAM)
self.mazerunner_ip_address = json_dict[MAZERUNNER_IP_ADDRESS_PARAM]
self.api_key = json_dict[API_ID_PARAM]
self.api_secret = json_dict[API_SECRET_PARAM]
self.mazerunner_certificate_path = json_dict[MAZERUNNER_CERTIFICATE_PATH_PARAM]
self.client = mazerunner.connect(
ip_address=self.mazerunner_ip_address,
api_key=self.api_key,
api_secret=self.api_secret,
certificate=self.mazerunner_certificate_path)
self._configure_entities_groups()
if pytest.config.option.initial_clean:
self._destroy_new_entities()
self._assert_clean_system()
self.file_paths_for_cleanup = []
_clear_deployment_path()
def _destroy_new_entities(self):
for entity_collection in self.disposable_entities:
for entity in list(entity_collection):
initial_ids = ENTITIES_CONFIGURATION[entity_collection.MODEL_CLASS]
if entity.id not in initial_ids:
wait_until(entity.delete, exc_list=[ServerError, ValidationError],
check_return_value=False)
self.background_tasks.acknowledge_all_complete()
wait_until(self._assert_clean_system, exc_list=[AssertionError], check_return_value=False)
def teardown_method(self, method):
logger.debug("teardown_method called")
self._destroy_new_entities()
# Clean files:
for file_path in self.file_paths_for_cleanup:
if os.path.exists(file_path):
os.remove(file_path)
_clear_deployment_path()
def valid_decoy_status(self, decoy, wanted_statuses):
logger.debug("valid_decoy_status called")
decoy.load()
return decoy.machine_status in wanted_statuses
def wait_for_decoy_status(self, decoy, wanted_statuses, timeout):
logger.info("wait_for_decoy_status called")
logger.info("waiting up to %d seconds", timeout)
try:
wait_until(
self.valid_decoy_status,
decoy=decoy,
wanted_statuses=wanted_statuses,
check_return_value=True,
total_timeout=timeout,
interval=1,
exc_list=[Exception]
)
return True
except TimeoutException:
return False
def create_decoy(self, decoy_params):
logger.debug("create_decoy called")
# create decoy and wait for initial status:
decoy = self.decoys.create(**decoy_params)
self.wait_for_decoy_status(decoy, wanted_statuses=[MachineStatus.NOT_SEEN], timeout=60*5)
logger.info("decoy {0} created".format(decoy_params["name"]))
return decoy
def power_on_decoy(self, decoy):
decoy.power_on()
self.wait_for_decoy_status(decoy, wanted_statuses=[MachineStatus.ACTIVE], timeout=60 * 10)
logger.info("decoy {0} is active".format(decoy.name))
def power_off_decoy(self, decoy):
decoy.power_off()
self.wait_for_decoy_status(decoy,
wanted_statuses=[MachineStatus.NOT_SEEN, MachineStatus.INACTIVE],
timeout=60 * 10)
logger.info("decoy {0} is inactive".format(decoy.name))
def assert_entity_name_in_collection(self, entity_name, collection):
assert any(entity.name == entity_name for entity in collection)
def assert_entity_name_not_in_collection(self, entity_name, collection):
assert not any(entity.name == entity_name for entity in collection)
SSH_GROUP_NAME = "ssh_deployment_group"
SSH_BREADCRUMB_NAME = "ssh_breadcrumb"
SSH_SERVICE_NAME = "ssh_service"
SSH_DECOY_NAME = "ssh_decoy"
SSH_GROUP_NAME_UPDATE = "ssh_deployment_group_update"
SSH_BREADCRUMB_NAME_UPDATE = "ssh_breadcrumb_update"
SSH_SERVICE_NAME_UPDATE = "ssh_service_update"
SSH_DECOY_NAME_UPDATE = "ssh_decoy_update"
HONEYDOC_GROUP_NAME = "honeydoc_deployment_group"
HONEYDOC_BREADCRUMB_NAME = "honeydoc_breadcrumb"
HONEYDOC_SERVICE_NAME = "honeydoc_service"
HONEYDOC_SERVICE_SERVER_SUFFIX = "server_suffix"
HONEYDOC_DECOY_NAME = "honeydoc_decoy"
OVA_DECOY = "ova_decoy"
class TestGeneralFlow(APITest):
def test_api_setup_campaign(self):
logger.debug("test_api_setup_campaign called")
# Create deployment group:
assert {dg.id for dg in self.deployment_groups} == \
set(ENTITIES_CONFIGURATION[DeploymentGroup])
deployment_group = self.deployment_groups.create(name=SSH_GROUP_NAME,
description="test deployment group")
self.assert_entity_name_in_collection(SSH_GROUP_NAME, self.deployment_groups)
assert {dg.id for dg in self.deployment_groups} == \
set(ENTITIES_CONFIGURATION[DeploymentGroup] + [deployment_group.id])
# Create breadcrumb:
assert len(self.breadcrumbs) == 0
breadcrumb_ssh = self.breadcrumbs.create(name=SSH_BREADCRUMB_NAME,
breadcrumb_type="ssh",
username="ssh_user",
password="ssh_pass",
deployment_groups=[deployment_group.id])
self.assert_entity_name_in_collection(SSH_BREADCRUMB_NAME, self.breadcrumbs)
assert len(self.breadcrumbs) == 1
# Create service:
assert len(self.services) == 0
service_ssh = self.services.create(name=SSH_SERVICE_NAME, service_type="ssh", any_user="false")
self.assert_entity_name_in_collection(SSH_SERVICE_NAME, self.services)
assert len(self.services) == 1
# Create decoy:
assert len(self.decoys) == 0
decoy_ssh = self.create_decoy(dict(name=SSH_DECOY_NAME,
hostname="decoyssh",
os="Ubuntu_1404",
vm_type="KVM"))
self.assert_entity_name_in_collection(SSH_DECOY_NAME, self.decoys)
assert len(self.decoys) == 1
service_ssh.load()
breadcrumb_ssh.load()
assert len(service_ssh.available_decoys) == 1
assert len(service_ssh.attached_decoys) == 0
assert len(service_ssh.available_decoys) == 1
assert len(service_ssh.attached_decoys) == 0
assert len(breadcrumb_ssh.available_services) == 1
assert len(breadcrumb_ssh.attached_services) == 0
# Connect entities:
breadcrumb_ssh.connect_to_service(service_ssh.id)
self.assert_entity_name_in_collection(SSH_SERVICE_NAME, breadcrumb_ssh.attached_services)
service_ssh.connect_to_decoy(decoy_ssh.id)
self.assert_entity_name_in_collection(SSH_DECOY_NAME, service_ssh.attached_decoys)
service_ssh.load()
breadcrumb_ssh.load()
assert len(service_ssh.available_decoys) == 0
assert len(service_ssh.attached_decoys) == 1
assert len(service_ssh.available_decoys) == 0
assert len(service_ssh.attached_decoys) == 1
assert len(breadcrumb_ssh.available_services) == 0
assert len(breadcrumb_ssh.attached_services) == 1
# Power on decoy:
self.power_on_decoy(decoy_ssh)
decoy_ssh.load()
assert decoy_ssh.machine_status == MachineStatus.ACTIVE
# Get deployment file:
deployment_file_path = "mazerunner/test_file"
download_format = "ZIP"
breadcrumb_ssh.deploy(location_with_name=deployment_file_path,
os="Windows",
download_type="install",
download_format=download_format)
self.file_paths_for_cleanup.append("{}.{}".format(deployment_file_path,
download_format.lower()))
# Add / remove deployment group:
breadcrumb_ssh.remove_from_group(deployment_group.id)
self.assert_entity_name_not_in_collection(SSH_GROUP_NAME, breadcrumb_ssh.deployment_groups)
breadcrumb_ssh.add_to_group(deployment_group.id)
self.assert_entity_name_in_collection(SSH_GROUP_NAME, breadcrumb_ssh.deployment_groups)
# Edit deployment group:
deployment_group.update(name=SSH_GROUP_NAME_UPDATE, description="test group")
self.assert_entity_name_in_collection(SSH_GROUP_NAME_UPDATE, self.deployment_groups)
self.assert_entity_name_not_in_collection(SSH_GROUP_NAME, self.deployment_groups)
deployment_group.partial_update(name=SSH_GROUP_NAME)
self.assert_entity_name_in_collection(SSH_GROUP_NAME, self.deployment_groups)
self.assert_entity_name_not_in_collection(SSH_GROUP_NAME_UPDATE, self.deployment_groups)
service_ssh.update(name=SSH_SERVICE_NAME_UPDATE, any_user="false")
breadcrumb_ssh.detach_from_service(service_ssh.id)
self.assert_entity_name_not_in_collection(SSH_SERVICE_NAME,
breadcrumb_ssh.attached_services)
service_ssh.detach_from_decoy(decoy_ssh.id)
self.assert_entity_name_not_in_collection(SSH_DECOY_NAME, service_ssh.attached_decoys)
# Power off decoy:
self.power_off_decoy(decoy_ssh)
decoy_ssh.load()
assert decoy_ssh.machine_status == MachineStatus.INACTIVE
invalid_service = "invalid_service"
with pytest.raises(ValidationError):
self.services.create(name=invalid_service, service_type=invalid_service)
self.assert_entity_name_not_in_collection(invalid_service, self.services)
def test_honeydoc_breadcrumb(self):
logger.debug("test_honeydoc_breadcrumb called")
downloaded_docx_file_path = "test/downloaded.docx"
self.file_paths_for_cleanup.append(downloaded_docx_file_path)
deployment_group = self.deployment_groups.create(name=HONEYDOC_GROUP_NAME,
description="test deployment group")
breadcrumb_honeydoc = self.breadcrumbs.create(name=HONEYDOC_BREADCRUMB_NAME,
breadcrumb_type="honey_doc",
deployment_groups=[deployment_group.id],
monitor_from_external_host=False,
file_field_name="docx_file_content",
file_path="test/sample.docx")
service_honeydoc = self.services.create(name=HONEYDOC_SERVICE_NAME,
service_type="honey_doc",
server_suffix=HONEYDOC_SERVICE_SERVER_SUFFIX)
decoy_honeydoc = self.create_decoy(dict(name=HONEYDOC_DECOY_NAME,
hostname="decoyhoneydoc",
os="Ubuntu_1404",
vm_type="KVM"))
service_honeydoc.load()
breadcrumb_honeydoc.load()
self.assert_entity_name_in_collection(HONEYDOC_GROUP_NAME, breadcrumb_honeydoc.deployment_groups)
breadcrumb_honeydoc.connect_to_service(service_honeydoc.id)
service_honeydoc.connect_to_decoy(decoy_honeydoc.id)
service_honeydoc.load()
breadcrumb_honeydoc.load()
self.power_on_decoy(decoy_honeydoc)
decoy_honeydoc.load()
breadcrumb_honeydoc.download_breadcrumb_honeydoc(downloaded_docx_file_path)
assert os.path.exists(downloaded_docx_file_path)
assert os.path.getsize(downloaded_docx_file_path) > 0
class TestDecoy(APITest):
DECOY_STATUS_ACTIVE = 'active'
DECOY_STATUS_BOOTING = 'booting'
DECOY_STATUS_INACTIVE = 'inactive'
DECOY_STATUS_CONFIGURING = 'configuring'
@APITest.runslow
def test_ova(self):
logger.debug("test_ova called")
# Create decoy:
ova_decoy = self.create_decoy(dict(name=OVA_DECOY,
hostname="ovadecoy",
os="Ubuntu_1404",
vm_type="OVA"))
self.assert_entity_name_in_collection(OVA_DECOY, self.decoys)
# Download decoy:
download_file_path = "mazerunner/ova_image"
# Wait until the decoy becomes available and download the file
wait_until(ova_decoy.download, location_with_name=download_file_path,
check_return_value=False, exc_list=[ValidationError], total_timeout=60*10)
self.file_paths_for_cleanup.append("{}.ova".format(download_file_path))
def test_decoy_update(self):
def _assert_expected_values():
assert decoy.name == decoy_name
assert decoy.hostname == decoy_hostname
assert decoy.os == decoy_os
assert decoy.vm_type == vm_type
decoy_name = 'original_decoy_name'
decoy_hostname = 'decoyssh'
decoy_os = 'Ubuntu_1404'
vm_type = 'KVM'
decoy = self.create_decoy(dict(name=decoy_name,
hostname=decoy_hostname,
os=decoy_os,
vm_type=vm_type))
_assert_expected_values()
decoy.load()
_assert_expected_values()
# Try to rename the decoy
decoy_name = 'renamed_decoy'
decoy.update(name=decoy_name)
_assert_expected_values()
decoy.load()
_assert_expected_values()
@classmethod
@retry(stop_max_attempt_number=600, wait_fixed=1000)
def _wait_for_decoy_status(cls, decoy, desired_status):
assert decoy.load().machine_status == desired_status
@classmethod
def _start_decoy(cls, decoy):
decoy.power_on()
cls._wait_for_decoy_status(decoy, cls.DECOY_STATUS_ACTIVE)
def test_decoy_recreation(self):
decoy = self.create_decoy(dict(name='original_decoy_name',
hostname='decoyssh',
os='Ubuntu_1404',
vm_type='KVM'))
self._start_decoy(decoy)
decoy.recreate()
self._wait_for_decoy_status(decoy, self.DECOY_STATUS_BOOTING)
self._wait_for_decoy_status(decoy, self.DECOY_STATUS_ACTIVE)
def test_test_dns(self):
decoy = self.create_decoy(dict(name='original_decoy_name',
hostname='decoyssh',
os='Ubuntu_1404',
vm_type='KVM',
dns_address='no.such.dns'))
self._start_decoy(decoy)
assert decoy.test_dns() is False
decoy.power_off()
self._wait_for_decoy_status(decoy, self.DECOY_STATUS_INACTIVE)
with pytest.raises(ValidationError):
decoy.test_dns()
class TestDeploymentGroups(APITest):
def test_basic_crud(self):
dep_group = self.deployment_groups.create(name='test_check_conflicts')
dep_group.update(name='test_check_conflicts1', description='pretty dg')
assert self.deployment_groups.get_item(dep_group.id).name == 'test_check_conflicts1'
dep_group.delete()
with pytest.raises(ValidationError):
self.deployment_groups.get_item(dep_group.id)
def test_check_conflicts(self):
decoy_ssh = self.create_decoy(dict(name=SSH_DECOY_NAME,
hostname="decoyssh",
os="Ubuntu_1404",
vm_type="KVM"))
service_ssh = self.services.create(name=SSH_SERVICE_NAME, service_type="ssh", any_user="false")
service_ssh.connect_to_decoy(decoy_ssh.id)
dep_group = self.deployment_groups.create(name='test_check_conflicts')
assert dep_group.check_conflicts('Linux') == []
assert dep_group.check_conflicts('Windows') == []
bc_ssh1 = self.breadcrumbs.create(name='ssh1',
breadcrumb_type="ssh",
username="ssh_user",
password="ssh_pass")
bc_ssh1.connect_to_service(service_ssh.id)
# the group id must be numeric; passing the group name should raise BadParamError
with pytest.raises(BadParamError):
bc_ssh1.add_to_group('test_check_conflicts')
bc_ssh1.add_to_group(dep_group.id)
assert dep_group.check_conflicts('Linux') == []
assert dep_group.check_conflicts('Windows') == []
bc_ssh2 = self.breadcrumbs.create(name='ssh2',
breadcrumb_type="ssh",
username="ssh_user",
password="ssh_pass")
bc_ssh2.connect_to_service(service_ssh.id)
bc_ssh2.add_to_group(dep_group.id)
assert dep_group.check_conflicts('Linux') == []
assert dep_group.check_conflicts('Windows') == [
{
u'error': u"Conflict between breadcrumbs ssh1 and ssh2: "
u"Two SSH breadcrumbs can't point to the same "
u"user/decoy combination on the same endpoint"
}
]
def test_deployment(self):
decoy_ssh = self.create_decoy(dict(name=SSH_DECOY_NAME,
hostname="decoyssh",
os="Ubuntu_1404",
vm_type="KVM"))
service_ssh = self.services.create(name=SSH_SERVICE_NAME, service_type="ssh", any_user="false")
bc_ssh = self.breadcrumbs.create(name='ssh1',
breadcrumb_type="ssh",
username="ssh_user",
password="ssh_pass")
dep_group = self.deployment_groups.create(name='test_check_conflicts')
service_ssh.connect_to_decoy(decoy_ssh.id)
bc_ssh.connect_to_service(service_ssh.id)
bc_ssh.add_to_group(dep_group.id)
self.power_on_decoy(decoy_ssh)
def _has_complete_bg_tasks():
return len([bg_task for bg_task in self.background_tasks.filter(running=False)]) > 0
def _wait_and_destroy_background_task():
wait_until(_has_complete_bg_tasks, check_return_value=True)
self.background_tasks.acknowledge_all_complete()
def _test_manual_deployment():
dep_group.deploy(location_with_name=TEST_DEPLOYMENTS_FILE_PATH.replace('.zip', ''),
os='Windows',
download_type='install')
assert os.path.exists(TEST_DEPLOYMENTS_FILE_PATH)
os.remove(TEST_DEPLOYMENTS_FILE_PATH)
self.deployment_groups.deploy_all(
location_with_name=TEST_DEPLOYMENTS_FILE_PATH.replace('.zip', ''),
os='Windows',
download_format='ZIP')
assert os.path.exists(TEST_DEPLOYMENTS_FILE_PATH)
os.remove(TEST_DEPLOYMENTS_FILE_PATH)
def _test_auto_deployment():
# Since this runs asynchronously and it has nothing to deploy on, we only want to see
# that the request was accepted
dep_group.auto_deploy(username='some-user',
password='some-pass',
install_method='PS_EXEC',
run_method='EXE_DEPLOY',
domain='',
deploy_on="all")
_wait_and_destroy_background_task()
self.deployment_groups.auto_deploy_groups(
username='some-user',
password='some-pass',
install_method='PS_EXEC',
deployment_groups_ids=[1],
run_method='EXE_DEPLOY',
domain='',
deploy_on="all")
_wait_and_destroy_background_task()
_test_manual_deployment()
_test_auto_deployment()
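# True when at least one forensic-data alert is visible (used by the forensic puller test).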
def forensic_puller_alert_is_shown(self):
alerts = list(self.alerts.filter(filter_enabled=True,
only_alerts=False,
alert_types=[FORENSIC_DATA_ALERT_TYPE]))
return bool(alerts)
@pytest.mark.skip("needs auto deploy setting credentials")
@APITest.lab_dependent
def test_forensic_puller_on_demand(self):
## TODO: Add setting global deployment credentials here.
self.client.forensic_puller_on_demand.run_on_ip_list(ip_list=[self.lab_endpoint_ip])
wait_until(self.forensic_puller_alert_is_shown)
@APITest.lab_dependent
def test_deployment_credentials(self):
assert self.client.deployment_groups.test_deployment_credentials(
username=self.lab_endpoint_user,
password=self.lab_endpoint_password,
addr=self.lab_endpoint_ip,
install_method='PS_EXEC',
domain=None
) == {'success': True}
assert self.client.deployment_groups.test_deployment_credentials(
username=self.lab_endpoint_user,
password=self.lab_endpoint_password,
addr='192.168.100.100',
install_method='PS_EXEC',
domain=None
) == {
u'reason': u'Endpoint SMB TCP Ports(139, 445) are unreachable',
u'success': False
}
assert self.client.deployment_groups.test_deployment_credentials(
username=self.lab_endpoint_user,
password='WrongPassword',
addr=self.lab_endpoint_ip,
install_method='PS_EXEC',
domain=None
) == {u'reason': u'Incorrect credentials for endpoint', u'success': False}
assert self.client.deployment_groups.test_deployment_credentials(
username='WrongUser',
password=self.lab_endpoint_password,
addr=self.lab_endpoint_ip,
install_method='PS_EXEC',
domain=None
) == {u'reason': u'Incorrect credentials for endpoint', u'success': False}
class TestCollections(APITest):
def test_pagination(self):
breadcrumbs_to_create = 55
created_breadcrumbs_names = ['%s_%s' % (SSH_BREADCRUMB_NAME, breadcrumb_num)
for breadcrumb_num
in range(breadcrumbs_to_create)]
for breadcrumb_name in created_breadcrumbs_names:
self.breadcrumbs.create(name=breadcrumb_name,
breadcrumb_type="ssh",
username="ssh_user",
password="ssh_pass")
assert len(self.breadcrumbs) == breadcrumbs_to_create
fetched_breadcrumbs = [breadcrumb for breadcrumb in self.breadcrumbs]
assert len(fetched_breadcrumbs) == breadcrumbs_to_create
fetched_breadcrumbs_names = [breadcrumb.name for breadcrumb in fetched_breadcrumbs]
assert set(fetched_breadcrumbs_names) == set(created_breadcrumbs_names)
def test_get_item(self):
breadcrumb = self.breadcrumbs.create(name='test_breadcrumb',
breadcrumb_type="ssh",
username="ssh_user",
password="ssh_pass")
assert self.breadcrumbs.get_item(breadcrumb.id).id == breadcrumb.id
with pytest.raises(ValidationError):
assert self.breadcrumbs.get_item(breadcrumb.id + 1)
def test_params(self):
assert type(self.decoys.params()) == dict
assert type(self.services.params()) == dict
assert type(self.breadcrumbs.params()) == dict
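# Deploy the breadcrumb as a ZIP package, unzip it locally, and return the parsed
# utils/config.json from the package.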
def _get_breadcrumb_config(breadcrumb):
breadcrumb.deploy(
location_with_name=TEST_DEPLOYMENTS_FILE_PATH.replace('.zip', ''),
os='Linux',
download_type='install',
download_format='ZIP'
)
Popen(['unzip', TEST_DEPLOYMENTS_FILE_PATH], cwd=TEST_DEPLOYMENTS_FOLDER_PATH)
config_file_path = '%s/utils/config.json' % TEST_DEPLOYMENTS_FOLDER_PATH
wait_until(os.path.exists, path=config_file_path)
with open(config_file_path, 'rb') as f:
return json.load(f)
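# Write each private key found in the deployment config to a read-only .pem file and
# yield (user@address, key_path) pairs for breadcrumbs that have a key.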
def _create_private_keys_from_config(config):
for bc_index, bc in config['install'].iteritems():
pk = bc.get('private_key')
bc_id = bc.get('remote_id')
address = bc.get('address')
username = bc.get('username')
login_str = '%s@%s' % (username, address)
if not pk:
continue
key_path = '%s/%s.pem' % (TEST_DEPLOYMENTS_FOLDER_PATH, bc_id)
with open(key_path, 'wb') as f:
f.write(pk)
os.chmod(key_path, S_IRUSR)
yield login_str, key_path
class TestAlert(APITest):
def test_alert_download(self):
def _create_code_exec_alert():
decoy_ssh = self.create_decoy(dict(name=SSH_DECOY_NAME,
hostname="decoyssh",
os="Ubuntu_1404",
vm_type="KVM"))
service_ssh = self.services.create(name=SSH_SERVICE_NAME, service_type="ssh", any_user="false")
service_ssh.connect_to_decoy(decoy_ssh.id)
bc_ssh1 = self.breadcrumbs.create(name='ssh1',
breadcrumb_type="ssh_privatekey",
username="ssh_user",
deploy_for="root",
installation_type='history')
bc_ssh1.connect_to_service(service_ssh.id)
self.power_on_decoy(decoy_ssh)
config = _get_breadcrumb_config(bc_ssh1)
login_str, private_key_path = _create_private_keys_from_config(config).next()
Popen(['ssh', '-o', 'UserKnownHostsFile=/dev/null', '-o',
'StrictHostKeyChecking=no', login_str, '-i', private_key_path,
'ping -c 10 localhost'])
wait_until(self._get_first_code_execution_alert, exc_list=[AlertNotFoundError])
return self._get_first_code_execution_alert()
def _test_download_alert_files(code_exec_alert):
image_file = '%s/image' % TEST_DEPLOYMENTS_FOLDER_PATH
code_exec_alert.download_image_file(image_file)
assert os.path.exists('%s.bin' % image_file)
mem_dump = '%s/mem_dump' % TEST_DEPLOYMENTS_FOLDER_PATH
code_exec_alert.download_memory_dump_file(mem_dump)
assert os.path.exists('%s.bin' % mem_dump)
netcap_file = '%s/netcap' % TEST_DEPLOYMENTS_FOLDER_PATH
code_exec_alert.download_network_capture_file(netcap_file)
assert os.path.exists('%s.pcap' % netcap_file)
stix_file = '%s/stix' % TEST_DEPLOYMENTS_FOLDER_PATH
code_exec_alert.download_stix_file(stix_file)
assert os.path.exists('%s.xml' % stix_file)
def _test_delete_single_alert(code_exec_alert):
code_exec_alert.delete()
with pytest.raises(ValidationError):
self.alerts.get_item(code_exec_alert.id)
def _test_export():
export_file = '%s/export' % TEST_DEPLOYMENTS_FOLDER_PATH
self.alerts.export(export_file)
assert os.path.exists('%s.csv' % export_file)
def _test_delete_filtered_alerts():
assert len(self.alerts) > 0
self.alerts.delete(delete_all_filtered=True)
assert len(self.alerts) == 0
code_alert = _create_code_exec_alert()
_test_download_alert_files(code_alert)
_test_export()
_test_delete_single_alert(code_alert)
_test_delete_filtered_alerts()
def _get_first_code_execution_alert(self):
alerts = list(self.alerts.filter(filter_enabled=True,
only_alerts=True,
alert_types=[CODE_EXECUTION_ALERT_TYPE]))
if not alerts:
raise AlertNotFoundError
return alerts[0]
def test_params(self):
assert isinstance(self.alerts.params(), dict)
class TestEntity(APITest):
def test_repr(self):
service = self.services.create(name=SSH_SERVICE_NAME, service_type="ssh", any_user="false")
str_service = "<Service: available_decoys=[] name=u'ssh_service' service_type_name=u'SSH' " \
"url=u'https://{serv}/api/v1.0/service/{service_id}/' " \
"is_active=False attached_decoys=[] any_user={any_user} is_delete_enabled=True " \
"service_type=u'ssh' id={service_id}>"\
.format(serv=self.mazerunner_ip_address, service_id=service.id, any_user=service.any_user)
assert str(service) == str_service
def test_get_attribute(self):
service = self.services.create(name=SSH_SERVICE_NAME, service_type="ssh", any_user="false")
assert service.name == SSH_SERVICE_NAME
with pytest.raises(AttributeError):
_ = service.no_such_attribute
unloaded_service = Service(self.client, {'id': service.id, 'url': service.url})
assert unloaded_service.name == SSH_SERVICE_NAME
with pytest.raises(AttributeError):
_ = unloaded_service.no_such_attribute
no_such_service_data = {
'id': service.id + 1,
'url': '%s%s/' % (self.client.api_urls['service'], service.id + 1)
}
no_such_service = Service(self.client, no_such_service_data)
with pytest.raises(ValidationError):
assert no_such_service.name == SSH_SERVICE_NAME
with pytest.raises(ValidationError):
_ = no_such_service.no_such_attribute
class TestBreadcrumb(APITest):
def test_crud(self):
breadcrumb_ssh = self.breadcrumbs.create(name=SSH_BREADCRUMB_NAME,
breadcrumb_type="ssh",
username="ssh_user",
password="ssh_pass")
breadcrumb_ssh.update(name='renamed',
breadcrumb_type="ssh",
username="ssh_user",
password="ssh_pass")
assert self.breadcrumbs.get_item(breadcrumb_ssh.id).name == 'renamed'
class TestService(APITest):
def test_service_with_files(self):
site_data_file = os.path.join(os.path.dirname(__file__), 'test_site.zip')
assert len(self.client.services) == 0
self.services.create(name=SSH_BREADCRUMB_NAME,
service_type="http",
zip_file_path=site_data_file,
web_apps=['phpmyadmin'],
https_active=False)
assert len(self.client.services) == 1
class TestAlertPolicy(APITest):
def test_params(self):
assert isinstance(self.alert_policies.params(), dict)
def test_crud(self):
alert_policies = list(self.alert_policies)
assert len(alert_policies) > 0
assert all([isinstance(alert_policy, AlertPolicy) for alert_policy in alert_policies])
rdp_policies = [alert_policy
for alert_policy
in alert_policies
if alert_policy.alert_type == 'rdp']
assert len(rdp_policies) == 1
rdp_policy = rdp_policies[0]
assert rdp_policy.to_status == 1
assert self.alert_policies.get_item(rdp_policy.id).to_status == 1
rdp_policy.update_to_status(2)
assert self.alert_policies.get_item(rdp_policy.id).to_status == 2
self.alert_policies.reset_all_to_default()
assert self.alert_policies.get_item(rdp_policy.id).to_status == 1
class TestConnection(APITest):
@APITest.lab_dependent
def test_500(self):
with pytest.raises(ServerError):
self.client.api_request(url='http://the-internet.herokuapp.com/status_codes/500')
def test_cert(self):
# With cert
client = mazerunner.connect(
ip_address=self.mazerunner_ip_address,
api_key=self.api_key,
api_secret=self.api_secret,
certificate=self.mazerunner_certificate_path
)
assert len(client.deployment_groups) == 1
# Without cert
client = mazerunner.connect(
ip_address=self.mazerunner_ip_address,
api_key=self.api_key,
api_secret=self.api_secret,
certificate=None
)
assert len(client.deployment_groups) == 1
class TestEndpoints(APITest):
@APITest.lab_dependent
def test_deploy(self):
def _destroy_elements():
for cidr_mapping in self.cidr_mappings:
cidr_mapping.delete()
for ep in self.endpoints:
ep.delete()
def _are_all_tasks_complete():
return len(self.background_tasks) == 0
def _test_import_endpoint():
_destroy_elements()
assert len(self.endpoints) == 0
assert len(self.cidr_mappings) == 0
assert len(self.background_tasks) == 0
cidr_mapping = self.cidr_mappings.create(
cidr_block='%s/30' % self.lab_endpoint_ip,
deployment_group=1,
comments='no comments',
active=True
)
assert len(self.cidr_mappings) == 1
selected_cidr = list(self.cidr_mappings)[0]
assert selected_cidr.cidr_block == cidr_mapping.cidr_block
assert selected_cidr.deployment_group == cidr_mapping.deployment_group
assert selected_cidr.comments == cidr_mapping.comments
assert selected_cidr.active == cidr_mapping.active
cidr_mapping.generate_endpoints()
background_tasks = list(self.background_tasks)
assert len(background_tasks) == 1
wait_until(_are_all_tasks_complete, total_timeout=300)
assert len(self.cidr_mappings) > 0
assert len(self.endpoints) > 0
assert len(self.endpoints.filter(keywords='no.such.thing')) == 0
assert len(self.endpoints.filter(keywords=self.lab_endpoint_ip)) > 0
return list(self.endpoints.filter(keywords=self.lab_endpoint_ip))[0]
def _test_clean(ep):
with pytest.raises(InvalidInstallMethodError):
self.endpoints.filter(keywords=self.lab_endpoint_ip).clean_filtered(
install_method='invalid-install-method',
username=self.lab_endpoint_user,
password=self.lab_endpoint_password
)
self.endpoints.filter(keywords=self.lab_endpoint_ip).clean_filtered(
install_method='ZIP',
username=self.lab_endpoint_user,
password=self.lab_endpoint_password
)
self.endpoints.clean_by_endpoints_ids(
endpoints_ids=[ep.id],
install_method='ZIP',
username=self.lab_endpoint_user,
password=self.lab_endpoint_password
)
def _test_reassignment(ep):
dep_group = self.deployment_groups.create(name='ep1_test', description='test')
# Assign via collection
self.endpoints.reassign_to_group(dep_group, [ep])
assert self.endpoints.get_item(ep.id).deployment_group.id == dep_group.id
# Clear via collection
self.endpoints.clear_deployment_group([ep])
assert self.endpoints.get_item(ep.id).deployment_group is None
# Assign via entity
all_breadcrumbs_deployment_group = self.deployment_groups.get_item(
self.deployment_groups.ALL_BREADCRUMBS_DEPLOYMENT_GROUP_ID)
            ep.reassign_to_group(all_breadcrumbs_deployment_group)
assert self.endpoints.get_item(ep.id).deployment_group.id == \
all_breadcrumbs_deployment_group.id
# Clear via entity
ep.clear_deployment_group()
assert self.endpoints.get_item(ep.id).deployment_group is None
# Eventually leave the endpoint with the new deployment group assigned
ep.reassign_to_group(dep_group)
assert self.endpoints.get_item(ep.id).deployment_group.id == dep_group.id
def _test_delete():
self.endpoints.filter('no.such.endpoints').delete_filtered()
assert len(self.endpoints.filter(self.lab_endpoint_ip)) > 0
self.endpoints.filter(self.lab_endpoint_ip).delete_filtered()
assert len(self.endpoints.filter(self.lab_endpoint_ip)) == 0
_test_import_endpoint()
endpoints = list(self.endpoints.filter(self.lab_endpoint_ip))
assert len(endpoints) > 0
self.endpoints.delete_by_endpoints_ids([curr_endpoint.id for curr_endpoint in endpoints])
assert len(self.endpoints.filter(self.lab_endpoint_ip)) == 0
def _test_data():
_test_import_endpoint()
csv_data = self.endpoints.export_filtered()
pseudo_csv_file = StringIO.StringIO(csv_data)
csv_data = csv.reader(pseudo_csv_file, delimiter=',')
assert any([
len(csv_line) >= 3 and csv_line[2] == self.lab_endpoint_ip
for csv_line
in csv_data
])
assert isinstance(self.endpoints.filter_data(), dict)
def _test_stop_import():
_destroy_elements()
assert len(self.background_tasks) == 0
self.cidr_mappings.create(
cidr_block='%s/24' % self.lab_endpoint_ip,
deployment_group=1,
comments='no comments',
active=True
)
self.cidr_mappings.generate_all_endpoints()
assert len(self.background_tasks) == 1
list(self.background_tasks)[0].stop()
assert len(self.background_tasks) == 0
assert len(self.background_tasks.filter(running=False)) > 0
self.background_tasks.acknowledge_all_complete()
assert len(self.background_tasks.filter(running=False)) == 0
endpoint = _test_import_endpoint()
_test_clean(endpoint)
_test_reassignment(endpoint)
_test_delete()
_test_data()
_test_stop_import()
def test_create_endpoint(self):
for params in [
dict(ip_address='1.1.1.1'),
dict(dns='endpoint_address.endpoint.local'),
dict(hostname='hostname'),
dict(dns='endpoint_address.endpoint.local', ip_address='1.1.1.1'),
]:
endpoint = self.endpoints.create(**params)
assert endpoint
for key, value in params.iteritems():
assert getattr(endpoint, key) == value
assert len(self.endpoints) == 1
endpoint.delete()
def test_create_endpoint_with_deployment_group(self):
ip_address = "1.1.1.1"
endpoint = self.endpoints.create(ip_address=ip_address, deployment_group_id=1)
assert endpoint.ip_address == ip_address
assert endpoint.deployment_group.name == "All Breadcrumbs"
endpoint.delete()
def test_create_invalid_endpoint(self):
for params, expected_error_message in [
(dict(ip_address='1.1.1.1.1'), "Enter a valid IPv4 address."),
(dict(dns='A'*256), "Maximum field length is 255 characters"),
(dict(hostname='A'*16), "Maximum field length is 15 characters"),
(dict(), "You must provide either dns, hostname, or ip address"),
]:
try:
self.endpoints.create(**params)
                raise AssertionError("Creation of the endpoint should raise an exception")
except ValidationError as e:
error = json.loads(e.message)
if params:
for key in params:
assert key in error
assert error[key] == [expected_error_message]
else:
assert error["non_field_errors"] == [expected_error_message]
class TestAuditLog(APITest):
@staticmethod
def _format_time(date_obj):
return date_obj.strftime(ISO_TIME_FORMAT)
def _test_time_based_queries(self):
today = datetime.datetime.now()
tomorrow = today + datetime.timedelta(days=1)
a_week_ago = today + datetime.timedelta(days=-7)
two_weeks_ago = today + datetime.timedelta(days=-14)
# check that today has data - start date
assert len(self.audit_log.filter(start_date=self._format_time(today))) != 0, \
"No data from today according to start date"
# and that tomorrow doesn't - start date
assert len(self.audit_log.filter(start_date=self._format_time(tomorrow))) == 0, \
"Data from tomorrow found!"
# check that today has data - end date
assert len(self.audit_log.filter(end_date=self._format_time(today))) != 0, \
"No data from today according to end date"
# and that last week doesn't - end date
assert len(self.audit_log.filter(end_date=self._format_time(a_week_ago))) == 0, \
"Data from a week ago found, even though we deleted everything!"
# test time range
assert len(self.audit_log.filter(start_date=self._format_time(a_week_ago),
end_date=self._format_time(today))) != 0, \
"No logs from the past week"
assert len(self.audit_log.filter(start_date=self._format_time(two_weeks_ago),
end_date=self._format_time(a_week_ago))) == 0, \
"Logs found from two weeks ago."
def _test_object_ids_queries(self, log_count):
# get obj ids from server
object_ids = [log_line._param_dict.get("object_ids") for log_line in self.audit_log]
# and extract them
object_ids = list(set([object_id[0] if object_id else None for object_id in object_ids]))
# and make sure you have enough obj ids
assert len(object_ids) >= 2, "No more than 1 object ID in the system"
# test that you don't get all the alerts when filtering
usable_object_id = [object_id for object_id in object_ids if object_id][0]
assert len(self.audit_log.filter(object_ids=usable_object_id)) != log_count, \
"Object ID filter returned the same amount of logs as the full filter"
def _test_username_queries(self, log_count):
user_id = self.client._auth.credentials['id']
# make sure that if the username is right you get data
assert len(self.audit_log.filter(username=[user_id])) != 0, "No logs found for the user"
# Note: we don't have more than one user in the tests, therefore we don't
# have a test that filters one user's info
# check that a bad username doesnt provide any data
bad_username = user_id * 2
assert len(self.audit_log.filter(username=[bad_username])) == 0, \
"Logs found for the (probably) nonexistent user {}".format(bad_username)
# test users not list ERR
with pytest.raises(BadParamError):
self.audit_log.filter(username=user_id)
def _test_category_queries(self, log_count):
# get categories from server
categories = list(set([log_line._param_dict.get("category") for log_line in self.audit_log]))
# and make sure you have enough
assert len(categories) >= 2, "No more than 1 category in the system"
# make sure the param is OK
assert len(self.audit_log.filter(category=[categories[0]])) != 0, \
"No logs for previously existing filter value"
# test that you don't get all the alerts when filtering
assert len(self.audit_log.filter(category=[categories[0]])) != log_count, \
"Filtered list returned the same amount of logs as the full filter"
# test categories not list ERR
with pytest.raises(BadParamError):
self.audit_log.filter(category=categories[0])
def _test_event_type_queries(self, log_count):
# get event_types from server
event_types = list(set([log_line._param_dict.get("event_type_label") for log_line in self.audit_log]))
# and make sure you have enough
assert len(event_types) >= 2, "No more than 1 event type in the system."
# make sure the param is OK
assert len(self.audit_log.filter(event_type=[event_types[0]])) != 0, \
"No logs for previously existing filter value"
# test that you don't get all the alerts when filtering
assert len(self.audit_log.filter(event_type=[event_types[0]])) != log_count, \
"Filtered list returned the same amount of logs as the full filter"
# test event type not list ERR
with pytest.raises(BadParamError):
self.audit_log.filter(event_type=event_types[0])
def test_audit_log_query(self):
# test delete (at the start for a clean log)
self.audit_log.delete()
logger.info("Audit log cleared")
# build all sorts of logs
decoy_ssh = self.create_decoy(dict(name=SSH_DECOY_NAME,
hostname="decoyssh",
os="Ubuntu_1404",
vm_type="KVM"))
# test query
        log_count = len(self.audit_log)
        assert log_count != 0, "No logs found"
assert type(list(self.audit_log)[0]) == AuditLogLine, "Invalid output"
self._test_time_based_queries()
self._test_object_ids_queries(log_count)
self._test_username_queries(log_count)
self._test_category_queries(log_count)
self._test_event_type_queries(log_count)
# test filter=False with params
assert len(self.audit_log.filter(event_type=["Delete"], filter_enabled=False)) == \
len(self.audit_log.filter(event_type=["Action"], filter_enabled=False)), \
"filter_enabled = False should make other filters redundant"
# test delete (again to make sure the log is actually cleaned)
self.audit_log.delete()
assert len(self.audit_log) != 0, "No delete log!"
assert len(self.audit_log) == 1, "Other logs found"
    # Once the server-side item-filtering issue is fixed, use the data returned by the decoy creation to actually filter items.
@pytest.mark.xfail(reason="Item filtering is broken on server side")
def test_audit_log_item_filter(self):
# look for an item that doesnt exist
INVALID_ITEM_FILTER = "qweasdzxc"
assert len(self.audit_log.filter(item=INVALID_ITEM_FILTER)) == 0
|
|
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import pytest
from pytz import (
FixedOffset,
timezone,
utc,
)
from neo4j.exceptions import CypherTypeError
from neo4j.time import (
Date,
DateTime,
Duration,
Time,
)
def test_native_date_input(cypher_eval):
from datetime import date
result = cypher_eval("CYPHER runtime=interpreted WITH $x AS x "
"RETURN [x.year, x.month, x.day]",
x=date(1976, 6, 13))
year, month, day = result
assert year == 1976
assert month == 6
assert day == 13
def test_date_input(cypher_eval):
result = cypher_eval("CYPHER runtime=interpreted WITH $x AS x "
"RETURN [x.year, x.month, x.day]",
x=Date(1976, 6, 13))
year, month, day = result
assert year == 1976
assert month == 6
assert day == 13
def test_date_array_input(cypher_eval):
data = [DateTime.now().date(), Date(1976, 6, 13)]
value = cypher_eval("CREATE (a {x:$x}) RETURN a.x", x=data)
assert value == data
def test_native_time_input(cypher_eval):
from datetime import time
result = cypher_eval("CYPHER runtime=interpreted WITH $x AS x "
"RETURN [x.hour, x.minute, x.second, x.nanosecond]",
x=time(12, 34, 56, 789012))
hour, minute, second, nanosecond = result
assert hour == 12
assert minute == 34
assert second == 56
assert nanosecond == 789012000
def test_whole_second_time_input(cypher_eval):
result = cypher_eval("CYPHER runtime=interpreted WITH $x AS x "
"RETURN [x.hour, x.minute, x.second]",
x=Time(12, 34, 56))
hour, minute, second = result
assert hour == 12
assert minute == 34
assert second == 56
def test_nanosecond_resolution_time_input(cypher_eval):
result = cypher_eval("CYPHER runtime=interpreted WITH $x AS x "
"RETURN [x.hour, x.minute, x.second, x.nanosecond]",
x=Time(12, 34, 56, 789012345))
hour, minute, second, nanosecond = result
assert hour == 12
assert minute == 34
assert second == 56
assert nanosecond == 789012345
def test_time_with_numeric_time_offset_input(cypher_eval):
result = cypher_eval("CYPHER runtime=interpreted WITH $x AS x "
"RETURN [x.hour, x.minute, x.second, "
" x.nanosecond, x.offset]",
x=Time(12, 34, 56, 789012345, tzinfo=FixedOffset(90)))
hour, minute, second, nanosecond, offset = result
assert hour == 12
assert minute == 34
assert second == 56
assert nanosecond == 789012345
assert offset == "+01:30"
def test_time_array_input(cypher_eval):
data = [Time(12, 34, 56), Time(10, 0, 0)]
value = cypher_eval("CREATE (a {x:$x}) RETURN a.x", x=data)
assert value == data
def test_native_datetime_input(cypher_eval):
from datetime import datetime
result = cypher_eval("CYPHER runtime=interpreted WITH $x AS x "
"RETURN [x.year, x.month, x.day, "
" x.hour, x.minute, x.second, x.nanosecond]",
x=datetime(1976, 6, 13, 12, 34, 56, 789012))
year, month, day, hour, minute, second, nanosecond = result
assert year == 1976
assert month == 6
assert day == 13
assert hour == 12
assert minute == 34
assert second == 56
assert nanosecond == 789012000
def test_whole_second_datetime_input(cypher_eval):
result = cypher_eval("CYPHER runtime=interpreted WITH $x AS x "
"RETURN [x.year, x.month, x.day, "
" x.hour, x.minute, x.second]",
x=DateTime(1976, 6, 13, 12, 34, 56))
year, month, day, hour, minute, second = result
assert year == 1976
assert month == 6
assert day == 13
assert hour == 12
assert minute == 34
assert second == 56
def test_nanosecond_resolution_datetime_input(cypher_eval):
result = cypher_eval("CYPHER runtime=interpreted WITH $x AS x "
"RETURN [x.year, x.month, x.day, "
" x.hour, x.minute, x.second, x.nanosecond]",
x=DateTime(1976, 6, 13, 12, 34, 56, 789012345))
year, month, day, hour, minute, second, nanosecond = result
assert year == 1976
assert month == 6
assert day == 13
assert hour == 12
assert minute == 34
assert second == 56
assert nanosecond == 789012345
def test_datetime_with_numeric_time_offset_input(cypher_eval):
result = cypher_eval("CYPHER runtime=interpreted WITH $x AS x "
"RETURN [x.year, x.month, x.day, "
" x.hour, x.minute, x.second, "
" x.nanosecond, x.offset]",
x=DateTime(1976, 6, 13, 12, 34, 56, 789012345,
tzinfo=FixedOffset(90)))
year, month, day, hour, minute, second, nanosecond, offset = result
assert year == 1976
assert month == 6
assert day == 13
assert hour == 12
assert minute == 34
assert second == 56
assert nanosecond == 789012345
assert offset == "+01:30"
def test_datetime_with_named_time_zone_input(cypher_eval):
dt = DateTime(1976, 6, 13, 12, 34, 56.789012345)
input_value = timezone("US/Pacific").localize(dt)
result = cypher_eval("CYPHER runtime=interpreted WITH $x AS x "
"RETURN [x.year, x.month, x.day, "
" x.hour, x.minute, x.second, "
" x.nanosecond, x.timezone]",
x=input_value)
year, month, day, hour, minute, second, nanosecond, tz = result
assert year == input_value.year
assert month == input_value.month
assert day == input_value.day
assert hour == input_value.hour
assert minute == input_value.minute
assert second == int(input_value.second)
assert nanosecond == int(1000000000 * input_value.second % 1000000000)
assert tz == input_value.tzinfo.zone
def test_datetime_array_input(cypher_eval):
data = [DateTime(2018, 4, 6, 13, 4, 42, 516120), DateTime(1976, 6, 13)]
value = cypher_eval("CREATE (a {x:$x}) RETURN a.x", x=data)
assert value == data
def test_duration_input(cypher_eval):
result = cypher_eval("CYPHER runtime=interpreted WITH $x AS x "
"RETURN [x.months, x.days, x.seconds, "
" x.microsecondsOfSecond]",
x=Duration(years=1, months=2, days=3, hours=4,
minutes=5, seconds=6.789012))
months, days, seconds, microseconds = result
assert months == 14
assert days == 3
assert seconds == 14706
assert microseconds == 789012
def test_duration_array_input(cypher_eval):
data = [Duration(1, 2, 3, 4, 5, 6), Duration(9, 8, 7, 6, 5, 4)]
value = cypher_eval("CREATE (a {x:$x}) RETURN a.x", x=data)
assert value == data
def test_timedelta_input(cypher_eval):
from datetime import timedelta
result = cypher_eval("CYPHER runtime=interpreted WITH $x AS x "
"RETURN [x.months, x.days, x.seconds, "
" x.microsecondsOfSecond]",
x=timedelta(days=3, hours=4, minutes=5,
seconds=6.789012))
months, days, seconds, microseconds = result
assert months == 0
assert days == 3
assert seconds == 14706
assert microseconds == 789012
def test_mixed_array_input(cypher_eval):
data = [Date(1976, 6, 13), Duration(9, 8, 7, 6, 5, 4)]
with pytest.raises(CypherTypeError):
_ = cypher_eval("CREATE (a {x:$x}) RETURN a.x", x=data)
def test_date_output(cypher_eval):
value = cypher_eval("RETURN date('1976-06-13')")
assert isinstance(value, Date)
assert value == Date(1976, 6, 13)
def test_whole_second_time_output(cypher_eval):
value = cypher_eval("RETURN time('12:34:56')")
assert isinstance(value, Time)
assert value == Time(12, 34, 56, tzinfo=FixedOffset(0))
def test_nanosecond_resolution_time_output(cypher_eval):
value = cypher_eval("RETURN time('12:34:56.789012345')")
assert isinstance(value, Time)
assert value == Time(12, 34, 56, 789012345, tzinfo=FixedOffset(0))
def test_time_with_numeric_time_offset_output(cypher_eval):
value = cypher_eval("RETURN time('12:34:56.789012345+0130')")
assert isinstance(value, Time)
assert value == Time(12, 34, 56, 789012345, tzinfo=FixedOffset(90))
def test_whole_second_localtime_output(cypher_eval):
value = cypher_eval("RETURN localtime('12:34:56')")
assert isinstance(value, Time)
assert value == Time(12, 34, 56)
def test_nanosecond_resolution_localtime_output(cypher_eval):
value = cypher_eval("RETURN localtime('12:34:56.789012345')")
assert isinstance(value, Time)
assert value == Time(12, 34, 56, 789012345)
def test_whole_second_datetime_output(cypher_eval):
value = cypher_eval("RETURN datetime('1976-06-13T12:34:56')")
assert isinstance(value, DateTime)
assert value == DateTime(1976, 6, 13, 12, 34, 56, tzinfo=utc)
def test_nanosecond_resolution_datetime_output(cypher_eval):
value = cypher_eval("RETURN datetime('1976-06-13T12:34:56.789012345')")
assert isinstance(value, DateTime)
assert value == DateTime(1976, 6, 13, 12, 34, 56, 789012345, tzinfo=utc)
def test_datetime_with_numeric_time_offset_output(cypher_eval):
value = cypher_eval("RETURN "
"datetime('1976-06-13T12:34:56.789012345+01:30')")
assert isinstance(value, DateTime)
assert value == DateTime(1976, 6, 13, 12, 34, 56, 789012345,
tzinfo=FixedOffset(90))
def test_datetime_with_named_time_zone_output(cypher_eval):
value = cypher_eval("RETURN datetime('1976-06-13T12:34:56.789012345"
"[Europe/London]')")
assert isinstance(value, DateTime)
dt = DateTime(1976, 6, 13, 12, 34, 56, 789012345)
assert value == timezone("Europe/London").localize(dt)
def test_whole_second_localdatetime_output(cypher_eval):
value = cypher_eval("RETURN localdatetime('1976-06-13T12:34:56')")
assert isinstance(value, DateTime)
assert value == DateTime(1976, 6, 13, 12, 34, 56)
def test_nanosecond_resolution_localdatetime_output(cypher_eval):
value = cypher_eval("RETURN "
"localdatetime('1976-06-13T12:34:56.789012345')")
assert isinstance(value, DateTime)
assert value == DateTime(1976, 6, 13, 12, 34, 56, 789012345)
def test_duration_output(cypher_eval):
value = cypher_eval("RETURN duration('P1Y2M3DT4H5M6.789S')")
assert isinstance(value, Duration)
assert value == Duration(years=1, months=2, days=3, hours=4,
minutes=5, seconds=6.789)
def test_nanosecond_resolution_duration_output(cypher_eval):
value = cypher_eval("RETURN duration('P1Y2M3DT4H5M6.789123456S')")
assert isinstance(value, Duration)
assert value == Duration(years=1, months=2, days=3, hours=4,
minutes=5, seconds=6, nanoseconds=789123456)
def test_datetime_parameter_case1(session):
# python -m pytest tests/integration/test_temporal_types.py -s -v -k test_datetime_parameter_case1
dt1 = session.run("RETURN datetime('2019-10-30T07:54:02.129790001+00:00')").single().value()
assert isinstance(dt1, DateTime)
dt2 = session.run("RETURN $date_time", date_time=dt1).single().value()
assert isinstance(dt2, DateTime)
assert dt1 == dt2
def test_datetime_parameter_case2(session):
# python -m pytest tests/integration/test_temporal_types.py -s -v -k test_datetime_parameter_case2
dt1 = session.run("RETURN datetime('2019-10-30T07:54:02.129790999[UTC]')").single().value()
assert isinstance(dt1, DateTime)
assert dt1.iso_format() == "2019-10-30T07:54:02.129790999+00:00"
dt2 = session.run("RETURN $date_time", date_time=dt1).single().value()
assert isinstance(dt2, DateTime)
assert dt1 == dt2
def test_datetime_parameter_case3(session):
    # python -m pytest tests/integration/test_temporal_types.py -s -v -k test_datetime_parameter_case3
dt1 = session.run("RETURN datetime('2019-10-30T07:54:02.129790+00:00')").single().value()
assert isinstance(dt1, DateTime)
dt2 = session.run("RETURN $date_time", date_time=dt1).single().value()
assert isinstance(dt2, DateTime)
assert dt1 == dt2
def test_time_parameter_case1(session):
# python -m pytest tests/integration/test_temporal_types.py -s -v -k test_time_parameter_case1
t1 = session.run("RETURN time('07:54:02.129790001+00:00')").single().value()
assert isinstance(t1, Time)
t2 = session.run("RETURN $time", time=t1).single().value()
assert isinstance(t2, Time)
assert t1 == t2
def test_time_parameter_case2(session):
# python -m pytest tests/integration/test_temporal_types.py -s -v -k test_time_parameter_case2
t1 = session.run("RETURN time('07:54:02.129790999+00:00')").single().value()
assert isinstance(t1, Time)
assert t1.iso_format() == "07:54:02.129790999+00:00"
time_zone_delta = t1.utc_offset()
assert isinstance(time_zone_delta, datetime.timedelta)
assert time_zone_delta == datetime.timedelta(0)
t2 = session.run("RETURN $time", time=t1).single().value()
assert isinstance(t2, Time)
assert t1 == t2
def test_time_parameter_case3(session):
# python -m pytest tests/integration/test_temporal_types.py -s -v -k test_time_parameter_case3
t1 = session.run("RETURN time('07:54:02.129790+00:00')").single().value()
assert isinstance(t1, Time)
t2 = session.run("RETURN $time", time=t1).single().value()
assert isinstance(t2, Time)
assert t1 == t2
|
|
"""
quilt.Quilter
Object to stitch a page based on quilt
{: .lead}
1. use [quilt file](#quiltfile) to create quilt
2. replace all [patch files](#patchfile) by matching `patch#id` tags in quilt with a `patch/id.html` file
3. parses [page file](#pagefile) using the following format:
1. `key: value` page variable *header* (optional)
* key = `[A-Za-z0-9_-]+` until `:`, value = a string per line (multiple lines become an array) until next key
* page variable section ends with first empty (whitespace-only) line
2. `html` or `markdown` page content
3. `<script>` page script (optional)
4. set [page variables](#pagevars), overriding default site variables
5. add page content to quilt (auto processing [`markdown` page](#pagefilemd) if file ends with `.md` or `.markdown`)
6. add page script to the end of quilt
7. replace all brace variables, `{ {.*}}`, in content with page or site variables
8. if page is under `posts/` directory, `tags` and `categories` variables are linked and appended to page content
9. fill in blank `alt` attributes for `<a>` and `<img>` tags
project : quilt
version : 0.1.1
status : development
modifydate : 2015-05-13 07:09:00 -0700
createdate : 2015-04-28 06:02:00 -0700
website : https://github.com/tmthydvnprt/quilt
author : tmthydvnprt
email : [email protected]
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2015, quilt
credits :
"""
import os
import bs4
import json
import copy
import time
import math
import shutil
from collections import defaultdict
from quilt.Constants import JS_HTML_PATTERN_RE, FIRST_KEY_RE, FIRST_EMPTY_LINE_RE, KEY_VALUE_RE, VALUE_RE, TRUE_RE
from quilt.Constants import PAGEVAR_RE, ESCAPED_PAGEVAR_RE
from quilt.Constants import PATCHCOMMENT, QUILTCOMMENT, PAGEOBJ, DOTSTAR_RE, PAGEVARS_TO_PRINT
from quilt.Util import write_file, relative_path, group_links, minimize_js, NO_EMPTY_TAGS
from quilt.Util import HEAD_STRAINER, BODY_STRAINER#, a_strainer, link_strainer, script_strainer, table_strainer, img_strainer
from quilt.Markdown import MD
DEBUG_FILE = ''
def add_suffix(filepath='', suffix=''):
"""add suffix to file name"""
dirname = os.path.dirname(filepath)
filename, ext = os.path.splitext(os.path.basename(filepath))
new_name = os.path.join(dirname, filename + suffix + ext)
    print 'debugging:', new_name
return new_name
def parse_pagevars(var_str=''):
"""parse page var string"""
page_vars = defaultdict(list)
key = None
value = None
if var_str:
# parse key, value pairs from each line
for line in var_str.split('\n'):
key_value = KEY_VALUE_RE.match(line)
if key_value:
key = key_value.group('key').strip()
value = key_value.group('value').strip()
page_vars[key].append(value)
else:
another_value = VALUE_RE.match(line)
if another_value and key:
page_vars[key].append(another_value.group('value').strip())
# reduce singleton arrays to string
for key, value in page_vars.items():
if len(value) == 1:
page_vars[key] = value[0]
return page_vars
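# Sketch of the header format parse_pagevars handles (the exact regexes live in
# quilt.Constants, so the matching rules assumed here are illustrative only):
#
#     title: My Page
#     tags: python
#     web
#
# would yield roughly {'title': 'My Page', 'tags': ['python', 'web']}: extra
# bare-value lines are appended to the preceding key's list, and single-value
# lists are collapsed back to plain strings.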
class Quilter(object):
"""page quilter object"""
#@profile
def __init__(self, page_file='', quilt='', patches=None, page='', config=None, overrides=None, wrap=''):
"""create quilter"""
# set settings
self.config = config
self.post = self.config["buildblog"] and os.path.join(os.path.basename(self.config["posts"]), "") in page_file
self.__do_markdown = page_file[-3:] == '.md'
self.__wrap = wrap or self.__do_markdown
# set pagevars, handling some special cases
self.pagevars = copy.deepcopy(self.config["page_defaults"])
self.pagevars.update({
"rootpath" : self.config["output"],
"relativepath" : relative_path(
page_file.replace(self.config["pages"], self.config["output"]).replace('.md', '.html'), self.config["output"]
),
"source" : page_file,
"output" : page_file.replace(self.config["pages"], self.config["output"]).replace('.md', '.html'),
"markdownlink" : os.path.basename(page_file),
"directory" : os.path.basename(os.path.dirname(page_file))
})
if self.config["local"]:
self.pagevars["url"] = self.pagevars["output"]
else:
self.pagevars["url"] = self.pagevars["output"].replace(self.pagevars["rootpath"], 'http://'+self.pagevars["domain"])
# update pagevars
if overrides:
self.pagevars.update(overrides)
self.pagevars["keywords"] = ','.join(self.pagevars["keywords"])
self.__do_debug = self.pagevars["output"] == DEBUG_FILE
# parse html and build soup
self.soup = bs4.BeautifulSoup(quilt, "lxml")
# build patches
self.patches = copy.deepcopy(patches)
# process page file
self.parse_page(page)
# keep track of processing time
self.start = time.time()
#@profile
def parse_page(self, page):
"""parses page into vars, html, and scripts7.487 s"""
if self.__do_debug:
write_file(add_suffix(DEBUG_FILE, '_original-page'), page.encode('utf-8'))
if FIRST_KEY_RE.match(page.split('\n', 1)[0]):
page_vars, page_html = FIRST_EMPTY_LINE_RE.split(page, 1)
else:
page_vars, page_html = None, page
page_js, page_html = JS_HTML_PATTERN_RE.match(page_html[::-1]).groups()
page_html = page_html[::-1]
page_js = page_js[::-1] if page_js else None
# update pagevars with page var json
if page_vars:
self.pagevars.update(parse_pagevars(page_vars))
if self.__do_debug:
write_file(add_suffix(DEBUG_FILE, 'parsed-page'), page_html.encode('utf-8'))
# handle markdown if necessary
if self.__do_markdown:
page_html_md = MD.reset().convert(page_html)
if self.__do_debug:
write_file(add_suffix(DEBUG_FILE, '_markdown-output'), page_html_md.encode('utf-8'))
page_html = page_html_md.replace("<code> ", "<code>").replace(" </code>", "</code>")
if self.__wrap and self.patches["markdown"]:
page_html = self.patches["markdown"].replace("{{markdown}}", page_html)
# set page html
self.patches["page"] = page_html
# append page script to quilt
if page_js:
self.patches["scripts"] = '%s\n%s' % (self.patches["scripts"], page_js)
# add page variables to object
if self.config["pageobject"]:
filtered_pagevars = {k:str(v) for k, v in self.pagevars.items() if k in PAGEVARS_TO_PRINT}
page_obj = json.dumps(filtered_pagevars, indent=4, separators=(',', ': '), sort_keys=True)
if self.config["minimizejs"]:
page_obj = minimize_js(page_obj)
self.patches["scripts"] = '%s\n%s' % (PAGEOBJ % (page_obj), self.patches["scripts"])
if self.__do_debug:
write_file(add_suffix(DEBUG_FILE, '_markdown-wrapped'), page_html.encode('utf-8'))
return self
#@profile
def replace_patches(self):
"""replace all patches in quilt with patch files"""
# replace head (special case of patch)
head = bs4.BeautifulSoup(self.patches["head"], "lxml", parse_only=HEAD_STRAINER).head
self.soup.html.head.replace_with(head)
if self.pagevars["patchcomment"]:
self.soup.html.insert(0, self.soup.new_string("quilted head patch", bs4.Comment))
self.soup.html.insert(0, '\n')
# replace all other patches, recursively
patch_tags = self.soup.find_all("patch")
while len(patch_tags) > 0:
for patch in patch_tags:
if patch["id"] in self.patches and self.patches[patch["id"]]:
if patch["id"] == "scripts":
patch_soup = bs4.BeautifulSoup(
self.patches[patch["id"]].encode('utf-8'),
"lxml",
parse_only=HEAD_STRAINER
)
else:
patch_soup = bs4.BeautifulSoup(
self.patches[patch["id"]].encode('utf-8'),
"lxml",
parse_only=BODY_STRAINER
)
if self.__do_debug:
write_file(
add_suffix(DEBUG_FILE, '_'+patch["id"]+'-html'),
self.patches[patch["id"]].encode('utf-8')
)
write_file(
add_suffix(DEBUG_FILE, '_'+patch["id"]+'-soup'),
patch_soup.encode('utf-8', formatter='html')
)
if patch["id"] == "scripts":
patch.append('\n')
patch.append(patch_soup.head)
patch.head.unwrap()
else:
patch.append('\n')
patch.append(patch_soup.body)
patch.body.unwrap()
# auto add patch id for replaced element id, unless already defined
patch.contents[1].attrs["id"] = patch.contents[1].attrs.get("id") or patch["id"]
# add patch comment if necessary
if self.pagevars["patchcomment"]:
patch.insert(0, self.soup.new_string(PATCHCOMMENT % (patch["id"]), bs4.Comment))
patch.insert(0, '\n')
patch.unwrap()
if self.__do_debug:
write_file(add_suffix(DEBUG_FILE, '_added-'+patch["id"]), self.soup.encode('utf-8', formatter='html'))
else:
patch.decompose()
if self.__do_debug:
write_file(add_suffix(DEBUG_FILE, 'replaced_patches'), self.soup.encode('utf-8', formatter='html'))
# check for new patches (see if patch had nested patches)
patch_tags = self.soup.find_all("patch")
return self
#@profile
def replace_variables(self):
"""replace {{}} page variables (re based replacement)"""
html = self.soup.encode('utf-8', formatter='html')
page_vars = list(set(PAGEVAR_RE.findall(html)))
if self.__do_debug:
write_file(add_suffix(DEBUG_FILE, 'replacing_vars'), html)
if self.post:
if 'featured' in self.pagevars.keys() and TRUE_RE.match(self.pagevars["featured"]):
self.pagevars["featured"] = True
else:
self.pagevars["featured"] = False
if self.pagevars["featured"]:
dirname = os.path.join(os.path.dirname(self.pagevars["url"]), 'featured', 'index.html')
self.pagevars["featured_label"] = '<a href="' + dirname + '" class="featured group-link">featured</a>'
else:
self.pagevars["featured_label"] = ''
self.pagevars["tag_list"] = group_links(self.pagevars, "tags")
self.pagevars["category_list"] = group_links(self.pagevars, "categories")
# replace all page_vars, recursively
while len(page_vars) > 0:
for page_var in page_vars:
pagevar_brace = "{{%s}}" % (page_var)
if page_var in self.pagevars:
variable = self.pagevars[page_var]
if isinstance(variable, list):
variable = ','.join(variable)
html = html.replace(pagevar_brace, unicode(variable).encode('utf8'))
else:
html = html.replace(pagevar_brace, "not found")
# check for new page variables (see if variable had nested variable)
page_vars = list(set(PAGEVAR_RE.findall(html)))
escaped_page_vars = list(set(ESCAPED_PAGEVAR_RE.findall(html)))
for escaped_page_var in escaped_page_vars:
html = html.replace("{ {%s}}" % (escaped_page_var), "{{%s}}" % (escaped_page_var))
if self.__do_debug:
write_file(add_suffix(DEBUG_FILE, 'replaced_vars'), html)
self.soup = bs4.BeautifulSoup(html, "lxml")
if self.__do_debug:
write_file(add_suffix(DEBUG_FILE, 'replaced_vars_soup'), self.soup.encode('utf-8', formatter='html'))
return self
#@profile
def add_page_comments(self):
"""create a page key value pair"""
etime = time.time() - self.start
if self.pagevars["pagecomment"]:
max_key_len = str(max([len(x) for x in self.pagevars.keys()])+1)
keyval_line = '{{:>{}}} : {{}}'.format(max_key_len)
keyvalpair = [keyval_line.format(k, v) for k, v in sorted(self.pagevars.items()) if k in PAGEVARS_TO_PRINT]
pagevar_comment = '\nquilt pagevars :\n %s' % ('\n '.join(keyvalpair))
else:
pagevar_comment = ''
if self.pagevars["quiltcomment"]:
quilt_comment = QUILTCOMMENT % (
'v{}, {}, {}'.format(self.pagevars["quiltversion"], self.pagevars["quiltbranch"], self.pagevars["quilthash"]),
self.pagevars["url"],
self.pagevars["date"],
self.pagevars["branch"],
self.pagevars["hash"],
math.floor(1000 * etime) / 1000,
pagevar_comment
)
else:
quilt_comment = pagevar_comment
self.soup.head.insert(0, self.soup.new_string(quilt_comment, bs4.Comment))
self.soup.head.insert(0, '\n')
return self
#@profile
def stitch(self):
"""generate the page"""
self.replace_patches()
self.replace_variables()
self.add_page_comments()
return self
#@profile
def remove_empty(self):
"""remove empty tags"""
for tag_name in NO_EMPTY_TAGS:
for tag in self.soup.body.findAll(tag_name):
if not tag.contents:
if tag.attrs.get("id") or tag.attrs.get("class"):
if self.config["emptywarning"]:
print 'warning: empty tag on page', self.pagevars["url"].replace(self.pagevars["domain"], '')
print 'id =', tag.attrs.get("id"), 'class =', tag.attrs.get("class")
else:
tag.decompose()
return self
#@profile
def clean_html(self):
"""clean html, post process html"""
# make sure doctype is set
doctype = [x for x in self.soup.contents if isinstance(x, bs4.Doctype)]
if not doctype:
self.soup.insert(0, bs4.Doctype('html'))
# make sure language is set
if "lang" not in self.soup.html:
self.soup.html["lang"] = "en"
# make sure certain metas are set
        viewport_meta = self.soup.head.find("meta", attrs={"name": "viewport"})
if not viewport_meta:
viewport_meta_tag = self.soup.new_tag('meta')
viewport_meta_tag["name"] = "viewport"
viewport_meta_tag["content"] = "width=device-width, initial-scale=1"
self.soup.head.insert(0, viewport_meta_tag)
self.soup.head.insert(0, '\n')
http_equiv_meta = self.soup.head.find("meta", attrs={"http-equiv": DOTSTAR_RE})
if not http_equiv_meta:
http_equiv_meta_tag = self.soup.new_tag('meta')
http_equiv_meta_tag["http-equiv"] = "X-UA-Compatible"
http_equiv_meta_tag["content"] = "ie=edge"
self.soup.head.insert(0, http_equiv_meta_tag)
self.soup.head.insert(0, '\n')
charset_meta = self.soup.head.find("meta", attrs={"charset": DOTSTAR_RE})
if not charset_meta:
charset_meta_tag = self.soup.new_tag('meta')
charset_meta_tag["charset"] = "utf-8"
self.soup.head.insert(0, charset_meta_tag)
self.soup.head.insert(0, '\n')
        # make sure <a> tags have clean hrefs and alt/title attributes
for a_tag in self.soup.find_all("a"):
if "href" not in a_tag.attrs:
a_tag.attrs["href"] = "#"
if "alt" not in a_tag.attrs or a_tag.attrs["alt"] == "":
a_tag.attrs["alt"] = a_tag.get_text()
if "title" not in a_tag.attrs or a_tag.attrs["title"] == "":
a_tag.attrs["title"] = a_tag.get_text()
        # make sure <img> tags have src and alt/title attributes
for img_tag in self.soup.find_all("img"):
if "src" in img_tag.attrs:
if "alt" not in img_tag.attrs or img_tag.attrs["alt"] == "":
img_tag.attrs["alt"] = img_tag.attrs["src"]
if "title" not in img_tag.attrs or img_tag.attrs["title"] == "":
img_tag.attrs["title"] = img_tag.attrs["src"]
# make sure css links have href and proper rel and type
for link_tag in self.soup.find_all("link"):
if "href" not in link_tag.attrs:
link_tag.decompose()
elif link_tag.attrs["href"].endswith('.css'):
link_tag.attrs["rel"] = "stylesheet"
link_tag.attrs["type"] = "text/css"
# make sure js scripts have src and proper rel and type
for script_tag in self.soup.find_all("script"):
if len(script_tag.contents) == 0 and "src" not in script_tag.attrs:
script_tag.decompose()
else:
if "rel" not in script_tag.attrs:
script_tag.attrs["rel"] = "javascript"
if "type" not in script_tag.attrs:
script_tag.attrs["type"] = "text/javascript"
# add .table to <table>
for table in self.soup.find_all("table"):
table.attrs["class"] = ['table'] + table.attrs["class"] if "class" in table.attrs else 'table'
return self
#@profile
def write(self, pretty=False):
"""write it out"""
page_string = self.soup.prettify(formatter='html') if pretty else self.soup.encode('utf-8', formatter='html')
write_file(self.pagevars["output"], page_string)
if self.config["copymd"] and os.path.isfile(self.pagevars["source"]) and self.pagevars["source"].endswith(".md"):
shutil.copyfile(self.pagevars["source"], self.pagevars["output"].replace('.html', '.md'))
return self
|
|
# Copyright 2009 Shikhar Bhushan
# Copyright 2011 Leonidas Poulopoulos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module is a thin layer of abstraction around the library. It exposes all core functionality."""
import capabilities
import operations
import transport
import logging
from ncclient.xml_ import *
logger = logging.getLogger('ncclient.manager')
OPERATIONS = {
"get": operations.Get,
"get_config": operations.GetConfig,
"dispatch": operations.Dispatch,
"edit_config": operations.EditConfig,
"copy_config": operations.CopyConfig,
"validate": operations.Validate,
"commit": operations.Commit,
"discard_changes": operations.DiscardChanges,
"delete_config": operations.DeleteConfig,
"lock": operations.Lock,
"unlock": operations.Unlock,
"close_session": operations.CloseSession,
"kill_session": operations.KillSession,
"poweroff_machine": operations.PoweroffMachine,
"reboot_machine": operations.RebootMachine,
}
"""Dictionary of base method names and corresponding :class:`~ncclient.operations.RPC` subclasses. It is used to lookup operations, e.g. `get_config` is mapped to :class:`~ncclient.operations.GetConfig`. It is thus possible to add additional operations to the :class:`Manager` API."""
VENDOR_OPERATIONS = {}
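# OpExecutor (defined below) turns every entry in OPERATIONS into a bound Manager
# method, so an extra RPC can be exposed simply by registering its class here
# before the Manager class is created; MyRpc is a hypothetical operations.RPC
# subclass:
#
#     OPERATIONS["my_rpc"] = MyRpc
#     # Manager instances then gain a corresponding .my_rpc(...) method.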
def make_device_handler(device_params):
"""
Create a device handler object that provides device specific parameters and
functions, which are called in various places throughout our code.
If no device_params are defined or the "name" in the parameter dict is not
known then a default handler will be returned.
"""
if device_params is None:
device_params = {}
device_name = device_params.get("name", "default")
# Attempt to import device handler class. All device handlers are
# in a module called "ncclient.devices.<devicename>" and in a class named
# "<devicename>DeviceHandler", with the first letter capitalized.
class_name = "%sDeviceHandler" % device_name.capitalize()
devices_module_name = "ncclient.devices.%s" % device_name
dev_module_obj = __import__(devices_module_name)
handler_module_obj = getattr(getattr(dev_module_obj, "devices"), device_name)
class_obj = getattr(handler_module_obj, class_name)
handler_obj = class_obj(device_params)
return handler_obj
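# Following the naming scheme above, make_device_handler({'name': 'junos'})
# imports ncclient.devices.junos and returns its JunosDeviceHandler, while
# make_device_handler(None) falls back to DefaultDeviceHandler from
# ncclient.devices.default.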
def connect_ssh(*args, **kwds):
"""Initialize a :class:`Manager` over the SSH transport. For documentation of arguments see :meth:`ncclient.transport.SSHSession.connect`.
The underlying :class:`ncclient.transport.SSHSession` is created with :data:`CAPABILITIES`. It is first instructed to :meth:`~ncclient.transport.SSHSession.load_known_hosts` and then all the provided arguments are passed directly to its implementation of :meth:`~ncclient.transport.SSHSession.connect`.
    To invoke advanced vendor-related operations, add device_params={'name': '<vendor_alias>'} to the connection parameters. For the time being, 'junos' and 'nxos' are supported, for Juniper and Cisco Nexus respectively.
"""
# Extract device parameter dict, if it was passed into this function. Need to
# remove it from kwds, since the session.connect() doesn't like extra stuff in
# there.
if "device_params" in kwds:
device_params = kwds["device_params"]
del kwds["device_params"]
else:
device_params = None
device_handler = make_device_handler(device_params)
device_handler.add_additional_ssh_connect_params(kwds)
global VENDOR_OPERATIONS
VENDOR_OPERATIONS.update(device_handler.add_additional_operations())
session = transport.SSHSession(device_handler)
session.load_known_hosts()
session.connect(*args, **kwds)
return Manager(session, device_handler, **kwds)
connect = connect_ssh
"Same as :func:`connect_ssh`, since SSH is the default (and currently, the only) transport."
class OpExecutor(type):
def __new__(cls, name, bases, attrs):
def make_wrapper(op_cls):
def wrapper(self, *args, **kwds):
return self.execute(op_cls, *args, **kwds)
wrapper.func_doc = op_cls.request.func_doc
return wrapper
for op_name, op_cls in OPERATIONS.iteritems():
attrs[op_name] = make_wrapper(op_cls)
return super(OpExecutor, cls).__new__(cls, name, bases, attrs)
def __call__(cls, *args, **kwargs):
def make_wrapper(op_cls):
def wrapper(self, *args, **kwds):
return self.execute(op_cls, *args, **kwds)
wrapper.func_doc = op_cls.request.func_doc
return wrapper
if VENDOR_OPERATIONS:
for op_name, op_cls in VENDOR_OPERATIONS.iteritems():
setattr(cls, op_name, make_wrapper(op_cls))
return super(OpExecutor, cls).__call__(*args, **kwargs)
class Manager(object):
"""For details on the expected behavior of the operations and their parameters refer to :rfc:`4741`.
Manager instances are also context managers so you can use it like this::
with manager.connect("host") as m:
# do your stuff
... or like this::
m = manager.connect("host")
try:
# do your stuff
finally:
m.close_session()
"""
__metaclass__ = OpExecutor
def __init__(self, session, device_handler, timeout=30, *args, **kwargs):
self._session = session
self._async_mode = False
self._timeout = timeout
self._raise_mode = operations.RaiseMode.ALL
self._device_handler = device_handler
def __enter__(self):
return self
def __exit__(self, *args):
self.close_session()
return False
def __set_timeout(self, timeout):
self._timeout = timeout
def __set_async_mode(self, mode):
self._async_mode = mode
def __set_raise_mode(self, mode):
assert(mode in (operations.RaiseMode.NONE, operations.RaiseMode.ERRORS, operations.RaiseMode.ALL))
self._raise_mode = mode
def execute(self, cls, *args, **kwds):
return cls(self._session,
device_handler=self._device_handler,
async=self._async_mode,
timeout=self._timeout,
raise_mode=self._raise_mode).request(*args, **kwds)
def locked(self, target):
"""Returns a context manager for a lock on a datastore, where *target* is the name of the configuration datastore to lock, e.g.::
with m.locked("running"):
# do your stuff
... instead of::
m.lock("running")
try:
# do your stuff
finally:
m.unlock("running")
"""
return operations.LockContext(self._session, target)
def scp(self):
return self._session.scp()
def session(self):
raise NotImplementedError
def __getattr__(self, method):
"""Parse args/kwargs correctly in order to build XML element"""
def _missing(*args, **kwargs):
m = method.replace('_', '-')
root = new_ele(m)
if args:
for arg in args:
sub_ele(root, arg)
r = self.rpc(root)
return r
return _missing
@property
def client_capabilities(self):
""":class:`~ncclient.capabilities.Capabilities` object representing the client's capabilities."""
return self._session._client_capabilities
@property
def server_capabilities(self):
""":class:`~ncclient.capabilities.Capabilities` object representing the server's capabilities."""
return self._session._server_capabilities
@property
def channel_id(self):
return self._session._channel_id
@property
def channel_name(self):
return self._session._channel_name
@property
def session_id(self):
"""`session-id` assigned by the NETCONF server."""
return self._session.id
@property
def connected(self):
"""Whether currently connected to the NETCONF server."""
return self._session.connected
async_mode = property(fget=lambda self: self._async_mode, fset=__set_async_mode)
"""Specify whether operations are executed asynchronously (`True`) or synchronously (`False`) (the default)."""
timeout = property(fget=lambda self: self._timeout, fset=__set_timeout)
"""Specify the timeout for synchronous RPC requests."""
raise_mode = property(fget=lambda self: self._raise_mode, fset=__set_raise_mode)
"""Specify which errors are raised as :exc:`~ncclient.operations.RPCError` exceptions. Valid values are the constants defined in :class:`~ncclient.operations.RaiseMode`. The default value is :attr:`~ncclient.operations.RaiseMode.ALL`."""
|
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`tests` -- Utility methods for tests.
==========================================
.. automodule:: utils
   :platform: Unix
   :synopsis: Tests for Trove.
"""
import subprocess
try:
    # The import this block guards is missing from this copy; eventlet is assumed
    # here, since that is what an EVENT_AVAILABLE flag typically tracks.
    import eventlet  # noqa
    EVENT_AVAILABLE = True
except ImportError:
EVENT_AVAILABLE = False
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true
from proboscis.asserts import Check
from proboscis.asserts import fail
from proboscis import SkipTest
from six.moves.urllib.parse import unquote
from sqlalchemy import create_engine
from troveclient.compat import Dbaas
from troveclient.compat import exceptions
from trove.common import cfg
from trove.common.utils import import_class
from trove.common.utils import import_object
from trove.tests.config import CONFIG as test_config
from trove.tests.util.client import TestClient as TestClient
from trove.tests.util import test_config as CONFIG
from trove.tests.util.users import Requirements
WHITE_BOX = test_config.white_box
CONF = cfg.CONF
def assert_http_code(expected_http_code, func, *args, **kwargs):
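    """Call func(*args, **kwargs) and assert it yields expected_http_code: a normal return counts as 200, while a ClientException is matched against its code."""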
try:
rtn_value = func(*args, **kwargs)
assert_equal(
expected_http_code,
200,
"Expected the function to return http code %s but instead got "
"no error (code 200?)." % expected_http_code)
return rtn_value
except exceptions.ClientException as ce:
assert_equal(
expected_http_code,
ce.code,
"Expected the function to return http code %s but instead got "
"code %s." % (expected_http_code, ce.code))
def create_client(*args, **kwargs):
"""
Using the User Requirements as arguments, finds a user and grabs a new
DBAAS client.
"""
reqs = Requirements(*args, **kwargs)
user = test_config.users.find_user(reqs)
return create_dbaas_client(user)
def create_dbaas_client(user):
"""Creates a rich client for the Trove API using the test config."""
auth_strategy = None
kwargs = {
'service_type': 'database',
'insecure': test_config.values['trove_client_insecure'],
}
def set_optional(kwargs_name, test_conf_name):
value = test_config.values.get(test_conf_name, None)
if value is not None:
kwargs[kwargs_name] = value
force_url = 'override_trove_api_url' in test_config.values
service_url = test_config.get('override_trove_api_url', None)
if user.requirements.is_admin:
service_url = test_config.get('override_admin_trove_api_url',
service_url)
if service_url:
kwargs['service_url'] = service_url
auth_strategy = None
if user.requirements.is_admin:
auth_strategy = test_config.get('admin_auth_strategy',
test_config.auth_strategy)
else:
auth_strategy = test_config.auth_strategy
set_optional('region_name', 'trove_client_region_name')
if test_config.values.get('override_trove_api_url_append_tenant',
False):
kwargs['service_url'] += "/" + user.tenant
if auth_strategy == 'fake':
from troveclient.compat import auth
class FakeAuth(auth.Authenticator):
def authenticate(self):
class FakeCatalog(object):
def __init__(self, auth):
self.auth = auth
def get_public_url(self):
return "%s/%s" % (test_config.dbaas_url,
self.auth.tenant)
def get_token(self):
return self.auth.tenant
return FakeCatalog(self)
auth_strategy = FakeAuth
if auth_strategy:
kwargs['auth_strategy'] = auth_strategy
if not user.requirements.is_admin:
auth_url = test_config.trove_auth_url
else:
auth_url = test_config.values.get('trove_admin_auth_url',
test_config.trove_auth_url)
if test_config.values.get('trove_client_cls'):
cls_name = test_config.trove_client_cls
kwargs['client_cls'] = import_class(cls_name)
dbaas = Dbaas(user.auth_user, user.auth_key, tenant=user.tenant,
auth_url=auth_url, **kwargs)
dbaas.authenticate()
with Check() as check:
check.is_not_none(dbaas.client.auth_token, "Auth token not set!")
if not force_url and user.requirements.is_admin:
expected_prefix = test_config.dbaas_url
actual = dbaas.client.service_url
msg = "Dbaas management url was expected to start with %s, but " \
"was %s." % (expected_prefix, actual)
check.true(actual.startswith(expected_prefix), msg)
return TestClient(dbaas)
def create_nova_client(user, service_type=None):
"""Creates a rich client for the Nova API using the test config."""
if test_config.nova_client is None:
raise SkipTest("No nova_client info specified in the Test Config "
"so this test will be skipped.")
from novaclient.client import Client
if not service_type:
service_type = test_config.nova_client['nova_service_type']
openstack = Client(CONF.nova_client_version,
user.auth_user,
user.auth_key,
project_name=user.tenant,
auth_url=test_config.nova_client['auth_url'],
service_type=service_type, no_cache=True,
cacert=test_config.values.get('cacert', None))
openstack.authenticate()
return TestClient(openstack)
def dns_checker(mgmt_instance):
"""Given a MGMT instance, ensures DNS provisioning worked.
Uses a helper class which, given a mgmt instance (returned by the mgmt
API) can confirm that the DNS record provisioned correctly.
"""
if CONFIG.values.get('trove_dns_checker') is not None:
checker = import_class(CONFIG.trove_dns_checker)
checker()(mgmt_instance)
else:
raise SkipTest("Can't access DNS system to check if DNS provisioned.")
def process(cmd):
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result = process.communicate()
return result
def string_in_list(str, substr_list):
"""Returns True if the string appears in the list."""
return any([str.find(x) >= 0 for x in substr_list])
def unquote_user_host(user_hostname):
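    """Unquote a percent-encoded 'user@host' string and split it on the last '@'; the host part defaults to '%' when absent."""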
unquoted = unquote(user_hostname)
if '@' not in unquoted:
return unquoted, '%'
if unquoted.endswith('@'):
return unquoted, '%'
splitup = unquoted.split('@')
host = splitup[-1]
user = '@'.join(splitup[:-1])
return user, host
def iso_time(time_string):
"""Return a iso formated datetime: 2013-04-15T19:50:23Z."""
ts = time_string.replace(' ', 'T')
try:
micro = ts.rindex('.')
ts = ts[:micro]
except ValueError:
pass
return '%sZ' % ts
def assert_contains(exception_message, substrings):
for substring in substrings:
assert_true(substring in exception_message,
message="'%s' not in '%s'"
% (substring, exception_message))
# TODO(dukhlov): Still required by trove integration
# Should be removed after trove integration fix
# https://bugs.launchpad.net/trove-integration/+bug/1228306
# TODO(cp16net): DO NOT USE needs to be removed
def mysql_connection():
cls = CONFIG.get('mysql_connection',
"local.MySqlConnection")
if cls == "local.MySqlConnection":
return MySqlConnection()
return import_object(cls)()
class MySqlConnection(object):
def assert_fails(self, ip, user_name, password):
from trove.tests.util import mysql
try:
with mysql.create_mysql_connection(ip, user_name, password):
pass
fail("Should have failed to connect: mysql --host %s -u %s -p%s"
% (ip, user_name, password))
except mysql.MySqlPermissionsFailure:
return # Good, this is what we wanted.
except mysql.MySqlConnectionFailure as mcf:
fail("Expected to see permissions failure. Instead got message:"
"%s" % mcf.message)
def create(self, ip, user_name, password):
from trove.tests.util import mysql
return mysql.create_mysql_connection(ip, user_name, password)
class LocalSqlClient(object):
"""A sqlalchemy wrapper to manage transactions."""
def __init__(self, engine, use_flush=True):
self.engine = engine
self.use_flush = use_flush
def __enter__(self):
self.conn = self.engine.connect()
self.trans = self.conn.begin()
return self.conn
def __exit__(self, type, value, traceback):
if self.trans:
if type is not None: # An error occurred
self.trans.rollback()
else:
if self.use_flush:
self.conn.execute(FLUSH)
self.trans.commit()
self.conn.close()
def execute(self, t, **kwargs):
try:
return self.conn.execute(t, kwargs)
except Exception:
self.trans.rollback()
self.trans = None
raise
@staticmethod
def init_engine(user, password, host):
return create_engine("mysql+pymysql://%s:%s@%s:3306" %
(user, password, host),
pool_recycle=1800, echo=True)
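# A minimal usage sketch for LocalSqlClient (illustrative only; the host and
# credentials are placeholders and nothing in this module calls this helper).
# use_flush=False is chosen so the example does not depend on the FLUSH
# statement referenced elsewhere in this module.
def _local_sql_client_example():
    engine = LocalSqlClient.init_engine("root", "secret", "10.0.0.1")
    with LocalSqlClient(engine, use_flush=False) as conn:
        conn.execute("SELECT 1")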
|
|
import asyncio
import logging
from collections import OrderedDict
from typing import Dict, Union, Any
from applebot.exceptions import EventNotFoundError
log = logging.getLogger(__name__)
class EventManager(object):
def __init__(self):
self._events = {} # type: Dict[str, Event]
self._event_type = Event
def __contains__(self, event):
return str(event) in self._events
def __getitem__(self, key) -> 'Event':
return self.get(key)
def __iter__(self) -> 'Event':
for event in self._events.values():
yield event
def __len__(self):
return len(self._events)
def get(self, event, default=None) -> 'Event':
"""Get an event from the manager."""
return self._events.get(str(event), default)
def add(self, event, handler=None, call_limit=None) -> Union['Event', 'EventHandler']:
"""Add a new or existing event or handler to the event manager."""
if handler is not None:
return self.add_handler(event, handler, call_limit)
return self.add_event(event)
async def emit(self, event, *args, **kwargs):
"""Emit an event and call its registered handlers."""
await self.get(event).emit(*args, **kwargs)
def add_event(self, event) -> 'Event':
"""Add a new or existing event to the event manager."""
if not isinstance(event, self._event_type) and not isinstance(event, str):
raise TypeError('Parameter \'event\' must be of type Event or str')
if event in self:
if isinstance(event, self._event_type):
return self.get(event).combine(event)
return self.get(event)
self._events[str(event)] = event if isinstance(event, self._event_type) else self._event_type(event)
return self.get(event)
def add_handler(self, event, handler, call_limit=None) -> 'EventHandler':
"""Add a new or existing handler to a new or existing event."""
if event not in self:
raise EventNotFoundError('Event \'{0}\' doesn\'t exist or hasn\'t been registered to this EventManager.'.format(event))
return self.get(event).add(handler, call_limit)
class Event(object):
def __init__(self, name):
self.name = name # type: str
self.enabled = True # type: bool
self._handlers = OrderedDict() # type: OrderedDict[str, EventHandler]
self._handler_type = EventHandler
self._combined_type = CombinedEvent
def __str__(self):
return self.name
def __contains__(self, handler):
return hash(handler) in self._handlers
def __getitem__(self, key) -> 'EventHandler':
return self.get(key)
def __setitem__(self, key, value):
return self.set(key, value)
def __delitem__(self, key):
return self.remove(key)
def __iter__(self) -> 'EventHandler':
for handler in self._handlers.values():
yield handler
def __len__(self):
return len(self._handlers)
def __hash__(self):
return hash(self.name)
async def emit(self, *args, **kwargs):
"""Emit and call the handlers of the event."""
if self.enabled and len(self):
log.debug('Emitting event: {}'.format(self.name))
for handler in self:
await handler.call(*args, **kwargs)
def get(self, handler, default=None) -> 'EventHandler':
"""Get a handler from the event."""
return self._handlers.get(hash(handler), default)
def set(self, key, handler):
"""Set or remove a handler from the event."""
if handler is None:
return self.remove(key)
if hash(key) != hash(handler):
raise ValueError('The key must match the assigned handler.')
return self.add(handler)
def add(self, handler, call_limit=None) -> 'EventHandler':
"""Add a handler to the event."""
if not isinstance(handler, self._handler_type) and not callable(handler):
raise TypeError('Parameter \'handler\' must be callable or of type EventHandler')
if handler not in self:
self._handlers[hash(handler)] = handler if isinstance(handler, self._handler_type) else self._handler_type(handler)
self.get(handler).call_limit = call_limit
return self.get(handler)
def remove(self, handler):
"""Remove a handler from the event."""
        self._handlers.pop(hash(handler), None)
def clear(self):
"""Remove all the handlers from the event."""
self._handlers.clear()
def enable(self, enabled=True):
"""Enable or set enabled to value."""
self.enabled = enabled is True
def disable(self):
"""Disable the event."""
self.enabled = False
def combine(self, other) -> 'Event':
"""Combine with another event and merge handler into a single list."""
if other is not self:
self._combined_type(self, other)
return self
class CombinedEvent(Event):
def __init__(self, event, *others):
super().__init__(event)
self.name = event.name # type: str
for event in others:
self._absorb(event)
def _absorb(self, event):
self._handlers.update(event._handlers)
event._handlers = self
def items(self, *args, **kwargs):
return self._handlers.items(*args, **kwargs)
def keys(self, *args, **kwargs):
return self._handlers.keys(*args, **kwargs)
def values(self, *args, **kwargs):
return self._handlers.values(*args, **kwargs)
def update(self, *args, **kwargs):
return self._handlers.update(*args, **kwargs)
def pop(self, *args, **kwargs):
return self._handlers.pop(*args, **kwargs)
class EventHandler(object):
def __init__(self, handler, call_limit=None):
self._handler = None # type: asyncio.coroutine
self._enabled = True # type: bool
self.call_limit = call_limit # type: int
self.handler = handler # type: asyncio.coroutine
def __hash__(self):
"""Get a hash by hashing the handler."""
return hash(self._handler)
async def call(self, *args, **kwargs) -> Any:
"""Call the handler."""
if self.enabled:
if self.call_limit:
self.call_limit -= 1
return await self.handler(*args, **kwargs)
return None
def limit(self, limit=1):
"""Set a limit for the amount of times this handler will be called."""
self.call_limit = int(limit)
def enable(self, value=True):
"""Enable or set enabled to value."""
self.enabled = value
def disable(self):
"""Disable the handler."""
self.enabled = False
@property
def enabled(self) -> bool:
"""Get enabled status."""
return self._enabled and (self.call_limit is None or self.call_limit > 0)
@enabled.setter
def enabled(self, enabled):
"""Set enabled status."""
self._enabled = bool(enabled)
@property
def handler(self) -> asyncio.coroutine:
"""Get handler."""
return self._handler
@handler.setter
def handler(self, handler):
"""Set handler."""
if not callable(handler):
raise TypeError('Parameter \'handler\' must be callable')
self._handler = handler
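# A minimal usage sketch (illustrative only; the event name and handler below
# are examples and nothing in this module calls this coroutine).
async def _event_manager_example():
    events = EventManager()
    events.add_event('message')

    async def on_message(text):
        log.debug('received: %s', text)

    # call_limit=1 means the handler is disabled after its first call.
    events.add_handler('message', on_message, call_limit=1)
    await events.emit('message', 'hello world')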
|
|
import time
import json
import re
import traceback
import requests
import threading
from slacksocket import SlackSocket
from slackclient import SlackClient
from block_io import BlockIo
version = 2 # API version
from key_pin import *
block_io_doge = BlockIo(blockio_api_doge_key, blockio_secret_pin, version)
block_io_btc = BlockIo(blockio_api_btc_key, blockio_secret_pin, version)
block_io_ltc = BlockIo(blockio_api_ltc_key, blockio_secret_pin, version)
ss = SlackSocket(slack_token,translate=False) # translate will look up and replace user and channel IDs with their human-readable names (default is True).
sc = SlackClient(slack_token)
url = 'https://shapeshift.io/shift'
coincap_doge = 'http://www.coincap.io/page/DOGE'
coincap_btc = 'http://www.coincap.io/page/BTC'
coincap_ltc = 'http://www.coincap.io/page/LTC'
cryptocomp_doge = 'https://www.cryptocompare.com/api/data/price?fsym=DOGE&tsyms=USD'
cryptocomp_btc = 'https://www.cryptocompare.com/api/data/price?fsym=BTC&tsyms=USD'
cryptocomp_ltc = 'https://www.cryptocompare.com/api/data/price?fsym=LTC&tsyms=USD'
shapeshift_pubkey = "06c04cfc9f18632d50ca546ba4f3dc49edcaf6217e3cefe73ed98d92cc2f37e764df8371fc3d23847aee4a4d65bdaa2defd30ca43311378827a94146feb017cb"
min_amount = {'doge': 2.0, 'ltc': 0.002, 'btc': 0.0002}
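# Command summary (as parsed by main() below); anything before '!tipbot' in a
# message is ignored:
#   !tipbot tip <amount|all> <doge|ltc|btc> <user> [<user> ...]
#   !tipbot make it rain <amount> <doge|ltc|btc> [online]
#   !tipbot withdraw <amount|all> <doge|ltc|btc> <address>
#   !tipbot shift <amount> <pair>   (pair: btc_ltc, btc_doge, ltc_btc, ltc_doge, doge_btc, doge_ltc)
#   !tipbot addresses <doge|ltc|btc>
#   !tipbot register
#   !tipbot check
#   !tipbot help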
def main():
time.sleep(1)
for event in ss.events():
j = json.loads(event.json)
if j['type'] != 'message':
continue
if '!tipbot' not in j['text']:
continue
print(j['text'])
# user name/id lookups
id2name = {}
name2id = {}
try:
users = sc.api_call("users.list")
for user in users['members']:
id2name[user['id']] = user['name']
name2id[user['name']] = user['id']
except:
print('failed to build user lookups')
continue
# split message and find '!tipbot'
splitmessage = j['text'].split()
print(splitmessage)
tipindex = 0
for i in range(0, len(splitmessage), 1):
if splitmessage[i] == '!tipbot':
tipindex = i
break
try:
command = splitmessage[tipindex + 1]
except:
continue
# !tipbot tip
if command == 'tip':
if len(splitmessage) < (tipindex + 4):
continue
coin = splitmessage[tipindex + 3]
if coin not in min_amount.keys():
print('unknown coin ='+coin)
continue
if splitmessage[tipindex + 2] != 'all':
try:
amount = float(splitmessage[tipindex + 2])
except:
print('amount not float ='+splitmessage[tipindex + 2])
continue
if amount < min_amount[coin]:
print('amount too low ='+splitmessage[tipindex + 2])
continue
# get list of valid users from command
users = []
accounts = block_io_doge.get_my_addresses()
reg_users = []
for g in range(0,len(accounts['data']['addresses']),1):
try:
reg_users.append(accounts['data']['addresses'][g]['label'])
except:
continue
for i in range(tipindex + 4, len(splitmessage), 1):
if splitmessage[i] in name2id.keys():
users.append(splitmessage[i]);
if name2id[splitmessage[i]] not in reg_users:
print(sc.api_call("chat.postMessage", channel=j['channel'], text=splitmessage[i]+' is not registered. Please !tipbot register ', username='pybot', icon_emoji=':robot_face:'))
# build api strings
tousers = str(','.join(name2id[user] for user in users))
toreadable = str(','.join(users))
if splitmessage[tipindex + 2] != 'all':
toeach = str(','.join(str(amount) for user in users))
print(id2name[j['user']]+' ('+j['user']+') tipped '+str(amount)+' '+coin+' to '+toreadable+' ('+tousers+')')
if coin == 'doge' and splitmessage[tipindex + 2] == 'all':
try:
balance_doge = block_io_doge.get_address_balance(labels=j['user'])
print(balance_doge['data']['available_balance'])
fee = block_io_doge.get_network_fee_estimate(amounts=balance_doge['data']['available_balance'], to_labels=tousers, priority='low')
print(fee)
balance_minus_fee = float(balance_doge['data']['available_balance']) - float(fee['data']['estimated_network_fee'])
print(balance_minus_fee)
block_io_doge.withdraw_from_labels(amounts=balance_minus_fee, from_labels=j['user'], to_labels=tousers, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' tipped '+str(balance_minus_fee)+' ' +coin+' to '+toreadable+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_doge.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_labels=tousers, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' tipped '+str(splitexc[n])+' ' +coin+' to '+toreadable+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to tip all doge')
continue
elif coin == 'doge':
try:
block_io_doge.withdraw_from_labels(amounts=toeach, from_labels=j['user'], to_labels=tousers, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' tipped '+toreadable+' '+str(amount)+' '+coin+'! :moon:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_doge.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_labels=tousers, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' tipped '+str(splitexc[n])+' ' +coin+' to '+toreadable+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
                        print('failed to tip doge')
continue
elif coin == 'ltc' and splitmessage[tipindex + 2] == 'all':
try:
balance_ltc = block_io_ltc.get_address_balance(labels=j['user'])
print(balance_ltc['data']['available_balance'])
fee = block_io_ltc.get_network_fee_estimate(amounts=balance_ltc['data']['available_balance'], to_labels=tousers, priority='low')
print(fee)
balance_minus_fee = float(balance_ltc['data']['available_balance']) - float(fee['data']['estimated_network_fee'])
print(balance_minus_fee)
block_io_ltc.withdraw_from_labels(amounts=balance_minus_fee, from_labels=j['user'], to_labels=tousers, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' tipped '+str(splitexc[n])+' ' +coin+' to '+toreadable+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_ltc.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_labels=tousers, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' tipped '+str(splitexc[n])+' ' +coin+' to '+toreadable+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to tip all ltc')
continue
elif coin == 'ltc':
try:
block_io_ltc.withdraw_from_labels(amounts=toeach, from_labels=j['user'], to_labels=tousers, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' tipped '+toreadable+' '+str(amount)+' '+coin+'! :moon:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_ltc.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_labels=tousers, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' tipped '+str(splitexc[n])+' ' +coin+' to '+toreadable+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
                        print('failed to tip ltc')
continue
elif coin == 'btc' and splitmessage[tipindex + 2] == 'all':
try:
balance_btc = block_io_btc.get_address_balance(labels=j['user'])
print(balance_btc['data']['available_balance'])
fee = block_io_btc.get_network_fee_estimate(amounts=balance_btc['data']['available_balance'], to_labels=tousers, priority='low')
print(fee)
balance_minus_fee = float(balance_btc['data']['available_balance']) - float(fee['data']['estimated_network_fee'])
print(balance_minus_fee)
block_io_btc.withdraw_from_labels(amounts=balance_minus_fee, from_labels=j['user'], to_labels=tousers, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' tipped all ' +coin+' to '+toreadable+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_btc.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_labels=tousers, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' tipped '+str(splitexc[n])+' ' +coin+' to '+toreadable+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to tip all btc')
continue
elif coin == 'btc':
try:
block_io_btc.withdraw_from_labels(amounts=toeach, from_labels=j['user'], to_labels=tousers, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' tipped '+toreadable+' '+str(amount)+' '+coin+'! :moon:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_btc.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_labels=tousers, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' tipped '+str(splitexc[n])+' ' +coin+' to '+toreadable+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
                        print('failed to tip btc')
continue
# !tipbot make it rain
elif command == 'make':
            if len(splitmessage) < (tipindex + 6):
continue
if splitmessage[tipindex + 2] != 'it' or splitmessage[tipindex + 3] != 'rain':
continue
coin = splitmessage[tipindex + 5]
if coin not in min_amount.keys():
print('unknown coin ='+coin)
continue
try:
amount = float(splitmessage[tipindex + 4])
except:
print('amount not float ='+splitmessage[tipindex + 4])
continue
if amount < min_amount[coin]:
print('amount too low ='+splitmessage[tipindex + 4])
continue
if coin == 'doge':
try:
addresses = block_io_doge.get_my_addresses()
users = []
for user in addresses['data']['addresses']:
if user['label'] in id2name.keys() and user['label'] != j['user']:
users.append(user['label'])
if len(splitmessage) > 6 and splitmessage[tipindex + 6] == 'online':
try:
user_on_list = sc.api_call("users.list", presence='1')
for o in range(0,99,1):
try:
if user_on_list['members'][o]['presence'] == 'away':
try:
users.remove(user_on_list['members'][o]['id'])
except:
continue
except:
continue
except:
continue
amounteach = amount / len(users)
if amounteach < min_amount[coin]:
                        print('amounteach too small ='+str(amounteach))
continue
tousers = str(','.join(user for user in users))
toreadable = str(','.join(id2name[user] for user in users))
toeach = str(','.join('%.8f'%amounteach for user in users))
print(id2name[j['user']]+' ('+j['user']+') made it rain on '+toreadable+' ('+tousers+') '+str(amount)+' ('+'%.8f' % amounteach+' each)');
block_io_doge.withdraw_from_labels(amounts=toeach, from_labels=j['user'], to_labels=tousers, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' tipped '+toreadable+' '+'%.8f' % amounteach+' '+coin+'! :moon:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to make it rain doge')
continue
elif coin == 'ltc':
try:
addresses = block_io_ltc.get_my_addresses()
users = []
for user in addresses['data']['addresses']:
if user['label'] in id2name.keys() and user['label'] != j['user']:
users.append(user['label'])
if len(splitmessage) > 6 and splitmessage[tipindex + 6] == 'online':
try:
user_on_list = sc.api_call("users.list", presence='1')
for o in range(0,99,1):
try:
if user_on_list['members'][o]['presence'] == 'away':
try:
users.remove(user_on_list['members'][o]['id'])
except:
continue
except:
continue
except:
continue
amounteach = amount / len(users)
if amounteach < min_amount[coin]:
                        print('amounteach too small ='+str(amounteach))
continue
tousers = str(','.join(user for user in users))
toreadable = str(','.join(id2name[user] for user in users))
toeach = str(','.join('%.8f'%amounteach for user in users))
print(id2name[j['user']]+' ('+j['user']+') made it rain on '+toreadable+' ('+tousers+') '+str(amount)+' ('+'%.8f' % amounteach+' each)');
block_io_ltc.withdraw_from_labels(amounts=toeach, from_labels=j['user'], to_labels=tousers, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' tipped '+toreadable+' '+'%.8f' % amounteach+' '+coin+'! :moon:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to make it rain ltc')
continue
elif coin == 'btc':
try:
addresses = block_io_btc.get_my_addresses()
users = []
for user in addresses['data']['addresses']:
if user['label'] in id2name.keys() and user['label'] != j['user']:
users.append(user['label'])
if len(splitmessage) > 6 and splitmessage[tipindex + 6] == 'online':
try:
user_on_list = sc.api_call("users.list", presence='1')
for o in range(0,99,1):
try:
if user_on_list['members'][o]['presence'] == 'away':
try:
users.remove(user_on_list['members'][o]['id'])
except:
continue
except:
continue
except:
continue
amounteach = amount / len(users)
if amounteach < min_amount[coin]:
                        print('amounteach too small ='+str(amounteach))
continue
tousers = str(','.join(user for user in users))
toreadable = str(','.join(id2name[user] for user in users))
toeach = str(','.join('%.8f'%amounteach for user in users))
print(id2name[j['user']]+' ('+j['user']+') made it rain on '+toreadable+' ('+tousers+') '+str(amount)+' ('+'%.8f' % amounteach+' each)');
block_io_btc.withdraw_from_labels(amounts=toeach, from_labels=j['user'], to_labels=tousers, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' tipped '+toreadable+' '+'%.8f' % amounteach+' '+coin+'! :moon:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to make it rain btc')
continue
# !tipbot withdraw
elif command == 'withdraw':
            if len(splitmessage) < (tipindex + 5):
continue
amount = splitmessage[tipindex + 2]
coin = splitmessage[tipindex + 3]
address = splitmessage[tipindex + 4]
if coin not in min_amount.keys():
print('unknown coin ='+coin)
continue
print(id2name[j['user']]+' ('+j['user']+') withdraws '+amount+' '+coin+' to '+address)
if coin == 'doge' and amount == 'all':
try:
balance_doge = block_io_doge.get_address_balance(labels=j['user'])
print(balance_doge['data']['available_balance'])
fee = block_io_doge.get_network_fee_estimate(amounts=balance_doge['data']['available_balance'], to_addresses=address, priority='low')
print(fee)
balance_minus_fee = float(balance_doge['data']['available_balance']) - float(fee['data']['estimated_network_fee'])
print(balance_minus_fee)
block_io_doge.withdraw_from_labels(amounts=balance_minus_fee, from_labels=j['user'], to_addresses=address, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' withdrew '+str(amount)+' '+coin+' to '+address+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_doge.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_addresses=address, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' withdrew '+str(amount)+' '+coin+' to '+address+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to withdraw doge')
continue
elif coin == 'doge':
try:
block_io_doge.withdraw_from_labels(amounts=amount, from_labels=j['user'], to_addresses=address, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' withdrew '+str(amount)+' '+coin+' to '+address+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_doge.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_addresses=address, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' withdrew '+str(splitexc[n])+' '+coin+' to '+address+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to withdraw doge')
continue
elif coin == 'ltc' and amount == 'all':
try:
balance_ltc = block_io_ltc.get_address_balance(labels=j['user'])
print(balance_ltc['data']['available_balance'])
fee = block_io_ltc.get_network_fee_estimate(amounts=balance_ltc['data']['available_balance'], to_addresses=address, priority='low')
print(fee)
balance_minus_fee = float(balance_ltc['data']['available_balance']) - float(fee['data']['estimated_network_fee'])
print(balance_minus_fee)
block_io_ltc.withdraw_from_labels(amounts=balance_minus_fee, from_labels=j['user'], to_addresses=address, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' withdrew '+str(amount)+' '+coin+' to '+address+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_ltc.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_addresses=address, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' withdrew '+str(amount)+' '+coin+' to '+address+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to withdraw ltc')
continue
elif coin == 'ltc':
try:
block_io_ltc.withdraw_from_labels(amounts=amount, from_labels=j['user'], to_addresses=address, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' withdrew '+str(amount)+' '+coin+' to '+address+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_ltc.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_addresses=address, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' withdrew '+str(splitexc[n])+' '+coin+' to '+address+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to withdraw ltc')
continue
elif coin == 'btc' and amount == 'all':
try:
balance_btc = block_io_btc.get_address_balance(labels=j['user'])
print(balance_btc['data']['available_balance'])
fee = block_io_btc.get_network_fee_estimate(amounts=balance_btc['data']['available_balance'], to_addresses=address, priority='low')
print(fee)
balance_minus_fee = float(balance_btc['data']['available_balance']) - float(fee['data']['estimated_network_fee'])
print(balance_minus_fee)
block_io_btc.withdraw_from_labels(amounts=balance_minus_fee, from_labels=j['user'], to_addresses=address, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' withdrew '+str(amount)+' '+coin+' to '+address+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_btc.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_addresses=address, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' withdrew '+str(amount)+' '+coin+' to '+address+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to withdraw btc')
continue
elif coin == 'btc':
try:
block_io_btc.withdraw_from_labels(amounts=amount, from_labels=j['user'], to_addresses=address, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' withdrew '+str(amount)+' '+coin+' to '+address+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
                        block_io_btc.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_addresses=address, priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' withdrew '+str(splitexc[n])+' '+coin+' to '+address+'! :+1:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to withdraw btc')
continue
# tipbot shift
elif command == 'shift':
            if len(splitmessage) < (tipindex + 4):
continue
amount = splitmessage[tipindex + 2]
coin = splitmessage[tipindex + 3]
pairs = set(['btc_ltc', 'btc_doge', 'ltc_btc', 'ltc_doge', 'doge_btc', 'doge_ltc'])
if coin not in pairs:
print('unknown coin ='+coin)
continue
print(id2name[j['user']]+' ('+j['user']+') shifted '+amount+' '+coin)
if coin == 'btc_ltc':
try:
address_btc = block_io_btc.get_address_by_label(label=j['user'])
address_ltc = block_io_ltc.get_address_by_label(label=j['user'])
payload = {"withdrawal":address_ltc['data']['address'], "pair":"btc_ltc", "returnAddress":address_btc['data']['address'], "apiKey":shapeshift_pubkey}
print(payload)
try:
r = requests.post(url, data=payload)
response = r.text
jresponse = json.loads(response)
print(jresponse)
except:
traceback.print_exc()
                        print('failed to generate shapeshift transaction')
continue
amount = float(''.join(ele for ele in splitmessage[tipindex + 2] if ele.isdigit() or ele == '.'))
print(amount)
block_io_btc.withdraw_from_labels(amounts=amount, from_labels=j['user'], to_addresses=jresponse['deposit'], priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=str(id2name[j['user']])+' shifted '+str(amount)+' btc to ltc! :unicorn_face:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_btc.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_addresses=jresponse['deposit'], priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=str(id2name[j['user']])+' shifted '+str(splitexc[n])+' btc to ltc :unicorn_face:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to shift')
continue
elif coin == 'btc_doge':
try:
address_btc = block_io_btc.get_address_by_label(label=j['user'])
address_doge = block_io_doge.get_address_by_label(label=j['user'])
payload = {"withdrawal":address_doge['data']['address'], "pair":"btc_doge", "returnAddress":address_btc['data']['address'], "apiKey":shapeshift_pubkey}
print(payload)
try:
r = requests.post(url, data=payload)
response = r.text
jresponse = json.loads(response)
print(jresponse)
except:
traceback.print_exc()
                        print('failed to generate shapeshift transaction')
continue
amount = float(''.join(ele for ele in splitmessage[tipindex + 2] if ele.isdigit() or ele == '.'))
print(amount)
block_io_btc.withdraw_from_labels(amounts=amount, from_labels=j['user'], to_addresses=jresponse['deposit'], priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=str(id2name[j['user']])+' shifted '+str(amount)+' btc to doge! :unicorn_face:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_btc.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_addresses=jresponse['deposit'], priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=str(id2name[j['user']])+' shifted '+str(splitexc[n])+' btc to doge! :unicorn_face', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to shift')
elif coin == 'ltc_doge':
try:
address_ltc = block_io_ltc.get_address_by_label(label=j['user'])
address_doge = block_io_doge.get_address_by_label(label=j['user'])
payload = {"withdrawal":address_doge['data']['address'], "pair":"ltc_doge", "returnAddress":address_ltc['data']['address'], "apiKey":shapeshift_pubkey}
print(payload)
try:
r = requests.post(url, data=payload)
response = r.text
jresponse = json.loads(response)
print(jresponse)
except:
traceback.print_exc()
                        print('failed to generate shapeshift transaction')
continue
amount = float(''.join(ele for ele in splitmessage[tipindex + 2] if ele.isdigit() or ele == '.'))
print(amount)
block_io_ltc.withdraw_from_labels(amounts=amount, from_labels=j['user'], to_addresses=jresponse['deposit'], priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=str(id2name[j['user']])+' shifted '+str(amount)+' ltc to doge! :unicorn_face:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_ltc.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_addresses=jresponse['deposit'], priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=str(id2name[j['user']])+' shifted '+str(splitexc[n])+' ltc to doge! :unicorn_face:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to shift')
elif coin == 'ltc_btc':
try:
address_ltc = block_io_ltc.get_address_by_label(label=j['user'])
address_btc = block_io_btc.get_address_by_label(label=j['user'])
payload = {"withdrawal":address_btc['data']['address'], "pair":"ltc_btc", "returnAddress":address_ltc['data']['address'], "apiKey":shapeshift_pubkey}
print(payload)
try:
r = requests.post(url, data=payload)
response = r.text
jresponse = json.loads(response)
print(jresponse)
except:
traceback.print_exc()
                        print('failed to generate shapeshift transaction')
continue
amount = float(''.join(ele for ele in splitmessage[tipindex + 2] if ele.isdigit() or ele == '.'))
print(amount)
block_io_ltc.withdraw_from_labels(amounts=amount, from_labels=j['user'], to_addresses=jresponse['deposit'], priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=str(id2name[j['user']])+' shifted '+str(amount)+' ltc to btc! :unicorn_face:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_ltc.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_addresses=jresponse['deposit'], priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=str(id2name[j['user']])+' shifted '+str(splitexc[n])+' ltc to btc! :unicorn_face:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to shift')
elif coin == 'doge_btc':
try:
address_doge = block_io_doge.get_address_by_label(label=j['user'])
address_btc = block_io_btc.get_address_by_label(label=j['user'])
payload = {"withdrawal":address_btc['data']['address'], "pair":"doge_btc", "returnAddress":address_doge['data']['address'], "apiKey":shapeshift_pubkey}
print(payload)
try:
r = requests.post(url, data=payload)
response = r.text
jresponse = json.loads(response)
print(jresponse)
except:
traceback.print_exc()
                        print('failed to generate shapeshift transaction')
continue
amount = float(''.join(ele for ele in splitmessage[tipindex + 2] if ele.isdigit() or ele == '.'))
print(amount)
block_io_doge.withdraw_from_labels(amounts=amount, from_labels=j['user'], to_addresses=jresponse['deposit'], priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=str(id2name[j['user']])+' shifted '+str(amount)+' doge to btc! :unicorn_face:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_doge.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_addresses=jresponse['deposit'], priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=str(id2name[j['user']])+' shifted '+str(splitexc[n])+' doge to btc! :unicorn_face:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to shift')
elif coin == 'doge_ltc':
try:
address_doge = block_io_doge.get_address_by_label(label=j['user'])
address_ltc = block_io_ltc.get_address_by_label(label=j['user'])
payload = {"withdrawal":address_ltc['data']['address'], "pair":"doge_ltc", "returnAddress":address_doge['data']['address'], "apiKey":shapeshift_pubkey}
print(payload)
try:
r = requests.post(url, data=payload)
response = r.text
jresponse = json.loads(response)
print(jresponse)
except:
traceback.print_exc()
                        print('failed to generate shapeshift transaction')
continue
amount = float(''.join(ele for ele in splitmessage[tipindex + 2] if ele.isdigit() or ele == '.'))
print(amount)
block_io_doge.withdraw_from_labels(amounts=amount, from_labels=j['user'], to_addresses=jresponse['deposit'], priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=j['user']+' shifted '+str(amount)+' doge to ltc! :unicorn_face:', username='pybot', icon_emoji=':robot_face:'))
except:
try:
exc = traceback.format_exc()
splitexc = exc.split()
n = len(splitexc)-2
print(splitexc[n])
block_io_doge.withdraw_from_labels(amounts=splitexc[n], from_labels=j['user'], to_addresses=jresponse['deposit'], priority='low')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' shifted '+str(splitexc[n])+' doge to ltc! :unicorn_face:', username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to shift')
# !tipbot addresses
elif command == 'addresses':
            if len(splitmessage) < (tipindex + 3):
continue
coin = splitmessage[tipindex + 2]
if coin not in min_amount.keys():
print('unknown coin ='+coin)
continue
if coin == 'doge':
try:
addresses = block_io_doge.get_my_addresses()
for address in addresses['data']['addresses']:
if address['label'] not in id2name.keys():
continue
balance = block_io_doge.get_address_balance(addresses=address['address'])
print(sc.api_call("chat.postMessage", channel=j['channel'], text='|'+id2name[address['label']]+'|-- : '+address['address']+': '+balance['data']['available_balance'], username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to get doge addresses')
continue
elif coin == 'ltc':
try:
addresses = block_io_ltc.get_my_addresses()
for address in addresses['data']['addresses']:
if address['label'] not in id2name.keys():
continue
balance = block_io_ltc.get_address_balance(addresses=address['address'])
print(sc.api_call("chat.postMessage", channel=j['channel'], text='|'+id2name[address['label']]+'|-- : '+address['address']+': '+balance['data']['available_balance'], username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to get ltc addresses')
continue
elif coin == 'btc':
try:
addresses = block_io_btc.get_my_addresses()
for address in addresses['data']['addresses']:
if address['label'] not in id2name.keys():
continue
balance = block_io_btc.get_address_balance(addresses=address['address'])
print(sc.api_call("chat.postMessage", channel=j['channel'], text='|'+id2name[address['label']]+'|-- : '+address['address']+': '+balance['data']['available_balance'], username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to get btc addresses')
continue
# !tipbot register
elif command == 'register':
try:
block_io_doge.get_new_address(label=j['user'])
except:
traceback.print_exc()
print('failed to create doge address for '+id2name[j['user']]+' ('+j['user']+')')
try:
block_io_ltc.get_new_address(label=j['user'])
except:
traceback.print_exc()
print('failed to create ltc address for '+id2name[j['user']]+' ('+j['user']+')')
try:
block_io_btc.get_new_address(label=j['user'])
except:
traceback.print_exc()
print('failed to create btc address for '+id2name[j['user']]+' ('+j['user']+')')
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' registered! :tada:', username='pybot', icon_emoji=':robot_face:'))
# !tipbot check
elif command == 'check':
try:
balance = block_io_doge.get_address_balance(labels=j['user'])
address = block_io_doge.get_address_by_label(label=j['user'])
try:
c_doge = requests.get(coincap_doge)
c_text_doge = c_doge.text
jc_doge = json.loads(c_text_doge)
print('doge $'+str(jc_doge['usdPrice']))
usd_doge = float("{0:.2f}".format(float(balance['data']['available_balance'])*float(jc_doge['usdPrice'])))
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' dogecoin: - '+address['data']['address']+' - '+balance['data']['available_balance']+' doge ~$'+str(usd_doge), username='pybot', icon_emoji=':robot_face:'))
except:
c_doge = requests.get(cryptocomp_doge)
c_text_doge = c_doge.text
jc_doge = json.loads(c_text_doge)
print('doge $'+str(jc_doge['Data'][0]['Price']))
usd_doge = float("{0:.2f}".format(float(balance['data']['available_balance'])*float(jc_doge['Data'][0]['Price'])))
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' dogecoin: - '+address['data']['address']+' - '+balance['data']['available_balance']+' doge ~$'+str(usd_doge), username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to check doge for '+id2name[j['user']]+' ('+j['user']+')')
try:
balance = block_io_btc.get_address_balance(labels=j['user'])
address = block_io_btc.get_address_by_label(label=j['user'])
try:
c_btc = requests.get(coincap_btc)
c_text_btc = c_btc.text
jc_btc = json.loads(c_text_btc)
print('btc $'+str(jc_btc['usdPrice']))
usd_btc = float("{0:.2f}".format(float(balance['data']['available_balance'])*float(jc_btc['usdPrice'])))
print(usd_btc)
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' bitcoin: - '+address['data']['address']+' - '+balance['data']['available_balance']+' btc ~$'+str(usd_btc), username='pybot', icon_emoji=':robot_face:'))
except:
c_btc = requests.get(cryptocomp_btc)
c_text_btc = c_btc.text
jc_btc = json.loads(c_text_btc)
print('btc $'+str(jc_btc['Data'][0]['Price']))
usd_btc = float("{0:.2f}".format(float(balance['data']['available_balance'])*float(jc_btc['Data'][0]['Price'])))
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' btccoin: - '+address['data']['address']+' - '+balance['data']['available_balance']+' btc ~$'+str(usd_btc), username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to check btc for '+id2name[j['user']]+' ('+j['user']+')')
try:
balance = block_io_ltc.get_address_balance(labels=j['user'])
address = block_io_ltc.get_address_by_label(label=j['user'])
try:
c_ltc = requests.get(coincap_ltc)
c_text_ltc = c_ltc.text
jc_ltc = json.loads(c_text_ltc)
print('ltc $'+str(jc_ltc['usdPrice']))
usd_ltc = float("{0:.2f}".format(float(balance['data']['available_balance'])*float(jc_ltc['usdPrice'])))
print(usd_ltc)
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' litecoin: - '+address['data']['address']+' - '+balance['data']['available_balance']+' ltc ~$'+str(usd_ltc), username='pybot', icon_emoji=':robot_face:'))
except:
c_ltc = requests.get(cryptocomp_ltc)
c_text_ltc = c_ltc.text
jc_ltc = json.loads(c_text_ltc)
print('ltc $'+str(jc_ltc['Data'][0]['Price']))
usd_ltc = float("{0:.2f}".format(float(balance['data']['available_balance'])*float(jc_ltc['Data'][0]['Price'])))
print(sc.api_call("chat.postMessage", channel=j['channel'], text=id2name[j['user']]+' ltccoin: - '+address['data']['address']+' - '+balance['data']['available_balance']+' ltc ~$'+str(usd_ltc), username='pybot', icon_emoji=':robot_face:'))
except:
traceback.print_exc()
print('failed to check ltc for '+id2name[j['user']]+' ('+j['user']+')')
# !tipbot help
elif command == 'help':
print(sc.api_call("chat.postMessage", channel=j['channel'], text='https://github.com/peoplma/slacktipbot', username='pybot', icon_emoji=':robot_face:'))
def secondary():
try:
while True:
main()
except:
traceback.print_exc()
print('Resuming in 2sec...')
time.sleep(2)
print('Resumed')
while True:
secondary()
|
|
# -*- coding: utf-8 -*-
"""
Project Tracking & Management
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
mode_task = settings.get_project_mode_task()
# =============================================================================
def index():
""" Module's Home Page """
if mode_task:
# Bypass home page & go direct to browsing Tasks for a Project
redirect(URL(f="project", vars={"tasks":1}))
else:
# Bypass home page & go direct to filterable list of Projects
redirect(URL(f="project"))
# =============================================================================
def create():
""" Redirect to project/create """
redirect(URL(f="project", args="create"))
# -----------------------------------------------------------------------------
def project():
""" RESTful CRUD controller """
if "tasks" in get_vars:
# Open-Tasks-For-Project Selector
return open_tasks_for_project()
# Pre-process
def prep(r):
# Location Filter
s3db.gis_location_filter(r)
component = r.component
component_name = component.name if component else None
hr_group = r.get_vars.get("group")
if r.method == "datalist":
# Set list_fields for renderer (project_project_list_layout)
s3db.configure("project_project",
list_fields = ["name",
"description",
"location.location_id",
"start_date",
"organisation_id",
"organisation_id$logo",
"modified_by",
]
)
# Filter human resource records if "group" in get_vars
elif component_name == "human_resource":
type_field = FS("human_resource.type")
if hr_group == "staff":
query = (type_field == 1)
elif hr_group == "volunteer":
query = (type_field == 2)
else:
query = None
if query:
r.resource.add_component_filter("human_resource", query)
if r.interactive:
htable = s3db.hrm_human_resource
htable.person_id.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Person"),
T("Select the person assigned to this role for this project."),
)
)
if not component or component_name == "activity":
# Filter Themes/Activity Types based on Sector
if r.record:
table = s3db.project_sector_project
query = (table.project_id == r.id) & \
(table.deleted == False)
rows = db(query).select(table.sector_id)
sector_ids = [row.sector_id for row in rows]
else:
sector_ids = []
set_theme_requires(sector_ids)
if not r.component:
if r.method in ("create", "update"):
                # Context from a Profile page?
location_id = get_vars.get("(location)", None)
if location_id:
field = s3db.project_location.location_id
field.default = location_id
field.readable = field.writable = False
organisation_id = get_vars.get("(organisation)", None)
if organisation_id:
field = r.table.organisation_id
field.default = organisation_id
field.readable = field.writable = False
if r.id:
r.table.human_resource_id.represent = \
s3db.hrm_HumanResourceRepresent(show_link=True)
elif r.get_vars.get("project.status_id", None):
stable = s3db.project_status
status = get_vars.get("project.status_id")
row = db(stable.name == status).select(stable.id,
limitby=(0, 1)).first()
if row:
r.table.status_id.default = row.id
r.table.status_id.writable = False
elif component_name == "organisation":
if r.method != "update":
allowed_roles = dict(settings.get_project_organisation_roles())
if settings.get_template() == "DRRPP":
# Partner NS should only come via sync from RMS
allowed_roles.pop(9, None)
lead_role = 1
otable = s3db.project_organisation
query = (otable.project_id == r.id) & \
(otable.role == lead_role) & \
(otable.deleted != True)
row = db(query).select(otable.id,
limitby=(0, 1)).first()
if row:
# Project has already a lead organisation
# => exclude lead_role in component add-form
allowed_roles.pop(lead_role, None)
otable.role.requires = IS_EMPTY_OR(IS_IN_SET(allowed_roles))
elif component_name == "activity":
# Filter Activity Type based on Sector
set_activity_type_requires("project_activity_activity_type", sector_ids)
elif component_name == "task":
if not auth.s3_has_role("STAFF"):
# Hide fields which are meant for staff members
# (avoid confusion both of inputters & recipients)
unwanted_fields = ["source",
"pe_id",
"date_due",
"time_estimated",
"time_actual",
"status",
]
ttable = component.table
for fieldname in unwanted_fields:
field = ttable[fieldname]
field.readable = field.writable = False
if "open" in r.get_vars:
# Show only the Open Tasks for this Project (unused?)
statuses = s3.project_task_active_statuses
query = FS("status").belongs(statuses)
r.resource.add_component_filter("task", query)
elif component_name == "beneficiary":
# Filter the location selector to the project's locations
component.table.project_location_id.requires = \
IS_EMPTY_OR(IS_ONE_OF(db, "project_location.id",
s3db.project_location_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
)
elif component_name == "human_resource":
htable = s3db.hrm_human_resource
htable.person_id.represent = \
s3db.pr_PersonRepresent(show_link=True)
# These values are defined in hrm_type_opts
human_resource_id = component.table.human_resource_id
filter_opts = None
if hr_group:
crud_strings = s3.crud_strings
if hr_group == "staff":
filter_opts = (1,)
human_resource_id.label = T("Staff")
crud_strings["project_human_resource"] = crud_strings["hrm_staff"]
elif hr_group == "volunteer":
filter_opts = (2,)
human_resource_id.label = T("Volunteer")
crud_strings["project_human_resource"] = crud_strings["hrm_volunteer"]
if filter_opts:
# Use the group to filter the form widget when
# adding a new record
human_resource_id.requires = \
IS_ONE_OF(db, "hrm_human_resource.id",
s3db.hrm_human_resource_represent,
filterby="type",
filter_opts=filter_opts,
orderby="hrm_human_resource.person_id",
sort=True
)
elif component_name == "document":
# Hide unnecessary fields
dtable = component.table
dtable.organisation_id.readable = \
dtable.organisation_id.writable = False
dtable.person_id.readable = \
dtable.person_id.writable = False
dtable.location_id.readable = \
dtable.location_id.writable = False
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
if not r.component:
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('project_project_start_date','project_project_end_date')''')
if mode_task:
read_url = URL(args=["[id]", "task"])
update_url = URL(args=["[id]", "task"])
s3_action_buttons(r,
read_url=read_url,
update_url=update_url)
elif r.component_name == "beneficiary":
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('project_beneficiary_date','project_beneficiary_end_date')''')
return output
s3.postp = postp
return s3_rest_controller(module, "project",
csv_template = "project",
hide_filter = {None: False, "_default": True},
rheader = s3db.project_rheader,
)
# -----------------------------------------------------------------------------
def open_tasks_for_project():
"""
Simplified controller to select a project and open the
list of open tasks for it
"""
def prep(r):
tablename = "project_project"
s3.crud_strings[tablename].title_list = T("Open Tasks for Project")
s3.crud_labels.READ = s3.crud_labels.UPDATE = T("Select")
s3db.configure(tablename,
deletable=False,
listadd=False,
)
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive and not r.component:
tasklist_url = URL(f="task", vars={"project":"[id]"})
s3_action_buttons(r,
deletable=False,
read_url=tasklist_url,
update_url=tasklist_url)
return output
s3.postp = postp
return s3_rest_controller(module, "project",
hide_filter=False,
)
# -----------------------------------------------------------------------------
def set_theme_requires(sector_ids):
"""
Filters the theme_id based on the sector_id
"""
ttable = s3db.project_theme
tstable = s3db.project_theme_sector
# All themes linked to the project's sectors or to no sectors
rows = db().select(ttable.id,
tstable.sector_id,
left=tstable.on(ttable.id == tstable.theme_id))
sector_ids = sector_ids or []
theme_ids = [row.project_theme.id for row in rows
if not row.project_theme_sector.sector_id or
row.project_theme_sector.sector_id in sector_ids]
table = s3db.project_theme_project
table.theme_id.requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_theme.id",
s3base.S3Represent(lookup="project_theme"),
filterby="id",
filter_opts=theme_ids,
sort=True,
)
)
# -----------------------------------------------------------------------------
def set_activity_type_requires(tablename, sector_ids):
"""
Filters the activity_type_id based on the sector_id
"""
attable = s3db.project_activity_type
if sector_ids:
atstable = s3db.project_activity_type_sector
# All activity_types linked to the projects sectors or to no sectors
rows = db().select(attable.id,
atstable.sector_id,
left=atstable.on(attable.id == atstable.activity_type_id))
activity_type_ids = [row.project_activity_type.id for row in rows
if not row.project_activity_type_sector.sector_id or
row.project_activity_type_sector.sector_id in sector_ids]
else:
activity_type_ids = []
s3db[tablename].activity_type_id.requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_activity_type.id",
s3base.S3Represent(lookup="project_activity_type"),
filterby="id",
filter_opts=activity_type_ids,
sort=True,
)
)
# -----------------------------------------------------------------------------
def project_theme_id_widget():
"""
Used by the project controller to return dynamically generated
theme_id widget based on sector_id
- deprecated?
"""
table = s3db.project_theme_project
sector_ids = [int(id) for id in request.vars.sector_ids.split(",") if id]
if "value" in request.vars:
value = [int(id) for id in request.vars.value.split(",") if id]
else:
value = []
set_theme_requires(sector_ids)
widget = table.theme_id.widget(table.theme_id,
value)
return widget
# =============================================================================
def sector():
""" RESTful CRUD controller """
return s3_rest_controller("org", "sector")
# -----------------------------------------------------------------------------
def status():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def theme():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def theme_project():
"""
RESTful CRUD controller
- not normally exposed to users via a menu
"""
return s3_rest_controller()
# -----------------------------------------------------------------------------
def theme_sector():
""" RESTful CRUD controller for options.s3json lookups """
if auth.permission.format != "s3json":
return ""
# Pre-process
def prep(r):
if r.method != "options":
return False
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def theme_sector_widget():
""" Render a Widget with Theme options filtered by Sector """
try:
values = get_vars.sector_ids.split(",")
values = [int(v) for v in values]
except:
values = []
widget = s3base.s3forms.S3SQLInlineComponentCheckbox(
"theme",
label = T("Themes"),
field = "theme_id",
cols = 4,
translate = True,
# Filter Theme by Sector
filter = {"linktable": "project_theme_sector",
"lkey": "theme_id",
"rkey": "sector_id",
"values": values
}
)
resource = s3db.resource("project_project")
instance, fieldname, field = widget.resolve(resource)
value = widget.extract(resource, record_id=None)
output = widget(s3db.project_theme_project.theme_id,
value,
_name=field.name)
return output
# -----------------------------------------------------------------------------
def hazard():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def framework():
""" RESTful CRUD controller """
return s3_rest_controller(dtargs={"dt_text_maximum_len": 160},
hide_filter=True,
)
# =============================================================================
def organisation():
""" RESTful CRUD controller """
if settings.get_project_multiple_organisations():
# e.g. IFRC
s3db.configure("project_organisation",
insertable=False,
editable=False,
deletable=False)
#list_btn = A(T("Funding Report"),
# _href=URL(c="project", f="organisation",
# args="report", vars=get_vars),
# _class="action-btn")
return s3_rest_controller(#list_btn=list_btn,
)
else:
# e.g. DRRPP
tabs = [(T("Basic Details"), None),
(T("Projects"), "project"),
(T("Contacts"), "human_resource"),
]
rheader = lambda r: s3db.org_rheader(r, tabs)
return s3_rest_controller("org", resourcename,
rheader=rheader)
# =============================================================================
def beneficiary_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def beneficiary():
""" RESTful CRUD controller """
# Normally only used in Report
# - make changes as component of Project
s3db.configure("project_beneficiary",
deletable = False,
editable = False,
insertable = False,
)
list_btn = A(T("Beneficiary Report"),
_href=URL(c="project", f="beneficiary",
args="report", vars=get_vars),
_class="action-btn")
#def prep(r):
# if r.method in ("create", "create.popup", "update", "update.popup"):
# # Coming from Profile page?
# location_id = r.get_vars.get("~.(location)", None)
# if location_id:
# field = r.table.location_id
# field.default = location_id
# field.readable = field.writable = False
# if r.record:
# field = r.table.location_id
# field.comment = None
# field.writable = False
# return True
#s3.prep = prep
return s3_rest_controller(hide_filter=False)
# =============================================================================
def activity_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def activity_type_sector():
""" RESTful CRUD controller for options.s3json lookups """
if auth.permission.format != "s3json":
return ""
# Pre-process
def prep(r):
if r.method != "options":
return False
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def activity_organisation():
""" RESTful CRUD controller for options.s3json lookups """
if auth.permission.format != "s3json":
return ""
# Pre-process
def prep(r):
if r.method != "options":
return False
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def activity():
""" RESTful CRUD controller """
table = s3db.project_activity
if "project_id" in get_vars:
field = table.project_id
field.default = get_vars.project_id
field.writable = False
field.comment = None
# Pre-process
def prep(r):
if r.interactive:
if r.component is not None:
if r.component_name == "document":
doc_table = s3db.doc_document
doc_table.organisation_id.readable = doc_table.organisation_id.writable = False
doc_table.person_id.readable = doc_table.person_id.writable = False
doc_table.location_id.readable = doc_table.location_id.writable = False
return True
s3.prep = prep
return s3_rest_controller(csv_template = "activity",
hide_filter = False,
rheader = s3db.project_rheader,
)
# -----------------------------------------------------------------------------
def location():
""" RESTful CRUD controller """
table = s3db.project_location
# Pre-process
def prep(r):
if r.interactive:
if r.record:
table = s3db.project_sector_project
query = (table.project_id == r.record.project_id) & \
(table.deleted == False)
rows = db(query).select(table.sector_id)
sector_ids = [row.sector_id for row in rows]
else:
sector_ids = []
set_activity_type_requires("project_activity_type_location", sector_ids)
if r.component_name == "document":
table = db.doc_document
table.organisation_id.readable = table.organisation_id.writable = False
table.person_id.readable = table.person_id.writable = False
table.location_id.readable = table.location_id.writable = False
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.representation == "plain":
# Replace the Map Popup contents with custom content
item = TABLE()
if settings.get_project_community():
# The Community is the primary resource
record = r.record
table.id.readable = False
table.location_id.readable = False
fields = [table[f] for f in table.fields if table[f].readable]
for field in fields:
data = record[field]
if data:
represent = field.represent
if represent:
item.append(TR(TD(field.label),
TD(represent(data))))
else:
item.append(TR(TD(field.label), TD(data)))
hierarchy = gis.get_location_hierarchy()
gtable = s3db.gis_location
location = db(gtable.id == record.location_id).select(gtable.L1,
gtable.L2,
gtable.L3,
gtable.L4,
).first()
if location:
for field in ["L4", "L3", "L2", "L1"]:
if field in hierarchy and location[field]:
item.append(TR(TD(hierarchy[field]),
TD(location[field])))
output["item"] = item
else:
# The Project is the primary resource
project_id = r.record.project_id
ptable = s3db.project_project
query = (ptable.id == project_id)
project = db(query).select(limitby=(0, 1)).first()
ptable.id.readable = False
fields = [ptable[f] for f in ptable.fields if ptable[f].readable]
for field in fields:
if field == "currency":
# Don't display Currency if no Budget
if not project["budget"]:
continue
data = project[field]
if data:
represent = field.represent
if represent:
item.append(TR(TD(field.label),
TD(represent(data))))
else:
item.append(TR(TD(field.label), TD(data)))
title = s3.crud_strings["project_project"].title_display
# Assume authorised to see details
popup_url = URL(f="project", args=[project_id])
details_btn = A(T("Open"),
_href=popup_url,
_class="btn",
_id="details-btn",
_target="_blank")
output = dict(item = item,
title = title,
details_btn = details_btn,
)
return output
s3.postp = postp
return s3_rest_controller(interactive_report=True,
rheader=s3db.project_rheader,
hide_filter=False,
csv_template="location",
)
# -----------------------------------------------------------------------------
def demographic():
""" RESTful CRUD controller """
return s3_rest_controller("stats", "demographic")
# -----------------------------------------------------------------------------
def demographic_data():
""" RESTful CRUD controller """
return s3db.stats_demographic_data_controller()
# -----------------------------------------------------------------------------
def location_contact():
""" RESTful CRUD controller for Community Contacts """
return s3_rest_controller(hide_filter=False)
# -----------------------------------------------------------------------------
def report():
"""
RESTful CRUD controller
@ToDo: Why is this needed? To have no rheader?
"""
return s3_rest_controller(module, "activity")
# -----------------------------------------------------------------------------
def partners():
"""
RESTful CRUD controller for Organisations filtered by Type
"""
# @ToDo: This may need to be a deployment setting
get_vars["organisation_type.name"] = \
"Academic,Bilateral,Government,Intergovernmental,NGO,UN agency"
# Load model
table = s3db.org_organisation
# Modify CRUD Strings
s3.crud_strings.org_organisation = Storage(
label_create = T("Create Partner Organization"),
title_display = T("Partner Organization Details"),
title_list = T("Partner Organizations"),
title_update = T("Edit Partner Organization"),
title_upload = T("Import Partner Organizations"),
label_list_button = T("List Partner Organizations"),
label_delete_button = T("Delete Partner Organization"),
msg_record_created = T("Partner Organization added"),
msg_record_modified = T("Partner Organization updated"),
msg_record_deleted = T("Partner Organization deleted"),
msg_list_empty = T("No Partner Organizations currently registered")
)
return s3db.org_organisation_controller()
# =============================================================================
def task():
""" RESTful CRUD controller """
return s3db.project_task_controller()
# =============================================================================
def task_project():
""" RESTful CRUD controller for options.s3json lookups """
if auth.permission.format != "s3json":
return ""
# Pre-process
def prep(r):
if r.method != "options":
return False
return True
s3.prep = prep
return s3_rest_controller()
# =============================================================================
def task_activity():
""" RESTful CRUD controller for options.s3json lookups """
if auth.permission.format != "s3json":
return ""
# Pre-process
def prep(r):
if r.method != "options":
return False
return True
s3.prep = prep
return s3_rest_controller()
# =============================================================================
def task_milestone():
""" RESTful CRUD controller for options.s3json lookups """
if auth.permission.format != "s3json":
return ""
# Pre-process
def prep(r):
if r.method != "options":
return False
return True
s3.prep = prep
return s3_rest_controller()
# =============================================================================
def milestone():
""" RESTful CRUD controller """
if "project_id" in get_vars:
field = s3db.project_milestone.project_id
field.default = get_vars.project_id
field.writable = False
field.comment = None
return s3_rest_controller()
# =============================================================================
def time():
""" RESTful CRUD controller """
# Load model to get normal CRUD strings
table = s3db.project_time
hide_filter = False
if "mine" in get_vars:
# Display this user's Logged Hours in reverse-order
hide_filter = True
s3.crud_strings["project_time"].title_list = T("My Logged Hours")
person_id = auth.s3_logged_in_person()
if person_id:
# @ToDo: Use URL filter instead, but the Search page will have
# to populate its widgets based on the URL filter
s3.filter = (table.person_id == person_id)
# Log time with just this user's open tasks visible
ttable = db.project_task
query = (ttable.pe_id == auth.user.pe_id) & \
(ttable.deleted == False)
if "update" not in request.args:
# Only log time against Open Tasks
query &= (ttable.status.belongs(s3db.project_task_active_statuses))
dbset = db(query)
table.task_id.requires = IS_ONE_OF(dbset, "project_task.id",
s3db.project_task_represent_w_project
)
list_fields = ["id",
"date",
"hours",
(T("Project"), "task_id$task_project.project_id"),
(T("Activity"), "task_id$task_activity.activity_id"),
"task_id",
"comments",
]
if settings.get_project_milestones():
# Use the field in this format to get the custom represent
list_fields.insert(5, (T("Milestone"), "task_id$task_milestone.milestone_id"))
s3db.configure("project_time",
orderby="project_time.date desc",
list_fields=list_fields)
elif "week" in get_vars:
# Filter to the specified number of weeks
weeks = int(get_vars.get("week", 1))
now = request.utcnow
week = datetime.timedelta(days=7)
delta = week * weeks
s3.filter = (table.date > (now - delta))
elif "month" in get_vars:
# Filter to the specified number of months
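# (a "month" is approximated as 4 weeks / 28 days here)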
months = int(get_vars.get("month", 1))
now = request.utcnow
month = datetime.timedelta(weeks=4)
delta = month * months
s3.filter = (table.date > (now - delta))
return s3_rest_controller(hide_filter=hide_filter)
# =============================================================================
# Comments
# =============================================================================
def comment_parse(comment, comments, task_id=None):
"""
Parse a Comment
@param: comment - a gluon.sql.Row: the current comment
@param: comments - a gluon.sql.Rows: full list of comments
@param: task_id - a reference ID: optional task commented on
"""
author = B(T("Anonymous"))
if comment.created_by:
utable = s3db.auth_user
ptable = s3db.pr_person
ltable = s3db.pr_person_user
query = (utable.id == comment.created_by)
left = [ltable.on(ltable.user_id == utable.id),
ptable.on(ptable.pe_id == ltable.pe_id)]
row = db(query).select(utable.email,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
left=left, limitby=(0, 1)).first()
if row:
person = row.pr_person
user = row[utable._tablename]
username = s3_fullname(person)
email = user.email.strip().lower()
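# Gravatar identifies avatars/profiles by the MD5 hex digest of the
# lower-cased email address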
import hashlib
hash = hashlib.md5(email).hexdigest()
url = "http://www.gravatar.com/%s" % hash
author = B(A(username, _href=url, _target="top"))
if not task_id and comment.task_id:
table = s3db.project_task
task = "re: %s" % table[comment.task_id].name
header = DIV(author, " ", task)
task_id = comment.task_id
else:
header = author
thread = LI(DIV(s3base.s3_avatar_represent(comment.created_by),
DIV(DIV(header,
_class="comment-header"),
DIV(XML(comment.body)),
_class="comment-text"),
DIV(DIV(comment.created_on,
_class="comment-date"),
DIV(A(T("Reply"),
_class="action-btn"),
_onclick="comment_reply(%i);" % comment.id,
_class="comment-reply"),
_class="fright"),
_id="comment-%i" % comment.id,
_task_id=task_id,
_class="comment-box"))
# Add the children of this thread
children = UL(_class="children")
id = comment.id
count = 0
for comment in comments:
if comment.parent == id:
count = 1
child = comment_parse(comment, comments, task_id=task_id)
children.append(child)
if count == 1:
thread.append(children)
return thread
# -----------------------------------------------------------------------------
def comments():
""" Function accessed by AJAX from rfooter to handle Comments """
try:
task_id = request.args[0]
except:
raise HTTP(400)
table = s3db.project_comment
field = table.task_id
field.default = task_id
field.writable = field.readable = False
# Form to add a new Comment
# @ToDo: Rewrite using SQLFORM or S3SQLCustomForm
from gluon.tools import Crud
# =============================================================================
class CrudS3(Crud):
"""
S3 extension of the gluon.tools.Crud class
- select() uses SQLTABLES3 (to allow different linkto construction)
"""
def __init__(self):
""" Initialise parent class & make any necessary modifications """
Crud.__init__(self, current.db)
def select(
self,
table,
query=None,
fields=None,
orderby=None,
limitby=None,
headers={},
**attr):
db = current.db
request = current.request
if not (isinstance(table, db.Table) or table in db.tables):
raise HTTP(404)
if not self.has_permission("select", table):
redirect(current.auth.settings.on_failed_authorization)
#if record_id and not self.has_permission("select", table):
# redirect(current.auth.settings.on_failed_authorization)
if not isinstance(table, db.Table):
table = db[table]
if not query:
query = table.id > 0
if not fields:
fields = [table.ALL]
rows = db(query).select(*fields, **dict(orderby=orderby,
limitby=limitby))
if not rows:
return None # Nicer than an empty table.
if not "linkto" in attr:
attr["linkto"] = self.url(args="read")
if not "upload" in attr:
attr["upload"] = self.url("download")
if request.extension != "html":
return rows.as_list()
return SQLTABLES3(rows, headers=headers, **attr)
# =============================================================================
class SQLTABLES3(SQLTABLE):
"""
S3 custom version of gluon.sqlhtml.SQLTABLE
Given a SQLRows object, as returned by a db().select(), generates
an html table with the rows.
- we need a different linkto construction for our CRUD controller
- we need to specify a different ID field to direct to for the M2M controller
- used by S3Resource.sqltable
Optional arguments:
@keyword linkto: URL (or lambda to generate a URL) to edit individual records
@keyword upload: URL to download uploaded files
@keyword orderby: Add an orderby link to column headers.
@keyword headers: dictionary of headers to header redefinitions
@keyword truncate: length at which to truncate text in table cells.
Defaults to 16 characters.
Optional named attributes are passed to the <table> tag
Simple linkto example::
rows = db.select(db.sometable.ALL)
table = SQLTABLES3(rows, linkto="someurl")
This will link rows[id] to .../sometable/value_of_id
More advanced linkto example::
def mylink(field):
return URL(args=[field])
rows = db.select(db.sometable.ALL)
table = SQLTABLES3(rows, linkto=mylink)
This will link rows[id] to::
current_app/current_controller/current_function/value_of_id
"""
def __init__(self, sqlrows,
linkto=None,
upload=None,
orderby=None,
headers={},
truncate=16,
columns=None,
th_link="",
**attributes):
# reverted since it causes errors (admin/user & manual importing of req/req/import)
# super(SQLTABLES3, self).__init__(**attributes)
TABLE.__init__(self, **attributes)
self.components = []
self.attributes = attributes
self.sqlrows = sqlrows
(components, row) = (self.components, [])
if not columns:
columns = sqlrows.colnames
if headers=="fieldname:capitalize":
headers = {}
for c in columns:
headers[c] = " ".join([w.capitalize() for w in c.split(".")[-1].split("_")])
elif headers=="labels":
headers = {}
for c in columns:
(t, f) = c.split(".")
field = sqlrows.db[t][f]
headers[c] = field.label
if headers!=None:
for c in columns:
if orderby:
row.append(TH(A(headers.get(c, c),
_href=th_link+"?orderby=" + c)))
else:
row.append(TH(headers.get(c, c)))
components.append(THEAD(TR(*row)))
tbody = []
table_field = re.compile("[\w_]+\.[\w_]+")
for (rc, record) in enumerate(sqlrows):
row = []
if rc % 2 == 0:
_class = "even"
else:
_class = "odd"
for colname in columns:
if not table_field.match(colname):
if "_extra" in record and colname in record._extra:
r = record._extra[colname]
row.append(TD(r))
continue
else:
raise KeyError("Column %s not found (SQLTABLE)" % colname)
(tablename, fieldname) = colname.split(".")
try:
field = sqlrows.db[tablename][fieldname]
except (KeyError, AttributeError):
field = None
if tablename in record \
and isinstance(record, Row) \
and isinstance(record[tablename], Row):
r = record[tablename][fieldname]
elif fieldname in record:
r = record[fieldname]
else:
raise SyntaxError("something wrong in Rows object")
r_old = r
if not field:
pass
elif linkto and field.type == "id":
#try:
#href = linkto(r, "table", tablename)
#except TypeError:
#href = "%s/%s/%s" % (linkto, tablename, r_old)
#r = A(r, _href=href)
try:
href = linkto(r)
except TypeError:
href = "%s/%s" % (linkto, r)
r = A(r, _href=href)
#elif linkto and field.type.startswith("reference"):
#ref = field.type[10:]
#try:
#href = linkto(r, "reference", ref)
#except TypeError:
#href = "%s/%s/%s" % (linkto, ref, r_old)
#if ref.find(".") >= 0:
#tref,fref = ref.split(".")
#if hasattr(sqlrows.db[tref],"_primarykey"):
#href = "%s/%s?%s" % (linkto, tref, urllib.urlencode({fref:r}))
#r = A(str(r), _href=str(href))
elif linkto \
and hasattr(field._table, "_primarykey") \
and fieldname in field._table._primarykey:
# have to test this with multi-key tables
key = urllib.urlencode(dict([ \
((tablename in record \
and isinstance(record, Row) \
and isinstance(record[tablename], Row)) \
and (k, record[tablename][k])) \
or (k, record[k]) \
for k in field._table._primarykey]))
r = A(r, _href="%s/%s?%s" % (linkto, tablename, key))
elif field.type.startswith("list:"):
r = field.represent(r or [])
elif field.represent:
r = field.represent(r)
elif field.type.startswith("reference"):
pass
elif field.type == "blob" and r:
r = "DATA"
elif field.type == "upload":
if upload and r:
r = A("file", _href="%s/%s" % (upload, r))
elif r:
r = "file"
else:
r = ""
elif field.type in ["string", "text"]:
r = str(field.formatter(r))
ur = unicode(r, "utf8")
if truncate!=None and len(ur) > truncate:
r = ur[:truncate - 3].encode("utf8") + "..."
row.append(TD(r))
tbody.append(TR(_class=_class, *row))
components.append(TBODY(*tbody))
crud = CrudS3()
crud.messages.submit_button = T("Save")
form = crud.create(table, formname="project_comment/%s" % task_id)
# List of existing Comments
comments = db(field == task_id).select(table.id,
table.parent,
table.body,
table.created_by,
table.created_on)
output = UL(_id="comments")
for comment in comments:
if not comment.parent:
# Show top-level threads at top-level
thread = comment_parse(comment, comments, task_id=task_id)
output.append(thread)
script = "".join((
'''$('#comments').collapsible({xoffset:'-5',yoffset:'50',imagehide:img_path+'arrow-down.png',imageshow:img_path+'arrow-right.png',defaulthide:false})
$('#project_comment_parent__row1').hide()
$('#project_comment_parent__row').hide()
$('#project_comment_body').ckeditor(ck_config)
$('#submit_record__row input').click(function(){
$('#comment-form').hide()
$('#project_comment_body').ckeditorGet().destroy()
return true
})'''))
# No layout in this output!
#s3.jquery_ready.append(script)
output = DIV(output,
DIV(H4(T("New Post"),
_id="comment-title"),
form,
_id="comment-form",
_class="clear"),
SCRIPT(script))
return XML(output)
# =============================================================================
# Campaigns
# =============================================================================
def campaign():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def campaign_keyword():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def campaign_message():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def campaign_response():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def campaign_response_summary():
""" RESTful CRUD controller """
return s3_rest_controller()
# END =========================================================================
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
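# The shared Serializer is used by all build_*_request helpers below;
# client-side validation is disabled so constraint violations (e.g. name
# patterns) are reported by the service rather than raised locally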
def build_check_availability_request(
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/checkDomainAvailability')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_list_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/domains')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_control_center_sso_request_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/generateSsoRequest')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_recommendations_request(
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/listDomainRecommendations')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_list_by_resource_group_request(
resource_group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
domain_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
resource_group_name: str,
domain_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str', pattern=r'[a-zA-Z0-9][a-zA-Z0-9\.-]+'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request(
resource_group_name: str,
domain_name: str,
subscription_id: str,
*,
force_hard_delete_domain: Optional[bool] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if force_hard_delete_domain is not None:
query_parameters['forceHardDeleteDomain'] = _SERIALIZER.query("force_hard_delete_domain", force_hard_delete_domain, 'bool')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_request(
resource_group_name: str,
domain_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str', pattern=r'[a-zA-Z0-9][a-zA-Z0-9\.-]+'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_list_ownership_identifiers_request(
resource_group_name: str,
domain_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_ownership_identifier_request(
resource_group_name: str,
domain_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_ownership_identifier_request(
resource_group_name: str,
domain_name: str,
name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_ownership_identifier_request(
resource_group_name: str,
domain_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_ownership_identifier_request(
resource_group_name: str,
domain_name: str,
name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_renew_request(
resource_group_name: str,
domain_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/renew')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class DomainsOperations(object):
"""DomainsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def check_availability(
self,
identifier: "_models.NameIdentifier",
**kwargs: Any
) -> "_models.DomainAvailabilityCheckResult":
"""Check if a domain is available for registration.
Description for Check if a domain is available for registration.
:param identifier: Name of the domain.
:type identifier: ~azure.mgmt.web.v2020_09_01.models.NameIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainAvailabilityCheckResult, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.DomainAvailabilityCheckResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainAvailabilityCheckResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(identifier, 'NameIdentifier')
request = build_check_availability_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.check_availability.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainAvailabilityCheckResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/checkDomainAvailability'} # type: ignore
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable["_models.DomainCollection"]:
"""Get all domains in a subscription.
Description for Get all domains in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DomainCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.DomainCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DomainCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/domains'} # type: ignore
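# Note: ItemPaged is lazy; get_next()/extract_data() run only as the caller
# iterates, e.g. (sketch, assuming a configured operations instance):
#     for domain in operations.list():
#         print(domain.name)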
@distributed_trace
def get_control_center_sso_request(
self,
**kwargs: Any
) -> "_models.DomainControlCenterSsoRequest":
"""Generate a single sign-on request for the domain management portal.
Description for Generate a single sign-on request for the domain management portal.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainControlCenterSsoRequest, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.DomainControlCenterSsoRequest
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainControlCenterSsoRequest"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_control_center_sso_request_request(
subscription_id=self._config.subscription_id,
template_url=self.get_control_center_sso_request.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainControlCenterSsoRequest', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_control_center_sso_request.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/generateSsoRequest'} # type: ignore
@distributed_trace
def list_recommendations(
self,
parameters: "_models.DomainRecommendationSearchParameters",
**kwargs: Any
) -> Iterable["_models.NameIdentifierCollection"]:
"""Get domain name recommendations based on keywords.
Description for Get domain name recommendations based on keywords.
:param parameters: Search parameters for domain name recommendations.
:type parameters: ~azure.mgmt.web.v2020_09_01.models.DomainRecommendationSearchParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NameIdentifierCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.NameIdentifierCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NameIdentifierCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
_json = self._serialize.body(parameters, 'DomainRecommendationSearchParameters')
request = build_list_recommendations_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.list_recommendations.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
_json = self._serialize.body(parameters, 'DomainRecommendationSearchParameters')
request = build_list_recommendations_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("NameIdentifierCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_recommendations.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/listDomainRecommendations'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.DomainCollection"]:
"""Get all domains in a resource group.
Description for Get all domains in a resource group.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DomainCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.DomainCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DomainCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
domain_name: str,
**kwargs: Any
) -> "_models.Domain":
"""Get a domain.
Description for Get a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Domain, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.Domain
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
domain_name: str,
domain: "_models.Domain",
**kwargs: Any
) -> "_models.Domain":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain, 'Domain')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Domain', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
domain_name: str,
domain: "_models.Domain",
**kwargs: Any
) -> LROPoller["_models.Domain"]:
"""Creates or updates a domain.
Description for Creates or updates a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param domain: Domain registration information.
:type domain: ~azure.mgmt.web.v2020_09_01.models.Domain
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Domain or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.web.v2020_09_01.models.Domain]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
domain=domain,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
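# --- Editorial usage sketch (assumption, not generated code) ---------------
# Callers normally reach begin_create_or_update through the service client's
# `domains` operations group. The client construction and values below are
# illustrative only:
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.web import WebSiteManagementClient
#
#     web_client = WebSiteManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     poller = web_client.domains.begin_create_or_update(
#         resource_group_name="my-rg",
#         domain_name="example.com",
#         domain=domain_payload,  # a _models.Domain built by the caller
#     )
#     domain = poller.result()  # blocks until the long-running operation completes
# ----------------------------------------------------------------------------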
@distributed_trace
def delete(
self,
resource_group_name: str,
domain_name: str,
force_hard_delete_domain: Optional[bool] = None,
**kwargs: Any
) -> None:
"""Delete a domain.
Description for Delete a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param force_hard_delete_domain: Specify :code:`<code>true</code>` to delete the domain
immediately. The default is :code:`<code>false</code>` which deletes the domain after 24 hours.
:type force_hard_delete_domain: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
force_hard_delete_domain=force_hard_delete_domain,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
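# Editorial usage note (assumption, not generated code): force_hard_delete_domain
# is forwarded to build_delete_request as a query parameter, so a caller that wants
# an immediate hard delete would pass it explicitly, e.g.
#
#     web_client.domains.delete("my-rg", "example.com", force_hard_delete_domain=True)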
@distributed_trace
def update(
self,
resource_group_name: str,
domain_name: str,
domain: "_models.DomainPatchResource",
**kwargs: Any
) -> "_models.Domain":
"""Creates or updates a domain.
Description for Creates or updates a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param domain: Domain registration information.
:type domain: ~azure.mgmt.web.v2020_09_01.models.DomainPatchResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Domain, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.Domain
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain, 'DomainPatchResource')
request = build_update_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Domain', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
@distributed_trace
def list_ownership_identifiers(
self,
resource_group_name: str,
domain_name: str,
**kwargs: Any
) -> Iterable["_models.DomainOwnershipIdentifierCollection"]:
"""Lists domain ownership identifiers.
Description for Lists domain ownership identifiers.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DomainOwnershipIdentifierCollection or the result
of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifierCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifierCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_ownership_identifiers_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=self.list_ownership_identifiers.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_ownership_identifiers_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DomainOwnershipIdentifierCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_ownership_identifiers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers'} # type: ignore
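# Editorial usage note (assumption, not generated code): the ItemPaged returned
# above drives get_next/extract_data lazily, so callers simply iterate and the
# next_link follow-up requests happen behind the scenes, e.g.
#
#     for identifier in web_client.domains.list_ownership_identifiers("my-rg", "example.com"):
#         print(identifier.name)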
@distributed_trace
def get_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
**kwargs: Any
) -> "_models.DomainOwnershipIdentifier":
"""Get ownership identifier for domain.
Description for Get ownership identifier for domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainOwnershipIdentifier, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifier
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifier"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.get_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace
def create_or_update_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
domain_ownership_identifier: "_models.DomainOwnershipIdentifier",
**kwargs: Any
) -> "_models.DomainOwnershipIdentifier":
"""Creates an ownership identifier for a domain or updates identifier details for an existing
identifier.
Description for Creates an ownership identifier for a domain or updates identifier details for
an existing identifier.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:param domain_ownership_identifier: A JSON representation of the domain ownership properties.
:type domain_ownership_identifier: ~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainOwnershipIdentifier, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifier
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifier"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain_ownership_identifier, 'DomainOwnershipIdentifier')
request = build_create_or_update_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace
def delete_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
**kwargs: Any
) -> None:
"""Delete ownership identifier for domain.
Description for Delete ownership identifier for domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.delete_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace
def update_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
domain_ownership_identifier: "_models.DomainOwnershipIdentifier",
**kwargs: Any
) -> "_models.DomainOwnershipIdentifier":
"""Creates an ownership identifier for a domain or updates identifier details for an existing
identifier.
Description for Creates an ownership identifier for a domain or updates identifier details for
an existing identifier.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:param domain_ownership_identifier: A JSON representation of the domain ownership properties.
:type domain_ownership_identifier: ~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainOwnershipIdentifier, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifier
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifier"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain_ownership_identifier, 'DomainOwnershipIdentifier')
request = build_update_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace
def renew(
self,
resource_group_name: str,
domain_name: str,
**kwargs: Any
) -> None:
"""Renew a domain.
Description for Renew a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_renew_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=self.renew.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
renew.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/renew'} # type: ignore
|
|
from __future__ import print_function
import re
import os
import sys
import subprocess
import inspect
import pypro.console
import argparse
import traceback
import threading
import tempfile
import time
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
version = '0.1.1'
release = version + '.alpha'
class Runner:
"""
This class runs all added recipes
"""
def __init__(self):
self._recipes = []
self._args_parser = None
self._arguments = None
# Add current folder to sys.path
if not os.getcwd() in sys.path:
sys.path.append(os.getcwd())
# Check for recipes folder
recipe_path = os.path.join(os.getcwd(), 'recipes')
if not os.path.isdir(recipe_path):
raise PyproException("No recipes directory found!")
# Check for recipes __init__.py file and create it if not present
recipes_init_file = os.path.join(recipe_path, '__init__.py')
if not os.path.isfile(recipes_init_file):
f = open(recipes_init_file, 'w')
f.close()
@property
def arguments_parser(self):
"""
@rtype: argparse.ArgumentParser
"""
if not self._args_parser:
self._args_parser = argparse.ArgumentParser()
self._args_parser.add_argument('arguments',
help="Space separated list of parameters", type=str, nargs='*')
self._args_parser.add_argument('-y', '--yes',
help="Auto confirm on questions", action="store_true")
self._args_parser.add_argument('-r', '--recipe',
help="Run single recipe", type=str, metavar="recipe_name")
self._args_parser.add_argument('-s', '--suite',
help="Path to suite file", type=str, metavar='/path')
self._args_parser.add_argument('-v', '--verbose',
help="Verbose output", action="store_true")
return self._args_parser
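# Editorial usage sketch (assumption): the argument definitions above imply
# invocations along these lines; the entry-point script and recipe names are
# illustrative only.
#
#     python run.py --suite suites/deploy.suite --yes
#     python run.py --recipe mypackage.MyRecipe key=value --verbose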
def add_recipe(self, recipe):
"""
@recipe: Recipe
Add a recipe to the execution queue.
"""
assert isinstance(recipe, Recipe), "%s is not a subclass of Recipe" % recipe.__class__
self._recipes += [recipe]
@property
def arguments(self):
return self._arguments
def _prepare(self):
self._arguments = self.arguments_parser.parse_args()
if self.arguments.suite:
self._prepare_suite()
elif self.arguments.recipe:
self._prepare_single_recipe()
else:
self.arguments_parser.print_help()
exit()
def _prepare_suite(self):
assert os.path.isfile(self.arguments.suite), 'Suite file not found.'
parser = Parser(self.arguments.suite)
for parts in parser.lines():
recipe_parts = parts[0].split('.')
package_name = recipe_parts[0]
recipe_name = recipe_parts[1]
recipe_arguments = parts[1:]
recipe_arguments = dict(zip(recipe_arguments[0::2], recipe_arguments[1::2]))
for key, param in recipe_arguments.items():
recipe_arguments[key] = Variables.replace(param)
recipe_class = None
try:
recipe_class = import_recipe(package_name, recipe_name, Parser.last_source, Parser.last_line)
recipe = recipe_class(**recipe_arguments)
self.add_recipe(recipe)
except TypeError:
needed = inspect.getargspec(recipe_class.__init__).args[1:]
got = recipe_arguments
missing = list(set(needed) - set(got))
raise PyproException("Wrong recipe arguments. Arguments needed: %s. Missing: %s" %
(str(', ').join(needed), str(', ').join(missing)))
def _prepare_single_recipe(self):
package_name = self.arguments.recipe.split('.')[0]
recipe_name = self.arguments.recipe.split('.')[-1]
recipe_arguments = []
for argument in self.arguments.arguments:
recipe_arguments += Parser.parse_shell_argument(argument)
recipe_arguments = dict(zip(recipe_arguments[0::2], recipe_arguments[1::2]))
for key, value in recipe_arguments.items():
recipe_arguments[key] = Variables.replace(value)
recipe_class = None
try:
recipe_class = import_recipe(package_name, recipe_name)
recipe = recipe_class(**recipe_arguments)
self.add_recipe(recipe)
except TypeError:
needed = inspect.getargspec(recipe_class.__init__).args[1:]
got = recipe_arguments
missing = list(set(needed) - set(got))
raise PyproException("Wrong recipe arguments. Arguments needed: %s. Missing: %s" %
(str(', ').join(needed), str(', ').join(missing)))
def run(self):
""" Starts recipes execution. """
self._prepare()
for recipe in self._recipes:
run_recipe = True
# Ask user whether to run current recipe if -y argument is not specified
if not self.arguments.yes:
run_recipe = pypro.console.ask_bool('Run %s.%s' % (recipe.module, recipe.name), "yes")
if run_recipe:
recipe.run(self, self.arguments)
if self.arguments.verbose:
pypro.console.out('Thanks for using pypro. Support this project at https://github.com/avladev/pypro')
def call(self, command):
"""
@command: str
Executes shell command.
"""
if self.arguments.verbose:
pypro.console.out('[Call] ', command)
#code = subprocess.call(command, shell=True, stdout=sys.stdout, stdin=sys.stdin, stderr=sys.stderr)
code, output = ProcessRunner.run(command)
if code:
raise PyproException("Unsuccessful system call '%s'" % command)
return output
class ProcessRunner:
@staticmethod
def _capture_output(process, field, output_file=None):
while getattr(process, field):
data = getattr(process, field).read(1024)
if data == '':
break
sys.stdout.write(data)
sys.stdout.flush()
if output_file:
output_file.write(data)
output_file.flush()
time.sleep(0.001)
@staticmethod
def run(command):
output_file = tempfile.TemporaryFile()
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stdin=sys.stdin, bufsize=0)
output_thread = threading.Thread(target=ProcessRunner._capture_output, args=(process, 'stdout', output_file))
# start() runs the capture loop in a background thread; calling Thread.run()
# would execute it synchronously in the calling thread.
output_thread.start()
input_thread = threading.Thread(target=ProcessRunner._capture_output, args=(process, 'stdin'))
input_thread.start()
process.wait()
# make sure the capture threads have drained the pipes before reading the file
output_thread.join()
input_thread.join()
output_file.seek(0)
return process.returncode, output_file.read()
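# Editorial usage note (assumption): ProcessRunner.run executes a shell command,
# mirrors its stdout to the console, and returns the exit code together with the
# captured output, e.g.
#
#     code, output = ProcessRunner.run('echo hello')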
class Recipe:
""" This class represents a given task called "recipe". """
def __init__(self):
self._settings = None
self.settings_keys = {}
pass
@property
def name(self):
""" Returns the recipe name which is its class name without package. """
if not hasattr(self, '_name'):
self._name = re.search('[a-z]+\.([a-z]+)\.([a-z]+)', str(self.__class__), re.IGNORECASE).group(2)
return self._name
@property
def module(self):
"""
Returns the module name of Recipe.
This actually represents the file basename of a recipe.
"""
if not hasattr(self, '_module'):
self._module = re.search('[a-z]+\.([a-z]+)\.([a-z]+)', str(self.__class__), re.IGNORECASE).group(1)
return self._module
@property
def settings(self):
"""
Loads the recipe settings file, which is located in:
./settings/{recipe_package}.ini
"""
settings_file = os.path.join(os.getcwd(), 'settings', self.module.lower() + '.ini')
# Loads the settings file once.
if (not hasattr(self, '_settings') or self._settings is None) and os.path.isfile(settings_file):
config = ConfigParser.ConfigParser()
config.read(settings_file)
settings = dict(config._sections)
for key in settings:
settings[key] = dict(config.defaults(), **settings[key])
settings[key].pop('__name__', None)
self._settings = SettingsDict(self, settings.get(self.name, {}))
elif not hasattr(self, '_settings'):
self._settings = SettingsDict(self, {})
return self._settings
def run(self, runner, arguments=None):
"""
This method is executed when the recipe is run.
Each recipe should override this method.
"""
raise PyproException("Method 'run' not implemented in recipe.")
class SettingsDict(dict):
def __init__(self, recipe, iterable=None):
dict.__init__(self, iterable)
self.recipe = recipe
def get(self, k, d=None):
if not k in self.recipe.settings_keys:
raise PyproException("No key '%s' defined in recipe '%s.%s' settings_keys dict!" %
(k, self.recipe.module, self.recipe.name))
if not k in self:
raise PyproException("No key '%s' defined in './settings/%s.ini'" %
(k, self.recipe.module))
return Variables.replace(dict.get(self, k, d))
class Parser:
"""
This class parses suite files. Basic suite file structure is:
package.RecipeClassName argument1=value1 argument2="Some complex value with spaces and = (equal) sign."
"""
last_source = None
last_line = None
def __init__(self, path):
self.path = path
Parser.last_source = os.path.basename(self.path)
Parser.last_line = 0
def lines(self):
Parser.last_line = 0
with open(self.path) as f:
# read the file line by line
for line in f:
Parser.last_line += 1
line = line.strip()
# skip empty line
if not len(line):
continue
# skip commented lines (ones that don't begin with a word character)
if not re.match('\w', line[0]):
continue
yield self.parse_string(line)
@staticmethod
def parse_shell_argument(string):
return string.split('=', 1)
@staticmethod
def parse_string(string):
string = string.strip()
parts = []
char_buffer = ""
quote = None
escape = False
for char in string:
if char == '"' or char == "'":
# remove quote escape character
if escape:
char_buffer = char_buffer[0:-1] + char
continue
# opens quote
if not quote:
quote = char
continue
# closes quote
if quote == char:
quote = None
parts.append(char_buffer)
char_buffer = ""
continue
if char == "\\":
escape = True
else:
escape = False
# split by \s and = if not in quote
if (char == " " or char == "=") and not quote:
parts.append(char_buffer)
char_buffer = ""
continue
char_buffer += char
# write buffer leftovers
if len(char_buffer):
parts.append(char_buffer)
filtered = []
for index, part in enumerate(parts):
part = part.strip()
if not part or part == '=':
continue
if part[0] in ["'", '"'] and part[-1] in ["'", '"']:
part = part[1:-1]
filtered.append(part)
return filtered
class Variables:
_recipes = {}
def __init__(self):
raise PyproException("This class should not be instantiated!")
@staticmethod
def replace(string):
regex = r'\@\{([a-z\.\_\-0-9]+)\}'
return re.sub(regex, Variables._replace_variable, str(string), flags=re.IGNORECASE)
@staticmethod
def _replace_variable(match):
parts = match.group(1).split('.')
if len(parts) < 3:
raise PyproException("Invalid variable '%s'!" % match.group(0))
module, recipe_name, variable = parts
cache_key = module + '.' + recipe_name
if not cache_key in Variables._recipes:
Variables._recipes[cache_key] = import_recipe(module, recipe_name, Parser.last_source, Parser.last_line)()
return Variables._recipes[cache_key].settings.get(variable)
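# Editorial example (assumption): Variables.replace() expands placeholders of the
# form @{module.recipe.setting}. Given a suite argument such as
#
#     path=@{deploy.restart.config_path}
#
# the 'config_path' value is read through the 'restart' recipe's settings, i.e.
# from ./settings/deploy.ini. The module, recipe, and key names here are invented
# for illustration.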
def import_recipe(package_name, recipe_name, source=None, line=None):
source = 'unknown' if not source else source
line = 'unknown' if not line else line
package_name = 'recipes.%s' % package_name
recipe_class = None
try:
# load the recipe module and resolve the recipe class by name
__import__(package_name)
for i, j in inspect.getmembers(sys.modules[package_name]):
if i.lower() == recipe_name.lower():
recipe_class = i
if not recipe_class:
raise AttributeError()
recipe_class = getattr(sys.modules[package_name], recipe_class)
if not recipe_class:
raise AttributeError()
return recipe_class
except ImportError as e:
raise PyproException("Error loading package for recipe '%s.%s'. File '%s' line %s.\n"
"%s" % (package_name, recipe_name, source, line, traceback.format_exc()))
except AttributeError:
# missing recipe module or class
raise PyproException("Recipe '%s' not found. File '%s' line %s." %
(recipe_name, source, line))
class PyproException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
def exception_handler(exctype, value, traceback):
"""
This exception handler catches KeyboardInterrupt to cancel the Runner and
also stops the Runner in case of an error.
"""
if exctype == KeyboardInterrupt:
pypro.console.out('') # Adds a new line after Ctrl+C character
pypro.console.err('Canceled')
elif exctype == PyproException:
pypro.console.err('[Error] ', str(value))  # str() works on both Python 2 and 3; .message is gone in Python 3
exit()
else:
sys.__excepthook__(exctype, value, traceback)
sys.excepthook = exception_handler
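# Editorial example (assumption): a minimal recipe and suite file this runner could
# drive. File names, class names, the import path, and the command are invented for
# illustration.
#
#     # recipes/deploy.py
#     from pypro import Recipe
#
#     class Restart(Recipe):
#         def run(self, runner, arguments=None):
#             runner.call('echo restarting')
#
#     # suites/deploy.suite
#     deploy.Restart
#
#     # invocation
#     python myproject.py --suite suites/deploy.suite --yes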
|
|
# Copyright 2012 AMG.lab, a Bull Group Company
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the functions invoked after the argument parser
recognizes a use case. Each of these functions corresponds to one command
and calls the proper SDK method based on the parsed parameters.
"""
def vcdFromArgs(args):
vcd = {}
if args.name:
vcd["name"] = args.name[0]
if args.template:
vcd["template"] = open(args.template[0],'r').read()
if args.type:
vcd["type"] = args.type[0]
if args.tags:
vcd["tag"] = args.tags
if args.description:
vcd["description"] = args.description[0]
return vcd
def appFromArgs(args):
app = {}
if args.name:
app["name"] = args.name[0]
if args.type:
app["type"] = args.type[0]
if args.tags:
app["tag"] = args.tags
if args.description:
app["description"] = args.description[0]
if args.category:
app["category"] = args.category[0]
if args.version:
app["version"] = args.version[0]
if args.vendor:
app["vendor"] = args.vendor[0]
return app
class ArgumentsParseException(Exception):
def __init__(self,message):
self.message = message
def listApplications(client,args):
if args.vc_id:
vcId = args.vc_id[0]
return client.virtual_clusters.getApps(vcId)
elif args.account_id:
accountId = args.account_id[0]
return client.accounts.listApplications(accountId)
else:
return client.applications.listApps()
def getApplication(client,args):
id = args.id[0]
return client.applications.getApp(id)
def createApplication(client,args):
app = appFromArgs(args)
if args.account_id:
accountId = args.account_id[0]
return client.accounts.createApplication(accountId, app)
else:
return client.applications.createApp(app)
def deleteApplication(client,args):
id = args.id[0]
return client.applications.deleteApp(id)
def updateApplication(client,args):
id = args.id[0]
app = appFromArgs(args)
return client.applications.updateApp(id,app)
def listVirtualClusterDefinitions(client,args):
if args.account_id:
accId = args.account_id[0]
if args.app_ids:
return client.accounts.listVCDs(accId,appIds=args.app_ids)
elif args.type:
return client.accounts.listVCDs(accId,type=args.type,tags=args.tags)
else:
return client.accounts.listVCDs(accId)
else:
if args.app_ids:
return client.virtualClusterDefinitions.listVCDs(appIds=args.app_ids)
elif args.type:
return client.virtualClusterDefinitions.listVCDs(type=args.type,tags=args.tags)
else:
return client.virtualClusterDefinitions.listVCDs()
def getVirtualClusterDefinition(client,args):
id = args.id[0]
return client.virtualClusterDefinitions.getVCD(id)
def createVirtualClusterDefinition(client,args):
vcd = vcdFromArgs(args)
if args.account_id:
accountId = args.account_id[0]
return client.accounts.createVCD(accountId, vcd)
else:
return client.virtualClusterDefinitions.createVirtualClusterDefinition(vcd)
def deleteVirtualClusterDefinition(client,args):
id = args.id[0]
return client.virtualClusterDefinitions.deleteVirtualClusterDefinition(id)
def updateVirtualClusterDefinition(client,args):
id = args.id[0]
vcd = vcdFromArgs(args)
return client.virtualClusterDefinitions.updateVirtualClusterDefinition(id,vcd)
def validateVirtualClusterDefinition(client,args):
vcd = vcdFromArgs(args)
return client.virtualClusterDefinitions.validate(vcd)
def listImages(client,args):
if args.account_id:
return client.accounts.listImages(args.account_id[0])
else:
return client.images.listImages()
def getImage(client,args):
id = args.id[0]
return client.images.getImage(id)
def createImage(client,args):
image = {"name":args.name[0],"containerFormat":args.container[0],
"diskFormat":args.disk[0]}
if args.account_id:
accountId = args.account_id[0]
return client.accounts.createImage(accountId, image)
else:
return client.images.createImage(image)
def deleteImage(client,args):
id = args.id[0]
return client.images.deleteImage(id)
def listAccounts(client,args):
return client.accounts.listAccounts()
def listUsers(client,args):
if args.group_id:
return client.groups.getUsers(args.group_id[0])
elif args.project_id:
return client.projects.getUsers(args.project_id[0])
elif args.account_id:
return client.accounts.getUsers(args.account_id[0])
else:
return client.users.listUsers()
def listGroups(client,args):
if args.user_id:
return client.users.getGroups(args.user_id[0])
elif args.account_id:
return client.accounts.getGroups(args.account_id[0])
else:
raise ArgumentsParseException("listing groups lacks parameters")
def listVirtualClusters(client,args):
if args.project_id:
return client.projects.getVirtualClusters(args.project_id[0])
if args.account_id:
return client.accounts.getVirtualClusters(args.account_id[0])
else:
return client.virtual_clusters.listVirtualClusters()
def listEntitlements(client,args):
if args.user_id:
return client.users.getEntitlements(args.user_id[0])
elif args.group_id:
return client.groups.getEntitlements(args.group_id[0])
else:
raise ArgumentsParseException("listing entitlements lacks parameters")
def getUser(client,args):
id = args.id[0]
return client.users.get(id)
def getGroup(client,args):
id = args.id[0]
return client.groups.get(id)
def getVirtualCluster(client,args):
id = args.id[0]
if args.parameters:
return client.virtual_clusters.getParams(id)
else:
return client.virtual_clusters.get(id)
def deleteUser(client,args):
id = args.id[0]
return client.users.delete(id)
def deleteGroup(client,args):
id = args.id[0]
return client.groups.delete(id)
def deleteAccount(client,args):
id = args.id[0]
return client.accounts.delete(id)
def getAccount(client,args):
id = args.id[0]
return client.accounts.get(id)
def createUser(client,args):
user = {"username":args.name[0],"password":args.password[0]}
if args.account_id:
return client.accounts.addUser(args.account_id[0],user)
elif args.group_id:
return client.groups.addUser(args.group_id[0],user)
elif args.project_id:
return client.projects.addUser(args.project_id[0],user)
else:
return client.users.createUser(user)
def createGroup(client,args):
group = {"name":args.name[0]}
return client.accounts.addGroup(args.account_id[0],group)
def createVirtualCluster(client,args):
vc = {"name":args.name[0],"virtualClusterDefinitionId":args.vcd_id[0]}
return client.projects.addVirtualCluster(args.project_id[0],vc)
def createAccount(client,args):
account = {"name":args.name[0]}
return client.accounts.add(account)
def updateUser(client,args):
id = args.id[0]
user = {"username":args.name[0],"password":args.password[0]}
return client.users.update(id,user)
def updateGroup(client,args):
id = args.id[0]
if args.name:
group = {"name":args.name[0]}
return client.groups.update(id,group)
elif args.add_user:
userId = args.add_user[0]
return client.groups.assignUser(id,userId)
elif args.remove_user:
userId = args.remove_user[0]
return client.groups.deleteUser(id,userId)
else:
raise ArgumentsParseException("updating group lacks parameters")
def updateVirtualCluster(client,args):
id = args.id[0]
if args.add_application:
appId = args.add_application[0]
return client.virtual_clusters.putApp(id,appId)
elif args.remove_application:
appId = args.remove_application[0]
return client.virtual_clusters.deleteApp(id,appId)
elif args.update_parameters:
splitted = map(lambda s:s.split(','),args.update_parameters)
parameters = {"parameter":[{"name":name,"value":value} for name,value in splitted]}
return client.virtual_clusters.putParams(id,parameters)
else:
raise ArgumentsParseException("updating virtual cluster lacks parameters")
def provisionVirtualCluster(client,args):
id = args.id[0]
client.virtual_clusters.provision(id)
def stopVirtualCluster(client,args):
id = args.id[0]
client.virtual_clusters.stop(id)
def getCurrentSession(client,args):
id = args.id[0]
return client.virtual_clusters.getCurrentSession(id)
def listTokens(client,args):
userId = args.user_id[0]
return client.users.listAccessTokens(userId)
def getToken(client,args):
id = args.id[0]
return client.tokens.get(id)
def revokeToken(client,args):
id = args.id[0]
return client.tokens.revoke(id)
def listEntitlements(client,args):
if args.user_id:
userId = args.user_id[0]
return client.users.listEntitlements(userId)
elif args.group_id:
groupId = args.group_id[0]
return client.groups.listEntitlements(groupId)
elif args.access_token_id:
accessTokenId = args.access_token_id[0]
return client.tokens.listEntitlements(accessTokenId)
else:
raise ArgumentsParseException("listing entitlements lacks parameters")
def validateHttpActions(actions):
if any(map(lambda x:x not in ['GET','POST','PUT','DELETE'],actions)):
raise ArgumentsParseException("bad http action")
return actions
def createEntitlement(client,args):
actions = args.action[0].split(":")
validateHttpActions(actions)
entitlement = {"resource":args.resource[0],"action":actions}
if args.user_id:
userId = args.user_id[0]
return client.users.addEntitlement(userId,entitlement)
elif args.group_id:
groupId = args.group_id[0]
return client.groups.addEntitlement(groupId,entitlement)
elif args.access_token_id:
accessTokenId = args.access_token_id[0]
return client.tokens.addEntitlement(accessTokenId,entitlement)
else:
raise ArgumentsParseException("creating entitlement lacks parameters")
def updateEntitlements(client,args):
splitted = map(lambda s:s.split(','),args.entitlements)
entitlements = {"entitlement":[{"resource":resource,"action":validateHttpActions(action.split(":"))} for resource,action in splitted]}
if args.user_id:
userId = args.user_id[0]
return client.users.putEntitlements(userId,entitlements)
elif args.group_id:
groupId = args.group_id[0]
return client.groups.putEntitlements(groupId,entitlements)
elif args.access_token_id:
accessTokenId = args.access_token_id[0]
return client.tokens.putEntitlements(accessTokenId,entitlements)
else:
raise ArgumentsParseException("updating entitlements lacks parameters")
def getEntitlement(client,args):
id = args.id[0]
if args.user_id:
userId = args.user_id[0]
return client.users.getEntitlement(userId,id)
elif args.group_id:
groupId = args.group_id[0]
return client.groups.getEntitlement(groupId,id)
elif args.access_token_id:
accessTokenId = args.access_token_id[0]
return client.tokens.getEntitlement(accessTokenId,id)
else:
raise ArgumentsParseException("listing entitlements lacks parameters")
def deleteEntitlement(client,args):
id = args.id[0]
if args.user_id:
userId = args.user_id[0]
return client.users.deleteEntitlement(userId,id)
elif args.group_id:
groupId = args.group_id[0]
return client.groups.deleteEntitlement(groupId,id)
elif args.access_token_id:
accessTokenId = args.access_token_id[0]
return client.tokens.deleteEntitlement(accessTokenId,id)
else:
raise ArgumentsParseException("listing entitlements lacks parameters")
def createProject(client,args):
project = {"name":args.name[0]}
return client.accounts.addProject(args.account_id[0],project)
def deleteProject(client,args):
id = args.id[0]
return client.projects.delete(id)
def getProject(client,args):
id = args.id[0]
return client.projects.get(id)
def listProjects(client,args):
if args.user_id:
return client.users.listProjects(args.user_id[0])
elif args.account_id:
return client.accounts.getProjects(args.account_id[0])
else:
return client.projects.listProjects()
def updateProject(client,args):
id = args.id[0]
if args.add_user:
userId = args.add_user[0]
return client.projects.assignUser(id,userId)
elif args.remove_user:
userId = args.remove_user[0]
return client.projects.removeUser(id,userId)
else:
raise ArgumentsParseException("updating project lacks parameters")
def postImageData(client,args):
id = args.id[0]
data = open(args.data[0],'r').read()
return client.images.postData(id,data)
def getCookbooks(client,args):
appId = args.app_id[0]
return client.applications.getCookbooks(appId)
def createCookbook(client,args):
app = args.app_id[0]
data = open(args.data[0],'r').read()
cookbook = {"name":args.name[0]}
return client.applications.createCookbook(app,cookbook,data)
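# Editorial wiring sketch (assumption): these handlers are meant to be attached to
# argparse sub-commands elsewhere in the package and then dispatched with the SDK
# client; the parser layout below is illustrative only.
#
#     subparsers = parser.add_subparsers()
#     list_apps = subparsers.add_parser('list-applications')
#     list_apps.add_argument('--vc-id', nargs=1)
#     list_apps.add_argument('--account-id', nargs=1)
#     list_apps.set_defaults(func=listApplications)
#     args = parser.parse_args()
#     print(args.func(client, args))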
|
|
from eve.tests import TestBase
from eve.tests.utils import DummyEvent
from eve.tests.test_settings import MONGO_DBNAME
from eve import ETAG
from bson import ObjectId
class TestDelete(TestBase):
def setUp(self):
super(TestDelete, self).setUp()
# Etag used to delete an item (a contact)
self.etag_headers = [('If-Match', self.item_etag)]
def test_unknown_resource(self):
url = '%s%s/' % (self.unknown_resource_url, self.item_id)
_, status = self.delete(url)
self.assert404(status)
def test_delete_from_resource_endpoint(self):
r, status = self.delete(self.known_resource_url)
self.assert200(status)
r, status = self.parse_response(self.test_client.get(
self.known_resource_url))
self.assert200(status)
self.assertEqual(len(r['_items']), 0)
def test_delete_from_resource_endpoint_write_concern(self):
# should get a 500 since there's no replicaset on the mongod instance
self.domain['contacts']['mongo_write_concern'] = {'w': 2}
_, status = self.delete(self.known_resource_url)
self.assert500(status)
def test_delete_from_resource_endpoint_different_resource(self):
r, status = self.delete(self.different_resource_url)
self.assert200(status)
r, status = self.parse_response(self.test_client.get(
self.different_resource_url))
self.assert200(status)
self.assertEqual(len(r['_items']), 0)
# deletion of 'users' will still leave 'contacts' untouched (same db
# collection)
r, status = self.parse_response(self.test_client.get(
self.known_resource_url))
self.assert200(status)
self.assertEqual(len(r['_items']), 25)
def test_delete_empty_resource(self):
url = '%s%s/' % (self.empty_resource_url, self.item_id)
_, status = self.delete(url)
self.assert404(status)
def test_delete_readonly_resource(self):
_, status = self.delete(self.readonly_id_url)
self.assert405(status)
def test_delete_unknown_item(self):
url = '%s%s/' % (self.known_resource_url, self.unknown_item_id)
_, status = self.delete(url)
self.assert404(status)
def test_delete_ifmatch_missing(self):
_, status = self.delete(self.item_id_url)
self.assert403(status)
def test_delete_ifmatch_disabled(self):
self.app.config['IF_MATCH'] = False
_, status = self.delete(self.item_id_url)
self.assert200(status)
def test_delete_ifmatch_bad_etag(self):
_, status = self.delete(self.item_id_url,
headers=[('If-Match', 'not-quite-right')])
self.assert412(status)
def test_delete(self):
r, status = self.delete(self.item_id_url, headers=self.etag_headers)
self.assert200(status)
r = self.test_client.get(self.item_id_url)
self.assert404(r.status_code)
def test_delete_non_existant(self):
url = self.item_id_url[:-5] + "00000"
r, status = self.delete(url, headers=self.etag_headers)
self.assert404(status)
def test_delete_write_concern(self):
# should get a 500 since there's no replicaset on the mongod instance
self.domain['contacts']['mongo_write_concern'] = {'w': 2}
_, status = self.delete(self.item_id_url,
headers=[('If-Match', self.item_etag)])
self.assert500(status)
def test_delete_different_resource(self):
r, status = self.delete(self.user_id_url,
headers=[('If-Match', self.user_etag)])
self.assert200(status)
r = self.test_client.get(self.user_id_url)
self.assert404(r.status_code)
def test_delete_with_post_override(self):
# POST request with DELETE override turns into a DELETE
headers = [('X-HTTP-Method-Override', 'DELETE'),
('If-Match', self.item_etag)]
r = self.test_client.post(self.item_id_url, data={}, headers=headers)
self.assert200(r.status_code)
def test_delete_subresource(self):
_db = self.connection[MONGO_DBNAME]
# create random contact
fake_contact = self.random_contacts(1)
fake_contact_id = _db.contacts.insert(fake_contact)[0]
# grab parent collection count; we will use this later to make sure we
# didn't delete all the users in the database. We add one extra invoice
# to make sure that the actual count will never be 1 (which would
# invalidate the test)
_db.invoices.insert({'inv_number': 1})
response, status = self.get('invoices')
invoices = len(response[self.app.config['ITEMS']])
# update first invoice to reference the new contact
_db.invoices.update({'_id': ObjectId(self.invoice_id)},
{'$set': {'person': fake_contact_id}})
# verify that the only document retrieved is referencing the correct
# parent document
response, status = self.get('users/%s/invoices' % fake_contact_id)
person_id = ObjectId(response[self.app.config['ITEMS']][0]['person'])
self.assertEqual(person_id, fake_contact_id)
# delete all documents at the sub-resource endpoint
response, status = self.delete('users/%s/invoices' % fake_contact_id)
self.assert200(status)
# verify that no documents are left at the sub-resource endpoint
response, status = self.get('users/%s/invoices' % fake_contact_id)
self.assertEqual(len(response['_items']), 0)
# verify that other documents in the invoices collection have not been
# deleted
response, status = self.get('invoices')
self.assertEqual(len(response['_items']), invoices - 1)
def test_delete_subresource_item(self):
_db = self.connection[MONGO_DBNAME]
# create random contact
fake_contact = self.random_contacts(1)
fake_contact_id = _db.contacts.insert(fake_contact)[0]
# update first invoice to reference the new contact
_db.invoices.update({'_id': ObjectId(self.invoice_id)},
{'$set': {'person': fake_contact_id}})
# GET all invoices by new contact
response, status = self.get('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id))
etag = response[ETAG]
headers = [('If-Match', etag)]
response, status = self.delete('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id),
headers=headers)
self.assert200(status)
def delete(self, url, headers=None):
r = self.test_client.delete(url, headers=headers)
return self.parse_response(r)
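# Editorial note (assumption): the tests above exercise Eve's concurrency control
# for DELETE. The client-side flow they imply is:
#
#     1. GET the item and read its _etag
#     2. DELETE the item with an If-Match header carrying that etag
#
# A missing If-Match yields 403, a stale or wrong etag yields 412, a matching etag
# yields 200, and with IF_MATCH disabled the header is not required at all.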
class TestDeleteEvents(TestBase):
def test_on_pre_DELETE_for_item(self):
devent = DummyEvent(self.before_delete)
self.app.on_pre_DELETE += devent
self.delete_item()
self.assertEqual('contacts', devent.called[0])
self.assertFalse(devent.called[1] is None)
def test_on_pre_DELETE_resource_for_item(self):
devent = DummyEvent(self.before_delete)
self.app.on_pre_DELETE_contacts += devent
self.delete_item()
self.assertFalse(devent.called is None)
def test_on_pre_DELETE_for_resource(self):
devent = DummyEvent(self.before_delete)
self.app.on_pre_DELETE += devent
self.delete_resource()
self.assertFalse(devent.called is None)
def test_on_pre_DELETE_resource_for_resource(self):
devent = DummyEvent(self.before_delete)
self.app.on_pre_DELETE_contacts += devent
self.delete_resource()
self.assertFalse(devent.called is None)
def test_on_pre_DELETE_dynamic_filter(self):
def filter_this(resource, request, lookup):
lookup["_id"] = self.unknown_item_id
self.app.on_pre_DELETE += filter_this
# Would normally delete the known document; will return 404 instead.
r, s = self.parse_response(self.delete_item())
self.assert404(s)
def test_on_post_DELETE_for_item(self):
devent = DummyEvent(self.after_delete)
self.app.on_post_DELETE += devent
self.delete_item()
self.assertFalse(devent.called is None)
def test_on_post_DELETE_resource_for_item(self):
devent = DummyEvent(self.after_delete)
self.app.on_post_DELETE_contacts += devent
self.delete_item()
self.assertFalse(devent.called is None)
def test_on_post_DELETE_for_resource(self):
devent = DummyEvent(self.after_delete)
self.app.on_post_DELETE += devent
self.delete_resource()
self.assertFalse(devent.called is None)
def test_on_post_DELETE_resource_for_resource(self):
devent = DummyEvent(self.after_delete)
self.app.on_post_DELETE_contacts += devent
self.delete_resource()
self.assertFalse(devent.called is None)
def test_on_delete_resource(self):
devent = DummyEvent(self.before_delete)
self.app.on_delete_resource += devent
self.delete_resource()
self.assertEqual(('contacts',), devent.called)
def test_on_delete_resource_contacts(self):
devent = DummyEvent(self.before_delete)
self.app.on_delete_resource_contacts += devent
self.delete_resource()
self.assertEqual(tuple(), devent.called)
def test_on_deleted_resource(self):
devent = DummyEvent(self.after_delete)
self.app.on_deleted_resource += devent
self.delete_resource()
self.assertEqual(('contacts',), devent.called)
def test_on_deleted_resource_contacts(self):
devent = DummyEvent(self.after_delete)
self.app.on_deleted_resource_contacts += devent
self.delete_resource()
self.assertEqual(tuple(), devent.called)
def test_on_delete_item(self):
devent = DummyEvent(self.before_delete)
self.app.on_delete_item += devent
self.delete_item()
self.assertEqual('contacts', devent.called[0])
self.assertEqual(
self.item_id, str(devent.called[1][self.app.config['ID_FIELD']]))
def test_on_delete_item_contacts(self):
devent = DummyEvent(self.before_delete)
self.app.on_delete_item_contacts += devent
self.delete_item()
self.assertEqual(
self.item_id, str(devent.called[0][self.app.config['ID_FIELD']]))
def test_on_deleted_item(self):
devent = DummyEvent(self.after_delete)
self.app.on_deleted_item += devent
self.delete_item()
self.assertEqual('contacts', devent.called[0])
self.assertEqual(
self.item_id, str(devent.called[1][self.app.config['ID_FIELD']]))
def test_on_deleted_item_contacts(self):
devent = DummyEvent(self.after_delete)
self.app.on_deleted_item_contacts += devent
self.delete_item()
self.assertEqual(
self.item_id, str(devent.called[0][self.app.config['ID_FIELD']]))
def delete_resource(self):
self.test_client.delete(self.known_resource_url)
def delete_item(self):
return self.test_client.delete(
self.item_id_url, headers=[('If-Match', self.item_etag)])
def before_delete(self):
db = self.connection[MONGO_DBNAME]
return db.contacts.find_one(ObjectId(self.item_id)) is not None
def after_delete(self):
return not self.before_delete()
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes + cinder.
"""
import copy
import sys
from cinderclient import client as cinder_client
from cinderclient import exceptions as cinder_exception
from cinderclient.v1 import client as v1_client
from keystoneclient import exceptions as keystone_exception
from keystoneclient import session
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
import six
from nova import availability_zones as az
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
cinder_opts = [
cfg.StrOpt('catalog_info',
default='volumev2:cinderv2:publicURL',
help='Info to match when looking for cinder in the service '
'catalog. Format is: separated values of the form: '
'<service_type>:<service_name>:<endpoint_type>'),
cfg.StrOpt('endpoint_template',
help='Override service catalog lookup with template for cinder '
'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
cfg.StrOpt('os_region_name',
help='Region name of this node'),
cfg.IntOpt('http_retries',
default=3,
help='Number of cinderclient retries on failed http calls'),
cfg.BoolOpt('cross_az_attach',
default=True,
help='Allow attach between instance and volume in different '
'availability zones.'),
]
CONF = cfg.CONF
CINDER_OPT_GROUP = 'cinder'
# cinder_opts options in the DEFAULT group were deprecated in Juno
CONF.register_opts(cinder_opts, group=CINDER_OPT_GROUP)
deprecated = {'timeout': [cfg.DeprecatedOpt('http_timeout',
group=CINDER_OPT_GROUP)],
'cafile': [cfg.DeprecatedOpt('ca_certificates_file',
group=CINDER_OPT_GROUP)],
'insecure': [cfg.DeprecatedOpt('api_insecure',
group=CINDER_OPT_GROUP)]}
session.Session.register_conf_options(CONF,
CINDER_OPT_GROUP,
deprecated_opts=deprecated)
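# Editorial configuration sketch (assumption): with the options above registered
# under the [cinder] group (plus the session options and their deprecated aliases),
# a nova.conf fragment could look like this; the values are illustrative only.
#
#     [cinder]
#     catalog_info = volumev2:cinderv2:publicURL
#     os_region_name = RegionOne
#     http_retries = 3
#     cross_az_attach = True
#     timeout = 30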
LOG = logging.getLogger(__name__)
_SESSION = None
_V1_ERROR_RAISED = False
def reset_globals():
"""Testing method to reset globals.
"""
global _SESSION
_SESSION = None
def cinderclient(context):
global _SESSION
global _V1_ERROR_RAISED
if not _SESSION:
_SESSION = session.Session.load_from_conf_options(CONF,
CINDER_OPT_GROUP)
url = None
endpoint_override = None
auth = context.get_auth_plugin()
service_type, service_name, interface = CONF.cinder.catalog_info.split(':')
service_parameters = {'service_type': service_type,
'service_name': service_name,
'interface': interface,
'region_name': CONF.cinder.os_region_name}
if CONF.cinder.endpoint_template:
url = CONF.cinder.endpoint_template % context.to_dict()
endpoint_override = url
else:
url = _SESSION.get_endpoint(auth, **service_parameters)
# TODO(jamielennox): This should be using proper version discovery from
# the cinder service rather than just inspecting the URL for certain string
# values.
version = cinder_client.get_volume_api_from_url(url)
if version == '1' and not _V1_ERROR_RAISED:
msg = _LW('Cinder V1 API is deprecated as of the Juno '
'release, and Nova is still configured to use it. '
'Enable the V2 API in Cinder and set '
'cinder.catalog_info in nova.conf to use it.')
LOG.warn(msg)
_V1_ERROR_RAISED = True
return cinder_client.Client(version,
session=_SESSION,
auth=auth,
endpoint_override=endpoint_override,
connect_retries=CONF.cinder.http_retries,
**service_parameters)
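# Hedged usage sketch (not part of the original module): callers get a
# versioned client bound to the shared keystone session and the requester's
# auth plugin; `volume_id` below is a placeholder.
#
#   client = cinderclient(context)
#   volume = client.volumes.get(volume_id)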
def _untranslate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol.id
d['status'] = vol.status
d['size'] = vol.size
d['availability_zone'] = vol.availability_zone
d['created_at'] = vol.created_at
# TODO(jdg): The calling code expects attach_time and
# mountpoint to be set. When the calling
# code is more defensive this can be
# removed.
d['attach_time'] = ""
d['mountpoint'] = ""
if vol.attachments:
att = vol.attachments[0]
d['attach_status'] = 'attached'
d['instance_uuid'] = att['server_id']
d['mountpoint'] = att['device']
else:
d['attach_status'] = 'detached'
# NOTE(dzyu) volume(cinder) v2 API uses 'name' instead of 'display_name',
    # and 'description' instead of 'display_description' for volume.
if hasattr(vol, 'display_name'):
d['display_name'] = vol.display_name
d['display_description'] = vol.display_description
else:
d['display_name'] = vol.name
d['display_description'] = vol.description
# TODO(jdg): Information may be lost in this translation
d['volume_type_id'] = vol.volume_type
d['snapshot_id'] = vol.snapshot_id
d['bootable'] = strutils.bool_from_string(vol.bootable)
d['volume_metadata'] = {}
for key, value in vol.metadata.items():
d['volume_metadata'][key] = value
if hasattr(vol, 'volume_image_metadata'):
d['volume_image_metadata'] = copy.deepcopy(vol.volume_image_metadata)
return d
def _untranslate_snapshot_summary_view(context, snapshot):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = snapshot.id
d['status'] = snapshot.status
d['progress'] = snapshot.progress
d['size'] = snapshot.size
d['created_at'] = snapshot.created_at
# NOTE(dzyu) volume(cinder) v2 API uses 'name' instead of 'display_name',
# 'description' instead of 'display_description' for snapshot.
if hasattr(snapshot, 'display_name'):
d['display_name'] = snapshot.display_name
d['display_description'] = snapshot.display_description
else:
d['display_name'] = snapshot.name
d['display_description'] = snapshot.description
d['volume_id'] = snapshot.volume_id
d['project_id'] = snapshot.project_id
d['volume_size'] = snapshot.size
return d
def translate_volume_exception(method):
"""Transforms the exception for the volume but keeps its traceback intact.
"""
def wrapper(self, ctx, volume_id, *args, **kwargs):
try:
res = method(self, ctx, volume_id, *args, **kwargs)
except (cinder_exception.ClientException,
keystone_exception.ClientException):
exc_type, exc_value, exc_trace = sys.exc_info()
if isinstance(exc_value, (keystone_exception.NotFound,
cinder_exception.NotFound)):
exc_value = exception.VolumeNotFound(volume_id=volume_id)
elif isinstance(exc_value, (keystone_exception.BadRequest,
cinder_exception.BadRequest)):
exc_value = exception.InvalidInput(
reason=six.text_type(exc_value))
six.reraise(exc_value, None, exc_trace)
except (cinder_exception.ConnectionError,
keystone_exception.ConnectionError):
exc_type, exc_value, exc_trace = sys.exc_info()
exc_value = exception.CinderConnectionFailed(
reason=six.text_type(exc_value))
six.reraise(exc_value, None, exc_trace)
return res
return wrapper
def translate_snapshot_exception(method):
"""Transforms the exception for the snapshot but keeps its traceback
intact.
"""
def wrapper(self, ctx, snapshot_id, *args, **kwargs):
try:
res = method(self, ctx, snapshot_id, *args, **kwargs)
except (cinder_exception.ClientException,
keystone_exception.ClientException):
exc_type, exc_value, exc_trace = sys.exc_info()
if isinstance(exc_value, (keystone_exception.NotFound,
cinder_exception.NotFound)):
exc_value = exception.SnapshotNotFound(snapshot_id=snapshot_id)
six.reraise(exc_value, None, exc_trace)
except (cinder_exception.ConnectionError,
keystone_exception.ConnectionError):
exc_type, exc_value, exc_trace = sys.exc_info()
reason = six.text_type(exc_value)
exc_value = exception.CinderConnectionFailed(reason=reason)
six.reraise(exc_value, None, exc_trace)
return res
return wrapper
class API(object):
"""API for interacting with the volume manager."""
@translate_volume_exception
def get(self, context, volume_id):
item = cinderclient(context).volumes.get(volume_id)
return _untranslate_volume_summary_view(context, item)
def get_all(self, context, search_opts=None):
search_opts = search_opts or {}
items = cinderclient(context).volumes.list(detailed=True,
search_opts=search_opts)
rval = []
for item in items:
rval.append(_untranslate_volume_summary_view(context, item))
return rval
def check_attached(self, context, volume):
if volume['status'] != "in-use":
msg = _("volume '%(vol)s' status must be 'in-use'. Currently in "
"'%(status)s' status") % {"vol": volume['id'],
"status": volume['status']}
raise exception.InvalidVolume(reason=msg)
def check_attach(self, context, volume, instance=None):
# TODO(vish): abstract status checking?
if volume['status'] != "available":
msg = _("volume '%(vol)s' status must be 'available'. Currently "
"in '%(status)s'") % {'vol': volume['id'],
'status': volume['status']}
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == "attached":
msg = _("volume %s already attached") % volume['id']
raise exception.InvalidVolume(reason=msg)
if instance and not CONF.cinder.cross_az_attach:
            # NOTE(sorrison): If instance is on a host we match against its AZ
# else we check the intended AZ
if instance.get('host'):
instance_az = az.get_instance_availability_zone(
context, instance)
else:
instance_az = instance['availability_zone']
if instance_az != volume['availability_zone']:
msg = _("Instance %(instance)s and volume %(vol)s are not in "
"the same availability_zone. Instance is in "
"%(ins_zone)s. Volume is in %(vol_zone)s") % {
"instance": instance['id'],
"vol": volume['id'],
'ins_zone': instance_az,
'vol_zone': volume['availability_zone']}
raise exception.InvalidVolume(reason=msg)
def check_detach(self, context, volume):
# TODO(vish): abstract status checking?
if volume['status'] == "available":
msg = _("volume %s already detached") % volume['id']
raise exception.InvalidVolume(reason=msg)
@translate_volume_exception
def reserve_volume(self, context, volume_id):
cinderclient(context).volumes.reserve(volume_id)
@translate_volume_exception
def unreserve_volume(self, context, volume_id):
cinderclient(context).volumes.unreserve(volume_id)
@translate_volume_exception
def begin_detaching(self, context, volume_id):
cinderclient(context).volumes.begin_detaching(volume_id)
@translate_volume_exception
def roll_detaching(self, context, volume_id):
cinderclient(context).volumes.roll_detaching(volume_id)
@translate_volume_exception
def attach(self, context, volume_id, instance_uuid, mountpoint, mode='rw'):
cinderclient(context).volumes.attach(volume_id, instance_uuid,
mountpoint, mode=mode)
@translate_volume_exception
def detach(self, context, volume_id):
cinderclient(context).volumes.detach(volume_id)
@translate_volume_exception
def initialize_connection(self, context, volume_id, connector):
return cinderclient(context).volumes.initialize_connection(volume_id,
connector)
@translate_volume_exception
def terminate_connection(self, context, volume_id, connector):
return cinderclient(context).volumes.terminate_connection(volume_id,
connector)
def migrate_volume_completion(self, context, old_volume_id, new_volume_id,
error=False):
return cinderclient(context).volumes.migrate_volume_completion(
old_volume_id, new_volume_id, error)
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None):
client = cinderclient(context)
if snapshot is not None:
snapshot_id = snapshot['id']
else:
snapshot_id = None
kwargs = dict(snapshot_id=snapshot_id,
volume_type=volume_type,
user_id=context.user_id,
project_id=context.project_id,
availability_zone=availability_zone,
metadata=metadata,
imageRef=image_id)
if isinstance(client, v1_client.Client):
kwargs['display_name'] = name
kwargs['display_description'] = description
else:
kwargs['name'] = name
kwargs['description'] = description
try:
item = client.volumes.create(size, **kwargs)
return _untranslate_volume_summary_view(context, item)
except cinder_exception.OverLimit:
raise exception.OverQuota(overs='volumes')
except (cinder_exception.BadRequest,
keystone_exception.BadRequest) as e:
raise exception.InvalidInput(reason=e)
@translate_volume_exception
def delete(self, context, volume_id):
cinderclient(context).volumes.delete(volume_id)
@translate_volume_exception
def update(self, context, volume_id, fields):
raise NotImplementedError()
@translate_snapshot_exception
def get_snapshot(self, context, snapshot_id):
item = cinderclient(context).volume_snapshots.get(snapshot_id)
return _untranslate_snapshot_summary_view(context, item)
def get_all_snapshots(self, context):
items = cinderclient(context).volume_snapshots.list(detailed=True)
rvals = []
for item in items:
rvals.append(_untranslate_snapshot_summary_view(context, item))
return rvals
@translate_volume_exception
def create_snapshot(self, context, volume_id, name, description):
item = cinderclient(context).volume_snapshots.create(volume_id,
False,
name,
description)
return _untranslate_snapshot_summary_view(context, item)
@translate_volume_exception
def create_snapshot_force(self, context, volume_id, name, description):
item = cinderclient(context).volume_snapshots.create(volume_id,
True,
name,
description)
return _untranslate_snapshot_summary_view(context, item)
@translate_snapshot_exception
def delete_snapshot(self, context, snapshot_id):
cinderclient(context).volume_snapshots.delete(snapshot_id)
def get_volume_encryption_metadata(self, context, volume_id):
return cinderclient(context).volumes.get_encryption_metadata(volume_id)
@translate_snapshot_exception
def update_snapshot_status(self, context, snapshot_id, status):
vs = cinderclient(context).volume_snapshots
# '90%' here is used to tell Cinder that Nova is done
# with its portion of the 'creating' state. This can
# be removed when we are able to split the Cinder states
# into 'creating' and a separate state of
# 'creating_in_nova'. (Same for 'deleting' state.)
vs.update_snapshot_status(
snapshot_id,
{'status': status,
'progress': '90%'}
)
|
|
#pylint: skip-file
from contextlib import contextmanager
import struct
import six
from mock import patch, sentinel
from . import unittest
from kafka.codec import has_snappy, gzip_decode, snappy_decode
from kafka.errors import (
ChecksumError, KafkaUnavailableError, UnsupportedCodecError,
ConsumerFetchSizeTooSmall, ProtocolError)
from kafka.protocol import (
ATTRIBUTE_CODEC_MASK, CODEC_NONE, CODEC_GZIP, CODEC_SNAPPY, KafkaProtocol,
create_message, create_gzip_message, create_snappy_message,
create_message_set)
from kafka.structs import (
OffsetRequestPayload, OffsetResponsePayload,
OffsetCommitRequestPayload, OffsetCommitResponsePayload,
OffsetFetchRequestPayload, OffsetFetchResponsePayload,
ProduceRequestPayload, ProduceResponsePayload,
FetchRequestPayload, FetchResponsePayload,
Message, OffsetAndMessage, BrokerMetadata, ConsumerMetadataResponse)
class TestProtocol(unittest.TestCase):
def test_create_message(self):
payload = "test"
key = "key"
msg = create_message(payload, key)
self.assertEqual(msg.magic, 0)
self.assertEqual(msg.attributes, 0)
self.assertEqual(msg.key, key)
self.assertEqual(msg.value, payload)
def test_create_gzip(self):
payloads = [(b"v1", None), (b"v2", None)]
msg = create_gzip_message(payloads)
self.assertEqual(msg.magic, 0)
self.assertEqual(msg.attributes, ATTRIBUTE_CODEC_MASK & CODEC_GZIP)
self.assertEqual(msg.key, None)
# Need to decode to check since gzipped payload is non-deterministic
decoded = gzip_decode(msg.value)
expect = b"".join([
struct.pack(">q", 0), # MsgSet offset
struct.pack(">i", 16), # MsgSet size
struct.pack(">i", 1285512130), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", -1), # -1 indicates a null key
struct.pack(">i", 2), # Msg length (bytes)
b"v1", # Message contents
struct.pack(">q", 0), # MsgSet offset
struct.pack(">i", 16), # MsgSet size
struct.pack(">i", -711587208), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", -1), # -1 indicates a null key
struct.pack(">i", 2), # Msg length (bytes)
b"v2", # Message contents
])
self.assertEqual(decoded, expect)
def test_create_gzip_keyed(self):
payloads = [(b"v1", b"k1"), (b"v2", b"k2")]
msg = create_gzip_message(payloads)
self.assertEqual(msg.magic, 0)
self.assertEqual(msg.attributes, ATTRIBUTE_CODEC_MASK & CODEC_GZIP)
self.assertEqual(msg.key, None)
# Need to decode to check since gzipped payload is non-deterministic
decoded = gzip_decode(msg.value)
expect = b"".join([
struct.pack(">q", 0), # MsgSet Offset
struct.pack(">i", 18), # Msg Size
struct.pack(">i", 1474775406), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", 2), # Length of key
b"k1", # Key
struct.pack(">i", 2), # Length of value
b"v1", # Value
struct.pack(">q", 0), # MsgSet Offset
struct.pack(">i", 18), # Msg Size
struct.pack(">i", -16383415), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", 2), # Length of key
b"k2", # Key
struct.pack(">i", 2), # Length of value
b"v2", # Value
])
self.assertEqual(decoded, expect)
@unittest.skipUnless(has_snappy(), "Snappy not available")
def test_create_snappy(self):
payloads = [(b"v1", None), (b"v2", None)]
msg = create_snappy_message(payloads)
self.assertEqual(msg.magic, 0)
self.assertEqual(msg.attributes, ATTRIBUTE_CODEC_MASK & CODEC_SNAPPY)
self.assertEqual(msg.key, None)
decoded = snappy_decode(msg.value)
expect = b"".join([
struct.pack(">q", 0), # MsgSet offset
struct.pack(">i", 16), # MsgSet size
struct.pack(">i", 1285512130), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", -1), # -1 indicates a null key
struct.pack(">i", 2), # Msg length (bytes)
b"v1", # Message contents
struct.pack(">q", 0), # MsgSet offset
struct.pack(">i", 16), # MsgSet size
struct.pack(">i", -711587208), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", -1), # -1 indicates a null key
struct.pack(">i", 2), # Msg length (bytes)
b"v2", # Message contents
])
self.assertEqual(decoded, expect)
@unittest.skipUnless(has_snappy(), "Snappy not available")
def test_create_snappy_keyed(self):
payloads = [(b"v1", b"k1"), (b"v2", b"k2")]
msg = create_snappy_message(payloads)
self.assertEqual(msg.magic, 0)
self.assertEqual(msg.attributes, ATTRIBUTE_CODEC_MASK & CODEC_SNAPPY)
self.assertEqual(msg.key, None)
decoded = snappy_decode(msg.value)
expect = b"".join([
struct.pack(">q", 0), # MsgSet Offset
struct.pack(">i", 18), # Msg Size
struct.pack(">i", 1474775406), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", 2), # Length of key
b"k1", # Key
struct.pack(">i", 2), # Length of value
b"v1", # Value
struct.pack(">q", 0), # MsgSet Offset
struct.pack(">i", 18), # Msg Size
struct.pack(">i", -16383415), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", 2), # Length of key
b"k2", # Key
struct.pack(">i", 2), # Length of value
b"v2", # Value
])
self.assertEqual(decoded, expect)
def test_encode_message_header(self):
expect = b"".join([
struct.pack(">h", 10), # API Key
struct.pack(">h", 0), # API Version
struct.pack(">i", 4), # Correlation Id
struct.pack(">h", len("client3")), # Length of clientId
b"client3", # ClientId
])
encoded = KafkaProtocol._encode_message_header(b"client3", 4, 10)
self.assertEqual(encoded, expect)
def test_encode_message(self):
message = create_message(b"test", b"key")
encoded = KafkaProtocol._encode_message(message)
expect = b"".join([
struct.pack(">i", -1427009701), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", 3), # Length of key
b"key", # key
struct.pack(">i", 4), # Length of value
b"test", # value
])
self.assertEqual(encoded, expect)
@unittest.skip('needs updating for new protocol classes')
def test_decode_message(self):
encoded = b"".join([
struct.pack(">i", -1427009701), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", 3), # Length of key
b"key", # key
struct.pack(">i", 4), # Length of value
b"test", # value
])
offset = 10
(returned_offset, decoded_message) = list(KafkaProtocol._decode_message(encoded, offset))[0]
self.assertEqual(returned_offset, offset)
self.assertEqual(decoded_message, create_message(b"test", b"key"))
def test_encode_message_failure(self):
with self.assertRaises(ProtocolError):
KafkaProtocol._encode_message(Message(1, 0, "key", "test"))
@unittest.skip('needs updating for new protocol classes')
def test_encode_message_set(self):
message_set = [
create_message(b"v1", b"k1"),
create_message(b"v2", b"k2")
]
encoded = KafkaProtocol._encode_message_set(message_set)
expect = b"".join([
struct.pack(">q", 0), # MsgSet Offset
struct.pack(">i", 18), # Msg Size
struct.pack(">i", 1474775406), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", 2), # Length of key
b"k1", # Key
struct.pack(">i", 2), # Length of value
b"v1", # Value
struct.pack(">q", 0), # MsgSet Offset
struct.pack(">i", 18), # Msg Size
struct.pack(">i", -16383415), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", 2), # Length of key
b"k2", # Key
struct.pack(">i", 2), # Length of value
b"v2", # Value
])
self.assertEqual(encoded, expect)
@unittest.skip('needs updating for new protocol classes')
def test_decode_message_set(self):
encoded = b"".join([
struct.pack(">q", 0), # MsgSet Offset
struct.pack(">i", 18), # Msg Size
struct.pack(">i", 1474775406), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", 2), # Length of key
b"k1", # Key
struct.pack(">i", 2), # Length of value
b"v1", # Value
struct.pack(">q", 1), # MsgSet Offset
struct.pack(">i", 18), # Msg Size
struct.pack(">i", -16383415), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", 2), # Length of key
b"k2", # Key
struct.pack(">i", 2), # Length of value
b"v2", # Value
])
msgs = list(KafkaProtocol._decode_message_set_iter(encoded))
self.assertEqual(len(msgs), 2)
msg1, msg2 = msgs
returned_offset1, decoded_message1 = msg1
returned_offset2, decoded_message2 = msg2
self.assertEqual(returned_offset1, 0)
self.assertEqual(decoded_message1, create_message(b"v1", b"k1"))
self.assertEqual(returned_offset2, 1)
self.assertEqual(decoded_message2, create_message(b"v2", b"k2"))
@unittest.skip('needs updating for new protocol classes')
def test_decode_message_gzip(self):
gzip_encoded = (b'\xc0\x11\xb2\xf0\x00\x01\xff\xff\xff\xff\x00\x00\x000'
b'\x1f\x8b\x08\x00\xa1\xc1\xc5R\x02\xffc`\x80\x03\x01'
b'\x9f\xf9\xd1\x87\x18\x18\xfe\x03\x01\x90\xc7Tf\xc8'
b'\x80$wu\x1aW\x05\x92\x9c\x11\x00z\xc0h\x888\x00\x00'
b'\x00')
offset = 11
messages = list(KafkaProtocol._decode_message(gzip_encoded, offset))
self.assertEqual(len(messages), 2)
msg1, msg2 = messages
returned_offset1, decoded_message1 = msg1
self.assertEqual(returned_offset1, 0)
self.assertEqual(decoded_message1, create_message(b"v1"))
returned_offset2, decoded_message2 = msg2
self.assertEqual(returned_offset2, 0)
self.assertEqual(decoded_message2, create_message(b"v2"))
@unittest.skip('needs updating for new protocol classes')
@unittest.skipUnless(has_snappy(), "Snappy not available")
def test_decode_message_snappy(self):
snappy_encoded = (b'\xec\x80\xa1\x95\x00\x02\xff\xff\xff\xff\x00\x00'
b'\x00,8\x00\x00\x19\x01@\x10L\x9f[\xc2\x00\x00\xff'
b'\xff\xff\xff\x00\x00\x00\x02v1\x19\x1bD\x00\x10\xd5'
b'\x96\nx\x00\x00\xff\xff\xff\xff\x00\x00\x00\x02v2')
offset = 11
messages = list(KafkaProtocol._decode_message(snappy_encoded, offset))
self.assertEqual(len(messages), 2)
msg1, msg2 = messages
returned_offset1, decoded_message1 = msg1
self.assertEqual(returned_offset1, 0)
self.assertEqual(decoded_message1, create_message(b"v1"))
returned_offset2, decoded_message2 = msg2
self.assertEqual(returned_offset2, 0)
self.assertEqual(decoded_message2, create_message(b"v2"))
@unittest.skip('needs updating for new protocol classes')
def test_decode_message_checksum_error(self):
invalid_encoded_message = b"This is not a valid encoded message"
iter = KafkaProtocol._decode_message(invalid_encoded_message, 0)
self.assertRaises(ChecksumError, list, iter)
# NOTE: The error handling in _decode_message_set_iter() is questionable.
# If it's modified, the next two tests might need to be fixed.
@unittest.skip('needs updating for new protocol classes')
def test_decode_message_set_fetch_size_too_small(self):
with self.assertRaises(ConsumerFetchSizeTooSmall):
list(KafkaProtocol._decode_message_set_iter('a'))
@unittest.skip('needs updating for new protocol classes')
def test_decode_message_set_stop_iteration(self):
encoded = b"".join([
struct.pack(">q", 0), # MsgSet Offset
struct.pack(">i", 18), # Msg Size
struct.pack(">i", 1474775406), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", 2), # Length of key
b"k1", # Key
struct.pack(">i", 2), # Length of value
b"v1", # Value
struct.pack(">q", 1), # MsgSet Offset
struct.pack(">i", 18), # Msg Size
struct.pack(">i", -16383415), # CRC
struct.pack(">bb", 0, 0), # Magic, flags
struct.pack(">i", 2), # Length of key
b"k2", # Key
struct.pack(">i", 2), # Length of value
b"v2", # Value
b"@1$%(Y!", # Random padding
])
msgs = MessageSet.decode(io.BytesIO(encoded))
self.assertEqual(len(msgs), 2)
msg1, msg2 = msgs
returned_offset1, msg_size1, decoded_message1 = msg1
returned_offset2, msg_size2, decoded_message2 = msg2
self.assertEqual(returned_offset1, 0)
self.assertEqual(decoded_message1.value, b"v1")
self.assertEqual(decoded_message1.key, b"k1")
self.assertEqual(returned_offset2, 1)
self.assertEqual(decoded_message2.value, b"v2")
self.assertEqual(decoded_message2.key, b"k2")
@unittest.skip('needs updating for new protocol classes')
def test_encode_produce_request(self):
requests = [
ProduceRequestPayload("topic1", 0, [
kafka.protocol.message.Message(b"a"),
kafka.protocol.message.Message(b"b")
]),
ProduceRequestPayload("topic2", 1, [
kafka.protocol.message.Message(b"c")
])
]
msg_a_binary = KafkaProtocol._encode_message(create_message(b"a"))
msg_b_binary = KafkaProtocol._encode_message(create_message(b"b"))
msg_c_binary = KafkaProtocol._encode_message(create_message(b"c"))
header = b"".join([
struct.pack('>i', 0x94), # The length of the message overall
struct.pack('>h', 0), # Msg Header, Message type = Produce
struct.pack('>h', 0), # Msg Header, API version
struct.pack('>i', 2), # Msg Header, Correlation ID
struct.pack('>h7s', 7, b"client1"), # Msg Header, The client ID
struct.pack('>h', 2), # Num acks required
struct.pack('>i', 100), # Request Timeout
struct.pack('>i', 2), # The number of requests
])
total_len = len(msg_a_binary) + len(msg_b_binary)
topic1 = b"".join([
struct.pack('>h6s', 6, b'topic1'), # The topic1
struct.pack('>i', 1), # One message set
struct.pack('>i', 0), # Partition 0
struct.pack('>i', total_len + 24), # Size of the incoming message set
struct.pack('>q', 0), # No offset specified
struct.pack('>i', len(msg_a_binary)), # Length of message
msg_a_binary, # Actual message
struct.pack('>q', 0), # No offset specified
struct.pack('>i', len(msg_b_binary)), # Length of message
msg_b_binary, # Actual message
])
topic2 = b"".join([
            struct.pack('>h6s', 6, b'topic2'), # The topic2
struct.pack('>i', 1), # One message set
struct.pack('>i', 1), # Partition 1
struct.pack('>i', len(msg_c_binary) + 12), # Size of the incoming message set
struct.pack('>q', 0), # No offset specified
struct.pack('>i', len(msg_c_binary)), # Length of message
msg_c_binary, # Actual message
])
expected1 = b"".join([ header, topic1, topic2 ])
expected2 = b"".join([ header, topic2, topic1 ])
encoded = KafkaProtocol.encode_produce_request(b"client1", 2, requests, 2, 100)
self.assertIn(encoded, [ expected1, expected2 ])
@unittest.skip('needs updating for new protocol classes')
def test_decode_produce_response(self):
t1 = b"topic1"
t2 = b"topic2"
_long = int
if six.PY2:
_long = long
encoded = struct.pack('>iih%dsiihqihqh%dsiihq' % (len(t1), len(t2)),
2, 2, len(t1), t1, 2, 0, 0, _long(10), 1, 1, _long(20),
len(t2), t2, 1, 0, 0, _long(30))
responses = list(KafkaProtocol.decode_produce_response(encoded))
self.assertEqual(responses,
[ProduceResponse(t1, 0, 0, _long(10)),
ProduceResponse(t1, 1, 1, _long(20)),
ProduceResponse(t2, 0, 0, _long(30))])
@unittest.skip('needs updating for new protocol classes')
def test_encode_fetch_request(self):
requests = [
FetchRequest(b"topic1", 0, 10, 1024),
FetchRequest(b"topic2", 1, 20, 100),
]
header = b"".join([
struct.pack('>i', 89), # The length of the message overall
struct.pack('>h', 1), # Msg Header, Message type = Fetch
struct.pack('>h', 0), # Msg Header, API version
struct.pack('>i', 3), # Msg Header, Correlation ID
struct.pack('>h7s', 7, b"client1"),# Msg Header, The client ID
struct.pack('>i', -1), # Replica Id
struct.pack('>i', 2), # Max wait time
struct.pack('>i', 100), # Min bytes
struct.pack('>i', 2), # Num requests
])
topic1 = b"".join([
struct.pack('>h6s', 6, b'topic1'),# Topic
struct.pack('>i', 1), # Num Payloads
struct.pack('>i', 0), # Partition 0
struct.pack('>q', 10), # Offset
struct.pack('>i', 1024), # Max Bytes
])
topic2 = b"".join([
struct.pack('>h6s', 6, b'topic2'),# Topic
struct.pack('>i', 1), # Num Payloads
            struct.pack('>i', 1),             # Partition 1
struct.pack('>q', 20), # Offset
struct.pack('>i', 100), # Max Bytes
])
expected1 = b"".join([ header, topic1, topic2 ])
expected2 = b"".join([ header, topic2, topic1 ])
encoded = KafkaProtocol.encode_fetch_request(b"client1", 3, requests, 2, 100)
self.assertIn(encoded, [ expected1, expected2 ])
@unittest.skip('needs updating for new protocol classes')
def test_decode_fetch_response(self):
t1 = b"topic1"
t2 = b"topic2"
msgs = [create_message(msg)
for msg in [b"message1", b"hi", b"boo", b"foo", b"so fun!"]]
ms1 = KafkaProtocol._encode_message_set([msgs[0], msgs[1]])
ms2 = KafkaProtocol._encode_message_set([msgs[2]])
ms3 = KafkaProtocol._encode_message_set([msgs[3], msgs[4]])
encoded = struct.pack('>iih%dsiihqi%dsihqi%dsh%dsiihqi%ds' %
(len(t1), len(ms1), len(ms2), len(t2), len(ms3)),
4, 2, len(t1), t1, 2, 0, 0, 10, len(ms1), ms1, 1,
1, 20, len(ms2), ms2, len(t2), t2, 1, 0, 0, 30,
len(ms3), ms3)
responses = list(KafkaProtocol.decode_fetch_response(encoded))
def expand_messages(response):
return FetchResponsePayload(response.topic, response.partition,
response.error, response.highwaterMark,
list(response.messages))
expanded_responses = list(map(expand_messages, responses))
expect = [FetchResponsePayload(t1, 0, 0, 10, [OffsetAndMessage(0, msgs[0]),
OffsetAndMessage(0, msgs[1])]),
FetchResponsePayload(t1, 1, 1, 20, [OffsetAndMessage(0, msgs[2])]),
FetchResponsePayload(t2, 0, 0, 30, [OffsetAndMessage(0, msgs[3]),
OffsetAndMessage(0, msgs[4])])]
self.assertEqual(expanded_responses, expect)
@unittest.skip('needs updating for new protocol classes')
def test_encode_metadata_request_no_topics(self):
expected = b"".join([
struct.pack(">i", 17), # Total length of the request
struct.pack('>h', 3), # API key metadata fetch
struct.pack('>h', 0), # API version
struct.pack('>i', 4), # Correlation ID
struct.pack('>h3s', 3, b"cid"),# The client ID
struct.pack('>i', 0), # No topics, give all the data!
])
encoded = KafkaProtocol.encode_metadata_request(b"cid", 4)
self.assertEqual(encoded, expected)
@unittest.skip('needs updating for new protocol classes')
def test_encode_metadata_request_with_topics(self):
expected = b"".join([
struct.pack(">i", 25), # Total length of the request
struct.pack('>h', 3), # API key metadata fetch
struct.pack('>h', 0), # API version
struct.pack('>i', 4), # Correlation ID
struct.pack('>h3s', 3, b"cid"),# The client ID
struct.pack('>i', 2), # Number of topics in the request
struct.pack('>h2s', 2, b"t1"), # Topic "t1"
struct.pack('>h2s', 2, b"t2"), # Topic "t2"
])
encoded = KafkaProtocol.encode_metadata_request(b"cid", 4, [b"t1", b"t2"])
self.assertEqual(encoded, expected)
def _create_encoded_metadata_response(self, brokers, topics):
encoded = []
encoded.append(struct.pack('>ii', 3, len(brokers)))
for broker in brokers:
encoded.append(struct.pack('>ih%dsi' % len(broker.host),
broker.nodeId, len(broker.host),
broker.host, broker.port))
encoded.append(struct.pack('>i', len(topics)))
for topic in topics:
encoded.append(struct.pack('>hh%dsi' % len(topic.topic),
topic.error, len(topic.topic),
topic.topic, len(topic.partitions)))
for metadata in topic.partitions:
encoded.append(struct.pack('>hiii', metadata.error,
metadata.partition, metadata.leader,
len(metadata.replicas)))
if len(metadata.replicas) > 0:
encoded.append(struct.pack('>%di' % len(metadata.replicas),
*metadata.replicas))
encoded.append(struct.pack('>i', len(metadata.isr)))
if len(metadata.isr) > 0:
encoded.append(struct.pack('>%di' % len(metadata.isr),
*metadata.isr))
return b''.join(encoded)
@unittest.skip('needs updating for new protocol classes')
def test_decode_metadata_response(self):
node_brokers = [
BrokerMetadata(0, b"brokers1.kafka.rdio.com", 1000),
BrokerMetadata(1, b"brokers1.kafka.rdio.com", 1001),
BrokerMetadata(3, b"brokers2.kafka.rdio.com", 1000)
]
'''
topic_partitions = [
TopicMetadata(b"topic1", 0, [
PartitionMetadata(b"topic1", 0, 1, (0, 2), (2,), 0),
PartitionMetadata(b"topic1", 1, 3, (0, 1), (0, 1), 1)
]),
TopicMetadata(b"topic2", 1, [
PartitionMetadata(b"topic2", 0, 0, (), (), 0),
]),
]
encoded = self._create_encoded_metadata_response(node_brokers,
topic_partitions)
decoded = KafkaProtocol.decode_metadata_response(encoded)
self.assertEqual(decoded, (node_brokers, topic_partitions))
'''
def test_encode_consumer_metadata_request(self):
expected = b"".join([
struct.pack(">i", 17), # Total length of the request
struct.pack('>h', 10), # API key consumer metadata
struct.pack('>h', 0), # API version
struct.pack('>i', 4), # Correlation ID
struct.pack('>h3s', 3, b"cid"),# The client ID
struct.pack('>h2s', 2, b"g1"), # Group "g1"
])
encoded = KafkaProtocol.encode_consumer_metadata_request(b"cid", 4, b"g1")
self.assertEqual(encoded, expected)
def test_decode_consumer_metadata_response(self):
encoded = b"".join([
struct.pack(">i", 42), # Correlation ID
struct.pack(">h", 0), # No Error
struct.pack(">i", 1), # Broker ID
struct.pack(">h23s", 23, b"brokers1.kafka.rdio.com"), # Broker Host
struct.pack(">i", 1000), # Broker Port
])
results = KafkaProtocol.decode_consumer_metadata_response(encoded)
self.assertEqual(results,
ConsumerMetadataResponse(error = 0, nodeId = 1, host = b'brokers1.kafka.rdio.com', port = 1000)
)
@unittest.skip('needs updating for new protocol classes')
def test_encode_offset_request(self):
expected = b"".join([
struct.pack(">i", 21), # Total length of the request
struct.pack('>h', 2), # Message type = offset fetch
struct.pack('>h', 0), # API version
struct.pack('>i', 4), # Correlation ID
struct.pack('>h3s', 3, b"cid"), # The client ID
struct.pack('>i', -1), # Replica Id
struct.pack('>i', 0), # No topic/partitions
])
encoded = KafkaProtocol.encode_offset_request(b"cid", 4)
self.assertEqual(encoded, expected)
@unittest.skip('needs updating for new protocol classes')
def test_encode_offset_request__no_payload(self):
expected = b"".join([
struct.pack(">i", 65), # Total length of the request
struct.pack('>h', 2), # Message type = offset fetch
struct.pack('>h', 0), # API version
struct.pack('>i', 4), # Correlation ID
struct.pack('>h3s', 3, b"cid"), # The client ID
struct.pack('>i', -1), # Replica Id
struct.pack('>i', 1), # Num topics
struct.pack(">h6s", 6, b"topic1"),# Topic for the request
struct.pack(">i", 2), # Two partitions
struct.pack(">i", 3), # Partition 3
struct.pack(">q", -1), # No time offset
struct.pack(">i", 1), # One offset requested
            struct.pack(">i", 4),             # Partition 4
struct.pack(">q", -1), # No time offset
struct.pack(">i", 1), # One offset requested
])
encoded = KafkaProtocol.encode_offset_request(b"cid", 4, [
OffsetRequest(b'topic1', 3, -1, 1),
OffsetRequest(b'topic1', 4, -1, 1),
])
self.assertEqual(encoded, expected)
@unittest.skip('needs updating for new protocol classes')
def test_decode_offset_response(self):
encoded = b"".join([
struct.pack(">i", 42), # Correlation ID
            struct.pack(">i", 1),             # One topic
struct.pack(">h6s", 6, b"topic1"),# First topic
struct.pack(">i", 2), # Two partitions
struct.pack(">i", 2), # Partition 2
struct.pack(">h", 0), # No error
struct.pack(">i", 1), # One offset
struct.pack(">q", 4), # Offset 4
struct.pack(">i", 4), # Partition 4
struct.pack(">h", 0), # No error
struct.pack(">i", 1), # One offset
struct.pack(">q", 8), # Offset 8
])
results = KafkaProtocol.decode_offset_response(encoded)
self.assertEqual(set(results), set([
OffsetResponse(topic = b'topic1', partition = 2, error = 0, offsets=(4,)),
OffsetResponse(topic = b'topic1', partition = 4, error = 0, offsets=(8,)),
]))
@unittest.skip('needs updating for new protocol classes')
def test_encode_offset_commit_request(self):
header = b"".join([
struct.pack('>i', 99), # Total message length
struct.pack('>h', 8), # Message type = offset commit
struct.pack('>h', 0), # API version
struct.pack('>i', 42), # Correlation ID
struct.pack('>h9s', 9, b"client_id"),# The client ID
struct.pack('>h8s', 8, b"group_id"), # The group to commit for
struct.pack('>i', 2), # Num topics
])
topic1 = b"".join([
struct.pack(">h6s", 6, b"topic1"), # Topic for the request
struct.pack(">i", 2), # Two partitions
struct.pack(">i", 0), # Partition 0
struct.pack(">q", 123), # Offset 123
struct.pack(">h", -1), # Null metadata
struct.pack(">i", 1), # Partition 1
struct.pack(">q", 234), # Offset 234
struct.pack(">h", -1), # Null metadata
])
topic2 = b"".join([
struct.pack(">h6s", 6, b"topic2"), # Topic for the request
struct.pack(">i", 1), # One partition
struct.pack(">i", 2), # Partition 2
struct.pack(">q", 345), # Offset 345
struct.pack(">h", -1), # Null metadata
])
expected1 = b"".join([ header, topic1, topic2 ])
expected2 = b"".join([ header, topic2, topic1 ])
encoded = KafkaProtocol.encode_offset_commit_request(b"client_id", 42, b"group_id", [
OffsetCommitRequest(b"topic1", 0, 123, None),
OffsetCommitRequest(b"topic1", 1, 234, None),
OffsetCommitRequest(b"topic2", 2, 345, None),
])
self.assertIn(encoded, [ expected1, expected2 ])
@unittest.skip('needs updating for new protocol classes')
def test_decode_offset_commit_response(self):
encoded = b"".join([
struct.pack(">i", 42), # Correlation ID
struct.pack(">i", 1), # One topic
struct.pack(">h6s", 6, b"topic1"),# First topic
struct.pack(">i", 2), # Two partitions
struct.pack(">i", 2), # Partition 2
struct.pack(">h", 0), # No error
struct.pack(">i", 4), # Partition 4
struct.pack(">h", 0), # No error
])
results = KafkaProtocol.decode_offset_commit_response(encoded)
self.assertEqual(set(results), set([
OffsetCommitResponse(topic = b'topic1', partition = 2, error = 0),
OffsetCommitResponse(topic = b'topic1', partition = 4, error = 0),
]))
@unittest.skip('needs updating for new protocol classes')
def test_encode_offset_fetch_request(self):
header = b"".join([
struct.pack('>i', 69), # Total message length
struct.pack('>h', 9), # Message type = offset fetch
struct.pack('>h', 0), # API version
struct.pack('>i', 42), # Correlation ID
struct.pack('>h9s', 9, b"client_id"),# The client ID
struct.pack('>h8s', 8, b"group_id"), # The group to commit for
struct.pack('>i', 2), # Num topics
])
topic1 = b"".join([
struct.pack(">h6s", 6, b"topic1"), # Topic for the request
struct.pack(">i", 2), # Two partitions
struct.pack(">i", 0), # Partition 0
struct.pack(">i", 1), # Partition 1
])
topic2 = b"".join([
struct.pack(">h6s", 6, b"topic2"), # Topic for the request
            struct.pack(">i", 1),              # One partition
struct.pack(">i", 2), # Partition 2
])
expected1 = b"".join([ header, topic1, topic2 ])
expected2 = b"".join([ header, topic2, topic1 ])
encoded = KafkaProtocol.encode_offset_fetch_request(b"client_id", 42, b"group_id", [
OffsetFetchRequest(b"topic1", 0),
OffsetFetchRequest(b"topic1", 1),
OffsetFetchRequest(b"topic2", 2),
])
self.assertIn(encoded, [ expected1, expected2 ])
@unittest.skip('needs updating for new protocol classes')
def test_decode_offset_fetch_response(self):
encoded = b"".join([
struct.pack(">i", 42), # Correlation ID
            struct.pack(">i", 1),             # One topic
struct.pack(">h6s", 6, b"topic1"),# First topic
struct.pack(">i", 2), # Two partitions
struct.pack(">i", 2), # Partition 2
struct.pack(">q", 4), # Offset 4
struct.pack(">h4s", 4, b"meta"), # Metadata
struct.pack(">h", 0), # No error
struct.pack(">i", 4), # Partition 4
struct.pack(">q", 8), # Offset 8
struct.pack(">h4s", 4, b"meta"), # Metadata
struct.pack(">h", 0), # No error
])
results = KafkaProtocol.decode_offset_fetch_response(encoded)
self.assertEqual(set(results), set([
OffsetFetchResponse(topic = b'topic1', partition = 2, offset = 4, error = 0, metadata = b"meta"),
OffsetFetchResponse(topic = b'topic1', partition = 4, offset = 8, error = 0, metadata = b"meta"),
]))
@contextmanager
def mock_create_message_fns(self):
import kafka.protocol
with patch.object(kafka.protocol.legacy, "create_message",
return_value=sentinel.message):
with patch.object(kafka.protocol.legacy, "create_gzip_message",
return_value=sentinel.gzip_message):
with patch.object(kafka.protocol.legacy, "create_snappy_message",
return_value=sentinel.snappy_message):
yield
def test_create_message_set(self):
messages = [(1, "k1"), (2, "k2"), (3, "k3")]
# Default codec is CODEC_NONE. Expect list of regular messages.
expect = [sentinel.message] * len(messages)
with self.mock_create_message_fns():
message_set = create_message_set(messages)
self.assertEqual(message_set, expect)
# CODEC_NONE: Expect list of regular messages.
expect = [sentinel.message] * len(messages)
with self.mock_create_message_fns():
message_set = create_message_set(messages, CODEC_NONE)
self.assertEqual(message_set, expect)
# CODEC_GZIP: Expect list of one gzip-encoded message.
expect = [sentinel.gzip_message]
with self.mock_create_message_fns():
message_set = create_message_set(messages, CODEC_GZIP)
self.assertEqual(message_set, expect)
# CODEC_SNAPPY: Expect list of one snappy-encoded message.
expect = [sentinel.snappy_message]
with self.mock_create_message_fns():
message_set = create_message_set(messages, CODEC_SNAPPY)
self.assertEqual(message_set, expect)
# Unknown codec should raise UnsupportedCodecError.
with self.assertRaises(UnsupportedCodecError):
create_message_set(messages, -1)
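# Hedged usage sketch (not part of the original tests): with a compression
# codec, create_message_set wraps all payloads into a single compressed
# Message, which is what the mocked assertions above verify.
#
#   msgs = create_message_set([(b"v1", b"k1"), (b"v2", b"k2")], CODEC_GZIP)
#   assert len(msgs) == 1
#   assert msgs[0].attributes & ATTRIBUTE_CODEC_MASK == CODEC_GZIP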
|
|
import os, sys
import numpy as np
import itertools
import glob
import random
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import tensorflow as tf
import data
'''
data transformations
'''
class Identity:
def __call__(self, x, y):
return x, y
class Mat2Gray:
def __call__(self, x, y):
x = tf.expand_dims(x, -1)
return x, y
class Mat2RGB:
def __call__(self, x, y):
x = tf.expand_dims(x, -1)
x = tf.repeat(x, 3, -1)
return x, y
class RGB2Gray:
def __call__(self, x, y):
x = tf.image.rgb_to_grayscale(x)
return x, y
class Float:
def __call__(self, x, y):
x = tf.cast(x, tf.float32)/255.
return x, y
class Cast:
def __init__(self, x_type, y_type):
self.x_type = x_type
self.y_type = y_type
def __call__(self, x, y):
if self.x_type is not None:
x = tf.cast(x, self.x_type)
if self.y_type is not None:
y = tf.cast(y, self.y_type)
return x, y
class Scaling:
def __init__(self, scaling_factor):
self.scaling_factor = scaling_factor
def __call__(self, x, y):
x = x * self.scaling_factor
return x, y
class Resize:
def __init__(self, size):
self.hw = size
def __call__(self, x, y):
# try:
# x = tf.image.resize_with_pad(x, self.hw[0], self.hw[1])
# except tf.python.framework.errors_impl.InvalidArgumentError:
x = tf.image.resize(x, self.hw)
return x, y
class Pad:
def __init__(self, pad_size):
assert(pad_size[0]%2==0 and pad_size[1]%2==0)
self.pad_size = pad_size
def __call__(self, x, y):
padding = tf.constant([[self.pad_size[0]//2, self.pad_size[0]//2], [self.pad_size[1]//2, self.pad_size[1]//2], [0, 0]])
x = tf.pad(x, padding)
return x, y
class Map2Tuple:
def __call__(self, m, x_name='image', y_name='label'):
return m[x_name], m[y_name]
class File2Tensor:
def __init__(self, n_channels):
self.n_channels = n_channels
def _get_image(self, fn):
img = tf.io.read_file(fn)
img = tf.image.decode_jpeg(img, channels=self.n_channels)
return img
def _get_label(self, fn):
##TODO: assume a numeric label in the file path
label = tf.strings.to_number(tf.strings.split(fn, '/')[-2], tf.int32)
return label
def __call__(self, fn):
x = self._get_image(fn)
y = self._get_label(fn)
return x, y
class PerImageStd:
def __call__(self, x, y):
x = tf.image.per_image_standardization(x)
return x, y
class DomainLabel:
def __init__(self, domain_id):
self.domain_id = tf.constant(domain_id)
def __call__(self, x, y):
y = self.domain_id
return x, y
class Squeeze:
def __call__(self, x, y):
x = tf.squeeze(x, axis=0)
y = tf.squeeze(y, axis=0)
return x, y
class Normalize:
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, x, y):
x = (x - self.mean) / self.std
return x, y
class PNGFile2LabeledExample:
def __init__(self, str2label, n_channels):
self.str2label = str2label
self.n_channels = n_channels
def __call__(self, fn, y):
x = tf.io.read_file(fn)
x = tf.image.decode_png(x, channels=self.n_channels)
return x, y
class JPGFile2LabeledExample:
def __init__(self, str2label, n_channels):
self.str2label = str2label
self.n_channels = n_channels
def __call__(self, fn, y):
x = tf.io.read_file(fn)
x = tf.image.decode_jpeg(x, channels=self.n_channels)
return x, y
class RandomCrop:
def __init__(self, size):
self.size = size
def __call__(self, x, y):
x = tf.image.random_crop(x, self.size)
return x, y
class RandomHorizontalFlip:
def __call__(self, x, y):
x = tf.image.random_flip_left_right(x)
return x, y
class CenterCrop:
def __init__(self, size, size_ori):
self.size = size
self.size_ori = size_ori
def __call__(self, x, y):
x = tf.image.central_crop(x, float(self.size)/float(self.size_ori))
return x, y
class DuplicateX:
def __call__(self, x, y):
x1 = tf.identity(x)
x2 = tf.identity(x)
return [x1, x2], y
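# A minimal usage sketch (not part of the original module): the transforms
# above are plain callables on (x, y) pairs, so they compose directly with
# tf.data's map; the particular transform choices here are illustrative only.
def _example_transform_pipeline(dataset, batch_size=32):
    # assumes `dataset` yields (image, label) pairs, e.g. built with
    # tf.data.Dataset.from_tensor_slices((images, labels))
    for tform in [Mat2Gray(), Float(), Resize((32, 32))]:
        dataset = dataset.map(tform, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    return dataset.batch(batch_size)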
"""
loaders
"""
class DataLoader:
def _init_loader(self, dataset, n, shuffle, batch_size, tforms=[]):
AUTOTUNE = tf.data.experimental.AUTOTUNE
dataset = dataset.cache()
if shuffle:
dataset = dataset.shuffle(n, reshuffle_each_iteration=True)
for tform in tforms:
dataset = dataset.map(tform, num_parallel_calls=AUTOTUNE)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(AUTOTUNE)
return dataset
def _split_data(self, x, y, ratio, random=True, seed=0):
assert(x.shape[0] == y.shape[0])
idx = [i for i in range(y.shape[0])]
if random:
np.random.seed(seed)
np.random.shuffle(idx)
n1 = round(y.shape[0]*ratio)
x1, y1 = x[idx[:n1]], y[idx[:n1]]
x2, y2 = x[idx[n1:]], y[idx[n1:]]
return (x1, y1), (x2, y2)
class DataFolderLoader(DataLoader):
def __init__(self, root, batch_size, shuffle, tforms=[], ext='png', n_channels=3, seed=0):
self.root = root
self.fns = glob.glob(os.path.join(self.root, '**', '*.'+ext))
random.seed(seed)
random.shuffle(self.fns)
label_str = list(set([os.path.split(os.path.split(fn)[0])[1] for fn in self.fns]))
str2label = {k: i for i, k in enumerate(label_str)}
labels = [str2label[os.path.split(os.path.split(fn)[0])[1]] for fn in self.fns]
AUTOTUNE = tf.data.experimental.AUTOTUNE
self.dataset = tf.data.Dataset.from_tensor_slices((tf.constant(self.fns), labels))
self.dataset = self.dataset.cache()
if shuffle:
self.dataset = self.dataset.shuffle(len(self.fns), reshuffle_each_iteration=True)
if ext == 'png':
self.dataset = self.dataset.map(PNGFile2LabeledExample(str2label, n_channels), num_parallel_calls=AUTOTUNE)
else:
assert(ext == 'jpg')
self.dataset = self.dataset.map(JPGFile2LabeledExample(str2label, n_channels), num_parallel_calls=AUTOTUNE)
for tform in tforms:
self.dataset = self.dataset.map(tform, num_parallel_calls=AUTOTUNE)
self.dataset = self.dataset.batch(batch_size)
self.dataset = self.dataset.prefetch(AUTOTUNE)
def __len__(self):
return len(self.fns)
def __iter__(self):
return self.dataset.__iter__()
def __next__(self):
        return self.dataset.__next__()
if __name__ == '__main__':
ld = DataFolderLoader('Imagenet32/val', 100, True)
for x, y in ld:
print(x.shape, y.shape)
print(y)
class MultiSourceDataset(DataLoader):
def __init__(self, src_names, aug_params, batch_size, **kwargs):
## init source datasets
ds_list = []
for s, a in itertools.product(src_names, aug_params):
if a is None:
print("src: %s + none"%(s)) ##TODO: simplify
else:
print("src: %s + %s"%(s, " + ".join(a_param[0] for a_param in a)))
ds_list.append(
getattr(data, s)(
root=os.path.join('data', s.lower()),
batch_size=batch_size, # draw batch from each source
aug_list=a,
**kwargs)
)
## convert to multi-source loaders
self.train = tf.data.experimental.sample_from_datasets([d.train for d in ds_list])
self.val = tf.data.experimental.sample_from_datasets([d.val for d in ds_list])
self.test = tf.data.experimental.sample_from_datasets([d.test for d in ds_list])
if 'domain_id' in kwargs.keys() and kwargs['domain_id'] is not None:
self.train_dom = tf.data.experimental.sample_from_datasets([d.train_dom for d in ds_list])
self.val_dom = tf.data.experimental.sample_from_datasets([d.val_dom for d in ds_list])
self.test_dom = tf.data.experimental.sample_from_datasets([d.test_dom for d in ds_list])
# class MultiSourceDataset(DataLoader):
# def __init__(self, ds_list, batch_size, train_shuffle=True, val_shuffle=False, test_shuffle=False, buffer_size=1000):
# # self.train = self._init_loader(tf.data.experimental.sample_from_datasets([d.train for d in ds_list]), buffer_size, train_shuffle, batch_size, [Squeeze()])
# # self.val = self._init_loader(tf.data.experimental.sample_from_datasets([d.val for d in ds_list]), buffer_size, val_shuffle, batch_size, [Squeeze()])
# # self.test = self._init_loader(tf.data.experimental.sample_from_datasets([d.test for d in ds_list]), buffer_size, test_shuffle, batch_size, [Squeeze()])
# # ##TODO
# # sys.exit()
# self.train = tf.data.experimental.sample_from_datasets([d.train for d in ds_list])
# self.val = tf.data.experimental.sample_from_datasets([d.val for d in ds_list])
# self.test = tf.data.experimental.sample_from_datasets([d.test for d in ds_list])
class JointLoader:
def __init__(self, lds):
self.lds = lds
def __iter__(self):
self.iters = [iter(ld) for ld in self.lds]
self.iter_end = [False for ld in self.lds]
return self
def __next__(self):
x_list, y_list = [], []
for i, it in enumerate(self.iters):
try:
x, y = next(it)
except StopIteration:
self.iter_end[i] = True
if all(self.iter_end):
raise StopIteration
else:
self.iters[i] = iter(self.lds[i])
x, y = next(self.iters[i])
x_list.append(x)
y_list.append(y)
# maintain the same batch size
bs_min = min([o.shape[0] for o in x_list])
x_list = [o[:bs_min] for o in x_list]
x_list = tf.concat(x_list, 0)
y_list = [o[:bs_min] for o in y_list]
y_list = tf.concat(y_list, 0)
return x_list, y_list
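# Hedged usage sketch (not part of the original file): JointLoader pulls one
# batch from every wrapped loader per step, restarts loaders that run out
# early, truncates every batch to the smallest batch size, and concatenates
# the results along the batch axis.
#
#   joint = JointLoader([loader_a, loader_b])  # loader_a/loader_b are placeholders
#   for x, y in joint:
#       ...  # x and y stack examples drawn from both loaders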
class ChainLoader:
def __init__(self, ld1, ld2):
self.ld1 = ld1
self.ld2 = ld2
def __iter__(self):
self.iter = itertools.chain(self.ld1, self.ld2)
return self
def __next__(self):
return next(self.iter)
class DomainDataset(DataLoader):
def __init__(self, dsld_src, dsld_tar):
self.train = JointLoader([dsld_src.train_dom, dsld_tar.train_dom])
self.val = JointLoader([dsld_src.val_dom, dsld_tar.val_dom])
self.test = JointLoader([dsld_src.test_dom, dsld_tar.test_dom])
class DomainDataset_old(DataLoader):
def __init__(self, src_names, aug_params, tar, batch_size, buffer_size=1000,
train_shuffle=True, val_shuffle=False, test_shuffle=False,
train_aug=True, val_aug=False, test_aug=False,
**kwargs
):
## init source datasets
src_ds_list = []
for s, a in itertools.product(src_names, aug_params):
if a is None:
print("[dom] src: %s + none"%(s)) ##TODO: simplify
else:
print("[dom] src: %s + %s"%(s, " + ".join(a_param[0] for a_param in a)))
src_ds_list.append(
getattr(data, s)(
root=os.path.join('data', s.lower()),
batch_size=1, # draw one sample from each source
aug_list=a,
domain_id=1,
train_shuffle=train_shuffle, val_shuffle=val_shuffle, test_shuffle=test_shuffle,
train_aug=train_aug, val_aug=val_aug, test_aug=test_aug,
**kwargs
)
)
## init the target dataset
tar_ds = getattr(data, tar)(
root=os.path.join('data', tar.lower()),
batch_size=1,
domain_id=0,
train_shuffle=train_shuffle, val_shuffle=val_shuffle, test_shuffle=test_shuffle,
train_aug=False, val_aug=False, test_aug=False,
**kwargs
)
## init loaders
train = [d.train_dom for d in src_ds_list] + [tar_ds.train_dom]
val = [d.val_dom for d in src_ds_list] + [tar_ds.val_dom]
test = [d.test_dom for d in src_ds_list] + [tar_ds.test_dom]
self.train = self._init_loader(tf.data.experimental.sample_from_datasets(train),
buffer_size, train_shuffle, batch_size, [Squeeze()])
self.val = self._init_loader(tf.data.experimental.sample_from_datasets(val),
buffer_size, val_shuffle, batch_size, [Squeeze()])
self.test = self._init_loader(tf.data.experimental.sample_from_datasets(test),
buffer_size, test_shuffle, batch_size, [Squeeze()])
# class DomainDataset(DataLoader):
# def __init__(self, src_list, tar, batch_size, train_shuffle=True, val_shuffle=False, test_shuffle=False, buffer_size=1000):
# raise NotImplementedError
# train = [d.train_dom for d in src_list] + [tar.train_dom]
# val = [d.val_dom for d in src_list] + [tar.val_dom]
# test = [d.test_dom for d in src_list] + [tar.test_dom]
# weights = [0.5/len(src_list)]*len(src_list) + [0.5]
# self.train = self._init_loader(tf.data.experimental.sample_from_datasets(train, weights=weights),
# buffer_size, train_shuffle, batch_size, [Squeeze()])
# self.val = self._init_loader(tf.data.experimental.sample_from_datasets(val, weights=weights),
# buffer_size, val_shuffle, batch_size, [Squeeze()])
# self.test = self._init_loader(tf.data.experimental.sample_from_datasets(test, weights=weights),
# buffer_size, test_shuffle, batch_size, [Squeeze()])
def rot_gaussian(rot, mu, cov):
rot_rad = np.deg2rad(rot)
R = np.array([[np.cos(rot_rad), -np.sin(rot_rad)], [np.sin(rot_rad), np.cos(rot_rad)]])
mu_rot = np.transpose(np.matmul(R, np.transpose(mu)))
cov_rot = np.matmul(np.matmul(R, cov), np.transpose(R))
return mu_rot, cov_rot
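# Hedged worked example (not part of the original file): rotating an
# axis-aligned Gaussian by 90 degrees swaps the variances along the two axes.
#
#   mu, cov = np.array([[1.0, 0.0]]), np.diag([4.0, 1.0])
#   mu90, cov90 = rot_gaussian(90, mu, cov)
#   # mu90 is approximately [[0., 1.]]; cov90 is approximately diag([1., 4.])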
"""
plot
"""
def plot_data(x, y, markers, colors, alphas, labels, facecolors=None, fn=None, markersize=2, linewidth=1, w=None, classifier=None):
#y_id = np.unique(y)
y_id = np.arange(0, y.max()+1)
assert(len(y_id) == len(markers) == len(alphas))
if facecolors is None:
facecolors = colors
plt.figure(1)
plt.clf()
## plot data
hs = []
for y_i, m_i, c_i, fc_i, a_i, l_i in zip(y_id, markers, colors, facecolors, alphas, labels):
x_i = x[y==y_i]
h = plt.plot(x_i[:, 0], x_i[:, 1], m_i, alpha=a_i, markerfacecolor=fc_i, markeredgecolor=c_i, markersize=markersize, linewidth=linewidth, label=l_i)
hs.append(h[0])
## plot decision
if classifier is not None:
X, Y = np.meshgrid(
np.linspace(-2.5, 2.5, 100),
np.linspace(-2.5, 2.5, 100))
XY = np.concatenate((X.flatten()[..., np.newaxis], Y.flatten()[..., np.newaxis]), 1)
P = classifier(XY)
Z = P[:, 1]
Z = np.reshape(Z, X.shape)
plt.contourf(X, Y, Z, alpha=0.3, cmap='RdYlGn')
plt.colorbar()
## plot targetness
if w is not None:
X, Y = np.meshgrid(
np.linspace(-2.5, 2.5, 100),
np.linspace(-2.5, 2.5, 100))
XY = np.concatenate((X.flatten()[..., np.newaxis], Y.flatten()[..., np.newaxis]), 1)
W = w(tf.constant(XY, dtype=tf.float32))
W = np.reshape(W.numpy(), X.shape)
Z = W / (1+W)
Z = ((1 - 2*np.abs(Z-0.5)) > 1e-3).astype(np.float32)
#fig, ax = plt.subplots(1,1)
plt.contourf(X, Y, Z, alpha=0.5, zorder=5)
plt.colorbar()
## beautify
plt.grid('on')
plt.gca().set_aspect('equal')
plt.xlim((-2.5, 2.5))
plt.ylim((-2.5, 2.5))
plt.legend(handles=hs)
if fn is not None:
plt.savefig(fn+'.png', bbox_inches='tight')
plt.close()
def plot_targetness(w, alpha=0.7, fn=None, fig_id=None, close=True):
overlay = True if fig_id is not None else False
X, Y = np.meshgrid(
np.linspace(-2.5, 2.5, 100),
np.linspace(-2.5, 2.5, 100))
XY = np.concatenate((X.flatten()[..., np.newaxis], Y.flatten()[..., np.newaxis]), 1)
W = w(tf.constant(XY, dtype=tf.float32))
W = np.reshape(W.numpy(), X.shape)
Z = W / (1+W)
if overlay:
plt.figure(fig_id)
else:
plt.figure(1)
plt.clf()
fig, ax = plt.subplots(1,1)
cp = ax.contourf(X, Y, Z, alpha=alpha, zorder=10)
fig.colorbar(cp)
if fn is not None:
plt.savefig(fn+'.png')
if close:
plt.close()
def shuffle_labeled_examples(x, y, seed=None):
assert(x.shape[0] == y.shape[0])
n = y.shape[0]
if seed is None:
seed = int(time.time())
np.random.seed(seed)
i_rnd = np.random.permutation(n)
x, y = x[i_rnd], y[i_rnd]
return x, y
|
|
import traceback
import xml.dom.minidom
from spitfire.compiler.ast import *
import spitfire.compiler.util
enable_debug = False
def debug(func_name, dom_node):
if not enable_debug:
return
if dom_node.attributes:
print func_name, dom_node.nodeName, dom_node.attributes.keys()
else:
print func_name, dom_node.nodeName
class XHTML2AST(object):
namespace = 'py'
attr_op_namespace = 'pyattr'
def build_template(self, filename):
f = open(filename)
data = f.read().decode('utf8')
f.close()
return self.parse(data)
def parse(self, src_text):
dom = xml.dom.minidom.parseString(src_text)
template = TemplateNode()
template.extend(self.build_ast(dom))
return template
def build_ast(self, dom_node):
debug('build_ast', dom_node)
node_list = []
if dom_node.attributes:
# the key types have a precedence that needs to be preserved
# www.zope.org/Documentation/Books/ZopeBook/2_6Edition/AppendixC.stx
# since this is also how we scan the tree, on-error is included
# fixme: content/replace are mutually exclusive, that should generate an
# error
# the thing is, the way we process things is a little complicated, so
# the order is actually different - we might handle something like
# omit-tag early on, but really only apply it's implications later on
op_precedence = [
'omit-tag',
'define',
'condition',
'repeat',
'content',
'content-html',
'replace',
'replace-html',
'attributes',
'on-error',
]
# some of these operations can alter the output stream (most of them
# really) - also, some don't exactly make sense to be on the same object
# as a repeat - for instance, repeat->replace, whereas repeat->attributes
# makes more sense
# fixme: do I need keys() here? also, i think that attribute can be None
attr_name_list = dom_node.attributes.keys()
processed_any_op = False
for op in op_precedence:
op_attr_name = '%s:%s' % (self.namespace, op)
if dom_node.hasAttribute(op_attr_name): # in attr_name_list:
op_handler = 'handle_%s' % op
op_handler = op_handler.replace('-', '_')
# print "op_handler:", op_handler, dom_node.nodeName, dom_node.attributes.keys(), processed_any_op
node_list.extend(getattr(self, op_handler)(dom_node, op_attr_name))
processed_any_op = True
# process attribute namespace
attr_output_ast = []
attr_prune_list = []
# this is horribly un-pythonic - i'm having Java flashbacks
for i in xrange(dom_node.attributes.length):
attr = dom_node.attributes.item(i)
if attr.prefix == self.attr_op_namespace:
attr_prune_list.append(attr.localName)
attr_prune_list.append('%s:%s' % (self.attr_op_namespace,
attr.localName))
attr_output_ast.extend(self.make_attr_node(attr))
# print "attr_handler:", attr.prefix, attr.localName
#processed_any_op = True
for attr_name in attr_prune_list:
try:
dom_node.removeAttribute(attr_name)
except xml.dom.NotFoundErr:
print "ignoring missing", attr_name
if not processed_any_op:
node_list.extend(self.handle_default(dom_node,
attr_ast=attr_output_ast))
else:
node_list.extend(self.handle_default(dom_node))
#for child in dom_node.childNodes:
# node_list.extend(self.build_ast(child))
return node_list
# attr_ast - allow injecting some ast nodes
# fixme: feels like it could have a cleaner API
def handle_default(self, dom_node, attr_ast=None):
debug('handle_default', dom_node)
node_list = []
if dom_node.nodeType == xml.dom.minidom.Node.ELEMENT_NODE:
node_list.extend(self.make_tag_node(dom_node, attr_ast=attr_ast))
for child in dom_node.childNodes:
node_list.extend(self.build_ast(child))
node_list.extend(self.make_tag_node(dom_node, close=True))
elif dom_node.nodeType == xml.dom.minidom.Node.TEXT_NODE:
node_list.append(TextNode(dom_node.nodeValue))
elif dom_node.nodeType == xml.dom.minidom.Node.COMMENT_NODE:
# node_list.append(TextNode(dom_node.nodeValue))
pass
elif dom_node.nodeType == xml.dom.minidom.Node.DOCUMENT_NODE:
for child in dom_node.childNodes:
node_list.extend(self.build_ast(child))
elif dom_node.nodeType == xml.dom.minidom.Node.PROCESSING_INSTRUCTION_NODE:
if dom_node.nodeName == 'py-doctype':
node_list.append(TextNode(dom_node.nodeValue))
else:
raise Exception("unexepected processing instruction: %s" % dom_node)
else:
raise Exception("unexepected node type: %s" % dom_node.nodeType)
return node_list
def make_tag_node(self, dom_node, close=False, attr_ast=None):
debug("make_tag_node", dom_node)
node_list = []
node_name = dom_node.nodeName
if close:
if self.has_child_stuff(dom_node):
node_list.append(TextNode(u'</%(node_name)s>' % vars()))
else:
attr_text = ' '.join(['%s="%s"' % (key, value)
for key, value in dom_node.attributes.items()
if not key.startswith('py:')])
# fixme: this is starting to look fugly - hard to maintain and error prone
if self.has_child_stuff(dom_node):
if attr_text:
if attr_ast:
node_list.append(TextNode(u'<%(node_name)s %(attr_text)s' % vars()))
node_list.extend(attr_ast)
node_list.append(TextNode(u'>'))
else:
node_list.append(TextNode(u'<%(node_name)s %(attr_text)s>' % vars()))
else:
if attr_ast:
node_list.append(TextNode(u'<%(node_name)s' % vars()))
node_list.extend(attr_ast)
node_list.append(TextNode(u'>'))
else:
node_list.append(TextNode(u'<%(node_name)s>' % vars()))
else:
if attr_text:
if attr_ast:
# print "XXX make_tag_node", dom_node.nodeName, attr_ast
node_list.append(TextNode(u'<%(node_name)s %(attr_text)s' % vars()))
node_list.extend(attr_ast)
node_list.append(TextNode(u' />'))
else:
node_list.append(TextNode(u'<%(node_name)s %(attr_text)s />' % vars()))
else:
if attr_ast:
node_list.append(TextNode(u'<%(node_name)s' % vars()))
node_list.extend(attr_ast)
node_list.append(TextNode(u' />'))
else:
node_list.append(TextNode(u'<%(node_name)s />' % vars()))
omit_tag = getattr(dom_node, 'omit_tag', False)
omit_tag_ast = getattr(dom_node, 'omit_tag_ast', None)
if omit_tag:
if omit_tag_ast:
if_node = IfNode(omit_tag_ast)
if_node.extend(node_list)
return [if_node]
else:
return []
return node_list
def make_attr_node(self, attr):
node_list = []
new_attr_name = attr.localName
attr_ast = spitfire.compiler.util.parse(attr.nodeValue, 'rhs_expression')
node_list.append(TextNode(u' %(new_attr_name)s="' % vars()))
# fixme: need to guarantee good output - escape sequences etc
node_list.append(PlaceholderSubstitutionNode(attr_ast))
node_list.append(TextNode('"'))
return node_list
def handle_define(self, dom_node, attr_name):
node_list = []
node_name = dom_node.nodeName
# print "handle_define", node_name
# fixme: this is a nasty temp hack, it will generate the correct code
# for 1 define, but multiple expressions won't work
ast = spitfire.compiler.util.parse(dom_node.getAttribute(attr_name),
'argument_list')
dom_node.removeAttribute(attr_name)
node_list.extend(ast)
node_list.extend(self.build_ast(dom_node))
return node_list
def handle_content(self, dom_node, attr_name):
debug("handle_content", dom_node)
#traceback.print_stack()
expr_ast = spitfire.compiler.util.parse(
dom_node.getAttribute(attr_name), 'rhs_expression')
dom_node.removeAttribute(attr_name)
setattr(dom_node, 'has_child_stuff', True)
node_list = []
debug("handle_content start", dom_node)
node_list.extend(self.make_tag_node(dom_node))
node_list.append(PlaceholderSubstitutionNode(expr_ast))
debug("handle_content end", dom_node)
node_list.extend(self.make_tag_node(dom_node, close=True))
debug("handle_content return", dom_node)
return node_list
def handle_omit_tag(self, dom_node, attr_name):
debug("handle_omit_tag", dom_node)
node_list = []
node_name = dom_node.nodeName
raw_expression = dom_node.getAttribute(attr_name)
if raw_expression:
ast = spitfire.compiler.util.parse(raw_expression, 'argument_list')
else:
ast = None
dom_node.removeAttribute(attr_name)
setattr(dom_node, 'omit_tag', True)
setattr(dom_node, 'omit_tag_ast', ast)
return node_list
def handle_replace(self, dom_node, attr_name):
expr_ast = spitfire.compiler.util.parse(
dom_node.getAttribute(attr_name), 'rhs_expression')
dom_node.removeAttribute(attr_name)
return [PlaceholderSubstitutionNode(expr_ast)]
def has_child_stuff(self, dom_node):
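    # Determine (and cache on the DOM node) whether this element will produce
    # any inner output - real child nodes or generated py:content/py:replace -
    # which controls whether make_tag_node emits <tag>...</tag> or <tag />.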
if getattr(dom_node, 'has_child_stuff', False):
return True
has_child_stuff = False
for attr_name in ('py:content', 'py:replace',):
if dom_node.hasAttribute(attr_name):
has_child_stuff = True
break
else:
has_child_stuff = bool(dom_node.childNodes)
setattr(dom_node, 'has_child_stuff', has_child_stuff)
return has_child_stuff
def handle_repeat(self, dom_node, attr_name):
debug("handle_repeat", dom_node)
expr_pieces = dom_node.getAttribute(attr_name).split()
dom_node.removeAttribute(attr_name)
target = expr_pieces[0]
expr_ast = spitfire.compiler.util.parse(
' '.join(expr_pieces[1:]), 'rhs_expression')
node_list = []
# hack - assumes python syntax
fn = ForNode(
TargetListNode([IdentifierNode("self.repeat['%s']" % target),
IdentifierNode(target)]),
ExpressionListNode([CallFunctionNode(IdentifierNode('enumerate'),
ArgListNode([expr_ast]))]))
if self.has_child_stuff(dom_node):
debug("has_child_stuff:", dom_node)
fn.extend(self.build_ast(dom_node))
#fn.append(self.make_tag_node(dom_node))
#for n in dom_node.childNodes:
# fn.extend(self.build_ast(n))
else:
# print "no children"
fn.extend(self.build_ast(dom_node))
if (dom_node.previousSibling and
dom_node.previousSibling.nodeType == xml.dom.minidom.Node.TEXT_NODE and
not dom_node.previousSibling.nodeValue.strip()):
# inject the previous whitespace sibling to keep the output looking ok
# fixme: a conditional is probably required here - you only want to
# execute this if it's not the last execution of the loop
fn.prepend(self.build_ast(dom_node.previousSibling))
# now remove the previous sibling
#print "node", dom_node
#print "parent", dom_node.parentNode
#print "previous", dom_node.previousSibling, id(dom_node.previousSibling)
#print "next", dom_node.nextSibling, id(dom_node.nextSibling)
#dom_node.parentNode.removeChild(dom_node.previousSibling)
node_list.append(EatPrevious())
node_list.append(fn)
#fn.extend(self.make_tag_node(dom_node, close=True))
return node_list
def handle_condition(self, dom_node, attr_name):
expr_ast = spitfire.compiler.util.parse(
dom_node.getAttribute(attr_name), 'rhs_expression')
node_list = []
if_node = IfNode(expr_ast)
node_list.append(if_node)
if_node.append(self.make_tag_node(dom_node))
for n in dom_node.childNodes:
if_node.extend(self.build_ast(n))
if_node.extend(self.make_tag_node(dom_node, close=True))
return node_list
def build_udn_path_ast(self, path):
pieces = path.split('.')
node = PlaceholderNode(pieces[0])
for piece in pieces[1:]:
node = GetUDNNode(node, piece)
return node
if __name__ == '__main__':
import sys
import spitfire.compiler.util
x2a = XHTML2AST()
filename = sys.argv[1]
tnode = x2a.build_template(filename)
print tnode
classname = spitfire.compiler.util.filename2classname(filename)
src = spitfire.compiler.util.compile_ast(tnode, classname)
print src
module = spitfire.compiler.util.load_module_from_src(src, '<none>', classname)
tclass = getattr(module, classname)
d = {
'test_x': 'x var',
'test_y': 'y var',
'test_z': 'z var',
'test_number_list': [1, 2, 3, 4, 5],
'test_object_list': [{'id': 1, 'name': 'o1'},
{'id': 2, 'name': 'o2'},
{'id': 3, 'name': 'o3'},
],
'test_dict': {'key1': 1},
'test_whitespaced_dict': {'key 1': 1},
'test_range': range,
'content_type': 'test/spitfire',
}
print tclass(search_list=[d]).main()
|
|
#!/usr/bin/env python3
from __future__ import unicode_literals
import subprocess
import sys
import threading
from time import sleep
import re
from texttable import Texttable
from pyfeld.pingTest import ping_test_alive
try:
from pyfeld.rfcmd import RfCmd
except ImportError:
pass
sshcmd = "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@"
scpcmd = "scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "
#class RFMacroCommand:
class UpdateProcessesFreeToKill:
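    # Collects the long-running `raumfeld-update` ssh subprocesses started by
    # force_update() so they can all be killed once the devices have rebooted.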
def __init__(self):
self.processList = list()
def runCommand(self, cmd):
try:
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.processList.append(process)
except Exception as e:
return 0
def killall(self):
for proc in self.processList:
proc.kill()
def retrieve(cmd):
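    # Run `cmd` through the shell, stream its stdout until the process exits,
    # and return the collected output decoded as UTF-8; returns 0 if the
    # process could not be started.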
try:
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception as e:
return 0
lines = ""
while True:
nextline = process.stdout.readline()
        if len(nextline) == 0 and process.poll() is not None:
break
lines += nextline.decode('utf-8')
return lines
def get_ips():
RfCmd.discover()
result = RfCmd.get_device_ips(False, 'list')
return result
def show_pretty_versions():
result_list = list()
header_list = ["IP", "Role", "Version", "Name", "Streamcast version", ]
result_list.append(header_list)
print("Versions installed:")
ips = get_ips()
for ip in ips:
line = retrieve(sshcmd + ip + " cat /var/raumfeld-1.0/device-role.json")
if "true" in line:
moreinfo = "host"
else:
moreinfo = "slave"
renderer_name = RfCmd.get_device_name_by_ip(ip)
line = retrieve(sshcmd + ip + " cat /etc/raumfeld-version")
line_streamcast = retrieve(sshcmd + ip + " streamcastd --version")
single_result = list()
single_result.append(ip)
single_result.append(moreinfo)
single_result.append(line.rstrip())
single_result.append(renderer_name)
single_result.append(line_streamcast.rstrip())
result_list.append(single_result)
t = Texttable(250)
t.add_rows(result_list)
print(t.draw())
def show_versions():
print("Versions installed:")
ips = get_ips()
for ip in ips:
line = retrieve(sshcmd+ip+" cat /var/raumfeld-1.0/device-role.json")
if "true" in line:
moreinfo = "host"
else:
moreinfo = "slave"
renderer_name = RfCmd.get_device_name_by_ip(ip)
line = retrieve(sshcmd+ip+" cat /etc/raumfeld-version")
line_streamcast = retrieve(sshcmd+ip+" streamcastd --version")
print(ip + "\t" + moreinfo + "\t" + line.rstrip() + "\t" + line_streamcast.rstrip() + "\t" + str(renderer_name))
def clean_host_keys():
print("cleaning host_keys:")
ips = get_ips()
for ip in ips:
line = retrieve("ssh-keygen -R "+ip)
print(ip + ":\t" + line.rstrip())
def single_device_update(free_to_kill, ip, url):
cmd = sshcmd + ip + " raumfeld-update --force " + url
print("running cmd: "+cmd)
free_to_kill.runCommand(cmd)
def force_update(url):
print("Force updating with url " + url)
ips = get_ips()
processes = list()
device_pingable = dict()
free_to_kill = UpdateProcessesFreeToKill()
count = 0
for ip in ips:
proc = threading.Thread(target=single_device_update, args=(free_to_kill, ip, url))
proc.start()
processes.append(proc)
device_pingable[ip] = True
count += 1
temp_count = count
print("Waiting for action...")
sleep(5)
while count > 0:
sleep(10)
print("")
for ip in ips:
if device_pingable[ip]:
print("testing if ping alive: " + ip + " " + str(RfCmd.map_ip_to_friendly_name(ip)))
if not ping_test_alive(ip):
device_pingable[ip] = False
count -= 1
count = temp_count
print("Rebooting in progress...")
while count > 0:
sleep(10)
print("")
for ip in ips:
if not device_pingable[ip]:
print("testing if ping reborn: " + ip + " " + str(RfCmd.map_ip_to_friendly_name(ip)))
if ping_test_alive(ip):
device_pingable[ip] = True
count -= 1
print("done updating shells. Leaving the houses now.")
free_to_kill.killall()
for proc in processes:
proc.join()
print("Processes joined joyfully")
def single_device_command(ip, cmd):
cmd = sshcmd + ip + " " + cmd
print("running cmd on device {0}: {1}".format(ip, cmd))
lines = retrieve(cmd)
print("result from {0}".format(ip))
print(lines)
def ssh_command(cmd):
print("Send command to all devices: " + cmd)
ips = get_ips()
processes = list()
for ip in ips:
proc = threading.Thread(target=single_device_command, args=(ip, cmd))
proc.start()
processes.append(proc)
for proc in processes:
proc.join()
def scp_up_file(local_file, target_location):
print("Copy file:")
ips = get_ips()
for ip in ips:
line = retrieve(scpcmd+" {1} root@{0}:{2}".format(ip, local_file, target_location))
print(ip + ":\t" + line.rstrip())
def scp_down_file(remote_file, target_file):
print("Copy file:")
ips = get_ips()
for ip in ips:
line = retrieve(scpcmd+" root@{0}:{1} {2}".format(ip, remote_file, target_file))
print(ip + ":\t" + line.rstrip())
def usage(argv):
print("Usage: {0} COMMAND [params]".format(argv[0]))
print("Execute macrocommands over ssh for interacting with raumfeld if you got many devices, these need SSH access allowed")
print("COMMAND may be one of the following")
print("version show versions")
print("update <URL> force update")
print("ssh <command> any shell available on device, command in quotes")
print("upload <file> <target> copy a file to a target location")
print("download <file> <target> copy a file from device to target")
print("")
print("clean-hostkeys clean all host keys to avoid security messages")
'''
print("#you might add a file ~/.ssh/config with following content")
print("Host * (or 192.168.*")
print("StrictHostKeyChecking no")
'''
def run_macro(argv):
if len(argv) < 2:
usage(argv)
sys.exit(2)
command = argv[1]
if command == 'version':
show_pretty_versions()
elif command == 'update':
force_update(argv[2])
elif command == 'ssh':
ssh_command(" ".join(argv[2:]))
elif command == 'upload':
scp_up_file(argv[2], argv[3])
elif command == 'download':
scp_down_file(argv[2], argv[3])
elif command == 'clean-hostkeys':
clean_host_keys()
else:
print("Unknown command {0}".format(command))
usage(argv)
def run_main():
run_macro(sys.argv)
if __name__ == "__main__":
run_main()
|
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pybuilder import pluginloader
from pybuilder.errors import MissingPluginException, IncompatiblePluginException, UnspecifiedPluginNameException
from pybuilder import pip_common
from pybuilder.pip_utils import PIP_EXEC_STANZA
from pybuilder.pluginloader import (BuiltinPluginLoader,
DispatchingPluginLoader,
DownloadingPluginLoader,
_install_external_plugin,
_check_plugin_version)
from test_utils import patch, Mock, ANY
class PluginVersionCheckTest(unittest.TestCase):
def setUp(self):
self.old_pyb_version = pluginloader.PYB_VERSION
def tearDown(self):
pluginloader.PYB_VERSION = self.old_pyb_version
def test_version_exact_match(self):
plugin_module = Mock()
pluginloader.PYB_VERSION = pip_common.Version("1.2.3")
plugin_module.pyb_version = "===1.2.3"
_check_plugin_version(plugin_module, "test plugin")
def test_version_compatible_match(self):
plugin_module = Mock()
pluginloader.PYB_VERSION = pip_common.Version("1.2.3")
plugin_module.pyb_version = "~=1.2"
_check_plugin_version(plugin_module, "test plugin")
def test_version_multiple_specifier_match(self):
plugin_module = Mock()
pluginloader.PYB_VERSION = pip_common.Version("1.2.3")
plugin_module.pyb_version = ">=1.2.0,<=1.2.4"
_check_plugin_version(plugin_module, "test plugin")
def test_version_no_match(self):
plugin_module = Mock()
pluginloader.PYB_VERSION = pip_common.Version("1.2.3")
plugin_module.pyb_version = ">=1.2.5"
self.assertRaises(IncompatiblePluginException, _check_plugin_version, plugin_module, "test plugin")
class DownloadingPluginLoaderTest(unittest.TestCase):
@patch("pybuilder.pluginloader._load_plugin")
@patch("pybuilder.pluginloader._install_external_plugin")
def test_should_download_module_from_pypi(self, install, load):
logger = Mock()
project = Mock()
project.get_property.side_effect = lambda x: "index_url" if x == "install_dependencies_index_url" \
else "extra_index_url" if x == "install_dependencies_extra_index_url" else None
load.side_effect = (MissingPluginException("external_plugin"), Mock())
DownloadingPluginLoader(logger).load_plugin(project, "pypi:external_plugin")
install.assert_called_with(project, "pypi:external_plugin", None, logger, None)
@patch("pybuilder.pluginloader._load_plugin")
@patch("pybuilder.pluginloader._install_external_plugin")
def test_should_load_module_after_downloading_with_pypi_when_download_succeeds(self, _, load):
project = Mock()
downloader = DownloadingPluginLoader(Mock())
plugin = downloader.load_plugin(project, "pypi:external_plugin")
load.assert_called_with("external_plugin", "pypi:external_plugin")
self.assertEqual(plugin, load.return_value)
@patch("pybuilder.pluginloader._load_plugin")
@patch("pybuilder.pluginloader._install_external_plugin")
def test_should_not_load_module_twice_after_downloading_when_pypi_download_fails(self, install, load):
install.side_effect = MissingPluginException("PyPI Install Boom")
load.side_effect = MissingPluginException("PyPI Load Boom")
downloader = DownloadingPluginLoader(Mock())
self.assertRaises(MissingPluginException, downloader.load_plugin, Mock(), "pypi:external_plugin")
self.assertEqual(load.call_count, 1)
@patch("pybuilder.pluginloader._load_plugin")
@patch("pybuilder.pluginloader._install_external_plugin")
def test_should_not_load_module_twice_after_downloading_when_vcs_download_fails(self, install, load):
install.side_effect = MissingPluginException("VCS Install BOOM")
load.side_effect = MissingPluginException("VCS Load Boom")
downloader = DownloadingPluginLoader(Mock())
self.assertRaises(MissingPluginException, downloader.load_plugin, Mock(), "vcs:external_plugin URL",
plugin_module_name="vcs_module_name")
self.assertEqual(load.call_count, 1)
@patch("pybuilder.pluginloader._load_plugin")
@patch("pybuilder.pluginloader._install_external_plugin")
def test_should_fail_with_vcs_and_no_module_name(self, install, load):
install.side_effect = MissingPluginException("VCS BOOM")
downloader = DownloadingPluginLoader(Mock())
self.assertRaises(UnspecifiedPluginNameException, downloader.load_plugin, Mock(), "vcs:external_plugin URL")
@patch("pybuilder.pluginloader._load_plugin")
@patch("pybuilder.pluginloader._install_external_plugin")
def test_should_fail_with_vcs_when_no_plugin_module_specified(self, _, load):
project = Mock()
downloader = DownloadingPluginLoader(Mock())
self.assertRaises(UnspecifiedPluginNameException, downloader.load_plugin, project, "vcs:external_plugin URL")
@patch("pybuilder.pluginloader._load_plugin")
@patch("pybuilder.pluginloader._install_external_plugin")
def test_should_load_module_after_downloading_with_vcs_when_download_succeeds(self, _, load):
project = Mock()
downloader = DownloadingPluginLoader(Mock())
plugin = downloader.load_plugin(project, "vcs:external_plugin URL", plugin_module_name="external_plugin_module")
load.assert_called_with("external_plugin_module", "vcs:external_plugin URL")
self.assertEqual(plugin, load.return_value)
@patch("pybuilder.pluginloader._load_plugin")
@patch("pybuilder.pluginloader._install_external_plugin")
def test_should_raise_exception_when_requiring_plugin_and_plugin_is_not_found(self, _, load):
project = Mock()
downloader = DownloadingPluginLoader(Mock())
load.side_effect = MissingPluginException("Load boom")
self.assertRaises(MissingPluginException, downloader.load_plugin, project, "spam")
load.assert_called_with("spam", "spam")
@patch("pybuilder.pluginloader._load_plugin")
@patch("pybuilder.pluginloader._install_external_plugin")
def test_should_import_plugin_when_requiring_plugin_and_plugin_is_found_as_third_party(self, install, load):
project = Mock()
downloader = DownloadingPluginLoader(Mock())
load.return_value = Mock()
self.assertEqual(load.return_value, downloader.load_plugin(project, "spam"))
install.assert_not_called()
self.assertEqual(install.call_count, 0)
@patch("pybuilder.pluginloader._load_plugin")
@patch("pybuilder.pluginloader._install_external_plugin")
def test_should_force_reinstall_vcs_plugin_before_first_loading_attempt(self, install, load):
project = Mock()
downloader = DownloadingPluginLoader(Mock())
load.return_value = Mock()
self.assertEqual(load.return_value, downloader.load_plugin(project, "vcs:spam", plugin_module_name="spam"))
install.assert_called_with(project, "vcs:spam", None, downloader.logger, "spam", False, True)
self.assertEqual(install.call_count, 1)
@patch("pybuilder.pluginloader._load_plugin")
@patch("pybuilder.pluginloader._install_external_plugin")
def test_should_update_pypi_plugin_with_non_exact_version_before_first_loading_attempt(self, install, load):
project = Mock()
downloader = DownloadingPluginLoader(Mock())
load.return_value = Mock()
self.assertEqual(load.return_value, downloader.load_plugin(project, "pypi:spam", ">1.2"))
install.assert_called_with(project, "pypi:spam", ">1.2", downloader.logger, None, True, False)
self.assertEqual(install.call_count, 1)
@patch("pybuilder.pluginloader._load_plugin")
@patch("pybuilder.pluginloader._install_external_plugin")
def test_should_update_pypi_plugin_with_compound_non_exact_version_before_first_loading_attempt(self, install,
load):
project = Mock()
downloader = DownloadingPluginLoader(Mock())
load.return_value = Mock()
self.assertEqual(load.return_value, downloader.load_plugin(project, "pypi:spam", ">1.2,==1.4"))
install.assert_called_with(project, "pypi:spam", ">1.2,==1.4", downloader.logger, None, True, False)
self.assertEqual(install.call_count, 1)
@patch("pybuilder.pluginloader._load_plugin")
@patch("pybuilder.pluginloader._install_external_plugin")
def test_should_not_update_pypi_plugin_with_exact_version_before_first_loading_attempt(self, install, load):
project = Mock()
downloader = DownloadingPluginLoader(Mock())
plugin = Mock()
load.side_effect = (MissingPluginException("no spam installed"), plugin)
self.assertEqual(plugin, downloader.load_plugin(project, "pypi:spam", "===1.4"))
install.assert_called_with(project, "pypi:spam", "===1.4", downloader.logger, None)
self.assertEqual(install.call_count, 1)
class InstallExternalPluginTests(unittest.TestCase):
def test_should_raise_error_when_protocol_is_invalid(self):
self.assertRaises(MissingPluginException, _install_external_plugin, Mock(), "some-plugin", None, Mock(), None)
@patch("pybuilder.pluginloader.read_file")
@patch("pybuilder.pluginloader.tempfile")
@patch("pybuilder.pip_utils.execute_command")
def test_should_install_plugin(self, execute, tempfile, read_file):
read_file.return_value = ["no problems", "so far"]
execute.return_value = 0
tempfile.NamedTemporaryFile().__enter__().name.__eq__.return_value = True
_install_external_plugin(Mock(), "pypi:some-plugin", None, Mock(), None)
execute.assert_called_with(
PIP_EXEC_STANZA + ['install', '--index-url', ANY, '--extra-index-url', ANY, '--trusted-host', ANY,
'some-plugin'], shell=False, outfile_name=ANY, error_file_name=ANY, cwd=".", env=ANY)
@patch("pybuilder.pluginloader.read_file")
@patch("pybuilder.pluginloader.tempfile")
@patch("pybuilder.pip_utils.execute_command")
def test_should_install_plugin_with_version(self, execute, tempfile, read_file):
read_file.return_value = ["no problems", "so far"]
execute.return_value = 0
tempfile.NamedTemporaryFile().__enter__().name.__eq__.return_value = True
_install_external_plugin(Mock(), "pypi:some-plugin", "===1.2.3", Mock(), None)
execute.assert_called_with(
PIP_EXEC_STANZA + ['install', '--index-url', ANY, '--extra-index-url', ANY, '--trusted-host', ANY] +
(["--upgrade"] if pip_common.pip_version < "9.0" else ["--upgrade", "--upgrade-strategy", "only-if-needed"])
+ ['some-plugin===1.2.3'], shell=False, outfile_name=ANY, error_file_name=ANY, cwd=".", env=ANY)
@patch("pybuilder.pluginloader.read_file")
@patch("pybuilder.pluginloader.tempfile")
@patch("pybuilder.pip_utils.execute_command")
def test_should_install_plugin_with_vcs(self, execute, tempfile, read_file):
read_file.return_value = ["no problems", "so far"]
execute.return_value = 0
tempfile.NamedTemporaryFile().__enter__().name.__eq__.return_value = True
_install_external_plugin(Mock(), "vcs:some-plugin URL", None, Mock(), None)
execute.assert_called_with(
PIP_EXEC_STANZA + ['install', '--index-url', ANY, '--extra-index-url', ANY, '--trusted-host', ANY,
'--force-reinstall', 'some-plugin URL'], shell=False, outfile_name=ANY,
error_file_name=ANY, cwd=".", env=ANY)
@patch("pybuilder.pluginloader.read_file")
@patch("pybuilder.pluginloader.tempfile")
@patch("pybuilder.pip_utils.execute_command")
def test_should_install_plugin_with_vcs_and_version(self, execute, tempfile, read_file):
read_file.return_value = ["no problems", "so far"]
execute.return_value = 0
tempfile.NamedTemporaryFile().__enter__().name.__eq__.return_value = True
_install_external_plugin(Mock(), "vcs:some-plugin URL", "===1.2.3", Mock(), None)
execute.assert_called_with(
PIP_EXEC_STANZA + ['install', '--index-url', ANY, '--extra-index-url', ANY, '--trusted-host', ANY,
'--force-reinstall', 'some-plugin URL'], shell=False, outfile_name=ANY,
error_file_name=ANY, cwd=".", env=ANY)
@patch("pybuilder.pluginloader.read_file")
@patch("pybuilder.pluginloader.tempfile")
@patch("pybuilder.pip_utils.execute_command")
def test_should_raise_error_when_install_from_pypi_fails(self, execute, tempfile, read_file):
read_file.return_value = ["something", "went wrong"]
execute.return_value = 1
tempfile.NamedTemporaryFile().__enter__().name.__eq__.return_value = True
self.assertRaises(MissingPluginException, _install_external_plugin, Mock(), "pypi:some-plugin", None, Mock(),
None)
@patch("pybuilder.pluginloader.read_file")
@patch("pybuilder.pluginloader.tempfile")
@patch("pybuilder.pip_utils.execute_command")
def test_should_raise_error_when_install_from_vcs_fails(self, execute, tempfile, read_file):
read_file.return_value = ["something", "went wrong"]
execute.return_value = 1
tempfile.NamedTemporaryFile().__enter__().name.__eq__.return_value = True
self.assertRaises(MissingPluginException, _install_external_plugin, Mock(), "vcs:some VCS URL", None, Mock(),
None)
class BuiltinPluginLoaderTest(unittest.TestCase):
def setUp(self):
self.project = Mock()
self.loader = BuiltinPluginLoader(Mock())
@patch("pybuilder.pluginloader._load_plugin")
def test_should_raise_exception_when_requiring_plugin_and_plugin_is_not_found(self, load):
load.side_effect = MissingPluginException("pybuilder.plugins.spam_plugin")
self.assertRaises(MissingPluginException, self.loader.load_plugin, self.project, "spam")
load.assert_called_with("pybuilder.plugins.spam_plugin", "spam")
@patch("pybuilder.pluginloader._load_plugin")
def test_should_import_plugin_when_requiring_plugin_and_plugin_is_found_as_builtin(self, load):
load.return_value = Mock()
plugin_module = self.loader.load_plugin(self.project, "spam")
load.assert_called_with("pybuilder.plugins.spam_plugin", "spam")
self.assertEqual(load.return_value, plugin_module)
class DispatchingPluginLoaderTest(unittest.TestCase):
def setUp(self):
self.project = Mock()
self.first_delegatee = Mock()
self.second_delegatee = Mock()
self.loader = DispatchingPluginLoader(
Mock(), self.first_delegatee, self.second_delegatee)
def test_should_raise_exception_when_all_delegatees_raise_exception(self):
self.first_delegatee.load_plugin.side_effect = MissingPluginException("spam")
self.second_delegatee.load_plugin.side_effect = MissingPluginException("spam")
self.assertRaises(
MissingPluginException, self.loader.load_plugin, self.project, "spam")
self.first_delegatee.load_plugin.assert_called_with(self.project, "spam", None, None)
self.second_delegatee.load_plugin.assert_called_with(self.project, "spam", None, None)
def test_should_return_module_returned_by_second_loader_when_first_delegatee_raises_exception(self):
result = "result"
self.first_delegatee.load_plugin.side_effect = MissingPluginException("spam")
self.second_delegatee.load_plugin.return_value = result
self.assertEqual(result, self.loader.load_plugin(self.project, "spam"))
self.first_delegatee.load_plugin.assert_called_with(self.project, "spam", None, None)
self.second_delegatee.load_plugin.assert_called_with(self.project, "spam", None, None)
def test_ensure_second_delegatee_will_not_try_when_first_delegatee_loads_plugin(self):
result = "result"
self.first_delegatee.load_plugin.return_value = result
self.assertEqual(result, self.loader.load_plugin(self.project, "spam"))
self.first_delegatee.load_plugin.assert_called_with(self.project, "spam", None, None)
self.second_delegatee.load_plugin.assert_not_called()
|
|
import zipfile
import io
from datetime import datetime
from util.ncconv.experimental.ocg_converter.subocg_converter import SubOcgConverter
#from xml.sax.saxutils import escape
class KmlConverter(SubOcgConverter):
'''Converts data to a KML string'''
def _convert_(self,request):
from pykml.factory import KML_ElementMaker as KML
from lxml import etree
## create the database
if self.use_stat:
raise(NotImplementedError)
else:
db = self.sub.to_db(wkt=True,to_disk=True)
meta = request.ocg
if request.environ['SERVER_PORT']=='80':
portstr = ''
else:
portstr = ':{port}'.format(port=request.environ['SERVER_PORT'])
url='{protocol}://{server}{port}{path}'.format(
protocol='http',
port=portstr,
server=request.environ['SERVER_NAME'],
path=request.environ['PATH_INFO'],
)
description = (
'<table border="1">'
'<tbody>'
'<tr><th>Archive</th><td>{archive}</td></tr>'
'<tr><th>Emissions Scenario</th><td>{scenario}</td></tr>'
'<tr><th>Climate Model</th><td>{model}</td></tr>'
'<tr><th>Run</th><td>{run}</td></tr>'
'<tr><th>Output Variable</th><td>{variable}</td></tr>'
'<tr><th>Units</th><td>{units}</td></tr>'
'<tr><th>Start Time</th><td>{start}</td></tr>'
'<tr><th>End Time</th><td>{end}</td></tr>'
'<tr>'
'<th>Request URL</th>'
'<td><a href="{url}">{url}</a></td>'
'</tr>'
'<tr>'
'<th>Other Available Formats</th>'
'<td>'
'<a href="{url}">KML</a> - Keyhole Markup Language<br/>'
'<a href="{url_kmz}">KMZ</a> - Keyhole Markup Language (zipped)<br/>'
'<a href="{url_shz}">Shapefile</a> - ESRI Shapefile<br/>'
'<a href="{url_csv}">CSV</a> - Comma Separated Values (text file)<br/>'
'<a href="{url_json}">JSON</a> - Javascript Object Notation'
'</td>'
'</tr>'
'</tbody>'
'</table>'
).format(
archive=meta.archive.name,
scenario=meta.scenario,
model=meta.climate_model,
run=meta.run,
variable=meta.variable,
units=meta.variable.units,
simout=meta.simulation_output.netcdf_variable,
start=meta.temporal[0],
end=meta.temporal[-1],
operation=meta.operation,
url=url,
url_kmz=url.replace('.kml', '.kmz'),
url_shz=url.replace('.kml', '.shz'),
url_csv=url.replace('.kml', '.csv'),
url_json=url.replace('.kml', '.geojson'),
)
##### TODO: build linked urls on the fly
#from piston.emitters import Emitter
#Emitter.EMITTERS.keys()
#['xml', 'sqlite', 'nc', 'shz', 'kml', 'kcsv', 'django', 'json', 'html', 'meta', 'lshz', 'csv', 'pickle', 'kmz']
doc = KML.kml(
KML.Document(
KML.name('Climate Simulation Output'),
KML.open(1),
KML.description(description),
KML.snippet(
'<i>Click for metadata!</i>',
maxLines="2",
),
KML.StyleMap(
KML.Pair(
KML.key('normal'),
KML.styleUrl('#style-normal'),
),
KML.Pair(
KML.key('highlight'),
KML.styleUrl('#style-highlight'),
),
id="smap",
),
KML.Style(
KML.LineStyle(
KML.color('ff0000ff'),
KML.width('2'),
),
KML.PolyStyle(
KML.color('400000ff'),
),
id="style-normal",
),
KML.Style(
KML.LineStyle(
KML.color('ff00ff00'),
KML.width('4'),
),
KML.PolyStyle(
KML.color('400000ff'),
),
KML.BalloonStyle(
KML.text(('<script type="text/javascript" src="http://dygraphs.com/dygraph-combined.js">'
'</script>'
'<div id="graphdiv"></div>'
'<script type="text/javascript">'
'g = new Dygraph('
'document.getElementById("graphdiv"),'
'$[csv_data],'
'{{'
'ylabel: \'{param} [{units}]\','
'legend: \'always\''
'}}'
');'
'</script>').format(
param=meta.variable.name,
units=meta.variable.units,
))
),
id="style-highlight",
),
#Time Folders will be appended here
),
)
try:
s = db.Session()
# create a folder to hold the geometries
geom_fld = KML.Folder(
KML.name('Geometries'),
)
for geom in s.query(db.Geometry).all():
coord_list = geom.as_kml_coords()
multigeom_args = [
KML.Polygon(
KML.tessellate('1'),
KML.outerBoundaryIs(
KML.LinearRing(
KML.coordinates(coords.text),
),
),
) for coords in coord_list
]
# TODO: sort values by time to speed loading
values = ['{0},{1}'.format(datetime.strftime(val.time, "%Y-%m-%d %H:%M:%S"),val.value) for val in geom.values]
pm = KML.Placemark(
KML.name('Geometry'),
KML.ExtendedData(
KML.Data(
KML.value('"Date,{param}\\n{data}"'.format(
param=meta.variable.name,
data='\\n'.join(values))
),
name="csv_data",
),
),
KML.description(''),
KML.styleUrl('#smap'),
KML.MultiGeometry(*multigeom_args),
)
geom_fld.append(pm)
doc.Document.append(geom_fld)
# for time in s.query(db.Time).all():
# # create a folder for the time
# timefld = KML.Folder(
## KML.Style(
## KML.ListStyle(
## KML.listItemType('checkHideChildren'),
## KML.bgColor('00ffffff'),
## KML.maxSnippetLines('2'),
## ),
## ),
# KML.name(time.as_xml_date()),
# # placemarks will be appended here
# )
# for val in time.values:
# poly_desc = (
# '<table border="1">'
# '<tbody>'
# '<tr><th>Variable</th><td>{variable}</td></tr>'
# '<tr><th>Date/Time (UTC)</th><td>{time}</td></tr>'
# '<tr><th>Value</th><td>{value:.{digits}f} {units}</td></tr>'
# '</tbody>'
# '</table>'
# ).format(
# variable=meta.variable.name,
# time=val.time_ref.as_xml_date(),
# value=val.value,
# digits=3,
# units=meta.variable.units,
# )
#
# coords = val.geometry.as_kml_coords()
# timefld.append(
# KML.Placemark(
# KML.name('Geometry'),
# KML.description(poly_desc),
# KML.styleUrl('#smap'),
# KML.Polygon(
# KML.tessellate('1'),
# KML.outerBoundaryIs(
# KML.LinearRing(
# KML.coordinates(coords),
# ),
# ),
# ),
# )
# )
# doc.Document.append(timefld)
# pass
finally:
s.close()
# return the pretty print string
output = etree.tostring(doc, pretty_print=True)
# Unescape newline characters
#return(output.replace('&#10;','\\n'))
return(output)
class KmzConverter(KmlConverter):
def _response_(self,payload):
'''Get the KML response and zip it up'''
# logger.info("starting KmzConverter._response_()...")
#kml = super(KmzConverter,self)._response_(payload)
iobuffer = io.BytesIO()
zf = zipfile.ZipFile(
iobuffer,
mode='w',
compression=zipfile.ZIP_DEFLATED,
)
try:
zf.writestr('doc.kml',payload)
finally:
zf.close()
iobuffer.flush()
zip_stream = iobuffer.getvalue()
iobuffer.close()
# logger.info("...ending KmzConverter._response_()")
return(zip_stream)
|
|
#!/usr/bin/env python
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
"""\
__SCRIPTNAME__: checkout utility for sparse Subversion working copies
Usage: 1. __SCRIPTNAME__ checkout VIEWSPEC-FILE TARGET-DIR
2. __SCRIPTNAME__ examine VIEWSPEC-FILE
3. __SCRIPTNAME__ help
4. __SCRIPTNAME__ help-format
VIEWSPEC-FILE is the path of a file whose contents describe a
Subversion sparse checkout layout, or '-' if that description should
be read from stdin. TARGET-DIR is the working copy directory created
by this script as it checks out the specified layout.
1. Parse VIEWSPEC-FILE and execute the necessary 'svn' command-line
operations to build out a working copy tree at TARGET-DIR.
2. Parse VIEWSPEC-FILE and dump out a human-readable representation of
the tree described in the specification.
3. Show this usage message.
4. Show information about the file format this program expects.
"""
FORMAT_HELP = """\
Viewspec File Format
====================
The viewspec file format used by this tool is a collection of headers
(using the typical one-per-line name:value syntax), followed by an
empty line, followed by a set of one-per-line rules.
The headers must contain at least the following:
Format - version of the viewspec format used throughout the file
Url - base URL applied to all rules; tree checkout location
The following headers are optional:
Revision - version of the tree items to checkout
Following the headers and blank line separator are the path rules.
The rules are list of URLs -- relative to the base URL stated in the
headers -- with optional annotations to specify the desired working
copy depth of each item:
PATH/** - checkout PATH and all its children to infinite depth
PATH/* - checkout PATH and its immediate children
PATH/~ - checkout PATH and its file children
PATH - checkout PATH non-recursively
By default, the top-level directory (associated with the base URL) is
checked out with empty depth. You can override this using the special
rules '**', '*', and '~' as appropriate.
It is not necessary to explicitly list the parent directories of each
path associated with a rule. If the parent directory of a given path
is not "covered" by a previous rule, it will be checked out with empty
depth.
Examples
========
Here's a sample viewspec file:
Format: 1
Url: http://svn.apache.org/repos/asf/subversion
Revision: 36366
trunk/**
branches/1.5.x/**
branches/1.6.x/**
README
branches/1.4.x/STATUS
branches/1.4.x/subversion/tests/cmdline/~
You may wish to version your viewspec files. If so, you can use this
script in conjunction with 'svn cat' to fetch, parse, and act on a
versioned viewspec file:
$ svn cat http://svn.example.com/specs/dev-spec.txt |
__SCRIPTNAME__ checkout - /path/to/target/directory
"""
#########################################################################
### Possible future improvements that could be made:
###
### - support for excluded paths (PATH!)
### - support for static revisions of individual paths (PATH@REV/**)
###
import sys
import os
import urllib
DEPTH_EMPTY = 'empty'
DEPTH_FILES = 'files'
DEPTH_IMMEDIATES = 'immediates'
DEPTH_INFINITY = 'infinity'
class TreeNode:
"""A representation of a single node in a Subversion sparse
checkout tree."""
def __init__(self, name, depth):
self.name = name # the basename of this tree item
self.depth = depth # its depth (one of the DEPTH_* values)
self.children = {} # its children (basename -> TreeNode)
def add_child(self, child_node):
child_name = child_node.name
        assert not self.children.has_key(child_name)
self.children[child_name] = child_node
def dump(self, recurse=False, indent=0):
sys.stderr.write(" " * indent)
sys.stderr.write("Path: %s (depth=%s)\n" % (self.name, self.depth))
if recurse:
child_names = self.children.keys()
child_names.sort(svn_path_compare_paths)
for child_name in child_names:
self.children[child_name].dump(recurse, indent + 2)
class SubversionViewspec:
"""A representation of a Subversion sparse checkout specification."""
def __init__(self, base_url, revision, tree):
self.base_url = base_url # base URL of the checkout
self.revision = revision # revision of the checkout (-1 == HEAD)
self.tree = tree # the top-most TreeNode item
def svn_path_compare_paths(path1, path2):
"""Compare PATH1 and PATH2 as paths, sorting depth-first-ily.
NOTE: Stolen unapologetically from Subversion's Python bindings
module svn.core."""
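    # Worked example (not in the original): these paths sort as
    #   'foo' < 'foo/bar' < 'foo-bar'
    # i.e. a path's children come before its "greater" siblings, even though
    # plain string comparison would put 'foo-bar' first ('-' < '/' in ASCII).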
    path1_len = len(path1)
    path2_len = len(path2)
min_len = min(path1_len, path2_len)
i = 0
# Are the paths exactly the same?
if path1 == path2:
return 0
# Skip past common prefix
while (i < min_len) and (path1[i] == path2[i]):
i = i + 1
# Children of paths are greater than their parents, but less than
# greater siblings of their parents
char1 = '\0'
char2 = '\0'
if (i < path1_len):
char1 = path1[i]
if (i < path2_len):
char2 = path2[i]
if (char1 == '/') and (i == path2_len):
return 1
if (char2 == '/') and (i == path1_len):
return -1
if (i < path1_len) and (char1 == '/'):
return -1
if (i < path2_len) and (char2 == '/'):
return 1
# Common prefix was skipped above, next character is compared to
# determine order
return cmp(char1, char2)
def parse_viewspec_headers(viewspec_fp):
"""Parse the headers from the viewspec file, return them as a
dictionary mapping header names to values."""
headers = {}
while 1:
line = viewspec_fp.readline().strip()
if not line:
break
name, value = [x.strip() for x in line.split(':', 1)]
headers[name] = value
return headers
def parse_viewspec(viewspec_fp):
"""Parse the viewspec file, returning a SubversionViewspec object
that represents the specification."""
headers = parse_viewspec_headers(viewspec_fp)
format = headers['Format']
assert format == '1'
base_url = headers['Url']
revision = int(headers.get('Revision', -1))
root_depth = DEPTH_EMPTY
rules = {}
while 1:
line = viewspec_fp.readline()
if not line:
break
line = line.rstrip()
# These are special rules for the top-most dir; don't fall thru.
if line == '**':
root_depth = DEPTH_INFINITY
continue
elif line == '*':
root_depth = DEPTH_IMMEDIATES
continue
elif line == '~':
root_depth = DEPTH_FILES
continue
# These are the regular per-path rules.
elif line[-3:] == '/**':
depth = DEPTH_INFINITY
path = line[:-3]
elif line[-2:] == '/*':
depth = DEPTH_IMMEDIATES
path = line[:-2]
elif line[-2:] == '/~':
depth = DEPTH_FILES
path = line[:-2]
else:
depth = DEPTH_EMPTY
path = line
# Add our rule to the set thereof.
assert not rules.has_key(path)
rules[path] = depth
tree = TreeNode('', root_depth)
paths = rules.keys()
paths.sort(svn_path_compare_paths)
for path in paths:
depth = rules[path]
path_parts = filter(None, path.split('/'))
tree_ptr = tree
for part in path_parts[:-1]:
child_node = tree_ptr.children.get(part, None)
if not child_node:
child_node = TreeNode(part, DEPTH_EMPTY)
tree_ptr.add_child(child_node)
tree_ptr = child_node
tree_ptr.add_child(TreeNode(path_parts[-1], depth))
return SubversionViewspec(base_url, revision, tree)
def checkout_tree(base_url, revision, tree_node, target_dir, is_top=True):
"""Checkout from BASE_URL, and into TARGET_DIR, the TREE_NODE
sparse checkout item. IS_TOP is set iff this node represents the
root of the checkout tree. REVISION is the revision to checkout,
or -1 if checking out HEAD."""
depth = tree_node.depth
revision_str = ''
if revision != -1:
revision_str = "--revision=%d " % (revision)
if is_top:
os.system('svn checkout "%s" "%s" --depth=%s %s'
% (base_url, target_dir, depth, revision_str))
else:
os.system('svn update "%s" --set-depth=%s %s'
% (target_dir, depth, revision_str))
child_names = tree_node.children.keys()
child_names.sort(svn_path_compare_paths)
for child_name in child_names:
checkout_tree(base_url + '/' + child_name,
revision,
tree_node.children[child_name],
os.path.join(target_dir, urllib.unquote(child_name)),
False)
def checkout_spec(viewspec, target_dir):
"""Checkout the view specification VIEWSPEC into TARGET_DIR."""
checkout_tree(viewspec.base_url,
viewspec.revision,
viewspec.tree,
target_dir)
def usage_and_exit(errmsg=None):
stream = errmsg and sys.stderr or sys.stdout
msg = __doc__.replace("__SCRIPTNAME__", os.path.basename(sys.argv[0]))
stream.write(msg)
if errmsg:
stream.write("ERROR: %s\n" % (errmsg))
sys.exit(errmsg and 1 or 0)
def main():
argc = len(sys.argv)
if argc < 2:
usage_and_exit('Not enough arguments.')
subcommand = sys.argv[1]
if subcommand == 'help':
usage_and_exit()
elif subcommand == 'help-format':
msg = FORMAT_HELP.replace("__SCRIPTNAME__",
os.path.basename(sys.argv[0]))
sys.stdout.write(msg)
elif subcommand == 'examine':
if argc < 3:
usage_and_exit('No viewspec file specified.')
fp = (sys.argv[2] == '-') and sys.stdin or open(sys.argv[2], 'r')
viewspec = parse_viewspec(fp)
sys.stdout.write("Url: %s\n" % (viewspec.base_url))
revision = viewspec.revision
if revision != -1:
sys.stdout.write("Revision: %s\n" % (revision))
else:
sys.stdout.write("Revision: HEAD\n")
sys.stdout.write("\n")
viewspec.tree.dump(True)
elif subcommand == 'checkout':
if argc < 3:
usage_and_exit('No viewspec file specified.')
if argc < 4:
usage_and_exit('No target directory specified.')
fp = (sys.argv[2] == '-') and sys.stdin or open(sys.argv[2], 'r')
checkout_spec(parse_viewspec(fp), sys.argv[3])
else:
usage_and_exit('Unknown subcommand "%s".' % (subcommand))
if __name__ == "__main__":
main()
|
|
"""Support for monitoring OctoPrint 3D printers."""
from datetime import timedelta
import logging
from pyoctoprintapi import ApiError, OctoprintClient, PrinterOffline
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_API_KEY,
CONF_BINARY_SENSORS,
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PATH,
CONF_PORT,
CONF_SENSORS,
CONF_SSL,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.util import slugify as util_slugify
import homeassistant.util.dt as dt_util
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
def has_all_unique_names(value):
"""Validate that printers have an unique name."""
names = [util_slugify(printer["name"]) for printer in value]
vol.Schema(vol.Unique())(names)
return value
def ensure_valid_path(value):
"""Validate the path, ensuring it starts and ends with a /."""
vol.Schema(cv.string)(value)
if value[0] != "/":
value = f"/{value}"
if value[-1] != "/":
value += "/"
return value
PLATFORMS = ["binary_sensor", "sensor"]
DEFAULT_NAME = "Octoprint"
CONF_NUMBER_OF_TOOLS = "number_of_tools"
CONF_BED = "bed"
BINARY_SENSOR_TYPES = [
"Printing",
"Printing Error",
]
BINARY_SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(
CONF_MONITORED_CONDITIONS, default=list(BINARY_SENSOR_TYPES)
): vol.All(cv.ensure_list, [vol.In(BINARY_SENSOR_TYPES)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
SENSOR_TYPES = [
"Temperatures",
"Current State",
"Job Percentage",
"Time Remaining",
"Time Elapsed",
]
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_PORT, default=80): cv.port,
vol.Optional(CONF_PATH, default="/"): ensure_valid_path,
                            # The following values are no longer used in the configuration of the integration
                            # and are kept here for historical purposes
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_NUMBER_OF_TOOLS, default=0
): cv.positive_int,
vol.Optional(CONF_BED, default=False): cv.boolean,
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
vol.Optional(
CONF_BINARY_SENSORS, default={}
): BINARY_SENSOR_SCHEMA,
}
)
],
has_all_unique_names,
)
},
),
extra=vol.ALLOW_EXTRA,
)
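# Illustrative legacy YAML that this schema would accept (placeholder values,
# assuming DOMAIN resolves to "octoprint"); async_setup below forwards such an
# entry to the config-flow import:
#
#   octoprint:
#     - host: 192.168.1.50
#       api_key: YOUR_API_KEY
#       port: 5000
#       ssl: false
#       path: /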
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the OctoPrint component."""
if DOMAIN not in config:
return True
domain_config = config[DOMAIN]
for conf in domain_config:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_API_KEY: conf[CONF_API_KEY],
CONF_HOST: conf[CONF_HOST],
CONF_PATH: conf[CONF_PATH],
CONF_PORT: conf[CONF_PORT],
CONF_SSL: conf[CONF_SSL],
},
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up OctoPrint from a config entry."""
if DOMAIN not in hass.data:
hass.data[DOMAIN] = {}
websession = async_get_clientsession(hass)
client = OctoprintClient(
entry.data[CONF_HOST],
websession,
entry.data[CONF_PORT],
entry.data[CONF_SSL],
entry.data[CONF_PATH],
)
client.set_api_key(entry.data[CONF_API_KEY])
coordinator = OctoprintDataUpdateCoordinator(hass, client, entry.entry_id, 30)
await coordinator.async_config_entry_first_refresh()
hass.data[DOMAIN][entry.entry_id] = {"coordinator": coordinator, "client": client}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class OctoprintDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching Octoprint data."""
def __init__(
self,
hass: HomeAssistant,
octoprint: OctoprintClient,
config_entry_id: str,
interval: int,
) -> None:
"""Initialize."""
super().__init__(
hass,
_LOGGER,
name=f"octoprint-{config_entry_id}",
update_interval=timedelta(seconds=interval),
)
self._octoprint = octoprint
self._printer_offline = False
self.data = {"printer": None, "job": None, "last_read_time": None}
async def _async_update_data(self):
"""Update data via API."""
printer = None
try:
job = await self._octoprint.get_job_info()
except ApiError as err:
raise UpdateFailed(err) from err
# If octoprint is on, but the printer is disconnected
# printer will return a 409, so continue using the last
# reading if there is one
try:
printer = await self._octoprint.get_printer_info()
except PrinterOffline:
if not self._printer_offline:
_LOGGER.error("Unable to retrieve printer information: Printer offline")
self._printer_offline = True
except ApiError as err:
raise UpdateFailed(err) from err
else:
self._printer_offline = False
return {"job": job, "printer": printer, "last_read_time": dt_util.utcnow()}
|
|
"""Base for all node resource services.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import collections
import contextlib
import errno
import functools
import glob
import importlib
import io
import logging
import os
import select
import shutil
import socket
import struct
import tempfile
import time
import six
from treadmill import dirwatch
from treadmill import exc
from treadmill import fs
from treadmill import logcontext as lc
from treadmill import utils
from treadmill import watchdog
from treadmill import yamlwrapper as yaml
from treadmill.syscall import eventfd
_LOGGER = lc.ContainerAdapter(logging.getLogger(__name__))
#: Name of the directory holding the resources requests
_RSRC_DIR = 'resources'
#: Name of request payload file
_REQ_FILE = 'request.yml'
#: Name of reply payload file
_REP_FILE = 'reply.yml'
#: Name of service status file
_STATUS_SOCK = 'status.sock'
def _wait_for_file(filename, timeout=60 * 60):
"""Wait at least ``timeout`` seconds for a file to appear or be modified.
:param ``int`` timeout:
Minimum amount of seconds to wait for the file.
:returns ``bool``:
``True`` if there was an event, ``False`` otherwise (timeout).
"""
if timeout is None:
timeout = 60 * 60
elif timeout == 0:
return os.path.exists(filename)
filedir = os.path.dirname(filename)
# TODO: Fine tune the watcher mask for efficiency.
watcher = dirwatch.DirWatcher(filedir)
now = time.time()
end_time = now + timeout
while not os.path.exists(filename):
if watcher.wait_for_events(timeout=max(0, end_time - now)):
watcher.process_events()
now = time.time()
if now > end_time:
return False
return True
class ResourceServiceError(exc.TreadmillError):
"""Base Resource Service error.
"""
__slots__ = ()
def __init__(self, message):
super(ResourceServiceError, self).__init__(message)
class ResourceServiceRequestError(ResourceServiceError):
"""Resource Service Request error.
"""
    __slots__ = ('request',)
def __init__(self, message, request):
super(ResourceServiceRequestError, self).__init__(message)
self.request = request
class ResourceServiceTimeoutError(ResourceServiceError, socket.timeout):
"""Resource Service timeout.
"""
__slots__ = ()
def __init__(self, message):
super(ResourceServiceTimeoutError, self).__init__(message)
class ResourceServiceClient(object):
"""Client class for all Treadmill services.
/apps/<container>/rsrc/req-<svc_name>/
request.yml
reply.yml
svc_req_id
"""
_REQ_UID_FILE = 'svc_req_id'
__slots__ = (
'_serviceinst',
'_clientdir',
)
def __init__(self, serviceinst, clientdir):
self._serviceinst = serviceinst
fs.mkdir_safe(clientdir)
self._clientdir = os.path.realpath(clientdir)
def put(self, rsrc_id, rsrc_data):
"""Request creation/update of a resource.
:param `str` rsrc_id:
Unique identifier for the requested resource.
:param `str` rsrc_data:
(New) Parameters for the requested resource.
"""
req_dir = self._req_dirname(rsrc_id)
fs.mkdir_safe(req_dir)
with io.open(os.path.join(req_dir, _REQ_FILE), 'w') as f:
os.fchmod(f.fileno(), 0o644)
yaml.dump(rsrc_data,
explicit_start=True, explicit_end=True,
default_flow_style=False,
stream=f)
req_uuid_file = os.path.join(req_dir, self._REQ_UID_FILE)
try:
with io.open(req_uuid_file) as f:
svc_req_uuid = f.read().strip()
except IOError as err:
if err.errno == errno.ENOENT:
svc_req_uuid = None
else:
raise
with lc.LogContext(_LOGGER, rsrc_id):
if svc_req_uuid is None:
try:
# New request
svc_req_uuid = self._serviceinst.clt_new_request(rsrc_id,
req_dir)
# Write down the UUID
with io.open(req_uuid_file, 'w') as f:
f.write(svc_req_uuid)
os.fchmod(f.fileno(), 0o644)
except OSError:
# Error registration failed, delete the request.
_LOGGER.exception('Unable to submit request')
shutil.rmtree(req_dir)
else:
self._serviceinst.clt_update_request(svc_req_uuid)
def delete(self, rsrc_id):
"""Delete an existing resource.
:param `str` rsrc_id:
Unique identifier for the requested resource.
"""
with lc.LogContext(_LOGGER, rsrc_id) as log:
req_dir = self._req_dirname(rsrc_id)
try:
with io.open(os.path.join(req_dir, self._REQ_UID_FILE)) as f:
svc_req_uuid = f.read().strip()
except IOError as err:
if err.errno == errno.ENOENT:
log.logger.warning('Resource %r does not exist', rsrc_id)
return
raise
self._serviceinst.clt_del_request(svc_req_uuid)
os.rename(
req_dir,
self._bck_dirname(svc_req_uuid)
)
def get(self, rsrc_id):
"""Get the result of a resource request.
:param `str` rsrc_id:
Unique identifier for the requested resource.
:raises ``ResourceServiceRequestError``:
If the request resulted in error.
"""
try:
res = self.wait(rsrc_id, timeout=0)
except ResourceServiceTimeoutError:
res = None
return res
def wait(self, rsrc_id, timeout=None):
"""Wait for a requested resource to be ready.
:param `str` rsrc_id:
Unique identifier for the requested resource.
:raises ``ResourceServiceRequestError``:
If the request resulted in error.
:raises ``ResourceServiceTimeoutError``:
If the request was not available before timeout.
"""
req_dir = self._req_dirname(rsrc_id)
rep_file = os.path.join(req_dir, _REP_FILE)
if not _wait_for_file(rep_file, timeout):
raise ResourceServiceTimeoutError(
'Resource %r not available in time' % rsrc_id
)
try:
with io.open(rep_file) as f:
reply = yaml.load(stream=f)
        except (IOError, OSError) as err:
            if err.errno == errno.ENOENT:
                raise ResourceServiceTimeoutError(
                    'Resource %r not available in time' % rsrc_id
                )
            raise
if isinstance(reply, dict) and '_error' in reply:
raise ResourceServiceRequestError(reply['_error']['why'],
reply['_error']['input'])
return reply
def status(self, timeout=30):
"""Query the status of the resource service.
"""
return self._serviceinst.status(timeout=timeout)
def _req_dirname(self, rsrc_id):
"""Request directory name for a given resource id.
:param `str` rsrc_id:
Unique identifier for the requested resource.
"""
req_dir_name = 'req-{name}-{rsrc_id}'.format(
name=self._serviceinst.name,
rsrc_id=rsrc_id
)
req_dir = os.path.join(self._clientdir, req_dir_name)
return req_dir
def _bck_dirname(self, req_uuid):
"""Return a unique backup directory name.
"""
bck_dir_name = 'bck{ts}-{name}-{req_uuid}'.format(
name=self._serviceinst.name,
req_uuid=req_uuid,
ts=int(time.time()),
)
bck_dir = os.path.join(self._clientdir, bck_dir_name)
return bck_dir
class ResourceService(object):
"""Server class for all Treadmill services.
/service_dir/resources/<containerid>-<uid>/ ->
/apps/<containerid>/rsrc/req-<svc_name>/
/apps/<container>/rsrc/<svc_name>/
request.yml
reply.yml
svc_req_id
"""
__slots__ = (
'_is_dead',
'_dir',
'_rsrc_dir',
'_service_impl',
'_service_class',
'_service_name',
'_io_eventfd',
)
_IO_EVENT_PENDING = struct.pack('@Q', 1)
def __init__(self, service_dir, impl):
fs.mkdir_safe(service_dir)
self._dir = os.path.realpath(service_dir)
self._rsrc_dir = os.path.join(self._dir, _RSRC_DIR)
fs.mkdir_safe(self._rsrc_dir)
self._is_dead = False
self._service_impl = impl
self._service_class = None
self._io_eventfd = None
# Figure out the service's name
if isinstance(self._service_impl, six.string_types):
svc_name = self._service_impl.rsplit('.', 1)[-1]
else:
svc_name = self._service_impl.__name__
self._service_name = svc_name
@property
def name(self):
"""Name of the service."""
return self._service_name
@property
def status_sock(self):
"""status socket of the service.
"""
return os.path.join(self._dir, _STATUS_SOCK)
def make_client(self, client_dir):
"""Create a client using `clientdir` as request dir location.
"""
return ResourceServiceClient(self, client_dir)
def status(self, timeout=30):
"""Query the status of the resource service.
:param ``float`` timeout:
Wait at least timeout seconds for the service to reply.
:raises ``ResourceServiceTimeoutError``:
If the requested service does not come up before timeout.
:raises ``socket.error``:
If there is a communication error with the service.
"""
backoff = 0
while backoff <= (timeout / 2):
with contextlib.closing(socket.socket(socket.AF_UNIX,
type=socket.SOCK_STREAM,
proto=0)) as status_socket:
try:
status_socket.connect(self.status_sock)
status = yaml.load(stream=status_socket.makefile('r'))
except socket.error as err:
if err.errno in (errno.ECONNREFUSED, errno.ENOENT):
status = None
else:
raise
if status is not None:
break
_LOGGER.info('Waiting for service %r to become available',
self.name)
# Implement a backoff mechanism
backoff += (backoff or 1)
time.sleep(backoff)
else:
raise ResourceServiceTimeoutError(
'Service %r timed out' % (self.name),
)
return status
def get(self, req_id):
"""Read the reply of a given request.
"""
rep_file = os.path.join(self._rsrc_dir, req_id, _REP_FILE)
with io.open(rep_file) as f:
reply = yaml.load(stream=f)
if isinstance(reply, dict) and '_error' in reply:
raise ResourceServiceRequestError(reply['_error']['why'],
reply['_error']['input'])
return reply
def run(self, watchdogs_dir, *impl_args, **impl_kwargs):
"""Run the service."""
# Load the implementation
if self._service_class is None:
self._service_class = self._load_impl()
impl = self._service_class(*impl_args, **impl_kwargs)
# Setup the watchdog
watchdogs = watchdog.Watchdog(os.path.realpath(watchdogs_dir))
watchdog_lease = watchdogs.create(
name='svc-{svc_name}'.format(svc_name=self.name),
timeout='{hb:d}s'.format(hb=impl.WATCHDOG_HEARTBEAT_SEC),
content='Service %r failed' % self.name
)
# Create the status socket
ss = self._create_status_socket()
# Run initialization
impl.initialize(self._dir)
watcher = dirwatch.DirWatcher(self._rsrc_dir)
# Call all the callbacks with the implementation instance
watcher.on_created = functools.partial(self._on_created, impl)
watcher.on_deleted = functools.partial(self._on_deleted, impl)
# NOTE: A modified request is treated as a brand new request
watcher.on_modified = functools.partial(self._on_created, impl)
self._io_eventfd = eventfd.eventfd(0, eventfd.EFD_CLOEXEC)
# Before starting, check the request directory
svcs = self._check_requests()
# and "fake" a created event on all the existing requests
for existing_svcs in svcs:
self._on_created(impl, existing_svcs)
# Before starting, make sure backend state and service state are
# synchronized.
impl.synchronize()
# Report service status
status_info = {}
status_info.update(impl.report_status())
# Setup the poll object
loop_poll = select.poll()
loop_callbacks = {}
base_event_handlers = [
(
self._io_eventfd,
select.POLLIN,
functools.partial(
self._handle_queued_io_events,
watcher=watcher,
impl=impl,
)
),
(
watcher.inotify,
select.POLLIN,
functools.partial(
self._handle_io_events,
watcher=watcher,
impl=impl,
)
),
(
ss,
select.POLLIN,
functools.partial(
self._publish_status,
status_socket=ss,
status_info=status_info,
)
),
]
        # Initial collection of the implementation's event handlers
impl_event_handlers = impl.event_handlers()
self._update_poll_registration(
loop_poll,
loop_callbacks,
base_event_handlers + impl_event_handlers,
)
loop_timeout = impl.WATCHDOG_HEARTBEAT_SEC / 2
while not self._is_dead:
# Check for events
updated = self._run_events(
loop_poll,
loop_timeout,
loop_callbacks,
)
if updated:
# Report service status
status_info.clear()
status_info.update(impl.report_status())
# Update poll registration if needed
impl_event_handlers = impl.event_handlers()
self._update_poll_registration(
loop_poll, loop_callbacks,
base_event_handlers + impl_event_handlers,
)
# Clean up stale requests
self._check_requests()
# Heartbeat
watchdog_lease.heartbeat()
        _LOGGER.info('Shutting down %r service', self.name)
# Remove the service heartbeat
watchdog_lease.remove()
def _publish_status(self, status_socket, status_info):
"""Publish service status on the incomming connection on socket
"""
with contextlib.closing(status_socket.accept()[0]) as clt:
clt_stream = clt.makefile(mode='w')
try:
yaml.dump(status_info,
explicit_start=True, explicit_end=True,
default_flow_style=False,
stream=clt_stream)
clt_stream.flush()
except socket.error as err:
if err.errno == errno.EPIPE:
pass
else:
raise
@staticmethod
def _run_events(loop_poll, loop_timeout, loop_callbacks):
"""Wait for events up to `loop_timeout` and execute each of the
registered handlers.
:returns ``bool``:
            True if any of the callbacks returned True
"""
pending_callbacks = []
try:
# poll timeout is in milliseconds
for (fd, _event) in loop_poll.poll(loop_timeout * 1000):
fd_data = loop_callbacks[fd]
_LOGGER.debug('Event on %r: %r', fd, fd_data)
pending_callbacks.append(
fd_data['callback']
)
except select.error as err:
# Ignore signal interruptions
if six.PY2:
# pylint: disable=W1624,E1136,indexing-exception
if err[0] != errno.EINTR:
raise
else:
if err.errno != errno.EINTR:
raise
results = [
callback()
for callback in pending_callbacks
]
return any(results)
@staticmethod
def _update_poll_registration(poll, poll_callbacks, handlers):
"""Setup the poll object and callbacks based on handlers.
"""
def _normalize_fd(filedescriptor):
"""Return the fd number or filedescriptor.
"""
if not isinstance(filedescriptor, int):
fd = filedescriptor.fileno()
else:
fd = filedescriptor
return fd
handlers = [
(_normalize_fd(fd), events, callback)
for (fd, events, callback) in handlers
]
for (fd, events, callback) in handlers:
fd_data = {'callback': callback, 'events': events}
if fd not in poll_callbacks:
poll.register(fd, events)
poll_callbacks[fd] = fd_data
_LOGGER.debug('Registered %r: %r', fd, fd_data)
elif poll_callbacks[fd] != fd_data:
poll.modify(fd, events)
poll_callbacks[fd] = fd_data
_LOGGER.debug('Updated %r: %r', fd, fd_data)
all_fds = set(handler[0] for handler in handlers)
for fd in list(poll_callbacks):
if fd not in all_fds:
_LOGGER.debug('Unregistered %r: %r', fd, poll_callbacks[fd])
poll.unregister(fd)
del poll_callbacks[fd]
def _load_impl(self):
"""Load the implementation class of the service.
"""
if isinstance(self._service_impl, six.string_types):
(module_name, cls_name) = self._service_impl.rsplit('.', 1)
impl_module = importlib.import_module(module_name)
impl_class = getattr(impl_module, cls_name)
else:
impl_class = self._service_impl
assert issubclass(impl_class, BaseResourceServiceImpl), \
'Invalid implementation %r' % impl_class
return impl_class
def clt_new_request(self, req_id, req_data_dir):
"""Add a request data dir as `req_id` to the service.
This should only be called by the client instance.
"""
svc_req_lnk = os.path.join(self._rsrc_dir, req_id)
_LOGGER.info('Registering %r: %r -> %r',
req_id, svc_req_lnk, req_data_dir)
# NOTE(boysson): We use a temporary file + rename behavior to override
# any potential old symlinks.
tmpsymlink = tempfile.mktemp(dir=self._rsrc_dir,
prefix='.tmp' + req_id)
os.symlink(req_data_dir, tmpsymlink)
os.rename(tmpsymlink, svc_req_lnk)
return req_id
def clt_del_request(self, req_id):
"""Remove an existing request.
This should only be called by the client instance.
"""
svc_req_lnk = os.path.join(self._rsrc_dir, req_id)
        _LOGGER.info('Unregistering %r: %r', req_id, svc_req_lnk)
fs.rm_safe(svc_req_lnk)
return req_id
def clt_update_request(self, req_id):
"""Update an existing request.
This should only be called by the client instance.
"""
svc_req_lnk = os.path.join(self._rsrc_dir, req_id)
_LOGGER.debug('Updating %r: %r',
req_id, svc_req_lnk)
# Remove any reply if it exists
fs.rm_safe(os.path.join(svc_req_lnk, _REP_FILE))
# NOTE(boysson): This does the equivalent of a touch on the symlink
try:
os.lchown(
svc_req_lnk,
os.getuid(),
os.getgid()
)
except OSError as err:
if err.errno != errno.ENOENT:
raise
def _check_requests(self):
"""Check each existing request and remove stale ones.
"""
svcs = collections.deque()
for svc in glob.glob(os.path.join(self._rsrc_dir, '*')):
try:
os.stat(svc)
svcs.append(svc)
except OSError as err:
if err.errno == errno.ENOENT:
_LOGGER.warning('Deleting stale request: %r', svc)
fs.rm_safe(svc)
else:
raise
return svcs
def _create_status_socket(self):
"""Create a listening socket to process status requests.
"""
fs.rm_safe(self.status_sock)
status_socket = socket.socket(
family=socket.AF_UNIX,
type=socket.SOCK_STREAM,
proto=0
)
status_socket.bind(self.status_sock)
os.chmod(self.status_sock, 0o666)
status_socket.listen(5)
return status_socket
def _handle_queued_io_events(self, watcher, impl):
"""Process queued IO events.
        Base service IO event handler (dispatches to on_created/on_deleted).
:returns ``bool``:
``True`` if any of the event handlers returns ``True``.
"""
        # Always start by clearing the IO event fd. We will reset it below if
        # needed (there are always 8 bytes in an eventfd).
os.read(self._io_eventfd, 8)
return self._handle_io_events(watcher=watcher, impl=impl, resume=True)
def _handle_io_events(self, watcher, impl, resume=False):
"""Process IO events.
        Base service IO event handler (dispatches to on_created/on_deleted).
:returns ``bool``:
``True`` if any of the event handlers returns ``True``.
"""
io_res = watcher.process_events(
max_events=impl.MAX_REQUEST_PER_CYCLE,
resume=resume
)
# Check if there were more events to process
if io_res and io_res[-1][0] == dirwatch.DirWatcherEvent.MORE_PENDING:
_LOGGER.debug('More requests events pending')
os.write(self._io_eventfd, self._IO_EVENT_PENDING)
return any(
[
callback_res
for (_, _, callback_res) in
io_res
]
)
def _on_created(self, impl, filepath):
"""Private handler for request creation events.
"""
# Avoid triggering on changes to the service directory itself.
if filepath == self._rsrc_dir:
return
req_id = os.path.basename(filepath)
        # Avoid triggering on temporary files
if req_id[0] == '.':
return
req_file = os.path.join(filepath, _REQ_FILE)
rep_file = os.path.join(filepath, _REP_FILE)
try:
with io.open(req_file) as f:
req_data = yaml.load(stream=f)
except IOError as err:
if (err.errno == errno.ENOENT or
err.errno == errno.ENOTDIR):
_LOGGER.exception('Removing invalid request: %r', req_id)
fs.rm_safe(filepath)
return
raise
try:
# TODO: We should also validate the req_id format
utils.validate(req_data, impl.PAYLOAD_SCHEMA)
res = impl.on_create_request(req_id, req_data)
_LOGGER.debug('created %r', req_id)
except exc.InvalidInputError as err:
_LOGGER.error('Invalid request data: %r: %s', req_data, err)
res = {'_error': {'input': req_data, 'why': str(err)}}
except Exception as err: # pylint: disable=W0703
_LOGGER.exception('Unable to process request: %r %r:',
req_id, req_data)
res = {'_error': {'input': req_data, 'why': str(err)}}
if res is None:
# Request was not actioned
return False
_LOGGER.debug('created %r', req_id)
fs.write_safe(
rep_file,
lambda f: yaml.dump(
res, explicit_start=True, explicit_end=True,
default_flow_style=False, stream=f
),
permission=0o644
)
        # Return True if there was no error
return not bool(res.get('_error', False))
def _on_deleted(self, impl, filepath):
"""Private handler for request deletion events.
"""
req_id = os.path.basename(filepath)
        # Avoid triggering on temporary files
if req_id[0] == '.':
return
_LOGGER.debug('deleted %r', req_id)
# TODO: We should also validate the req_id format
res = impl.on_delete_request(req_id)
return res
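# Hedged aside (not part of the original module): the eventfd plumbing in
# ResourceService follows the usual pattern of writing an 8-byte counter to
# wake the poll loop and reading 8 bytes back to drain and re-arm it. A
# minimal sketch of that round trip, assuming the same `eventfd` wrapper used
# above:
def _eventfd_roundtrip_sketch():  # pragma: no cover
    """Illustrative only; mirrors _handle_queued_io_events/_handle_io_events."""
    fd = eventfd.eventfd(0, eventfd.EFD_CLOEXEC)
    os.write(fd, struct.pack('@Q', 1))               # signal pending work
    (count,) = struct.unpack('@Q', os.read(fd, 8))   # drain and re-arm
    os.close(fd)
    return count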
@six.add_metaclass(abc.ABCMeta)
class BaseResourceServiceImpl(object):
"""Base interface of Resource Service implementations.
"""
__slots__ = (
'_service_dir',
'_service_rsrc_dir',
)
MAX_REQUEST_PER_CYCLE = 5
PAYLOAD_SCHEMA = ()
WATCHDOG_HEARTBEAT_SEC = 60
def __init__(self):
self._service_dir = None
self._service_rsrc_dir = None
@abc.abstractmethod
def initialize(self, service_dir):
"""Service initialization."""
self._service_dir = service_dir
self._service_rsrc_dir = os.path.join(service_dir, _RSRC_DIR)
@abc.abstractmethod
def synchronize(self):
"""Assert that the internal state of the service matches the backend
state.
"""
return
@abc.abstractmethod
def report_status(self):
"""Record service status information.
Will be called at least once after initialization is complete.
"""
return {}
def event_handlers(self):
"""Returns a list of `(fileno, event, callback)` to be registered in
the event loop.
"""
return []
@abc.abstractmethod
def on_create_request(self, rsrc_id, rsrc_data):
"""Call back invoked when a new resource request is received.
Args:
rsrc_id ``str``: Unique resource identifier
rsrc_data ``dict``: Resource request metadata
Returns:
``dict``: Result communicated back to the requestor, ``None``,
``False`` or ``{}`` if no changes to the service were made.
"""
pass
@abc.abstractmethod
def on_delete_request(self, rsrc_id):
"""Call back invoked when a resource is deleted.
Arguments::
rsrc_id ``str``: Unique resource identifier
"""
pass
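# Hedged example (not part of the original module): a minimal sketch of how a
# concrete implementation and its client could fit together, based only on the
# interfaces defined above. The names `EchoResourceServiceImpl`,
# '/tmp/echo-svc', '/tmp/echo-watchdogs' and '/tmp/echo-client' are
# hypothetical placeholders, not Treadmill names.
class EchoResourceServiceImpl(BaseResourceServiceImpl):
    """Toy implementation that echoes the request payload back."""
    def initialize(self, service_dir):
        super(EchoResourceServiceImpl, self).initialize(service_dir)
    def synchronize(self):
        # Nothing to reconcile for this toy backend.
        return
    def report_status(self):
        return {'ready': True}
    def on_create_request(self, rsrc_id, rsrc_data):
        # The returned dict ends up in reply.yml for the client to read.
        return {'echo': rsrc_data}
    def on_delete_request(self, rsrc_id):
        return True
def _echo_service_sketch():  # pragma: no cover
    """Server/client wiring sketch; paths are assumptions, not executed."""
    svc = ResourceService(service_dir='/tmp/echo-svc',
                          impl=EchoResourceServiceImpl)
    # Server side (blocking loop): svc.run('/tmp/echo-watchdogs')
    clt = svc.make_client('/tmp/echo-client')
    # Client side, once a request exists under the given resource id:
    reply = clt.wait('resource-0', timeout=30)
    clt.delete('resource-0')
    return reply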
|
|
"""Test suite for 2to3's parser and grammar files.
This is the place to add tests for changes to 2to3's grammar, such as those
merging the grammars for Python 2 and 3. In addition to specific tests for
parts of the grammar we've changed, we also make sure we can parse the
test_grammar.py files from both Python 2 and Python 3.
"""
from __future__ import with_statement
# Testing imports
from . import support
from .support import driver, test_dir
# Python imports
import os
import sys
# Local imports
from lib2to3.pgen2 import tokenize
from ..pgen2.parse import ParseError
from lib2to3.pygram import python_symbols as syms
class TestDriver(support.TestCase):
def test_formfeed(self):
s = """print 1\n\x0Cprint 2\n"""
t = driver.parse_string(s)
self.assertEqual(t.children[0].children[0].type, syms.print_stmt)
self.assertEqual(t.children[1].children[0].type, syms.print_stmt)
class GrammarTest(support.TestCase):
def validate(self, code):
support.parse_string(code)
def invalid_syntax(self, code):
try:
self.validate(code)
except ParseError:
pass
else:
raise AssertionError("Syntax shouldn't have been valid")
class TestRaiseChanges(GrammarTest):
def test_2x_style_1(self):
self.validate("raise")
def test_2x_style_2(self):
self.validate("raise E, V")
def test_2x_style_3(self):
self.validate("raise E, V, T")
def test_2x_style_invalid_1(self):
self.invalid_syntax("raise E, V, T, Z")
def test_3x_style(self):
self.validate("raise E1 from E2")
def test_3x_style_invalid_1(self):
self.invalid_syntax("raise E, V from E1")
def test_3x_style_invalid_2(self):
self.invalid_syntax("raise E from E1, E2")
def test_3x_style_invalid_3(self):
self.invalid_syntax("raise from E1, E2")
def test_3x_style_invalid_4(self):
self.invalid_syntax("raise E from")
# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
class TestFunctionAnnotations(GrammarTest):
def test_1(self):
self.validate("""def f(x) -> list: pass""")
def test_2(self):
self.validate("""def f(x:int): pass""")
def test_3(self):
self.validate("""def f(*x:str): pass""")
def test_4(self):
self.validate("""def f(**x:float): pass""")
def test_5(self):
self.validate("""def f(x, y:1+2): pass""")
def test_6(self):
self.validate("""def f(a, (b:1, c:2, d)): pass""")
def test_7(self):
self.validate("""def f(a, (b:1, c:2, d), e:3=4, f=5, *g:6): pass""")
def test_8(self):
s = """def f(a, (b:1, c:2, d), e:3=4, f=5,
*g:6, h:7, i=8, j:9=10, **k:11) -> 12: pass"""
self.validate(s)
class TestExcept(GrammarTest):
def test_new(self):
s = """
try:
x
except E as N:
y"""
self.validate(s)
def test_old(self):
s = """
try:
x
except E, N:
y"""
self.validate(s)
# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testAtoms
class TestSetLiteral(GrammarTest):
def test_1(self):
self.validate("""x = {'one'}""")
def test_2(self):
self.validate("""x = {'one', 1,}""")
def test_3(self):
self.validate("""x = {'one', 'two', 'three'}""")
def test_4(self):
self.validate("""x = {2, 3, 4,}""")
class TestNumericLiterals(GrammarTest):
def test_new_octal_notation(self):
self.validate("""0o7777777777777""")
self.invalid_syntax("""0o7324528887""")
def test_new_binary_notation(self):
self.validate("""0b101010""")
self.invalid_syntax("""0b0101021""")
class TestClassDef(GrammarTest):
def test_new_syntax(self):
self.validate("class B(t=7): pass")
self.validate("class B(t, *args): pass")
self.validate("class B(t, **kwargs): pass")
self.validate("class B(t, *args, **kwargs): pass")
self.validate("class B(t, y=9, *args, **kwargs): pass")
class TestParserIdempotency(support.TestCase):
"""A cut-down version of pytree_idempotency.py."""
def test_all_project_files(self):
if sys.platform.startswith("win"):
# XXX something with newlines goes wrong on Windows.
return
for filepath in support.all_project_files():
with open(filepath, "rb") as fp:
encoding = tokenize.detect_encoding(fp.readline)[0]
self.assertTrue(encoding is not None,
"can't detect encoding for %s" % filepath)
with open(filepath, "r") as fp:
source = fp.read()
source = source.decode(encoding)
tree = driver.parse_string(source)
new = unicode(tree)
if diff(filepath, new, encoding):
self.fail("Idempotency failed: %s" % filepath)
def test_extended_unpacking(self):
driver.parse_string("a, *b, c = x\n")
driver.parse_string("[*a, b] = x\n")
driver.parse_string("(z, *y, w) = m\n")
driver.parse_string("for *z, m in d: pass\n")
class TestLiterals(GrammarTest):
def validate(self, s):
driver.parse_string(support.dedent(s) + "\n\n")
def test_multiline_bytes_literals(self):
s = """
md5test(b"\xaa" * 80,
(b"Test Using Larger Than Block-Size Key "
b"and Larger Than One Block-Size Data"),
"6f630fad67cda0ee1fb1f562db3aa53e")
"""
self.validate(s)
def test_multiline_bytes_tripquote_literals(self):
s = '''
b"""
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN">
"""
'''
self.validate(s)
def test_multiline_str_literals(self):
s = """
md5test("\xaa" * 80,
("Test Using Larger Than Block-Size Key "
"and Larger Than One Block-Size Data"),
"6f630fad67cda0ee1fb1f562db3aa53e")
"""
self.validate(s)
def diff(fn, result, encoding):
f = open("@", "w")
try:
f.write(result.encode(encoding))
finally:
f.close()
try:
fn = fn.replace('"', '\\"')
return os.system('diff -u "%s" @' % fn)
finally:
os.remove("@")
|
|
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import socket
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from six.moves import urllib
from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit.metadata.services import base
from cloudbaseinit.metadata.services import cloudstack
from cloudbaseinit.tests import testutils
CONF = cloudbaseinit_conf.CONF
class CloudStackTest(unittest.TestCase):
def setUp(self):
CONF.set_override('retry_count_interval', 0)
CONF.set_override('retry_count', 1)
self._service = self._get_service()
self._service._metadata_uri = "http://10.1.1.1/latest/meta-data/"
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
def _get_service(self, mock_os_util):
return cloudstack.CloudStack()
@mock.patch('cloudbaseinit.metadata.services.cloudstack.CloudStack'
'._http_request')
def test_test_api(self, mock_http_request):
url = '127.0.0.1'
mock_http_request.side_effect = [
'200 OK. Successfully!', # Request to Web Service
urllib.error.HTTPError(url=url, code=404, hdrs={}, fp=None,
msg='Testing 404 Not Found.'),
urllib.error.HTTPError(url=url, code=427, hdrs={}, fp=None,
msg='Testing 429 Too Many Requests.'),
base.NotExistingMetadataException(),
socket.error,
]
self.assertTrue(self._service._test_api(url))
for _ in range(3):
self.assertFalse(self._service._test_api(url))
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
@mock.patch('cloudbaseinit.metadata.services.cloudstack.CloudStack'
'._test_api')
def test_load(self, mock_test_api, mock_os_util):
self._service._osutils.get_dhcp_hosts_in_use = mock.Mock()
self._service._osutils.get_dhcp_hosts_in_use.side_effect = [
[(mock.sentinel.mac_address, '10.10.0.1'),
(mock.sentinel.mac_address, '10.10.0.2'),
(mock.sentinel.mac_address, '10.10.0.3')]
]
mock_test_api.side_effect = [False, False, False, True]
self.assertTrue(self._service.load())
self.assertEqual(4, mock_test_api.call_count)
@mock.patch('cloudbaseinit.metadata.services.cloudstack.CloudStack'
'._test_api')
def test_load_default(self, mock_test_api):
mock_test_api.side_effect = [True]
self._service._test_api = mock_test_api
self.assertTrue(self._service.load())
mock_test_api.assert_called_once_with(
CONF.cloudstack.metadata_base_url)
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
@mock.patch('cloudbaseinit.metadata.services.cloudstack.CloudStack'
'._test_api')
def test_load_fail(self, mock_test_api, mock_os_util):
self._service._osutils.get_dhcp_hosts_in_use.side_effect = [None]
mock_test_api.side_effect = [False]
self.assertFalse(self._service.load()) # No DHCP server was found.
mock_test_api.assert_called_once_with(
CONF.cloudstack.metadata_base_url)
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
@mock.patch('cloudbaseinit.metadata.services.cloudstack.CloudStack'
'._test_api')
def test_load_no_service(self, mock_test_api, mock_os_util):
self._service._osutils.get_dhcp_hosts_in_use = mock.Mock()
self._service._osutils.get_dhcp_hosts_in_use.side_effect = [
[(mock.sentinel.mac_address, CONF.cloudstack.metadata_base_url)]
]
mock_test_api.side_effect = [False, False]
# No service
self.assertFalse(self._service.load())
self.assertEqual(2, mock_test_api.call_count)
@mock.patch('cloudbaseinit.metadata.services.cloudstack.CloudStack'
'._get_data')
def test_get_cache_data(self, mock_get_data):
side_effect = mock.sentinel.metadata
mock_get_data.side_effect = [side_effect]
self._service._get_data = mock_get_data
response = self._service._get_cache_data(mock.sentinel.metadata)
self.assertEqual(mock.sentinel.metadata, response)
mock_get_data.assert_called_once_with(mock.sentinel.metadata)
mock_get_data.reset_mock()
response = self._service._get_cache_data(mock.sentinel.metadata)
self.assertEqual(mock.sentinel.metadata, response)
self.assertEqual(0, mock_get_data.call_count)
@mock.patch('cloudbaseinit.metadata.services.cloudstack.CloudStack'
'._get_cache_data')
def _test_cache_response(self, mock_get_cache_data, method, metadata,
decode=True):
mock_get_cache_data.side_effect = [mock.sentinel.response]
response = method()
self.assertEqual(mock.sentinel.response, response)
cache_assert = functools.partial(
mock_get_cache_data.assert_called_once_with,
metadata)
if decode:
cache_assert(decode=decode)
def test_get_instance_id(self):
self._test_cache_response(method=self._service.get_instance_id,
metadata='latest/meta-data/instance-id')
def test_get_host_name(self):
self._test_cache_response(method=self._service.get_host_name,
metadata='latest/meta-data/local-hostname')
def test_get_user_data(self):
self._test_cache_response(method=self._service.get_user_data,
metadata='latest/user-data', decode=False)
@mock.patch('cloudbaseinit.metadata.services.cloudstack.CloudStack'
'._get_cache_data')
def test_get_public_keys(self, mock_get_cache_data):
mock_get_cache_data.side_effect = [
"ssh-rsa AAAA\nssh-rsa BBBB\nssh-rsa CCCC",
"\n\nssh-rsa AAAA\n\nssh-rsa BBBB\n\nssh-rsa CCCC",
" \n \n ssh-rsa AAAA \n \n ssh-rsa BBBB \n \n ssh-rsa CCCC",
" ", "\n", " \n "
]
for _ in range(3):
response = self._service.get_public_keys()
self.assertEqual(["ssh-rsa AAAA", "ssh-rsa BBBB", "ssh-rsa CCCC"],
response)
for _ in range(3):
response = self._service.get_public_keys()
self.assertEqual([], response)
@mock.patch('cloudbaseinit.metadata.services.cloudstack.CloudStack'
'._password_client')
def test_get_password(self, mock_password_client):
headers = {"DomU_Request": "send_my_password"}
expected_password = "password"
mock_password_client.return_value = expected_password
expected_output = [
"Try to get password from the Password Server.",
"The password server returned a valid password "
"for the current instance."
]
with testutils.LogSnatcher('cloudbaseinit.metadata.services.'
'cloudstack') as snatcher:
password = self._service._get_password()
mock_password_client.assert_called_once_with(headers=headers)
self.assertEqual(expected_password, password)
self.assertEqual(expected_output, snatcher.output)
@mock.patch('cloudbaseinit.metadata.services.cloudstack.CloudStack'
'._password_client')
def test_get_password_fail(self, mock_password_client):
mock_password_client.side_effect = ["", cloudstack.BAD_REQUEST,
cloudstack.SAVED_PASSWORD]
expected_output = [
["Try to get password from the Password Server.",
"The password was already taken from the Password Server "
"for the current instance."],
["Try to get password from the Password Server.",
"The Password Server did not recognize the request."],
["Try to get password from the Password Server.",
"The Password Server did not have any password for the "
"current instance."],
]
for _ in range(3):
with testutils.LogSnatcher('cloudbaseinit.metadata.services.'
'cloudstack') as snatcher:
self.assertIsNone(self._service._get_password())
self.assertEqual(expected_output.pop(), snatcher.output)
self.assertEqual(3, mock_password_client.call_count)
@mock.patch('cloudbaseinit.metadata.services.cloudstack.CloudStack'
'._password_client')
def test_delete_password(self, mock_password_client):
mock_password_client.side_effect = [cloudstack.BAD_REQUEST,
cloudstack.SAVED_PASSWORD]
expected_output = [
'Remove the password for this instance from the '
'Password Server.',
'Fail to remove the password from the Password Server.',
'Remove the password for this instance from the '
'Password Server.',
'The password was removed from the Password Server',
]
with testutils.LogSnatcher('cloudbaseinit.metadata.services.'
'cloudstack') as snatcher:
self.assertIsNone(self._service._delete_password())
self.assertIsNone(self._service._delete_password())
self.assertEqual(2, mock_password_client.call_count)
for expected, output in zip(expected_output, snatcher.output):
self.assertTrue(output.startswith(expected))
@mock.patch('cloudbaseinit.metadata.services.cloudstack.CloudStack.'
'_delete_password')
@mock.patch('cloudbaseinit.metadata.services.cloudstack.CloudStack.'
'_get_password')
def test_get_admin_password(self, mock_get_password, mock_delete_password):
mock_get_password.return_value = mock.sentinel.password
password = self._service.get_admin_password()
self.assertEqual(mock.sentinel.password, password)
self.assertEqual(1, mock_get_password.call_count)
self.assertEqual(1, mock_delete_password.call_count)
@mock.patch('cloudbaseinit.metadata.services.cloudstack.CloudStack.'
'_delete_password')
@mock.patch('cloudbaseinit.metadata.services.cloudstack.CloudStack.'
'_get_password')
def test_get_admin_password_fail(self, mock_get_password,
mock_delete_password):
mock_get_password.return_value = None
self.assertIsNone(self._service.get_admin_password())
self.assertEqual(1, mock_get_password.call_count)
self.assertEqual(0, mock_delete_password.call_count)
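# Hedged illustration (not part of the original tests): the tests above script
# sequential responses by giving `mock` a `side_effect` list, where plain
# values are returned and exception classes/instances are raised, one element
# per call. A standalone sketch of that pattern:
def _side_effect_sketch():  # pragma: no cover
    probe = mock.Mock(side_effect=['200 OK', socket.error])
    first = probe()        # returns '200 OK'
    try:
        probe()            # raises socket.error
    except socket.error:
        pass
    return first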
|
|
import numpy as np
import pytest
from pandas.compat.numpy import _np_version_under1p16
import pandas as pd
from pandas.core.arrays.numpy_ import PandasArray, PandasDtype
import pandas.util.testing as tm
from . import base
@pytest.fixture(params=['float', 'object'])
def dtype(request):
return PandasDtype(np.dtype(request.param))
@pytest.fixture
def allow_in_pandas(monkeypatch):
"""
    A monkeypatch to tell pandas to let us in.
By default, passing a PandasArray to an index / series / frame
constructor will unbox that PandasArray to an ndarray, and treat
it as a non-EA column. We don't want people using EAs without
reason.
The mechanism for this is a check against ABCPandasArray
in each constructor.
But, for testing, we need to allow them in pandas. So we patch
the _typ of PandasArray, so that we evade the ABCPandasArray
check.
"""
with monkeypatch.context() as m:
m.setattr(PandasArray, '_typ', 'extension')
yield
@pytest.fixture
def data(allow_in_pandas, dtype):
if dtype.numpy_dtype == 'object':
return pd.Series([(i,) for i in range(100)]).array
return PandasArray(np.arange(1, 101, dtype=dtype._dtype))
@pytest.fixture
def data_missing(allow_in_pandas, dtype):
# For NumPy <1.16, np.array([np.nan, (1,)]) raises
# ValueError: setting an array element with a sequence.
if dtype.numpy_dtype == 'object':
if _np_version_under1p16:
raise pytest.skip("Skipping for NumPy <1.16")
return PandasArray(np.array([np.nan, (1,)]))
return PandasArray(np.array([np.nan, 1.0]))
@pytest.fixture
def na_value():
return np.nan
@pytest.fixture
def na_cmp():
def cmp(a, b):
return np.isnan(a) and np.isnan(b)
return cmp
@pytest.fixture
def data_for_sorting(allow_in_pandas, dtype):
"""Length-3 array with a known sort order.
This should be three items [B, C, A] with
A < B < C
"""
if dtype.numpy_dtype == 'object':
# Use an empty tuple for first element, then remove,
# to disable np.array's shape inference.
return PandasArray(
np.array([(), (2,), (3,), (1,)])[1:]
)
return PandasArray(
np.array([1, 2, 0])
)
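# Hedged aside (not part of the original module): the leading empty tuple above
# exists only to defeat NumPy's shape inference; without it, uniform 1-tuples
# would be packed into a 2-D integer array instead of a 1-D object array.
def _shape_inference_sketch():  # pragma: no cover
    inferred = np.array([(2,), (3,), (1,)])        # shape (3, 1), integer dtype
    boxed = np.array([(), (2,), (3,), (1,)])[1:]   # shape (3,), object dtype of tuples
    return inferred.shape, boxed.dtype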
@pytest.fixture
def data_missing_for_sorting(allow_in_pandas, dtype):
"""Length-3 array with a known sort order.
This should be three items [B, NA, A] with
A < B and NA missing.
"""
if dtype.numpy_dtype == 'object':
return PandasArray(
np.array([(1,), np.nan, (0,)])
)
return PandasArray(
np.array([1, np.nan, 0])
)
@pytest.fixture
def data_for_grouping(allow_in_pandas, dtype):
"""Data for factorization, grouping, and unique tests.
Expected to be like [B, B, NA, NA, A, A, B, C]
Where A < B < C and NA is missing
"""
if dtype.numpy_dtype == 'object':
a, b, c = (1,), (2,), (3,)
else:
a, b, c = np.arange(3)
return PandasArray(np.array(
[b, b, np.nan, np.nan, a, a, b, c]
))
@pytest.fixture
def skip_numpy_object(dtype):
"""
Tests for PandasArray with nested data. Users typically won't create
these objects via `pd.array`, but they can show up through `.array`
on a Series with nested data. Many of the base tests fail, as they aren't
appropriate for nested data.
    This fixture allows these tests to be skipped when used as a usefixtures
    marker on either an individual test or a test class.
"""
if dtype == 'object':
raise pytest.skip("Skipping for object dtype.")
skip_nested = pytest.mark.usefixtures('skip_numpy_object')
class BaseNumPyTests:
pass
class TestCasting(BaseNumPyTests, base.BaseCastingTests):
@skip_nested
def test_astype_str(self, data):
# ValueError: setting an array element with a sequence
super().test_astype_str(data)
class TestConstructors(BaseNumPyTests, base.BaseConstructorsTests):
@pytest.mark.skip(reason="We don't register our dtype")
# We don't want to register. This test should probably be split in two.
def test_from_dtype(self, data):
pass
@skip_nested
def test_array_from_scalars(self, data):
# ValueError: PandasArray must be 1-dimensional.
super().test_array_from_scalars(data)
class TestDtype(BaseNumPyTests, base.BaseDtypeTests):
@pytest.mark.skip(reason="Incorrect expected.")
# we unsurprisingly clash with a NumPy name.
def test_check_dtype(self, data):
pass
class TestGetitem(BaseNumPyTests, base.BaseGetitemTests):
@skip_nested
def test_getitem_scalar(self, data):
# AssertionError
super().test_getitem_scalar(data)
@skip_nested
def test_take_series(self, data):
# ValueError: PandasArray must be 1-dimensional.
super().test_take_series(data)
class TestGroupby(BaseNumPyTests, base.BaseGroupbyTests):
@skip_nested
def test_groupby_extension_apply(
self, data_for_grouping, groupby_apply_op):
# ValueError: Names should be list-like for a MultiIndex
super().test_groupby_extension_apply(data_for_grouping,
groupby_apply_op)
class TestInterface(BaseNumPyTests, base.BaseInterfaceTests):
@skip_nested
def test_array_interface(self, data):
# NumPy array shape inference
super().test_array_interface(data)
class TestMethods(BaseNumPyTests, base.BaseMethodsTests):
@pytest.mark.skip(reason="TODO: remove?")
def test_value_counts(self, all_data, dropna):
pass
@pytest.mark.skip(reason="Incorrect expected")
# We have a bool dtype, so the result is an ExtensionArray
# but expected is not
def test_combine_le(self, data_repeated):
super().test_combine_le(data_repeated)
@skip_nested
def test_combine_add(self, data_repeated):
# Not numeric
super().test_combine_add(data_repeated)
@skip_nested
def test_shift_fill_value(self, data):
# np.array shape inference. Shift implementation fails.
super().test_shift_fill_value(data)
@skip_nested
@pytest.mark.parametrize('box', [pd.Series, lambda x: x])
@pytest.mark.parametrize('method', [lambda x: x.unique(), pd.unique])
def test_unique(self, data, box, method):
# Fails creating expected
super().test_unique(data, box, method)
@skip_nested
def test_fillna_copy_frame(self, data_missing):
# The "scalar" for this array isn't a scalar.
super().test_fillna_copy_frame(data_missing)
@skip_nested
def test_fillna_copy_series(self, data_missing):
# The "scalar" for this array isn't a scalar.
super().test_fillna_copy_series(data_missing)
@skip_nested
def test_hash_pandas_object_works(self, data, as_frame):
# ndarray of tuples not hashable
super().test_hash_pandas_object_works(data, as_frame)
@skip_nested
def test_searchsorted(self, data_for_sorting, as_series):
# Test setup fails.
super().test_searchsorted(data_for_sorting, as_series)
@skip_nested
def test_where_series(self, data, na_value, as_frame):
# Test setup fails.
super().test_where_series(data, na_value, as_frame)
@skip_nested
@pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]])
def test_repeat(self, data, repeats, as_series, use_numpy):
# Fails creating expected
super().test_repeat(data, repeats, as_series, use_numpy)
@skip_nested
class TestArithmetics(BaseNumPyTests, base.BaseArithmeticOpsTests):
divmod_exc = None
series_scalar_exc = None
frame_scalar_exc = None
series_array_exc = None
def test_divmod_series_array(self, data):
s = pd.Series(data)
self._check_divmod_op(s, divmod, data, exc=None)
@pytest.mark.skip("We implement ops")
def test_error(self, data, all_arithmetic_operators):
pass
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
super().test_arith_series_with_scalar(data, all_arithmetic_operators)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
super().test_arith_series_with_array(data, all_arithmetic_operators)
class TestPrinting(BaseNumPyTests, base.BasePrintingTests):
pass
@skip_nested
class TestNumericReduce(BaseNumPyTests, base.BaseNumericReduceTests):
def check_reduce(self, s, op_name, skipna):
result = getattr(s, op_name)(skipna=skipna)
# avoid coercing int -> float. Just cast to the actual numpy type.
expected = getattr(s.astype(s.dtype._dtype), op_name)(skipna=skipna)
tm.assert_almost_equal(result, expected)
@skip_nested
class TestBooleanReduce(BaseNumPyTests, base.BaseBooleanReduceTests):
pass
class TestMissing(BaseNumPyTests, base.BaseMissingTests):
@skip_nested
def test_fillna_scalar(self, data_missing):
# Non-scalar "scalar" values.
super().test_fillna_scalar(data_missing)
@skip_nested
def test_fillna_series_method(self, data_missing, fillna_method):
# Non-scalar "scalar" values.
super().test_fillna_series_method(
data_missing, fillna_method)
@skip_nested
def test_fillna_series(self, data_missing):
# Non-scalar "scalar" values.
super().test_fillna_series(data_missing)
@skip_nested
def test_fillna_frame(self, data_missing):
# Non-scalar "scalar" values.
super().test_fillna_frame(data_missing)
class TestReshaping(BaseNumPyTests, base.BaseReshapingTests):
@pytest.mark.skip("Incorrect parent test")
# not actually a mixed concat, since we concat int and int.
def test_concat_mixed_dtypes(self, data):
super().test_concat_mixed_dtypes(data)
@skip_nested
def test_merge(self, data, na_value):
# Fails creating expected
super().test_merge(data, na_value)
@skip_nested
def test_merge_on_extension_array(self, data):
# Fails creating expected
super().test_merge_on_extension_array(data)
@skip_nested
def test_merge_on_extension_array_duplicates(self, data):
# Fails creating expected
super().test_merge_on_extension_array_duplicates(data)
class TestSetitem(BaseNumPyTests, base.BaseSetitemTests):
@skip_nested
def test_setitem_scalar_series(self, data, box_in_series):
# AssertionError
super().test_setitem_scalar_series(data, box_in_series)
@skip_nested
def test_setitem_sequence(self, data, box_in_series):
# ValueError: shape mismatch: value array of shape (2,1) could not
# be broadcast to indexing result of shape (2,)
super().test_setitem_sequence(data, box_in_series)
@skip_nested
def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
# ValueError: PandasArray must be 1-dimensional.
super().test_setitem_sequence_mismatched_length_raises(data, as_array)
@skip_nested
def test_setitem_sequence_broadcasts(self, data, box_in_series):
# ValueError: cannot set using a list-like indexer with a different
# length than the value
super().test_setitem_sequence_broadcasts(data, box_in_series)
@skip_nested
def test_setitem_loc_scalar_mixed(self, data):
# AssertionError
super().test_setitem_loc_scalar_mixed(data)
@skip_nested
def test_setitem_loc_scalar_multiple_homogoneous(self, data):
# AssertionError
super().test_setitem_loc_scalar_multiple_homogoneous(data)
@skip_nested
def test_setitem_iloc_scalar_mixed(self, data):
# AssertionError
super().test_setitem_iloc_scalar_mixed(data)
@skip_nested
def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
# AssertionError
super().test_setitem_iloc_scalar_multiple_homogoneous(data)
@skip_nested
@pytest.mark.parametrize('setter', ['loc', None])
def test_setitem_mask_broadcast(self, data, setter):
# ValueError: cannot set using a list-like indexer with a different
# length than the value
super().test_setitem_mask_broadcast(data, setter)
@skip_nested
def test_setitem_scalar_key_sequence_raise(self, data):
# Failed: DID NOT RAISE <class 'ValueError'>
super().test_setitem_scalar_key_sequence_raise(data)
@skip_nested
class TestParsing(BaseNumPyTests, base.BaseParsingTests):
pass
|
|
import errno
import filecmp
import os
import shutil
import stat
import textwrap
import time
from unittest.mock import call, patch
import colorama
import pytest
import dvc as dvc_module
from dvc.dvcfile import DVC_FILE_SUFFIX
from dvc.exceptions import (
DvcException,
InvalidArgumentError,
OutputDuplicationError,
OverlappingOutputPathsError,
RecursiveAddingWhileUsingFilename,
)
from dvc.fs.local import LocalFileSystem
from dvc.hash_info import HashInfo
from dvc.main import main
from dvc.objects.db import ODBManager
from dvc.output import (
OutputAlreadyTrackedError,
OutputDoesNotExistError,
OutputIsStageFileError,
)
from dvc.stage import Stage
from dvc.stage.exceptions import (
StageExternalOutputsError,
StagePathNotFoundError,
)
from dvc.system import System
from dvc.utils import LARGE_DIR_SIZE, file_md5, relpath
from dvc.utils.fs import path_isin
from dvc.utils.serialize import YAMLFileCorruptedError, load_yaml
from tests.basic_env import TestDvc
from tests.utils import get_gitignore_content
def test_add(tmp_dir, dvc):
(stage,) = tmp_dir.dvc_gen({"foo": "foo"})
md5 = file_md5("foo", dvc.fs)
assert stage is not None
assert isinstance(stage, Stage)
assert os.path.isfile(stage.path)
assert len(stage.outs) == 1
assert len(stage.deps) == 0
assert stage.cmd is None
assert stage.outs[0].hash_info == HashInfo("md5", md5)
assert stage.md5 is None
assert (tmp_dir / "foo.dvc").parse() == {
"outs": [
{
"md5": "acbd18db4cc2f85cedef654fccc4a4d8",
"path": "foo",
"size": 3,
}
]
}
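# Hedged aside (not part of the original tests): the literal hash above is
# simply the MD5 digest of the file content "foo", reproducible with hashlib.
def _md5_of_foo_sketch():  # pragma: no cover
    import hashlib
    return hashlib.md5(b"foo").hexdigest()  # 'acbd18db4cc2f85cedef654fccc4a4d8'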
@pytest.mark.skipif(os.name == "nt", reason="can't set exec bit on Windows")
def test_add_executable(tmp_dir, dvc):
tmp_dir.gen("foo", "foo")
st = os.stat("foo")
os.chmod("foo", st.st_mode | stat.S_IEXEC)
dvc.add("foo")
assert (tmp_dir / "foo.dvc").parse() == {
"outs": [
{
"md5": "acbd18db4cc2f85cedef654fccc4a4d8",
"path": "foo",
"size": 3,
"isexec": True,
}
]
}
assert os.stat("foo").st_mode & stat.S_IEXEC
def test_add_unicode(tmp_dir, dvc):
with open("\xe1", "wb", encoding=None) as fd:
fd.write(b"something")
(stage,) = dvc.add("\xe1")
assert os.path.isfile(stage.path)
def test_add_unsupported_file(dvc):
with pytest.raises(DvcException):
dvc.add("unsupported://unsupported")
def test_add_directory(tmp_dir, dvc):
from dvc.objects import load
(stage,) = tmp_dir.dvc_gen({"dir": {"file": "file"}})
assert stage is not None
assert len(stage.deps) == 0
assert len(stage.outs) == 1
hash_info = stage.outs[0].hash_info
obj = load(dvc.odb.local, hash_info)
for key, _, _ in obj:
for part in key:
assert "\\" not in part
class TestAddDirectoryRecursive(TestDvc):
def test(self):
stages = self.dvc.add(self.DATA_DIR, recursive=True)
self.assertEqual(len(stages), 2)
class TestAddCmdDirectoryRecursive(TestDvc):
def test(self):
ret = main(["add", "--recursive", self.DATA_DIR])
self.assertEqual(ret, 0)
def test_warn_about_large_directories(self):
warning = (
"You are adding a large directory 'large-dir' recursively."
"\nConsider tracking it as a whole instead with "
"`{cyan}dvc add large-dir{nc}`"
).format(
cyan=colorama.Fore.CYAN,
nc=colorama.Style.RESET_ALL,
)
os.mkdir("large-dir")
# Create a lot of files
for iteration in range(LARGE_DIR_SIZE + 1):
path = os.path.join("large-dir", str(iteration))
with open(path, "w", encoding="utf-8") as fobj:
fobj.write(path)
assert main(["add", "--recursive", "large-dir"]) == 0
assert warning in self._capsys.readouterr()[1]
class TestAddDirectoryWithForwardSlash(TestDvc):
def test(self):
dname = "directory/"
os.mkdir(dname)
self.create(os.path.join(dname, "file"), "file")
stages = self.dvc.add(dname)
self.assertEqual(len(stages), 1)
stage = stages[0]
self.assertTrue(stage is not None)
self.assertEqual(os.path.abspath("directory.dvc"), stage.path)
def test_add_tracked_file(tmp_dir, scm, dvc):
path = "tracked_file"
tmp_dir.scm_gen(path, "...", commit="add tracked file")
msg = f""" output '{path}' is already tracked by SCM \\(e.g. Git\\).
You can remove it from Git, then add to DVC.
To stop tracking from Git:
git rm -r --cached '{path}'
git commit -m "stop tracking {path}" """
with pytest.raises(OutputAlreadyTrackedError, match=msg):
dvc.add(path)
class TestAddDirWithExistingCache(TestDvc):
def test(self):
dname = "a"
fname = os.path.join(dname, "b")
os.mkdir(dname)
shutil.copyfile(self.FOO, fname)
stages = self.dvc.add(self.FOO)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
stages = self.dvc.add(dname)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
class TestAddModifiedDir(TestDvc):
def test(self):
stages = self.dvc.add(self.DATA_DIR)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
os.unlink(self.DATA)
time.sleep(2)
stages = self.dvc.add(self.DATA_DIR)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
def test_add_file_in_dir(tmp_dir, dvc):
tmp_dir.gen({"dir": {"subdir": {"subdata": "subdata content"}}})
subdir_path = os.path.join("dir", "subdir", "subdata")
(stage,) = dvc.add(subdir_path)
assert stage is not None
assert len(stage.deps) == 0
assert len(stage.outs) == 1
assert stage.relpath == subdir_path + ".dvc"
# Current dir should not be taken into account
assert stage.wdir == os.path.dirname(stage.path)
assert stage.outs[0].def_path == "subdata"
@pytest.mark.parametrize(
"target, expected_def_paths, expected_rel_paths",
[
(
os.path.join("dir", "subdir", "subdata*"),
["subdata", "subdata123"],
[
os.path.join("dir", "subdir", "subdata") + ".dvc",
os.path.join("dir", "subdir", "subdata123") + ".dvc",
],
),
(
os.path.join("dir", "subdir", "?subdata"),
["esubdata", "isubdata"],
[
os.path.join("dir", "subdir", "esubdata") + ".dvc",
os.path.join("dir", "subdir", "isubdata") + ".dvc",
],
),
(
os.path.join("dir", "subdir", "[aiou]subdata"),
["isubdata"],
[os.path.join("dir", "subdir", "isubdata") + ".dvc"],
),
(
os.path.join("dir", "**", "subdata*"),
["subdata", "subdata123", "subdata4", "subdata5"],
[
os.path.join("dir", "subdir", "subdata") + ".dvc",
os.path.join("dir", "subdir", "subdata123") + ".dvc",
os.path.join("dir", "anotherdir", "subdata4") + ".dvc",
os.path.join("dir", "subdata5") + ".dvc",
],
),
],
)
def test_add_filtered_files_in_dir(
tmp_dir, dvc, target, expected_def_paths, expected_rel_paths
):
tmp_dir.gen(
{
"dir": {
"subdir": {
"subdata": "subdata content",
"esubdata": "extra subdata content",
"isubdata": "i subdata content",
"subdata123": "subdata content 123",
},
"anotherdir": {
"subdata4": "subdata 4 content",
"esubdata": "extra 2 subdata content",
},
"subdata5": "subdata 5 content",
}
}
)
stages = dvc.add(target, glob=True)
assert len(stages) == len(expected_def_paths)
for stage in stages:
assert stage is not None
assert len(stage.deps) == 0
assert len(stage.outs) == 1
assert stage.relpath in expected_rel_paths
# Current dir should not be taken into account
assert stage.wdir == os.path.dirname(stage.path)
assert stage.outs[0].def_path in expected_def_paths
@pytest.mark.parametrize(
"workspace, hash_name, hash_value",
[
(
pytest.lazy_fixture("local_cloud"),
"md5",
"8c7dd922ad47494fc02c388e12c00eac",
),
pytest.param(
pytest.lazy_fixture("ssh"),
"md5",
"8c7dd922ad47494fc02c388e12c00eac",
marks=pytest.mark.skipif(
os.name == "nt", reason="disabled on windows"
),
),
(
pytest.lazy_fixture("s3"),
"etag",
"8c7dd922ad47494fc02c388e12c00eac",
),
(
pytest.lazy_fixture("hdfs"),
"checksum",
"000002000000000000000000a86fe4d846edc1bf4c355cb6112f141e",
),
(
pytest.lazy_fixture("webhdfs"),
"checksum",
"000002000000000000000000a86fe4d846edc1bf4c355cb6112f141e00000000",
),
],
indirect=["workspace"],
)
def test_add_external_file(tmp_dir, dvc, workspace, hash_name, hash_value):
workspace.gen("file", "file")
with pytest.raises(StageExternalOutputsError):
dvc.add(workspace.url)
dvc.add("remote://workspace/file")
assert (tmp_dir / "file.dvc").read_text() == (
"outs:\n"
f"- {hash_name}: {hash_value}\n"
" size: 4\n"
" path: remote://workspace/file\n"
)
assert (workspace / "file").read_text() == "file"
assert (
workspace / "cache" / hash_value[:2] / hash_value[2:]
).read_text() == "file"
assert dvc.status() == {}
def test_add_external_relpath(tmp_dir, dvc, local_cloud):
(fpath,) = local_cloud.gen("file", "file")
rel = os.path.relpath(fpath)
with pytest.raises(StageExternalOutputsError):
dvc.add(rel)
dvc.add(rel, external=True)
assert (tmp_dir / "file.dvc").read_text() == (
"outs:\n"
"- md5: 8c7dd922ad47494fc02c388e12c00eac\n"
" size: 4\n"
f" path: {rel}\n"
)
assert fpath.read_text() == "file"
assert dvc.status() == {}
@pytest.mark.parametrize(
"workspace, hash_name, hash_value",
[
(
pytest.lazy_fixture("local_cloud"),
"md5",
"b6dcab6ccd17ca0a8bf4a215a37d14cc.dir",
),
pytest.param(
pytest.lazy_fixture("ssh"),
"md5",
"b6dcab6ccd17ca0a8bf4a215a37d14cc.dir",
marks=pytest.mark.skipif(
os.name == "nt", reason="disabled on windows"
),
),
(
pytest.lazy_fixture("s3"),
"etag",
"ec602a6ba97b2dd07bd6d2cd89674a60.dir",
),
],
indirect=["workspace"],
)
def test_add_external_dir(tmp_dir, dvc, workspace, hash_name, hash_value):
workspace.gen({"dir": {"file": "file", "subdir": {"subfile": "subfile"}}})
dvc.add("remote://workspace/dir")
assert (tmp_dir / "dir.dvc").read_text() == (
"outs:\n"
f"- {hash_name}: {hash_value}\n"
" size: 11\n"
" nfiles: 2\n"
" path: remote://workspace/dir\n"
)
assert (workspace / "cache" / hash_value[:2] / hash_value[2:]).is_file()
class TestAddLocalRemoteFile(TestDvc):
def test(self):
"""
Making sure that 'remote' syntax is handled properly for local outs.
"""
cwd = os.getcwd()
remote = "myremote"
ret = main(["remote", "add", remote, cwd])
self.assertEqual(ret, 0)
self.dvc.config.load()
foo = f"remote://{remote}/{self.FOO}"
ret = main(["add", foo])
self.assertEqual(ret, 0)
d = load_yaml("foo.dvc")
self.assertEqual(d["outs"][0]["path"], foo)
bar = os.path.join(cwd, self.BAR)
ret = main(["add", bar])
self.assertEqual(ret, 0)
d = load_yaml("bar.dvc")
self.assertEqual(d["outs"][0]["path"], self.BAR)
class TestCmdAdd(TestDvc):
def test(self):
ret = main(["add", self.FOO])
self.assertEqual(ret, 0)
ret = main(["add", "non-existing-file"])
self.assertNotEqual(ret, 0)
class TestDoubleAddUnchanged(TestDvc):
def test_file(self):
ret = main(["add", self.FOO])
self.assertEqual(ret, 0)
ret = main(["add", self.FOO])
self.assertEqual(ret, 0)
def test_dir(self):
ret = main(["add", self.DATA_DIR])
self.assertEqual(ret, 0)
ret = main(["add", self.DATA_DIR])
self.assertEqual(ret, 0)
def test_should_update_state_entry_for_file_after_add(mocker, dvc, tmp_dir):
file_md5_counter = mocker.spy(dvc_module.objects.stage, "file_md5")
tmp_dir.gen("foo", "foo")
ret = main(["config", "cache.type", "copy"])
assert ret == 0
ret = main(["add", "foo"])
assert ret == 0
assert file_md5_counter.mock.call_count == 1
ret = main(["status"])
assert ret == 0
assert file_md5_counter.mock.call_count == 1
ret = main(["run", "--single-stage", "-d", "foo", "echo foo"])
assert ret == 0
assert file_md5_counter.mock.call_count == 1
os.rename("foo", "foo.back")
ret = main(["checkout"])
assert ret == 0
assert file_md5_counter.mock.call_count == 1
ret = main(["status"])
assert ret == 0
assert file_md5_counter.mock.call_count == 1
def test_should_update_state_entry_for_directory_after_add(
mocker, dvc, tmp_dir
):
file_md5_counter = mocker.spy(dvc_module.objects.stage, "file_md5")
tmp_dir.gen({"data/data": "foo", "data/data_sub/sub_data": "foo"})
ret = main(["config", "cache.type", "copy"])
assert ret == 0
ret = main(["add", "data"])
assert ret == 0
assert file_md5_counter.mock.call_count == 3
ret = main(["status"])
assert ret == 0
assert file_md5_counter.mock.call_count == 3
ls = "dir" if os.name == "nt" else "ls"
ret = main(
["run", "--single-stage", "-d", "data", "{} {}".format(ls, "data")]
)
assert ret == 0
assert file_md5_counter.mock.call_count == 3
os.rename("data", "data" + ".back")
ret = main(["checkout"])
assert ret == 0
assert file_md5_counter.mock.call_count == 3
ret = main(["status"])
assert ret == 0
assert file_md5_counter.mock.call_count == 3
class TestAddCommit(TestDvc):
def test(self):
ret = main(["add", self.FOO, "--no-commit"])
self.assertEqual(ret, 0)
self.assertTrue(os.path.isfile(self.FOO))
self.assertFalse(os.path.exists(self.dvc.odb.local.cache_dir))
ret = main(["commit", self.FOO + ".dvc"])
self.assertEqual(ret, 0)
self.assertTrue(os.path.isfile(self.FOO))
self.assertEqual(len(os.listdir(self.dvc.odb.local.cache_dir)), 1)
def test_should_collect_dir_cache_only_once(mocker, tmp_dir, dvc):
tmp_dir.gen({"data/data": "foo"})
counter = mocker.spy(dvc_module.objects.stage, "_stage_tree")
ret = main(["add", "data"])
assert ret == 0
assert counter.mock.call_count == 1
ret = main(["status"])
assert ret == 0
assert counter.mock.call_count == 1
ret = main(["status"])
assert ret == 0
assert counter.mock.call_count == 1
class TestShouldPlaceStageInDataDirIfRepositoryBelowSymlink(TestDvc):
def test(self):
def is_symlink_true_below_dvc_root(path):
if path == os.path.dirname(self.dvc.root_dir):
return True
return False
with patch.object(
System, "is_symlink", side_effect=is_symlink_true_below_dvc_root
):
ret = main(["add", self.DATA])
self.assertEqual(0, ret)
stage_file_path_on_data_below_symlink = (
os.path.basename(self.DATA) + DVC_FILE_SUFFIX
)
self.assertFalse(
os.path.exists(stage_file_path_on_data_below_symlink)
)
stage_file_path = self.DATA + DVC_FILE_SUFFIX
self.assertTrue(os.path.exists(stage_file_path))
class TestShouldThrowProperExceptionOnCorruptedStageFile(TestDvc):
def test(self):
ret = main(["add", self.FOO])
assert 0 == ret
foo_stage = relpath(self.FOO + DVC_FILE_SUFFIX)
# corrupt stage file
with open(foo_stage, "a+", encoding="utf-8") as file:
file.write("this will break yaml file structure")
self._caplog.clear()
ret = main(["add", self.BAR])
assert 1 == ret
expected_error = (
f"unable to read: '{foo_stage}', YAML file structure is corrupted"
)
assert expected_error in self._caplog.text
class TestAddFilename(TestDvc):
def test(self):
ret = main(["add", self.FOO, self.BAR, "--file", "error.dvc"])
self.assertNotEqual(0, ret)
ret = main(["add", "-R", self.DATA_DIR, "--file", "error.dvc"])
self.assertNotEqual(0, ret)
with self.assertRaises(RecursiveAddingWhileUsingFilename):
self.dvc.add(self.DATA_DIR, recursive=True, fname="error.dvc")
ret = main(["add", self.DATA_DIR, "--file", "data_directory.dvc"])
self.assertEqual(0, ret)
self.assertTrue(os.path.exists("data_directory.dvc"))
ret = main(["add", self.FOO, "--file", "bar.dvc"])
self.assertEqual(0, ret)
self.assertTrue(os.path.exists("bar.dvc"))
self.assertFalse(os.path.exists("foo.dvc"))
os.remove("bar.dvc")
ret = main(["add", self.FOO, "--file", "bar.dvc"])
self.assertEqual(0, ret)
self.assertTrue(os.path.exists("bar.dvc"))
self.assertFalse(os.path.exists("foo.dvc"))
def test_failed_add_cleanup(tmp_dir, scm, dvc):
tmp_dir.gen({"foo": "foo", "bar": "bar"})
# Add and corrupt a stage file
dvc.add("foo")
tmp_dir.gen("foo.dvc", "- broken\nyaml")
with pytest.raises(YAMLFileCorruptedError):
dvc.add("bar")
assert not os.path.exists("bar.dvc")
gitignore_content = get_gitignore_content()
assert "/bar" not in gitignore_content
def test_should_not_track_git_internal_files(mocker, dvc, tmp_dir):
stage_creator_spy = mocker.spy(dvc_module.repo.add, "create_stages")
ret = main(["add", "-R", dvc.root_dir])
assert ret == 0
created_stages_filenames = stage_creator_spy.mock.call_args[0][1]
for fname in created_stages_filenames:
assert ".git" not in fname
class TestAddUnprotected(TestDvc):
def test(self):
ret = main(["config", "cache.type", "hardlink"])
self.assertEqual(ret, 0)
ret = main(["add", self.FOO])
self.assertEqual(ret, 0)
self.assertFalse(os.access(self.FOO, os.W_OK))
self.assertTrue(System.is_hardlink(self.FOO))
ret = main(["unprotect", self.FOO])
self.assertEqual(ret, 0)
ret = main(["add", self.FOO])
self.assertEqual(ret, 0)
self.assertFalse(os.access(self.FOO, os.W_OK))
self.assertTrue(System.is_hardlink(self.FOO))
@pytest.fixture
def temporary_windows_drive(tmp_path_factory):
import string
from ctypes import windll
try:
# pylint: disable=import-error
import win32api
from win32con import DDD_REMOVE_DEFINITION
except ImportError:
pytest.skip("pywin32 not installed")
drives = [
s[0].upper()
for s in win32api.GetLogicalDriveStrings().split("\000")
if len(s) > 0
]
new_drive_name = [
letter for letter in string.ascii_uppercase if letter not in drives
][0]
new_drive = f"{new_drive_name}:"
target_path = tmp_path_factory.mktemp("tmp_windows_drive")
set_up_result = windll.kernel32.DefineDosDeviceW(
0, new_drive, os.fspath(target_path)
)
if set_up_result == 0:
raise RuntimeError("Failed to mount windows drive!")
    # NOTE: new_drive has the form `A:` and joining it with some relative
    # path might result in a non-existing path (A:path\\to)
yield os.path.join(new_drive, os.sep)
tear_down_result = windll.kernel32.DefineDosDeviceW(
DDD_REMOVE_DEFINITION, new_drive, os.fspath(target_path)
)
if tear_down_result == 0:
raise RuntimeError("Could not unmount windows drive!")
@pytest.mark.skipif(os.name != "nt", reason="Windows specific")
def test_windows_should_add_when_cache_on_different_drive(
tmp_dir, dvc, temporary_windows_drive
):
dvc.config["cache"]["dir"] = temporary_windows_drive
dvc.odb = ODBManager(dvc)
(stage,) = tmp_dir.dvc_gen({"file": "file"})
cache_path = stage.outs[0].cache_path
assert path_isin(cache_path, temporary_windows_drive)
assert os.path.isfile(cache_path)
filecmp.cmp("file", cache_path)
def test_readding_dir_should_not_unprotect_all(tmp_dir, dvc, mocker):
tmp_dir.gen("dir/data", "data")
dvc.odb.local.cache_types = ["symlink"]
dvc.add("dir")
tmp_dir.gen("dir/new_file", "new_file_content")
unprotect_spy = mocker.spy(dvc.odb.local, "unprotect")
dvc.add("dir")
assert not unprotect_spy.mock.called
assert System.is_symlink(os.path.join("dir", "new_file"))
def test_should_not_checkout_when_adding_cached_copy(tmp_dir, dvc, mocker):
dvc.odb.local.cache_types = ["copy"]
tmp_dir.dvc_gen({"foo": "foo", "bar": "bar"})
shutil.copy("bar", "foo")
copy_spy = mocker.spy(dvc.odb.local.fs, "copy")
dvc.add("foo")
assert copy_spy.mock.call_count == 0
@pytest.mark.parametrize(
"link,new_link,link_test_func",
[
("hardlink", "copy", lambda path: not System.is_hardlink(path)),
("symlink", "copy", lambda path: not System.is_symlink(path)),
("copy", "hardlink", System.is_hardlink),
("copy", "symlink", System.is_symlink),
],
)
def test_should_relink_on_repeated_add(
link, new_link, link_test_func, tmp_dir, dvc
):
dvc.config["cache"]["type"] = link
tmp_dir.dvc_gen({"foo": "foo", "bar": "bar"})
os.remove("foo")
getattr(dvc.odb.local.fs, link)(
(tmp_dir / "bar").fs_path, (tmp_dir / "foo").fs_path
)
dvc.odb.local.cache_types = [new_link]
dvc.add("foo")
assert link_test_func("foo")
@pytest.mark.parametrize("link", ["hardlink", "symlink", "copy"])
def test_should_protect_on_repeated_add(link, tmp_dir, dvc):
dvc.odb.local.cache_types = [link]
tmp_dir.dvc_gen({"foo": "foo"})
dvc.unprotect("foo")
dvc.add("foo")
assert not os.access(
os.path.join(".dvc", "cache", "ac", "bd18db4cc2f85cedef654fccc4a4d8"),
os.W_OK,
)
# NOTE: Windows symlink perms don't propagate to the target
if link == "copy" or (link == "symlink" and os.name == "nt"):
assert os.access("foo", os.W_OK)
else:
assert not os.access("foo", os.W_OK)
def test_escape_gitignore_entries(tmp_dir, scm, dvc):
fname = "file!with*weird#naming_[1].t?t"
ignored_fname = r"/file\!with\*weird\#naming_\[1\].t\?t"
if os.name == "nt":
# Some characters are not supported by Windows in the filename
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
fname = "file!with_weird#naming_[1].txt"
ignored_fname = r"/file\!with_weird\#naming_\[1\].txt"
tmp_dir.dvc_gen(fname, "...")
assert ignored_fname in get_gitignore_content()
@pytest.mark.xfail(reason="error message relpath")
def test_add_from_data_dir(tmp_dir, scm, dvc):
tmp_dir.dvc_gen({"dir": {"file1": "file1 content"}})
tmp_dir.gen({"dir": {"file2": "file2 content"}})
with pytest.raises(OverlappingOutputPathsError) as e:
dvc.add(os.path.join("dir", "file2"), fname="file2.dvc")
assert str(e.value) == (
"Cannot add '{out}', because it is overlapping with other DVC "
"tracked output: 'dir'.\n"
"To include '{out}' in 'dir', run 'dvc commit dir.dvc'"
).format(out=os.path.join("dir", "file2"))
def test_not_raises_on_re_add(tmp_dir, dvc):
tmp_dir.dvc_gen("file", "file content")
tmp_dir.gen({"file2": "file2 content", "file": "modified file"})
dvc.add(["file2", "file"])
@pytest.mark.parametrize("link", ["hardlink", "symlink", "copy"])
def test_add_empty_files(tmp_dir, dvc, link):
file = "foo"
dvc.odb.local.cache_types = [link]
stages = tmp_dir.dvc_gen(file, "")
assert (tmp_dir / file).exists()
assert (tmp_dir / (file + DVC_FILE_SUFFIX)).exists()
assert os.path.exists(stages[0].outs[0].cache_path)
def test_add_optimization_for_hardlink_on_empty_files(tmp_dir, dvc, mocker):
dvc.odb.local.cache_types = ["hardlink"]
tmp_dir.gen({"foo": "", "bar": "", "lorem": "lorem", "ipsum": "ipsum"})
m = mocker.spy(LocalFileSystem, "is_hardlink")
stages = dvc.add(["foo", "bar", "lorem", "ipsum"])
assert m.call_count == 4
assert m.call_args != call(tmp_dir / "foo")
assert m.call_args != call(tmp_dir / "bar")
for stage in stages[:2]:
# hardlinks are not created for empty files
assert not System.is_hardlink(stage.outs[0].fs_path)
for stage in stages[2:]:
assert System.is_hardlink(stage.outs[0].fs_path)
for stage in stages:
assert os.path.exists(stage.path)
assert os.path.exists(stage.outs[0].cache_path)
def test_output_duplication_for_pipeline_tracked(tmp_dir, dvc, run_copy):
tmp_dir.dvc_gen("foo", "foo")
run_copy("foo", "bar", name="copy-foo-bar")
with pytest.raises(OutputDuplicationError):
dvc.add("bar")
def test_add_pipeline_file(tmp_dir, dvc, run_copy):
from dvc.dvcfile import PIPELINE_FILE
tmp_dir.dvc_gen("foo", "foo")
run_copy("foo", "bar", name="copy-foo-bar")
with pytest.raises(OutputIsStageFileError):
dvc.add(PIPELINE_FILE)
def test_add_symlink_file(tmp_dir, dvc):
tmp_dir.gen({"dir": {"bar": "bar"}})
(tmp_dir / "dir" / "foo").symlink_to(os.path.join(".", "bar"))
dvc.add(os.path.join("dir", "foo"))
assert not (tmp_dir / "foo.dvc").exists()
assert (tmp_dir / "dir" / "foo.dvc").exists()
assert not (tmp_dir / "dir" / "foo").is_symlink()
assert not (tmp_dir / "dir" / "bar").is_symlink()
assert (tmp_dir / "dir" / "foo").read_text() == "bar"
assert (tmp_dir / "dir" / "bar").read_text() == "bar"
assert (tmp_dir / ".dvc" / "cache").read_text() == {
"37": {"b51d194a7513e45b56f6524f2d51f2": "bar"}
}
assert not (
tmp_dir / ".dvc" / "cache" / "37" / "b51d194a7513e45b56f6524f2d51f2"
).is_symlink()
# Test that subsequent add succeeds
# See https://github.com/iterative/dvc/issues/4654
dvc.add(os.path.join("dir", "foo"))
@pytest.mark.parametrize("external", [True, False])
def test_add_symlink_dir(make_tmp_dir, tmp_dir, dvc, external):
if external:
data_dir = make_tmp_dir("data")
data_dir.gen({"foo": "foo"})
target = os.fspath(data_dir)
else:
tmp_dir.gen({"data": {"foo": "foo"}})
target = os.path.join(".", "data")
tmp_dir.gen({"data": {"foo": "foo"}})
(tmp_dir / "dir").symlink_to(target)
with pytest.raises(DvcException):
dvc.add("dir")
@pytest.mark.parametrize("external", [True, False])
def test_add_file_in_symlink_dir(make_tmp_dir, tmp_dir, dvc, external):
if external:
data_dir = make_tmp_dir("data")
data_dir.gen({"dir": {"foo": "foo"}})
target = os.fspath(data_dir / "dir")
else:
tmp_dir.gen({"data": {"foo": "foo"}})
target = os.path.join(".", "data")
(tmp_dir / "dir").symlink_to(target)
with pytest.raises(DvcException):
dvc.add(os.path.join("dir", "foo"))
def test_add_with_cache_link_error(tmp_dir, dvc, mocker, capsys):
tmp_dir.gen("foo", "foo")
mocker.patch(
"dvc.objects.checkout.test_links",
return_value=[],
)
dvc.add("foo")
err = capsys.readouterr()[1]
assert "reconfigure cache types" in err
assert (tmp_dir / "foo").exists()
assert (tmp_dir / "foo.dvc").exists()
assert (tmp_dir / ".dvc" / "cache").read_text() == {
"ac": {"bd18db4cc2f85cedef654fccc4a4d8": "foo"}
}
def test_add_preserve_meta(tmp_dir, dvc):
text = textwrap.dedent(
"""\
# top comment
desc: top desc
outs:
- path: foo # out comment
desc: out desc
meta: some metadata
"""
)
tmp_dir.gen("foo.dvc", text)
tmp_dir.dvc_gen("foo", "foo")
assert (tmp_dir / "foo.dvc").read_text() == textwrap.dedent(
"""\
# top comment
desc: top desc
outs:
- path: foo # out comment
desc: out desc
md5: acbd18db4cc2f85cedef654fccc4a4d8
size: 3
meta: some metadata
"""
)
# NOTE: unless long paths are enabled on Windows, PATH_MAX and NAME_MAX
# are the same 260 chars, which makes the test unnecessarily complex
@pytest.mark.skipif(os.name == "nt", reason="unsupported on Windows")
def test_add_long_fname(tmp_dir, dvc):
name_max = os.pathconf(tmp_dir, "PC_NAME_MAX") # pylint: disable=no-member
name = "a" * name_max
tmp_dir.gen({"data": {name: "foo"}})
# nothing we can do in this case, as the resulting dvcfile
# will definitely exceed NAME_MAX
with pytest.raises(OSError) as info:
dvc.add(os.path.join("data", name))
assert info.value.errno == errno.ENAMETOOLONG
dvc.add("data")
assert (tmp_dir / "data").read_text() == {name: "foo"}
def test_add_to_remote(tmp_dir, dvc, local_cloud, local_remote):
local_cloud.gen("foo", "foo")
url = "remote://upstream/foo"
[stage] = dvc.add(url, to_remote=True)
assert not (tmp_dir / "foo").exists()
assert (tmp_dir / "foo.dvc").exists()
assert len(stage.deps) == 0
assert len(stage.outs) == 1
hash_info = stage.outs[0].hash_info
meta = stage.outs[0].meta
with open(
local_remote.hash_to_path(hash_info.value), encoding="utf-8"
) as stream:
assert stream.read() == "foo"
assert meta.size == len("foo")
def test_add_to_remote_absolute(tmp_dir, make_tmp_dir, dvc, local_remote):
tmp_abs_dir = make_tmp_dir("abs")
tmp_foo = tmp_abs_dir / "foo"
tmp_foo.write_text("foo")
dvc.add(str(tmp_foo), to_remote=True)
tmp_foo.unlink()
foo = tmp_dir / "foo"
assert foo.with_suffix(".dvc").exists()
assert not os.path.exists(tmp_foo)
dvc.pull("foo")
assert not os.path.exists(tmp_foo)
assert foo.read_text() == "foo"
with pytest.raises(StageExternalOutputsError):
tmp_bar = tmp_abs_dir / "bar"
dvc.add(str(tmp_foo), out=str(tmp_bar), to_remote=True)
@pytest.mark.parametrize(
"invalid_opt, kwargs",
[
("multiple targets", {"targets": ["foo", "bar", "baz"]}),
("--no-commit", {"targets": ["foo"], "no_commit": True}),
("--recursive", {"targets": ["foo"], "recursive": True}),
("--external", {"targets": ["foo"], "external": True}),
],
)
def test_add_to_remote_invalid_combinations(dvc, invalid_opt, kwargs):
with pytest.raises(InvalidArgumentError, match=invalid_opt):
dvc.add(to_remote=True, **kwargs)
def test_add_to_cache_dir(tmp_dir, dvc, local_cloud):
local_cloud.gen({"data": {"foo": "foo", "bar": "bar"}})
(stage,) = dvc.add(str(local_cloud / "data"), out="data")
assert len(stage.deps) == 0
assert len(stage.outs) == 1
assert stage.outs[0].meta.size == len("foo") + len("bar")
assert stage.outs[0].meta.nfiles == 2
data = tmp_dir / "data"
assert data.read_text() == {"foo": "foo", "bar": "bar"}
assert (tmp_dir / "data.dvc").exists()
shutil.rmtree(data)
status = dvc.checkout(str(data))
assert status["added"] == ["data" + os.sep]
assert data.read_text() == {"foo": "foo", "bar": "bar"}
def test_add_to_cache_file(tmp_dir, dvc, local_cloud):
local_cloud.gen("foo", "foo")
(stage,) = dvc.add(str(local_cloud / "foo"), out="foo")
assert len(stage.deps) == 0
assert len(stage.outs) == 1
foo = tmp_dir / "foo"
assert foo.read_text() == "foo"
assert (tmp_dir / "foo.dvc").exists()
foo.unlink()
status = dvc.checkout(str(foo))
assert status["added"] == ["foo"]
assert foo.read_text() == "foo"
def test_add_to_cache_different_name(tmp_dir, dvc, local_cloud):
local_cloud.gen({"data": {"foo": "foo", "bar": "bar"}})
dvc.add(str(local_cloud / "data"), out="not_data")
not_data = tmp_dir / "not_data"
assert not_data.read_text() == {"foo": "foo", "bar": "bar"}
assert (tmp_dir / "not_data.dvc").exists()
assert not (tmp_dir / "data").exists()
assert not (tmp_dir / "data.dvc").exists()
shutil.rmtree(not_data)
dvc.checkout(str(not_data))
assert not_data.read_text() == {"foo": "foo", "bar": "bar"}
assert not (tmp_dir / "data").exists()
def test_add_to_cache_not_exists(tmp_dir, dvc, local_cloud):
local_cloud.gen({"data": {"foo": "foo", "bar": "bar"}})
dest_dir = tmp_dir / "dir" / "that" / "does" / "not" / "exist"
with pytest.raises(StagePathNotFoundError):
dvc.add(str(local_cloud / "data"), out=str(dest_dir))
dest_dir.parent.mkdir(parents=True)
dvc.add(str(local_cloud / "data"), out=str(dest_dir))
assert dest_dir.read_text() == {"foo": "foo", "bar": "bar"}
assert dest_dir.with_suffix(".dvc").exists()
@pytest.mark.parametrize(
"invalid_opt, kwargs",
[
("multiple targets", {"targets": ["foo", "bar", "baz"]}),
("--no-commit", {"targets": ["foo"], "no_commit": True}),
("--recursive", {"targets": ["foo"], "recursive": True}),
],
)
def test_add_to_cache_invalid_combinations(dvc, invalid_opt, kwargs):
with pytest.raises(InvalidArgumentError, match=invalid_opt):
dvc.add(out="bar", **kwargs)
@pytest.mark.parametrize(
"workspace",
[
pytest.lazy_fixture("local_cloud"),
pytest.lazy_fixture("s3"),
pytest.param(
pytest.lazy_fixture("gs"), marks=pytest.mark.needs_internet
),
pytest.lazy_fixture("hdfs"),
pytest.param(
pytest.lazy_fixture("ssh"),
marks=pytest.mark.skipif(
os.name == "nt", reason="disabled on windows"
),
),
pytest.lazy_fixture("http"),
],
indirect=True,
)
def test_add_to_cache_from_remote(tmp_dir, dvc, workspace):
workspace.gen("foo", "foo")
url = "remote://workspace/foo"
dvc.add(url, out="foo")
foo = tmp_dir / "foo"
assert foo.read_text() == "foo"
assert (tmp_dir / "foo.dvc").exists()
    # Change the contents of the remote location to ensure the data is
    # retrieved from the cache rather than re-fetched from the remote
(workspace / "foo").write_text("bar")
foo.unlink()
dvc.checkout(str(foo))
assert foo.read_text() == "foo"
def test_add_ignored(tmp_dir, scm, dvc):
from dvc.dvcfile import FileIsGitIgnored
tmp_dir.gen({"dir": {"subdir": {"file": "content"}}, ".gitignore": "dir/"})
with pytest.raises(FileIsGitIgnored) as exc:
dvc.add(targets=[os.path.join("dir", "subdir")])
assert str(exc.value) == ("bad DVC file name '{}' is git-ignored.").format(
os.path.join("dir", "subdir.dvc")
)
def test_add_on_not_existing_file_should_not_remove_stage_file(tmp_dir, dvc):
(stage,) = tmp_dir.dvc_gen("foo", "foo")
(tmp_dir / "foo").unlink()
dvcfile_contents = (tmp_dir / stage.path).read_text()
with pytest.raises(OutputDoesNotExistError):
dvc.add("foo")
assert (tmp_dir / "foo.dvc").exists()
assert (tmp_dir / stage.path).read_text() == dvcfile_contents
@pytest.mark.parametrize(
"target",
[
"dvc.repo.index.Index.check_graph",
"dvc.stage.Stage.save",
"dvc.stage.Stage.commit",
],
)
def test_add_does_not_remove_stage_file_on_failure(
tmp_dir, dvc, mocker, target
):
(stage,) = tmp_dir.dvc_gen("foo", "foo")
tmp_dir.gen("foo", "foobar") # update file
dvcfile_contents = (tmp_dir / stage.path).read_text()
exc_msg = f"raising error from mocked '{target}'"
mocker.patch(
target,
side_effect=DvcException(exc_msg),
)
with pytest.raises(DvcException) as exc_info:
dvc.add("foo")
assert str(exc_info.value) == exc_msg
assert (tmp_dir / "foo.dvc").exists()
assert (tmp_dir / stage.path).read_text() == dvcfile_contents
def test_add_ignore_duplicated_targets(tmp_dir, dvc, capsys):
tmp_dir.gen({"foo": "foo", "bar": "bar", "foobar": "foobar"})
stages = dvc.add(["foo", "bar", "foobar", "bar", "foo"])
_, err = capsys.readouterr()
assert len(stages) == 3
assert "ignoring duplicated targets: foo, bar" in err
|
|
# -*- encoding: utf-8 -*-
"""Test the views at /api/v1/reviews."""
import uuid
import pytest
from ceraon.models.reviews import Review
from tests.utils import BaseViewTest
@pytest.mark.usefixtures('db')
class TestFindReview(BaseViewTest):
"""Test GET /api/v1/reviews/ID."""
base_url = '/api/v1/reviews/{}'
def test_nonexistent_get(self, testapp):
"""Test the a nonexistent get returns a 404."""
res = testapp.get(self.base_url.format(uuid.uuid4()), status=404)
assert res.status_code == 404
assert 'error_code' in res.json
assert 'error_message' in res.json
def test_successful_get(self, testapp, review):
"""Test that a normal GET works just fine."""
res = testapp.get(self.base_url.format(review.id))
assert res.status_code == 200
data = res.json['data']
assert 'reviewer' in data
assert 'description' in data
assert 'rating' in data
assert 'meal' in data
@pytest.mark.usefixtures('db')
class TestCreateReview(BaseViewTest):
"""Test POST /api/v1/reviews."""
endpoints = [
'/api/v1/reviews?meal_id={}',
'/api/v1/meals/{}/reviews'
]
def setup_method(self, method):
"""Set up the test class. Pytest will call this for us."""
self.valid_data = {
'description': 'this is my description',
'rating': 4.0,
}
@pytest.mark.parametrize('endpoint', endpoints)
def test_unauthenticated_create(self, testapp, past_meal, endpoint):
"""Test that we get a 401 if the user is not authenticated."""
url = endpoint.format(past_meal.id)
res = testapp.post_json(url, self.valid_data, status=401)
assert res.status_code == 401
@pytest.mark.parametrize('endpoint', endpoints)
def test_meal_not_joined(self, testapp, endpoint, user, past_meal):
"""Test that we get a 403 if we didn't join the meal."""
self.login(user, testapp)
url = endpoint.format(past_meal.id)
res = testapp.post_json(url, self.valid_data, status=403)
assert res.status_code == 403
assert 'error_code' in res.json
assert 'error_message' in res.json
@pytest.mark.parametrize('endpoint', endpoints)
def test_meal_in_future(self, testapp, endpoint, user, meal):
"""Test that we get a 428 if the meal hasn't happened yet."""
self.login(user, testapp)
url = endpoint.format(meal.id)
res = testapp.post_json(url, self.valid_data, status=428)
assert res.status_code == 428
assert 'error_code' in res.json
assert 'error_message' in res.json
@pytest.mark.parametrize('endpoint', endpoints)
def test_guest_can_review(self, testapp, endpoint, past_guest, past_meal):
"""Test that a guest can review the meal just fine."""
self.login(past_guest, testapp)
url = endpoint.format(past_meal.id)
res = testapp.post_json(url, self.valid_data)
assert res.status_code == 201
assert 'data' in res.json
assert 'message' in res.json
data = res.json['data']
assert 'reviewer' in data
assert 'description' in data
assert 'rating' in data
assert 'meal' in data
@pytest.mark.parametrize('endpoint', endpoints)
def test_review_needs_description(self, testapp, endpoint, past_guest,
past_meal):
"""Test that a review needs a description."""
del self.valid_data['description']
self.login(past_guest, testapp)
url = endpoint.format(past_meal.id)
res = testapp.post_json(url, self.valid_data, status=422)
assert 'description' in res.json['error_message']
@pytest.mark.parametrize('endpoint', endpoints)
def test_review_needs_rating(self, testapp, endpoint, past_guest,
past_meal):
"""Test that a review needs a rating."""
del self.valid_data['rating']
self.login(past_guest, testapp)
url = endpoint.format(past_meal.id)
res = testapp.post_json(url, self.valid_data, status=422)
assert 'rating' in res.json['error_message']
@pytest.mark.parametrize('endpoint', endpoints)
    def test_review_rating_positive(self, testapp, endpoint, past_guest,
past_meal):
"""Test that a review needs a positive rating."""
self.valid_data['rating'] = -1.5
self.login(past_guest, testapp)
url = endpoint.format(past_meal.id)
res = testapp.post_json(url, self.valid_data, status=422)
assert 'rating' in res.json['error_message']
@pytest.mark.parametrize('endpoint', endpoints)
    def test_review_rating_interval(self, testapp, endpoint, past_guest,
past_meal):
"""Test that a review needs a rating divisible by 0.5."""
self.valid_data['rating'] = 1.7
self.login(past_guest, testapp)
url = endpoint.format(past_meal.id)
res = testapp.post_json(url, self.valid_data, status=422)
assert 'rating' in res.json['error_message']
@pytest.mark.usefixtures('db')
class TestUpdateReview(BaseViewTest):
"""Test PATCH /api/v1/reviews/UUID."""
base_url = '/api/v1/reviews/{}'
def setup_method(self, method):
"""Set up the test class. Pytest will call this for us."""
self.valid_data = {
'description': 'this is my description',
'rating': 4.0,
}
def test_unauthenticated(self, testapp, review):
"""Test that unauthenticated gets a 401."""
res = testapp.patch_json(self.base_url.format(review.id),
self.valid_data, status=401)
assert res.status_code == 401
def test_no_review_found(self, testapp, guest):
"""Test that a nonexistent review gets a 404."""
self.login(guest, testapp)
res = testapp.patch_json(self.base_url.format(uuid.uuid4()),
self.valid_data, status=404)
assert res.status_code == 404
def test_unauthorized(self, testapp, host, review):
"""Test that unauthorized gets a 403."""
self.login(host, testapp)
res = testapp.patch_json(self.base_url.format(review.id),
self.valid_data, status=403)
assert res.status_code == 403
def test_update_works(self, testapp, past_guest, review):
"""Test that updating a review works."""
self.login(past_guest, testapp)
res = testapp.patch_json(self.base_url.format(review.id),
self.valid_data)
assert res.status_code == 200
assert review.rating == self.valid_data['rating']
def test_partial_update_works(self, testapp, past_guest, review):
"""Test that only partially updating a review works."""
self.login(past_guest, testapp)
res = testapp.patch_json(self.base_url.format(review.id),
{'rating': 4.00})
assert res.status_code == 200
assert review.rating == 4.00
@pytest.mark.usefixtures('db')
class TestReplaceReview(BaseViewTest):
"""Test PUT /api/v1/reviews/UUID."""
base_url = '/api/v1/reviews/{}'
def setup_method(self, method):
"""Set up the test class. Pytest will call this for us."""
self.valid_data = {
'description': 'this is my description',
'rating': 4.0,
}
def test_unauthenticated(self, testapp, review):
"""Test that unauthenticated gets a 401."""
res = testapp.put_json(self.base_url.format(review.id),
self.valid_data, status=401)
assert res.status_code == 401
def test_no_review_found(self, testapp, guest):
"""Test that a nonexistent review gets a 404."""
self.login(guest, testapp)
res = testapp.put_json(self.base_url.format(uuid.uuid4()),
self.valid_data, status=404)
assert res.status_code == 404
def test_unauthorized(self, testapp, review, host):
"""Test that unauthorized gets a 403."""
self.login(host, testapp)
res = testapp.put_json(self.base_url.format(review.id),
self.valid_data, status=403)
assert res.status_code == 403
def test_replace_works(self, testapp, past_guest, review):
"""Test that replacing a review works."""
self.login(past_guest, testapp)
res = testapp.put_json(self.base_url.format(review.id),
self.valid_data)
assert res.status_code == 200
assert review.rating == self.valid_data['rating']
def test_partial_replace_fails(self, testapp, past_guest, review):
"""Test that only partially replacing a review fails."""
self.login(past_guest, testapp)
res = testapp.put_json(self.base_url.format(review.id),
{'rating': 4.00}, status=422)
assert res.status_code == 422
assert 'description' in res.json['error_message']
@pytest.mark.usefixtures('db')
class TestDestroyReview(BaseViewTest):
"""Test DELETE /api/v1/reviews/UUID."""
base_url = '/api/v1/reviews/{}'
def test_unauthenticated(self, testapp, review):
"""Test that unauthenticated gets a 401."""
res = testapp.delete(self.base_url.format(review.id), status=401)
assert res.status_code == 401
def test_review_not_found(self, testapp, user):
"""Test that a review not found gets a 404."""
self.login(user, testapp)
res = testapp.delete(self.base_url.format(uuid.uuid4()), status=404)
assert res.status_code == 404
def test_not_reviewer(self, testapp, host, review):
"""Test that not being the reviewer gets a 403."""
self.login(host, testapp)
res = testapp.delete(self.base_url.format(review.id), status=403)
assert res.status_code == 403
def test_review_deleted(self, testapp, past_guest, review):
"""Test that a reviewer can delete a meal."""
self.login(past_guest, testapp)
res = testapp.delete(self.base_url.format(review.id))
assert res.status_code == 204
try_find_review = Review.find(review.id)
assert try_find_review is None
@pytest.mark.usefixtures('db')
class TestGetMyReviews(BaseViewTest):
"""Test GET /api/v1/reviews/mine/<role>."""
base_url = '/api/v1/reviews/mine/{}'
def test_unauthenticated(self, testapp, review):
"""Test that an unauthenticated user gets a 401."""
res = testapp.get(self.base_url.format('guest'), status=401)
assert res.status_code == 401
def test_see_reviewed_meals(self, testapp, past_guest, review):
"""Test that a user can see the reviews they wrote."""
self.login(past_guest, testapp)
res = testapp.get(self.base_url.format('guest'))
assert res.status_code == 200
assert res.json['data'][0]['id'] == str(review.id)
assert len(res.json['data']) == 1
def test_see_hosted_reviews(self, testapp, host, review):
"""Test that a user can see the reviews for meals they host."""
self.login(host, testapp)
res = testapp.get(self.base_url.format('host'))
assert res.status_code == 200
assert res.json['data'][0]['id'] == str(review.id)
assert len(res.json['data']) == 1
def test_see_hosts_reviewed_meals(self, testapp, host, review):
"""Check that the host has reviewed no meals... just a sanity check."""
self.login(host, testapp)
res = testapp.get(self.base_url.format('guest'))
assert res.status_code == 200
assert len(res.json['data']) == 0
def test_bad_role(self, testapp, user):
"""Test that you can only specify 'guest' or 'host' as a role."""
self.login(user, testapp)
res = testapp.get(self.base_url.format('somethingelse'), status=400)
assert res.status_code == 400
|
|
#!/usr/bin/env python
"""
mbed
Copyright (c) 2017-2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import struct
import binascii
import argparse
import logging
import StringIO
import jinja2
from collections import namedtuple
from itertools import count
from elftools.common.py3compat import bytes2str
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def main():
    parser = argparse.ArgumentParser(description="Algo Extractor")
parser.add_argument("input", help="File to extract flash algo from")
parser.add_argument("template", default="py_blob.tmpl",
help="Template to use")
parser.add_argument("output", help="Output file")
args = parser.parse_args()
with open(args.input, "rb") as file_handle:
data = file_handle.read()
algo = PackFlashAlgo(data)
algo.process_template(args.template, args.output)
class PackFlashAlgo(object):
"""
Class to wrap a flash algo
This class is intended to provide easy access to the information
provided by a flash algorithm, such as symbols and the flash
algorithm itself.
"""
REQUIRED_SYMBOLS = set([
"Init",
"UnInit",
"EraseSector",
"ProgramPage",
])
EXTRA_SYMBOLS = set([
"BlankCheck",
"EraseChip",
"Verify",
])
def __init__(self, data):
"""Construct a PackFlashAlgorithm from an ElfFileSimple"""
self.elf = ElfFileSimple(data)
self.flash_info = PackFlashInfo(self.elf)
self.flash_start = self.flash_info.start
self.flash_size = self.flash_info.size
self.page_size = self.flash_info.page_size
self.sector_sizes = self.flash_info.sector_info_list
symbols = {}
symbols.update(_extract_symbols(self.elf, self.REQUIRED_SYMBOLS))
symbols.update(_extract_symbols(self.elf, self.EXTRA_SYMBOLS,
default=0xFFFFFFFF))
self.symbols = symbols
sections_to_find = (
("PrgCode", "SHT_PROGBITS"),
("PrgData", "SHT_PROGBITS"),
("PrgData", "SHT_NOBITS"),
)
ro_rw_zi = _find_sections(self.elf, sections_to_find)
ro_rw_zi = _algo_fill_zi_if_missing(ro_rw_zi)
error_msg = _algo_check_for_section_problems(ro_rw_zi)
if error_msg is not None:
raise Exception(error_msg)
sect_ro, sect_rw, sect_zi = ro_rw_zi
self.ro_start = sect_ro["sh_addr"]
self.ro_size = sect_ro["sh_size"]
self.rw_start = sect_rw["sh_addr"]
self.rw_size = sect_rw["sh_size"]
self.zi_start = sect_zi["sh_addr"]
self.zi_size = sect_zi["sh_size"]
self.algo_data = _create_algo_bin(ro_rw_zi)
def format_algo_data(self, spaces, group_size, fmt):
""""
Return a string representing algo_data suitable for use in a template
The string is intended for use in a template.
:param spaces: The number of leading spaces for each line
:param group_size: number of elements per line (element type
depends of format)
:param fmt: - format to create - can be either "hex" or "c"
"""
padding = " " * spaces
if fmt == "hex":
blob = binascii.b2a_hex(self.algo_data)
line_list = []
for i in xrange(0, len(blob), group_size):
line_list.append('"' + blob[i:i + group_size] + '"')
return ("\n" + padding).join(line_list)
elif fmt == "c":
blob = self.algo_data[:]
pad_size = 0 if len(blob) % 4 == 0 else 4 - len(blob) % 4
blob = blob + "\x00" * pad_size
integer_list = struct.unpack("<" + "L" * (len(blob) / 4), blob)
line_list = []
for pos in range(0, len(integer_list), group_size):
group = ["0x%08x" % value for value in
integer_list[pos:pos + group_size]]
line_list.append(", ".join(group))
return (",\n" + padding).join(line_list)
else:
raise Exception("Unsupported format %s" % fmt)
def process_template(self, template_path, output_path, data_dict=None):
"""
Generate output from the supplied template
All the public methods and fields of this class can be accessed from
the template via "algo".
:param template_path: Relative or absolute file path to the template
:param output_path: Relative or absolute file path to create
:param data_dict: Additional data to use when generating
"""
if data_dict is None:
data_dict = {}
else:
assert isinstance(data_dict, dict)
data_dict = dict(data_dict)
assert "algo" not in data_dict, "algo already set by user data"
data_dict["algo"] = self
with open(template_path) as file_handle:
template_text = file_handle.read()
template = jinja2.Template(template_text)
target_text = template.render(data_dict)
with open(output_path, "wb") as file_handle:
file_handle.write(target_text)
def _extract_symbols(simple_elf, symbols, default=None):
"""Fill 'symbols' field with required flash algo symbols"""
to_ret = {}
for symbol in symbols:
if symbol not in simple_elf.symbols:
if default is not None:
to_ret[symbol] = default
continue
raise Exception("Missing symbol %s" % symbol)
to_ret[symbol] = simple_elf.symbols[symbol].value
return to_ret
def _find_sections(elf, name_type_pairs):
"""Return a list of sections the same length and order of the input list"""
sections = [None] * len(name_type_pairs)
for section in elf.iter_sections():
section_name = bytes2str(section.name)
section_type = section["sh_type"]
for i, name_and_type in enumerate(name_type_pairs):
if name_and_type != (section_name, section_type):
continue
if sections[i] is not None:
raise Exception("Elf contains duplicate section %s attr %s" %
(section_name, section_type))
sections[i] = section
return sections
def _algo_fill_zi_if_missing(ro_rw_zi):
"""Create an empty zi section if it is missing"""
s_ro, s_rw, s_zi = ro_rw_zi
if s_rw is None:
return ro_rw_zi
if s_zi is not None:
return ro_rw_zi
s_zi = {
"sh_addr": s_rw["sh_addr"] + s_rw["sh_size"],
"sh_size": 0
}
return s_ro, s_rw, s_zi
def _algo_check_for_section_problems(ro_rw_zi):
"""Return a string describing any errors with the layout or None if good"""
s_ro, s_rw, s_zi = ro_rw_zi
if s_ro is None:
return "RO section is missing"
if s_rw is None:
return "RW section is missing"
if s_zi is None:
return "ZI section is missing"
if s_ro["sh_addr"] != 0:
return "RO section does not start at address 0"
if s_ro["sh_addr"] + s_ro["sh_size"] != s_rw["sh_addr"]:
return "RW section does not follow RO section"
if s_rw["sh_addr"] + s_rw["sh_size"] != s_zi["sh_addr"]:
return "ZI section does not follow RW section"
return None
def _create_algo_bin(ro_rw_zi):
"""Create a binary blob of the flash algo which can execute from ram"""
sect_ro, sect_rw, sect_zi = ro_rw_zi
algo_size = sect_ro["sh_size"] + sect_rw["sh_size"] + sect_zi["sh_size"]
algo_data = bytearray(algo_size)
for section in (sect_ro, sect_rw):
start = section["sh_addr"]
size = section["sh_size"]
data = section.data()
assert len(data) == size
algo_data[start:start + size] = data
return algo_data
class PackFlashInfo(object):
"""Wrapper class for the non-executable information in an FLM file"""
FLASH_DEVICE_STRUCT = "<H128sHLLLLBxxxLL"
FLASH_SECTORS_STRUCT = "<LL"
FLASH_SECTORS_STRUCT_SIZE = struct.calcsize(FLASH_SECTORS_STRUCT)
SECTOR_END = 0xFFFFFFFF
def __init__(self, elf_simple):
dev_info = elf_simple.symbols["FlashDevice"]
info_start = dev_info.value
info_size = struct.calcsize(self.FLASH_DEVICE_STRUCT)
data = elf_simple.read(info_start, info_size)
values = struct.unpack(self.FLASH_DEVICE_STRUCT, data)
self.version = values[0]
self.name = values[1].strip("\x00")
self.type = values[2]
self.start = values[3]
self.size = values[4]
self.page_size = values[5]
self.value_empty = values[7]
self.prog_timeout_ms = values[8]
self.erase_timeout_ms = values[9]
sector_gen = self._sector_and_sz_itr(elf_simple,
info_start + info_size)
self.sector_info_list = list(sector_gen)
def __str__(self):
desc = ""
desc += "Flash Device:" + os.linesep
desc += " name=%s" % self.name + os.linesep
desc += " version=0x%x" % self.version + os.linesep
desc += " type=%i" % self.type + os.linesep
desc += " start=0x%x" % self.start + os.linesep
desc += " size=0x%x" % self.size + os.linesep
desc += " page_size=0x%x" % self.page_size + os.linesep
desc += " value_empty=0x%x" % self.value_empty + os.linesep
desc += " prog_timeout_ms=%i" % self.prog_timeout_ms + os.linesep
desc += " erase_timeout_ms=%i" % self.erase_timeout_ms + os.linesep
desc += " sectors:" + os.linesep
for sector_start, sector_size in self.sector_info_list:
desc += (" start=0x%x, size=0x%x" %
(sector_start, sector_size) + os.linesep)
return desc
def _sector_and_sz_itr(self, elf_simple, data_start):
"""Iterator which returns starting address and sector size"""
for entry_start in count(data_start, self.FLASH_SECTORS_STRUCT_SIZE):
data = elf_simple.read(entry_start, self.FLASH_SECTORS_STRUCT_SIZE)
size, start = struct.unpack(self.FLASH_SECTORS_STRUCT, data)
start_and_size = start, size
if start_and_size == (self.SECTOR_END, self.SECTOR_END):
return
yield start_and_size
SymbolSimple = namedtuple("SymbolSimple", "name, value, size")
class ElfFileSimple(ELFFile):
"""Wrapper for elf object which allows easy access to symbols and rom"""
def __init__(self, data):
"""Construct a ElfFileSimple from bytes or a bytearray"""
super(ElfFileSimple, self).__init__(StringIO.StringIO(data))
self.symbols = self._read_symbol_table()
def _read_symbol_table(self):
"""Read the symbol table into the field "symbols" for easy use"""
section = self.get_section_by_name(b".symtab")
if not section:
raise Exception("Missing symbol table")
if not isinstance(section, SymbolTableSection):
raise Exception("Invalid symbol table section")
symbols = {}
for symbol in section.iter_symbols():
name_str = bytes2str(symbol.name)
if name_str in symbols:
                logger.debug("Duplicate symbol %s", name_str)
symbols[name_str] = SymbolSimple(name_str, symbol["st_value"],
symbol["st_size"])
return symbols
def read(self, addr, size):
"""Read program data from the elf file
:param addr: physical address (load address) to read from
:param size: number of bytes to read
:return: Requested data or None if address is unmapped
"""
for segment in self.iter_segments():
seg_addr = segment["p_paddr"]
seg_size = min(segment["p_memsz"], segment["p_filesz"])
if addr >= seg_addr + seg_size:
continue
if addr + size <= seg_addr:
continue
# There is at least some overlap
if addr >= seg_addr and addr + size <= seg_addr + seg_size:
# Region is fully contained
data = segment.data()
start = addr - seg_addr
return data[start:start + size]
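# Illustrative sketch (not part of the original script): driving PackFlashAlgo
# directly instead of through main()/process_template. The path below is
# hypothetical.
def _example_dump_algo(path="flash_algo.elf"):
    """Print the flash info block and the algo blob as C-style words."""
    with open(path, "rb") as file_handle:
        algo = PackFlashAlgo(file_handle.read())
    print(algo.flash_info)
    print(algo.format_algo_data(spaces=4, group_size=8, fmt="c"))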
if __name__ == '__main__':
main()
|
|
try:
# Only exists in Python 2.4+
from threading import local
except ImportError:
# Import copy of _thread_local.py from Python 2.4
from django.utils._threading_local import local
try:
set
except NameError:
# Python 2.3 compat
from sets import Set as set
try:
import decimal
except ImportError:
# Python 2.3 fallback
from django.utils import _decimal as decimal
from django.db.backends import util
from django.utils import datetime_safe
class BaseDatabaseWrapper(local):
"""
Represents a database connection.
"""
ops = None
def __init__(self, settings_dict):
# `settings_dict` should be a dictionary containing keys such as
# DATABASE_NAME, DATABASE_USER, etc. It's called `settings_dict`
# instead of `settings` to disambiguate it from Django settings
# modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
def _commit(self):
if self.connection is not None:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
return self.connection.rollback()
def _enter_transaction_management(self, managed):
"""
A hook for backend-specific changes required when entering manual
transaction handling.
"""
pass
def _leave_transaction_management(self, managed):
"""
A hook for backend-specific changes required when leaving manual
transaction handling. Will usually be implemented only when
_enter_transaction_management() is also required.
"""
pass
def _savepoint(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def close(self):
if self.connection is not None:
self.connection.close()
self.connection = None
def cursor(self):
from django.conf import settings
cursor = self._cursor()
if settings.DEBUG:
return self.make_debug_cursor(cursor)
return cursor
def make_debug_cursor(self, cursor):
return util.CursorDebugWrapper(cursor, self)
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
# True if django.db.backend.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
uses_custom_query_class = False
empty_fetchmany_value = []
update_can_self_select = True
interprets_empty_strings_as_nulls = False
can_use_chunked_reads = True
can_return_id_from_insert = False
uses_autocommit = False
uses_savepoints = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a DATE object with only
the given specificity.
"""
raise NotImplementedError()
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import smart_unicode, force_unicode
# Convert params to contain Unicode values.
to_unicode = lambda s: force_unicode(s, strings_only=True)
if isinstance(params, (list, tuple)):
u_params = tuple([to_unicode(val) for val in params])
else:
u_params = dict([(to_unicode(k), to_unicode(v)) for k, v in params.items()])
return smart_unicode(sql) % u_params
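    # Illustrative example (not in the original source):
    # last_executed_query(cursor, "SELECT * FROM t WHERE id = %s", [42])
    # would return u"SELECT * FROM t WHERE id = 42".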
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
        Returns the value to use for the LIMIT when we want "LIMIT infinity".
        Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def query_class(self, DefaultQueryClass):
"""
Given the default Query class, returns a custom Query class
to use for this backend. Returns None if a custom Query isn't used.
See also BaseDatabaseFeatures.uses_custom_query_class, which regulates
whether this method is called at all.
"""
return None
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError()
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
raise NotImplementedError
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
raise NotImplementedError
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
raise NotImplementedError
def sql_flush(self, style, tables, sequences):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
raise NotImplementedError()
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
from django.utils.encoding import smart_unicode
return smart_unicode(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return datetime_safe.new_date(value).strftime('%Y-%m-%d')
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_time(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return util.format_number(value, max_digits, decimal_places)
def year_lookup_bounds(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a field value using a year lookup
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
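    # Illustrative example (not in the original source):
    # year_lookup_bounds(2008) returns
    # ['2008-01-01 00:00:00', '2008-12-31 23:59:59.999999'].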
def year_lookup_bounds_for_date_field(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year lookup
`value` is an int, containing the looked-up year.
By default, it just calls `self.year_lookup_bounds`. Some backends need
this hook because on their DB date fields can't be compared to values
which include a time part.
"""
return self.year_lookup_bounds(value)
def convert_values(self, value, field):
"""Coerce the value returned by the database backend into a consistent type that
is compatible with the field type.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return value
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type in ('DateField', 'DateTimeField', 'TimeField'):
return value
# No field, or the field isn't known to be a decimal or integer
# Default to a float
return float(value)
def check_aggregate_support(self, aggregate_func):
"""Check that the backend supports the provided aggregate
        This is used on specific backends to rule out aggregates that are
        known to have faulty implementations. If the named aggregate function
        has a known problem, the backend should raise NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
class BaseDatabaseIntrospection(object):
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def table_names(self):
"Returns a list of names of all tables that exist in the database."
cursor = self.connection.cursor()
return self.get_table_list(cursor)
def django_table_names(self, only_existing=False):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.db import models
tables = set()
for app in models.get_apps():
for model in models.get_models(app):
if not model._meta.managed:
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
if only_existing:
tables = [t for t in tables if self.table_name_converter(t) in self.table_names()]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.db import models
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
all_models.append(model)
return set([m for m in all_models
if self.table_name_converter(m._meta.db_table) in map(self.table_name_converter, tables)
])
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.db import models
apps = models.get_apps()
sequence_list = []
for app in apps:
for model in models.get_models(app):
if not model._meta.managed:
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.rel.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError()
class BaseDatabaseValidation(object):
"""
    This class encapsulates all backend-specific model validation.
"""
def validate_field(self, errors, opts, f):
"By default, there is no backend-specific validation"
pass
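# Illustrative sketch (not part of Django): a minimal, hypothetical backend
# filling in a few of the hooks defined above. Real backends live in
# django.db.backends.<vendor>.base and override many more of these methods.
class _ExampleDatabaseFeatures(BaseDatabaseFeatures):
    uses_savepoints = True
class _ExampleDatabaseOperations(BaseDatabaseOperations):
    def quote_name(self, name):
        # Assume double-quote identifier quoting; leave already-quoted names alone.
        if name.startswith('"') and name.endswith('"'):
            return name
        return '"%s"' % name
    def date_extract_sql(self, lookup_type, field_name):
        # lookup_type is 'year', 'month' or 'day'.
        return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
    def savepoint_create_sql(self, sid):
        return "SAVEPOINT %s" % self.quote_name(sid)
    def savepoint_commit_sql(self, sid):
        return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
    def savepoint_rollback_sql(self, sid):
        return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)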
|
|
# -*- coding: utf-8 -*-
import abc
import logging
import datetime
import functools
import httplib as http
import urlparse
import uuid
from flask import request
from oauthlib.oauth2.rfc6749.errors import MissingTokenError
from requests.exceptions import HTTPError as RequestsHTTPError
from modularodm import fields, Q
from modularodm.storage.base import KeyExistsException
from modularodm.validators import MaxLengthValidator, URLValidator
from requests_oauthlib import OAuth1Session
from requests_oauthlib import OAuth2Session
from framework.auth import cas
from framework.exceptions import HTTPError, PermissionsError
from framework.mongo import ObjectId, StoredObject
from framework.mongo.utils import unique_on
from framework.mongo.validators import string_required
from framework.sessions import session
from website import settings
from website.oauth.utils import PROVIDER_LOOKUP
from website.security import random_string
from website.util import web_url_for
from api.base.utils import absolute_reverse
logger = logging.getLogger(__name__)
OAUTH1 = 1
OAUTH2 = 2
generate_client_secret = functools.partial(random_string, length=40)
@unique_on(['provider', 'provider_id'])
class ExternalAccount(StoredObject):
"""An account on an external service.
Note that this object is not and should not be aware of what other objects
are associated with it. This is by design, and this object should be kept as
thin as possible, containing only those fields that must be stored in the
database.
The ``provider`` field is a de facto foreign key to an ``ExternalProvider``
object, as providers are not stored in the database.
"""
_id = fields.StringField(default=lambda: str(ObjectId()), primary=True)
# The OAuth credentials. One or both of these fields should be populated.
# For OAuth1, this is usually the "oauth_token"
# For OAuth2, this is usually the "access_token"
oauth_key = fields.StringField()
# For OAuth1, this is usually the "oauth_token_secret"
# For OAuth2, this is not used
oauth_secret = fields.StringField()
# Used for OAuth2 only
refresh_token = fields.StringField()
expires_at = fields.DateTimeField()
scopes = fields.StringField(list=True, default=lambda: list())
# The `name` of the service
# This lets us query for only accounts on a particular provider
provider = fields.StringField(required=True)
# The proper 'name' of the service
# Needed for account serialization
provider_name = fields.StringField(required=True)
# The unique, persistent ID on the remote service.
provider_id = fields.StringField()
# The user's name on the external service
display_name = fields.StringField()
# A link to the user's profile on the external service
profile_url = fields.StringField()
def __repr__(self):
return '<ExternalAccount: {}/{}>'.format(self.provider,
self.provider_id)
class ExternalProviderMeta(abc.ABCMeta):
"""Keeps track of subclasses of the ``ExternalProvider`` object"""
def __init__(cls, name, bases, dct):
super(ExternalProviderMeta, cls).__init__(name, bases, dct)
if not isinstance(cls.short_name, abc.abstractproperty):
PROVIDER_LOOKUP[cls.short_name] = cls
class ExternalProvider(object):
"""A connection to an external service (ex: GitHub).
This object contains no credentials, and is not saved in the database.
It provides an unauthenticated session with the provider, unless ``account``
has been set - in which case, it provides a connection authenticated as the
``ExternalAccount`` instance.
Conceptually, this can be thought of as an extension of ``ExternalAccount``.
It's a separate object because this must be subclassed for each provider,
and ``ExternalAccount`` instances are stored within a single collection.
"""
__metaclass__ = ExternalProviderMeta
# Default to OAuth v2.0.
_oauth_version = OAUTH2
def __init__(self, account=None):
super(ExternalProvider, self).__init__()
# provide an unauthenticated session by default
self.account = account
def __repr__(self):
return '<{name}: {status}>'.format(
name=self.__class__.__name__,
status=self.account.provider_id if self.account else 'anonymous'
)
@abc.abstractproperty
def auth_url_base(self):
"""The base URL to begin the OAuth dance"""
pass
@property
def auth_url(self):
"""The URL to begin the OAuth dance.
This property method has side effects - it at least adds temporary
information to the session so that callbacks can be associated with
the correct user. For OAuth1, it calls the provider to obtain
temporary credentials to start the flow.
"""
# create a dict on the session object if it's not already there
if session.data.get("oauth_states") is None:
session.data['oauth_states'] = {}
if self._oauth_version == OAUTH2:
# build the URL
oauth = OAuth2Session(
self.client_id,
redirect_uri=web_url_for('oauth_callback',
service_name=self.short_name,
_absolute=True),
scope=self.default_scopes,
)
url, state = oauth.authorization_url(self.auth_url_base)
# save state token to the session for confirmation in the callback
session.data['oauth_states'][self.short_name] = {'state': state}
elif self._oauth_version == OAUTH1:
# get a request token
oauth = OAuth1Session(
client_key=self.client_id,
client_secret=self.client_secret,
)
# request temporary credentials from the provider
response = oauth.fetch_request_token(self.request_token_url)
# store them in the session for use in the callback
session.data['oauth_states'][self.short_name] = {
'token': response.get('oauth_token'),
'secret': response.get('oauth_token_secret'),
}
url = oauth.authorization_url(self.auth_url_base)
return url
@abc.abstractproperty
def callback_url(self):
"""The provider URL to exchange the code for a token"""
pass
@abc.abstractproperty
def client_id(self):
"""OAuth Client ID. a/k/a: Application ID"""
pass
@abc.abstractproperty
def client_secret(self):
"""OAuth Client Secret. a/k/a: Application Secret, Application Key"""
pass
default_scopes = list()
@abc.abstractproperty
def name(self):
"""Human-readable name of the service. e.g.: ORCiD, GitHub"""
pass
@abc.abstractproperty
def short_name(self):
"""Name of the service to be used internally. e.g.: orcid, github"""
pass
def auth_callback(self, user, **kwargs):
"""Exchange temporary credentials for permanent credentials
This is called in the view that handles the user once they are returned
to the OSF after authenticating on the external service.
"""
if 'error' in request.args:
return False
# make sure the user has temporary credentials for this provider
try:
cached_credentials = session.data['oauth_states'][self.short_name]
except KeyError:
raise PermissionsError("OAuth flow not recognized.")
if self._oauth_version == OAUTH1:
request_token = request.args.get('oauth_token')
# make sure this is the same user that started the flow
if cached_credentials.get('token') != request_token:
raise PermissionsError("Request token does not match")
response = OAuth1Session(
client_key=self.client_id,
client_secret=self.client_secret,
resource_owner_key=cached_credentials.get('token'),
resource_owner_secret=cached_credentials.get('secret'),
verifier=request.args.get('oauth_verifier'),
).fetch_access_token(self.callback_url)
elif self._oauth_version == OAUTH2:
state = request.args.get('state')
# make sure this is the same user that started the flow
if cached_credentials.get('state') != state:
raise PermissionsError("Request token does not match")
try:
response = OAuth2Session(
self.client_id,
redirect_uri=web_url_for(
'oauth_callback',
service_name=self.short_name,
_absolute=True
),
).fetch_token(
self.callback_url,
client_secret=self.client_secret,
code=request.args.get('code'),
)
except (MissingTokenError, RequestsHTTPError):
raise HTTPError(http.SERVICE_UNAVAILABLE)
# pre-set as many values as possible for the ``ExternalAccount``
info = self._default_handle_callback(response)
# call the hook for subclasses to parse values from the response
info.update(self.handle_callback(response))
return self._set_external_account(user, info)
def _set_external_account(self, user, info):
try:
# create a new ``ExternalAccount`` ...
self.account = ExternalAccount(
provider=self.short_name,
provider_id=info['provider_id'],
provider_name=self.name,
)
self.account.save()
except KeyExistsException:
# ... or get the old one
self.account = ExternalAccount.find_one(
Q('provider', 'eq', self.short_name) &
Q('provider_id', 'eq', info['provider_id'])
)
assert self.account is not None
# ensure that provider_name is correct
self.account.provider_name = self.name
# required
self.account.oauth_key = info['key']
# only for OAuth1
self.account.oauth_secret = info.get('secret')
# only for OAuth2
self.account.expires_at = info.get('expires_at')
self.account.refresh_token = info.get('refresh_token')
# additional information
self.account.display_name = info.get('display_name')
self.account.profile_url = info.get('profile_url')
self.account.save()
# add it to the user's list of ``ExternalAccounts``
if self.account not in user.external_accounts:
user.external_accounts.append(self.account)
user.save()
return True
def _default_handle_callback(self, data):
"""Parse as much out of the key exchange's response as possible.
This should not be overridden in subclasses.
"""
if self._oauth_version == OAUTH1:
key = data.get('oauth_token')
secret = data.get('oauth_token_secret')
values = {}
if key:
values['key'] = key
if secret:
values['secret'] = secret
return values
elif self._oauth_version == OAUTH2:
key = data.get('access_token')
refresh_token = data.get('refresh_token')
expires_at = data.get('expires_at')
scopes = data.get('scope')
values = {}
if key:
values['key'] = key
if scopes:
values['scope'] = scopes
if refresh_token:
values['refresh_token'] = refresh_token
if expires_at:
values['expires_at'] = datetime.datetime.fromtimestamp(
float(expires_at)
)
return values
@abc.abstractmethod
def handle_callback(self, response):
"""Hook for allowing subclasses to parse information from the callback.
Subclasses should implement this method to provide `provider_id`
and `profile_url`.
Values provided by ``self._default_handle_callback`` can be overridden
here as well, in the unexpected case that they are parsed incorrectly
by default.
:param response: The JSON returned by the provider during the exchange
:return dict:
"""
pass
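# Illustrative sketch only (not part of this codebase): a minimal concrete
# provider showing the subclass contract described above. Every name below
# (``FakeProvider``, its URLs, its credentials, and the ``user_id`` field in
# the token payload) is a hypothetical placeholder; a real provider would read
# its client id/secret from settings.
class FakeProvider(ExternalProvider):
    name = 'Fake Service'
    short_name = 'fake'
    client_id = 'fake-client-id'          # placeholder
    client_secret = 'fake-client-secret'  # placeholder
    auth_url_base = 'https://fake.example.com/oauth/authorize'
    callback_url = 'https://fake.example.com/oauth/token'
    default_scopes = ['read']
    def handle_callback(self, response):
        # ``response`` is the token payload returned during the code exchange;
        # supply the two values that ``_default_handle_callback`` cannot infer.
        return {
            'provider_id': response['user_id'],
            'profile_url': 'https://fake.example.com/users/{}'.format(response['user_id']),
        }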
class ApiOAuth2Scope(StoredObject):
"""
Store information about recognized OAuth2 scopes. Only scopes registered under this database model can
be requested by third parties.
"""
_id = fields.StringField(primary=True,
default=lambda: str(ObjectId()))
name = fields.StringField(unique=True, required=True, index=True)
description = fields.StringField(required=True)
is_active = fields.BooleanField(default=True, index=True) # TODO: Add mechanism to deactivate a scope?
class ApiOAuth2Application(StoredObject):
"""Registration and key for user-created OAuth API applications
This collection is also used by CAS to create the master list of available applications.
Any changes made to field names in this model must be echoed in the CAS implementation.
"""
_id = fields.StringField(
primary=True,
default=lambda: str(ObjectId())
)
# Client ID and secret. Use separate ID field so ID format doesn't have to be restricted to database internals.
client_id = fields.StringField(default=lambda: uuid.uuid4().hex, # Not *guaranteed* unique, but very unlikely
unique=True,
index=True)
client_secret = fields.StringField(default=generate_client_secret)
is_active = fields.BooleanField(default=True, # Set to False if application is deactivated
index=True)
owner = fields.ForeignField('User',
backref='created',
index=True,
required=True)
# User-specified application descriptors
name = fields.StringField(index=True, required=True, validate=[string_required, MaxLengthValidator(200)])
description = fields.StringField(required=False, validate=MaxLengthValidator(1000))
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow,
editable=False)
home_url = fields.StringField(required=True,
validate=URLValidator())
callback_url = fields.StringField(required=True,
validate=URLValidator())
def deactivate(self, save=False):
"""
Deactivate an ApiOAuth2Application
Does not delete the database record, but revokes all tokens and sets a flag that hides this instance from API
"""
client = cas.get_client()
# Will raise a CasHttpError if deletion fails, which will also stop setting of active=False.
resp = client.revoke_application_tokens(self.client_id, self.client_secret) # noqa
self.is_active = False
if save:
self.save()
return True
def reset_secret(self, save=False):
"""
Reset the secret of an ApiOAuth2Application
Revokes all tokens
"""
client = cas.get_client()
client.revoke_application_tokens(self.client_id, self.client_secret)
self.client_secret = generate_client_secret()
if save:
self.save()
return True
@property
def url(self):
return '/settings/applications/{}/'.format(self.client_id)
@property
def absolute_url(self):
return urlparse.urljoin(settings.DOMAIN, self.url)
# Properties used by Django and DRF "Links: self" field
@property
def absolute_api_v2_url(self):
return absolute_reverse('applications:application-detail', kwargs={'client_id': self.client_id})
# used by django and DRF
def get_absolute_url(self):
return self.absolute_api_v2_url
class ApiOAuth2PersonalToken(StoredObject):
"""Information for user-created personal access tokens
This collection is also used by CAS to create the master list of available tokens.
Any changes made to field names in this model must be echoed in the CAS implementation.
"""
_id = fields.StringField(primary=True,
default=lambda: str(ObjectId()))
# CAS requires that this field be named `token_id`.
# This is the actual value of the token that's used to authenticate
token_id = fields.StringField(default=functools.partial(random_string, length=70),
unique=True)
owner = fields.ForeignField('User',
backref='created',
index=True,
required=True)
name = fields.StringField(required=True, index=True)
# This field is a space delimited list of scopes, e.g. "osf.full_read osf.full_write"
scopes = fields.StringField(required=True)
is_active = fields.BooleanField(default=True, index=True)
def deactivate(self, save=False):
"""
Deactivate an ApiOAuth2PersonalToken
Does not delete the database record, but hides this instance from API
"""
client = cas.get_client()
# Will raise a CasHttpError if deletion fails for any reason other than the token
# not yet being created. This will also stop setting of active=False.
try:
resp = client.revoke_tokens({'token': self.token_id}) # noqa
except cas.CasHTTPError as e:
if e.code == 400:
pass # Token hasn't been used yet, so not created in cas
else:
raise e
self.is_active = False
if save:
self.save()
return True
@property
def url(self):
return '/settings/tokens/{}/'.format(self._id)
@property
def absolute_url(self):
return urlparse.urljoin(settings.DOMAIN, self.url)
# Properties used by Django and DRF "Links: self" field
@property
def absolute_api_v2_url(self):
return absolute_reverse('tokens:token-detail', kwargs={'_id': self._id})
# used by django and DRF
def get_absolute_url(self):
return self.absolute_api_v2_url
|
|
# -*- coding: utf-8 -*-
from webob import Response, Request
from tg.controllers import TGController, RestController
from tg.decorators import expose
from tg.util import no_warn
from tests.base import (
TestWSGIController, make_app, setup_session_dir, teardown_session_dir)
def setup():
setup_session_dir()
def teardown():
teardown_session_dir()
def wsgi_app(environ, start_response):
req = Request(environ)
if req.method == 'POST':
resp = Response(req.POST['data'])
else:
resp = Response("Hello from %s/%s"%(req.script_name, req.path_info))
return resp(environ, start_response)
class LookupHelper:
def __init__(self, var):
self.var = var
@expose()
def index(self):
return self.var
class LookupController(TGController):
@expose()
def _lookup(self, a, *args):
return LookupHelper(a), args
class DeprecatedLookupController(TGController):
@expose()
def _lookup(self, a, *args):
return LookupHelper(a), args
class LookupAlwaysHelper:
"""for testing _dispatch"""
def __init__(self, var):
self.var = var
def _setup_wsgiorg_routing_args(self, url_path, remainder, params):
pass
@expose()
def always(self, *args, **kwargs):
return 'always go here'
def _dispatch(self, state, remainder):
state.add_method(self.always, remainder)
return state
class LookupAlwaysController(TGController):
@expose()
def _lookup(self, a, *args):
return LookupAlwaysHelper(a), args
class CustomDispatchingSubController(TGController):
@expose()
def always(self, *args, **kwargs):
return 'always go here'
def _dispatch(self, state, remainder):
state.add_method(self.always, remainder)
return state
class OptionalArgumentRestController(RestController):
@expose()
def get_one(self, optional=None):
return "SUBREST GET ONE"
@expose()
def put(self, optional=None):
return "subrest put"
@expose()
def post(self, optional=None):
return "subrest post"
@expose()
def edit(self, optional=None):
return "subrest edit"
@expose()
def new(self, optional=None):
return "subrest new"
@expose()
def get_delete(self, optional=None):
return "subrest get delete"
@expose()
def post_delete(self, optional=None):
return "subrest post delete"
class RequiredArgumentRestController(RestController):
@expose()
def get_one(self, something):
return "subrest get one"
@expose()
def put(self, something):
return "subrest put"
@expose()
def post(self, something):
return "subrest post"
@expose()
def edit(self, something):
return "subrest edit"
@expose()
def new(self):
return "subrest new"
@expose()
def get_delete(self, something):
return "subrest get delete"
@expose()
def post_delete(self, something):
return "subrest post delete"
class VariableSubRestController(RestController):
@expose()
def get_one(self, *args):
return "subrest get one"
@expose()
def put(self, *args):
return "subrest put"
@expose()
def edit(self, *args):
return "subrest edit"
@expose()
def new(self, *args):
return "subrest new"
@expose()
def get_delete(self, *args):
return "subrest get delete"
@expose()
def post_delete(self, *args):
return "subrest post delete"
class SubRestController(RestController):
@expose()
def get_all(self):
return "subrest get all"
@expose()
def get_one(self, nr):
return "subrest get one %s" % nr
@expose()
def new(self):
return "subrest new"
@expose()
def edit(self, nr):
return "subrest edit %s" % nr
@expose()
def post(self):
return "subrest post"
@expose()
def put(self, nr):
return "subrest put %s" % nr
@expose()
def fxn(self):
return "subrest fxn"
@expose()
def get_delete(self, nr):
return "subrest get delete %s" % nr
@expose()
def post_delete(self, nr):
return "subrest post delete %s" % nr
class VariableRestController(RestController):
subrest = SubRestController()
vsubrest = VariableSubRestController()
@expose()
def get_all(self):
return "rest get all"
@expose()
def get_one(self, *args):
return "rest get onE"
@expose()
def get_delete(self, *args):
return "rest get delete"
@expose()
def post_delete(self, *args):
return "rest post delete"
class ExtraRestController(RestController):
@expose()
def get_all(self):
return "rest get all"
@expose()
def get_one(self, nr):
return "rest get one %s" % nr
@expose()
def get_delete(self, nr):
return "rest get delete %s" % nr
@expose()
def post_delete(self, nr):
return "rest post delete %s" % nr
class SubClass(TGController):
@expose()
def index(self):
return "rest sub index"
sub = SubClass()
subrest = SubRestController()
optsubrest = OptionalArgumentRestController()
reqsubrest = RequiredArgumentRestController()
@expose()
def post_archive(self):
return 'got to post archive'
@expose()
def get_archive(self):
return 'got to get archive'
class BasicRestController(RestController):
@expose()
def get(self):
return "rest get"
@expose()
def post(self):
return "rest post"
@expose()
def put(self):
return "rest put"
@expose()
def delete(self):
return "rest delete"
@expose()
def new(self):
return "rest new"
@expose()
def edit(self, *args, **kw):
return "rest edit"
@expose()
def other(self):
return "rest other"
@expose()
def archive(self):
return 'got to archive'
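# As the tests below exercise, TG2's RestController maps HTTP verbs (and the
# ``_method`` override for clients that can only GET/POST) onto method names,
# e.g. once BasicTGController mounts these controllers:
#   GET    /rest/              -> BasicRestController.get
#   POST   /rest/              -> BasicRestController.post
#   POST   /rest?_method=PUT   -> BasicRestController.put
#   DELETE /rest/              -> BasicRestController.delete
#   GET    /rest2/1            -> ExtraRestController.get_one('1')
#   GET    /rest2/1/subrest    -> SubRestController.get_all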
class EmptyRestController(RestController):
pass
class SubController(TGController):
rest = BasicRestController()
@expose()
def sub_method(self, arg):
return 'sub %s'%arg
class BasicTGController(TGController):
sub = SubController()
custom_dispatch = CustomDispatchingSubController()
lookup = LookupController()
deprecated_lookup = LookupController()
lookup_dispatch = LookupAlwaysController()
rest = BasicRestController()
rest2 = ExtraRestController()
rest3 = VariableRestController()
empty = EmptyRestController()
@expose()
def index(self, **kwargs):
return 'hello world'
@expose()
def _default(self, *remainder):
return "Main default page called for url /%s" % [str(r) for r in remainder]
@expose()
def hello(self, name, silly=None):
return "Hello %s" % name
class BasicTGControllerNoDefault(TGController):
@expose()
def index(self, **kwargs):
return 'hello world'
class TestTGControllerRoot(TestWSGIController):
def __init__(self, *args, **kargs):
TestWSGIController.__init__(self, *args, **kargs)
self.app = make_app(BasicTGControllerNoDefault)
def test_root_default_dispatch(self):
self.app.get('/i/am/not/a/sub/controller', status=404)
class TestTGController(TestWSGIController):
def __init__(self, *args, **kargs):
TestWSGIController.__init__(self, *args, **kargs)
self.app = make_app(BasicTGController)
def test_lookup(self):
r = self.app.get('/lookup/eye')
msg = 'eye'
assert msg in r, r
def test_deprecated_lookup(self):
r = self.app.get('/deprecated_lookup/eye')
msg = 'eye'
assert msg in r, r
def test_lookup_with_dispatch(self):
r = self.app.get('/lookup_dispatch/eye')
msg = 'always'
assert msg in r, r
def test_root_method_dispatch(self):
resp = self.app.get('/hello/Bob')
assert "Hello Bob" in resp, resp
def test_root_index_dispatch(self):
resp = self.app.get('/')
assert "hello world" in resp, resp
def test_no_sub_index_dispatch(self):
resp = self.app.get('/sub/')
assert "['sub']" in resp, resp
def test_root_default_dispatch(self):
resp = self.app.get('/i/am/not/a/sub/controller')
assert "['i', 'am', 'not', 'a', 'sub', 'controller']" in resp, resp
def test_default_dispatch_not_found_in_sub_controller(self):
resp = self.app.get('/sub/no/default/found')
assert "['sub', 'no', 'default', 'found']" in resp, resp
def test_root_method_dispatch_with_trailing_slash(self):
resp = self.app.get('/hello/Bob/')
assert "Hello Bob" in resp, resp
def test_sub_method_dispatch(self):
resp = self.app.get('/sub/sub_method/army of darkness')
assert "sub army" in resp, resp
def test_custom_dispatch(self):
resp = self.app.get('/custom_dispatch/army of darkness')
assert "always" in resp, resp
class TestRestController(TestWSGIController):
def __init__(self, *args, **kargs):
TestWSGIController.__init__(self, *args, **kargs)
self.app = make_app(BasicTGController)
def test_post(self):
r = self.app.post('/rest/')
assert 'rest post' in r, r
def _test_non_resty(self):
r = self.app.post('/rest/non_resty_thing')
assert 'non_resty' in r, r
def test_custom_action_simple_get(self):
r = self.app.get('/rest/archive')
assert 'got to archive' in r, r
def test_custom_action_simple_post(self):
r = self.app.post('/rest/archive')
assert 'got to archive' in r, r
def test_custom_action_simple_post_args(self):
r = self.app.post('/rest?_method=archive')
assert 'got to archive' in r, r
def test_custom_action_get(self):
r = self.app.get('/rest2/archive')
assert 'got to get archive' in r, r
def test_custom_action_post(self):
r = self.app.post('/rest2?_method=archive')
assert 'got to post archive' in r, r
def test_get(self):
r = self.app.get('/rest/')
assert 'rest get' in r, r
def test_put(self):
r = self.app.put('/rest/')
assert 'rest put' in r, r
def test_put_post(self):
r = self.app.post('/rest?_method=PUT')
assert 'rest put' in r, r
def test_put_post_params(self):
r = self.app.post('/rest', params={'_method':'PUT'})
assert 'rest put' in r, r
def test_put_get(self):
self.app.get('/rest?_method=PUT', status=405)
def test_get_delete_bad(self):
self.app.get('/rest?_method=DELETE', status=405)
def test_delete(self):
r = self.app.delete('/rest/')
assert 'rest delete' in r, r
def test_post_delete(self):
r = self.app.post('/rest/', params={'_method':'DELETE'})
assert 'rest delete' in r, r
def test_get_all(self):
r = self.app.get('/rest2/')
assert 'rest get all' in r, r
def test_get_one(self):
r = self.app.get('/rest2/1')
assert 'rest get one 1' in r, r
def test_get_delete(self):
r = self.app.get('/rest2/1/delete')
assert 'rest get delete' in r, r
def test_post_delete_params(self):
r = self.app.post('/rest2/1', params={'_method':'DELETE'})
assert 'rest post delete' in r, r
def test_post_delete_var(self):
r = self.app.post('/rest3/a/b/c', params={'_method':'DELETE'})
assert 'rest post delete' in r, r
def test_get_delete_var(self):
r = self.app.get('/rest3/a/b/c/delete')
assert 'rest get delete' in r, r
def test_get_method(self):
r = self.app.get('/rest/other')
assert 'rest other' in r, r
@no_warn
def test_get_sub_controller(self):
r = self.app.get('/rest2/sub')
assert 'rest sub index' in r, r
@no_warn
def test_put_sub_controller(self):
r = self.app.put('/rest2/sub')
assert 'rest sub index' in r, r
def test_post_sub_controller(self):
r = self.app.post('/rest2/sub')
assert 'rest sub index' in r, r
def test_post_miss(self):
r = self.app.post('/rest2/something')
assert "/['rest2', 'something']" in r, r
def test_get_empty(self):
r = self.app.get('/empty/')
assert "/['empty']" in r, r
def test_post_empty(self):
r = self.app.post('/empty/')
assert "/['empty']" in r, r
def test_put_empty(self):
r = self.app.put('/empty/')
assert "/['empty']" in r, r
@no_warn
def test_delete_empty(self):
r = self.app.delete('/empty/')
assert "/['empty']" in r, r
def test_put_miss(self):
r = self.app.put('/rest/something')
assert "/['rest', 'something']" in r, r
def test_delete_miss(self):
r = self.app.delete('/rest/something')
assert "/['rest', 'something']" in r, r
def test_get_miss(self):
r = self.app.get('/rest2/something/else')
assert "/['rest2', 'something', 'else']" in r, r
def test_post_method(self):
r = self.app.post('/rest/other')
assert 'rest other' in r, r
def test_new_method(self):
r = self.app.post('/rest/new')
assert 'rest new' in r, r
def test_edit_method(self):
r = self.app.get('/rest/1/edit')
assert 'rest edit' in r, r
def test_delete_method(self):
self.app.delete('/rest/other', status=405)
def test_sub_with_rest_delete(self):
r = self.app.delete('/sub/rest/')
assert 'rest delete' in r, r
def test_put_method(self):
r = self.app.put('/rest/other')
assert 'rest other' in r, r
def test_sub_get_all_method(self):
r = self.app.get('/rest2/1/subrest')
assert 'subrest get all' in r, r
def test_var_sub_get_all_method(self):
r = self.app.get('/rest3/1/3/3/subrest')
assert 'subrest get all' in r, r
r = self.app.get('/rest3/1/3/subrest')
assert 'subrest get all' in r, r
r = self.app.get('/rest3/subrest')
assert 'subrest get all' in r, r
def test_var_sub_get_one_method(self):
r = self.app.get('/rest3/1/3/3/subrest/1')
assert 'subrest get one' in r, r
r = self.app.get('/rest3/1/3/subrest/1')
assert 'subrest get one' in r, r
r = self.app.get('/rest3/subrest/1')
assert 'subrest get one' in r, r
def test_var_sub_edit_method(self):
r = self.app.get('/rest3/1/3/3/subrest/1/edit')
assert 'subrest edit' in r, r
r = self.app.get('/rest3/1/3/subrest/1/edit')
assert 'subrest edit' in r, r
r = self.app.get('/rest3/subrest/1/edit')
assert 'subrest edit' in r, r
def test_var_sub_edit_var_method(self):
r = self.app.get('/rest3/1/3/3/vsubrest/1/edit')
assert 'subrest edit' in r, r
r = self.app.get('/rest3/1/3/vsubrest/1/a/edit')
assert 'subrest edit' in r, r
r = self.app.get('/rest3/vsubrest/edit')
assert 'subrest edit' in r, r
def test_var_sub_delete_method(self):
r = self.app.get('/rest3/1/3/3/subrest/1/delete')
assert 'subrest get delete' in r, r
r = self.app.get('/rest3/1/3/subrest/1/delete')
assert 'subrest get delete' in r, r
r = self.app.get('/rest3/subrest/1/delete')
assert 'subrest get delete' in r, r
def test_var_sub_new_method(self):
r = self.app.get('/rest3/1/3/3/subrest/new')
assert 'subrest new' in r, r
r = self.app.get('/rest3/1/3/subrest/new')
assert 'subrest new' in r, r
r = self.app.get('/rest3/subrest/new')
assert 'subrest new' in r, r
def test_var_sub_var_get_one_method(self):
r = self.app.get('/rest3/1/3/3/vsubrest/1')
assert 'subrest get one' in r, r
r = self.app.get('/rest3/1/3/vsubrest/1/a')
assert 'subrest get one' in r, r
r = self.app.get('/rest3/vsubrest/')
assert 'subrest get one' in r, r
def test_var_sub_var_put_method(self):
r = self.app.put('/rest3/1/3/3/vsubrest/1')
assert 'subrest put' in r, r
r = self.app.put('/rest3/1/3/vsubrest/1/asdf')
assert 'subrest put' in r, r
r = self.app.put('/rest3/vsubrest/')
assert 'subrest put' in r, r
def test_var_sub_post_method(self):
r = self.app.post('/rest3/1/3/3/subrest/')
assert 'subrest post' in r, r
r = self.app.post('/rest3/1/3/subrest/')
assert 'subrest post' in r, r
r = self.app.post('/rest3/subrest/')
assert 'subrest post' in r, r
def test_var_sub_post_delete_method(self):
r = self.app.delete('/rest3/1/3/3/subrest/1')
assert 'subrest post delete' in r, r
r = self.app.delete('/rest3/1/3/subrest/1')
assert 'subrest post delete' in r, r
def test_var_sub_put_method(self):
r = self.app.put('/rest3/1/3/3/subrest/1')
assert 'subrest put' in r, r
r = self.app.put('/rest3/1/3/subrest/1')
assert 'subrest put' in r, r
r = self.app.put('/rest3/subrest/1')
assert 'subrest put' in r, r
def test_var_sub_put_hack_method(self):
r = self.app.post('/rest3/1/3/3/subrest/1?_method=PUT')
assert 'subrest put' in r, r
r = self.app.post('/rest3/1/3/subrest/1?_method=put')
assert 'subrest put' in r, r
r = self.app.post('/rest3/subrest/1?_method=put')
assert 'subrest put' in r, r
def test_var_sub_var_delete_method(self):
r = self.app.delete('/rest3/1/3/3/vsubrest/1')
assert 'subrest post delete' in r, r
r = self.app.delete('/rest3/1/3/vsubrest/1')
assert 'subrest post delete' in r, r
r = self.app.delete('/rest3/vsubrest/')
assert 'subrest post delete' in r, r
def test_var_sub_delete_var_hack_method(self):
r = self.app.post('/rest3/1/3/3/vsubrest/1?_method=DELETE')
assert 'subrest post delete' in r, r
r = self.app.post('/rest3/1/3/vsubrest/1?_method=delete')
assert 'subrest post delete' in r, r
r = self.app.post('/rest3/vsubrest?_method=delete')
assert 'subrest post delete' in r, r
def test_var_sub_var_put_hack_method(self):
r = self.app.post('/rest3/1/3/3/vsubrest/1?_method=PUT')
assert 'subrest put' in r, r
r = self.app.post('/rest3/1/3/vsubrest/1/a?_method=put')
assert 'subrest put' in r, r
r = self.app.post('/rest3/vsubrest/?_method=put')
assert 'subrest put' in r, r
def test_var_sub_delete_hack_method(self):
r = self.app.post('/rest3/1/3/3/subrest/1?_method=DELETE')
assert 'subrest post delete' in r, r
r = self.app.post('/rest3/1/3/subrest/1?_method=delete')
assert 'subrest post delete' in r, r
r = self.app.post('/rest3/subrest/1?_method=delete')
assert 'subrest post delete' in r, r
def test_sub_new(self):
r = self.app.get('/rest2/1/subrest/new')
assert 'subrest new' in r, r
def test_sub_edit(self):
r = self.app.get('/rest2/1/subrest/1/edit')
assert 'subrest edit' in r, r
def test_sub_post(self):
r = self.app.post('/rest2/1/subrest/')
assert 'subrest post' in r, r
def test_sub_put(self):
r = self.app.put('/rest2/1/subrest/2')
assert 'subrest put' in r, r
def test_sub_post_opt(self):
r = self.app.post('/rest2/1/optsubrest/1')
assert 'subrest post' in r, r
def test_sub_put_opt(self):
r = self.app.put('/rest2/1/optsubrest/1')
assert 'subrest put' in r, r
def test_sub_put_opt_hack(self):
r = self.app.post('/rest2/1/optsubrest/1?_method=PUT')
assert 'subrest put' in r, r
def test_sub_delete_opt_hack(self):
r = self.app.post('/rest2/1/optsubrest/1?_method=DELETE')
assert 'subrest ' in r, r
def test_put_post_req(self):
r = self.app.post('/rest2/reqsubrest', params={'something':'required'})
assert 'subrest post' in r, r
def test_sub_put_req(self):
r = self.app.post('/rest2/reqsubrest', params={'_method':'PUT', 'something':'required'})
assert 'subrest put' in r, r
def test_sub_post_req_bad(self):
r = self.app.post('/rest2/reqsubrest',)
assert "['rest2', 'reqsubrest']" in r, r
def test_sub_delete_hack(self):
r = self.app.post('/rest2/1/subrest/2?_method=DELETE')
assert 'subrest post delete' in r, r
def test_sub_get_delete(self):
r = self.app.get('/rest2/1/subrest/2/delete')
assert 'subrest get delete' in r, r
def test_sub_post_delete(self):
r = self.app.delete('/rest2/1/subrest/2')
assert 'subrest post delete' in r, r
def test_sub_get_fxn(self):
r = self.app.get('/rest2/1/subrest/fxn')
assert 'subrest fxn' in r, r
def test_sub_post_fxn(self):
r = self.app.post('/rest2/1/subrest/fxn')
assert 'subrest fxn' in r, r
|
|
import urllib2
import ast
import psycopg2
import ConfigParser
import json
import threading
import sys
import re
import time
import datetime
import boto
import os
import boto.dynamodb2
from boto import kinesis
from boto.dynamodb2.fields import HashKey, RangeKey, KeysOnlyIndex, AllIndex
from boto.dynamodb2.table import Table
from boto.dynamodb2.items import Item
from boto.dynamodb2.types import NUMBER
from boto.s3.connection import S3Connection
from boto.sts.credentials import Credentials, FederationToken, AssumedRole
from boto.sts.credentials import DecodeAuthorizationMessage
from boto.kinesis.exceptions import ProvisionedThroughputExceededException
directory = "/home/ec2-user/.aws"
if not os.path.exists(directory):
os.makedirs(directory)
#fetch config
config = ConfigParser.ConfigParser()
try:
config.read('/home/ec2-user/defaults.cfg')
# build vars out of config
dbUser = config.get('db','dbUser')
dbPass = config.get('db','dbPassword')
dbHost = config.get('db','dbHost')
dbName = config.get('db','dbName')
#dbPort = config.get('db01','port')
stream_name = config.get('kenisis','steamName')
tableName = config.get('dynamodb2','tableName')
IAMRole = config.get('iam','roleName')
except:
raise Exception('config not readable - see fetch config section')
respURL = "http://169.254.169.254/latest/meta-data/iam/security-credentials/%s" % (IAMRole)
instURL = "http://169.254.169.254/latest/dynamic/instance-identity/document"
# the EC2 instance metadata service answers many questions about this instance
inst = urllib2.urlopen(instURL).read()
inst = json.loads(inst)
region = str(inst['region'])
availabilityZone = str(inst['availabilityZone'])
instanceId = str(inst['instanceId'])
# replaces sts to get keys for instance
resp=urllib2.urlopen(respURL).read()
resp=ast.literal_eval(resp)
id = str(resp['AccessKeyId'])
key = str(resp['SecretAccessKey'])
token = str(resp['Token'])
def makeauth():
# replaces sts to get keys for instance
resp=urllib2.urlopen(respURL).read()
resp=ast.literal_eval(resp)
id = str(resp['AccessKeyId'])
key = str(resp['SecretAccessKey'])
token = str(resp['Token'])
f = open('/home/ec2-user/.aws/credentials','w')
f.write('[default]\n')
f.write("aws_access_key_id = " + resp['AccessKeyId'])
f.write("\naws_secret_access_key = " + resp['SecretAccessKey'])
f.write("\naws_security_token = "+ resp['Token'])
f.write("\n\n")
f.write("[DynamoDB]\n")
f.write("region = " + region)
f.write("\n\n")
f.close()
# write out keys for kinesis and dynamodb2 auth (boto also reads credentials from ~/.boto)
f = open('/home/ec2-user/.boto','w')
f.write('[Credentials]\n')
f.write("aws_access_key_id = " + resp['AccessKeyId'])
f.write("\naws_secret_access_key = " + resp['SecretAccessKey'])
f.write("\naws_security_token = "+ resp['Token'])
f.write("\n\n")
f.write("[DynamoDB]\n")
f.write("region = " + region)
f.write("\n\n")
f.close()
# END SETUP
iter_type_at = 'AT_SEQUENCE_NUMBER'
iter_type_after = 'AFTER_SEQUENCE_NUMBER'
iter_type_trim = 'TRIM_HORIZON'
iter_type_latest = 'LATEST'
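# Kinesis shard iterator types (GetShardIterator API):
#   AT_SEQUENCE_NUMBER / AFTER_SEQUENCE_NUMBER - resume at / just after a saved
#     sequence number (used below when a bookmark row already exists in DynamoDB)
#   TRIM_HORIZON - start from the oldest record still retained in the shard
#     (used below for a fresh install)
#   LATEST - start just after the most recent record in the shard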
db_args = {"host" : dbHost , "database" : dbName , "user" : dbUser , "password" : dbPass , "port" : "5439" }
# connection parameters must be passed as keyword arguments, not a formatted string
ddb_args = {"aws_access_key_id" : id , "aws_secret_access_key" : key , "security_token" : token }
kin_args = {"aws_access_key_id" : id ,"aws_secret_access_key" : key }
# is the dynamodb table there
try:
print "Tring dynamodb connetion "
ddb = boto.dynamodb.connect_to_region(ddb_args)
print "connected to dynamodb\n"
except:
print "dynamodb2 table is not ready\n"
# are the redshift hosts there
try:
print "Tring Redshift endpint at : " + dbHost
conn=psycopg2.connect(**db_args)
cur = conn.cursor()
print "Connected to redshift endpoint!\n"
except:
print "Cannot connect to the redshift database.\n"
# connect to kinesis
try:
print "Tring kinesis connection at: " + stream_name
k_conn = boto.kinesis.connect_to_region(region, **kin_args)
print "connected to kinesis\n"
except:
print "failed to connect to stream\n"
raise
#Existing redshift pointer in dynamodb?
tries = 0
result = []
try:
bookmark= Table(tableName)
db = bookmark.get_item(redShiftEndpoint=dbHost)
sequenceNumber = db['sequenceNumber']
shard_iterator_type=iter_type_after
print "found a starting point - continuing for host :" + db['redShiftEndpoint']
tries = 0
while tries < 10:
tries += 1
time.sleep(1)
try:
makeauth()
k_conn = boto.kinesis.connect_to_region(region)
response = k_conn.describe_stream(stream_name)
if response['StreamDescription']['StreamStatus'] == "ACTIVE":
break
except :
print "error while trying to describe kinesis stream " + stream_name
raise
else:
raise Exception('Stream is still not active, aborting...')  # TimeoutError is not a builtin in Python 2
shard_ids = []
stream_name = None
if response and 'StreamDescription' in response:
stream_name = response['StreamDescription']['StreamName']
for shard_id in response['StreamDescription']['Shards']:
shard_id = shard_id['ShardId']
shard_iterator = k_conn.get_shard_iterator(stream_name, shard_id, shard_iterator_type, sequenceNumber)
next_iterator = shard_iterator['ShardIterator']
shard_ids.append({'shard_id' : shard_id ,'shard_iterator' : shard_iterator['ShardIterator'] })
tries = 0
result = []
while tries < 100:
tries += 1
try:
response = k_conn.get_records(next_iterator, limit=1)
next_iterator = response['NextShardIterator']
bookmark= Table(tableName)
if len(response['Records'])> 0:
for res in response['Records']:
dbrecord = bookmark.get_item(redShiftEndpoint=dbHost)
print res['Data']
dbrecord['next_iterator'] = next_iterator
dbrecord['sequenceNumber'] = res['SequenceNumber']
dbrecord.partial_save()
try:
with psycopg2.connect(**db_args) as conn:
with conn.cursor() as curs:
curs.execute(res['Data'])
except:
pass
else :
print tries
except ProvisionedThroughputExceededException as ptee:
print (ptee.message)
time.sleep(5)
except:
print "No redshift tracks found - new redshift end point"
print "Fresh install - starting from the top"
try:
bookmark= Table(tableName)
dbrecord = Item(bookmark, data={"redShiftEndpoint" : dbHost , "next_iterator" : "0000", "sequenceNumber" : "0000"})
dbrecord.save()
except:
pass
shard_iterator_type=iter_type_trim
tries = 0
while tries < 10:
tries += 1
time.sleep(1)
try:
makeauth()
k_conn = boto.kinesis.connect_to_region(region)
response = k_conn.describe_stream(stream_name)
if response['StreamDescription']['StreamStatus'] == "ACTIVE":
break
except :
print "error while trying to describe kinesis stream " + stream_name
raise
else:
raise Exception('Stream is still not active, aborting...')  # TimeoutError is not a builtin in Python 2
shard_ids = []
stream_name = None
if response and 'StreamDescription' in response:
stream_name = response['StreamDescription']['StreamName']
for shard_id in response['StreamDescription']['Shards']:
shard_id = shard_id['ShardId']
shard_iterator = k_conn.get_shard_iterator(stream_name, shard_id, shard_iterator_type)
next_iterator = shard_iterator['ShardIterator']
shard_ids.append({'shard_id' : shard_id ,'shard_iterator' : shard_iterator['ShardIterator'] })
tries = 0
result = []
while tries < 100:
tries += 1
try:
response = k_conn.get_records(next_iterator, limit=1)
next_iterator = response['NextShardIterator']
bookmark= Table(tableName)
if len(response['Records'])> 0:
for res in response['Records']:
dbrecord = bookmark.get_item(redShiftEndpoint=dbHost)
dbrecord['next_iterator'] = next_iterator
print res['Data']
dbrecord['sequenceNumber'] = res['SequenceNumber']
dbrecord.partial_save()
try:
with psycopg2.connect(**db_args) as conn:
with conn.cursor() as curs:
curs.execute(res['Data'])
except:
pass
else :
print tries
except ProvisionedThroughputExceededException as ptee:
print (ptee.message)
time.sleep(5)
|
|
"""
sentry.conf.server
~~~~~~~~~~~~~~~~~~
These settings act as the default (base) settings for the Sentry-provided web-server
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.conf.global_settings import * # NOQA
import hashlib
import os
import os.path
import socket
import sys
import urlparse
DEBUG = False
TEMPLATE_DEBUG = True
ADMINS = ()
INTERNAL_IPS = ('127.0.0.1',)
MANAGERS = ADMINS
APPEND_SLASH = True
PROJECT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.insert(0, os.path.normpath(os.path.join(PROJECT_ROOT, os.pardir)))
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'sentry.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
if 'DATABASE_URL' in os.environ:
url = urlparse.urlparse(os.environ['DATABASE_URL'])
# Ensure default database exists.
DATABASES['default'] = DATABASES.get('default', {})
# Update with environment configuration.
DATABASES['default'].update({
'NAME': url.path[1:],
'USER': url.username,
'PASSWORD': url.password,
'HOST': url.hostname,
'PORT': url.port,
})
if url.scheme == 'postgres':
DATABASES['default']['ENGINE'] = 'django.db.backends.postgresql_psycopg2'
if url.scheme == 'mysql':
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
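# Example (hypothetical credentials): exporting
#   DATABASE_URL=postgres://sentry_user:secret@db.example.com:5432/sentry
# before starting the server selects the postgresql_psycopg2 engine with that
# user, password, host, port and database name.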
EMAIL_SUBJECT_PREFIX = '[Sentry] '
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
USE_TZ = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = hashlib.md5(socket.gethostname() + ')*)&8a36)6%74e@-ne5(-!8a(vv#tkv)(eyg&@0=zd^pl!7=y@').hexdigest()
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'sentry.middleware.SentryMiddleware',
'sentry.middleware.SentrySocialAuthExceptionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'sentry.conf.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.csrf',
'social_auth.context_processors.social_auth_by_name_backends',
'social_auth.context_processors.social_auth_backends',
'social_auth.context_processors.social_auth_by_type_backends',
'social_auth.context_processors.social_auth_login_redirect'
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'crispy_forms',
'djcelery',
'gunicorn',
'kombu.transport.django',
'raven.contrib.django.raven_compat',
'sentry',
'sentry.plugins.sentry_interface_types',
'sentry.plugins.sentry_mail',
'sentry.plugins.sentry_urls',
'sentry.plugins.sentry_useragents',
'social_auth',
'south',
'static_compiler',
)
STATIC_ROOT = os.path.realpath(os.path.join(PROJECT_ROOT, 'static'))
STATIC_URL = '/_static/'
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"static_compiler.finders.StaticCompilerFinder",
)
LOCALE_PATHS = (
os.path.join(PROJECT_ROOT, 'locale'),
)
# Auth configuration
try:
from django.core.urlresolvers import reverse_lazy
except ImportError:
LOGIN_REDIRECT_URL = '/login-redirect/'
LOGIN_URL = '/login/'
else:
LOGIN_REDIRECT_URL = reverse_lazy('sentry-login-redirect')
LOGIN_URL = reverse_lazy('sentry-login')
AUTHENTICATION_BACKENDS = (
'social_auth.backends.twitter.TwitterBackend',
'social_auth.backends.facebook.FacebookBackend',
# TODO: migrate to GoogleOAuth2Backend
'social_auth.backends.google.GoogleBackend',
'social_auth.backends.contrib.github.GithubBackend',
'social_auth.backends.contrib.bitbucket.BitbucketBackend',
'social_auth.backends.contrib.trello.TrelloBackend',
'sentry.utils.auth.EmailAuthBackend',
)
SOCIAL_AUTH_USER_MODEL = AUTH_USER_MODEL = 'auth.User'
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
FACEBOOK_APP_ID = ''
FACEBOOK_API_SECRET = ''
FACEBOOK_EXTENDED_PERMISSIONS = ['email']
GOOGLE_OAUTH2_CLIENT_ID = ''
GOOGLE_OAUTH2_CLIENT_SECRET = ''
GITHUB_APP_ID = ''
GITHUB_API_SECRET = ''
TRELLO_API_KEY = ''
TRELLO_API_SECRET = ''
BITBUCKET_CONSUMER_KEY = ''
BITBUCKET_CONSUMER_SECRET = ''
SOCIAL_AUTH_PIPELINE = (
'social_auth.backends.pipeline.social.social_auth_user',
'social_auth.backends.pipeline.user.get_username',
'sentry.utils.social_auth.create_user_if_enabled',
'social_auth.backends.pipeline.social.associate_user',
'social_auth.backends.pipeline.social.load_extra_data',
'social_auth.backends.pipeline.user.update_user_details'
)
SOCIAL_AUTH_CREATE_USERS = True
# Auth engines and the settings required for them to be listed
AUTH_PROVIDERS = {
'twitter': ('TWITTER_CONSUMER_KEY', 'TWITTER_CONSUMER_SECRET'),
'facebook': ('FACEBOOK_APP_ID', 'FACEBOOK_API_SECRET'),
'github': ('GITHUB_APP_ID', 'GITHUB_API_SECRET'),
'google': ('GOOGLE_OAUTH2_CLIENT_ID', 'GOOGLE_OAUTH2_CLIENT_SECRET'),
'trello': ('TRELLO_API_KEY', 'TRELLO_API_SECRET'),
'bitbucket': ('BITBUCKET_CONSUMER_KEY', 'BITBUCKET_CONSUMER_SECRET'),
}
import random
SOCIAL_AUTH_DEFAULT_USERNAME = lambda: random.choice(['Darth Vader', 'Obi-Wan Kenobi', 'R2-D2', 'C-3PO', 'Yoda'])
SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email']
# Queue configuration
from kombu import Queue
BROKER_URL = "django://"
CELERY_ALWAYS_EAGER = True
CELERY_IGNORE_RESULT = True
CELERY_SEND_EVENTS = False
CELERY_RESULT_BACKEND = None
CELERY_TASK_RESULT_EXPIRES = 1
CELERY_DISABLE_RATE_LIMITS = True
CELERY_DEFAULT_QUEUE = "default"
CELERY_DEFAULT_EXCHANGE = "default"
CELERY_DEFAULT_EXCHANGE_TYPE = "direct"
CELERY_DEFAULT_ROUTING_KEY = "default"
CELERY_CREATE_MISSING_QUEUES = True
CELERY_QUEUES = (
Queue('default', routing_key='default'),
Queue('celery', routing_key='celery'),
Queue('alerts', routing_key='alerts'),
Queue('cleanup', routing_key='cleanup'),
Queue('sourcemaps', routing_key='sourcemaps'),
Queue('search', routing_key='search'),
Queue('counters', routing_key='counters'),
Queue('events', routing_key='events'),
Queue('triggers', routing_key='triggers'),
Queue('update', routing_key='update'),
)
# Disable South in tests as it is sending incorrect create signals
SOUTH_TESTS_MIGRATE = True
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'handlers': {
'console': {
'level': 'WARNING',
'class': 'logging.StreamHandler'
},
'sentry': {
'class': 'raven.contrib.django.handlers.SentryHandler',
}
},
'formatters': {
'client_info': {
'format': '%(name)s %(levelname)s %(project_slug)s/%(team_slug)s %(message)s'
}
},
'loggers': {
'()': {
'handlers': ['console', 'sentry'],
},
'root': {
'handlers': ['console', 'sentry'],
},
'sentry': {
'level': 'ERROR',
'handlers': ['console', 'sentry'],
'propagate': False,
},
'sentry.coreapi': {
'formatter': 'client_info',
},
'sentry.errors': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'django.request': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
}
}
NPM_ROOT = os.path.abspath(os.path.join(PROJECT_ROOT, os.pardir, os.pardir, 'node_modules'))
# We only define static bundles if NPM has been setup
if os.path.exists(NPM_ROOT):
STATIC_BUNDLES = {
"packages": {
"sentry/scripts/global.min.js": {
"src": [
"sentry/scripts/core.js",
"sentry/scripts/models.js",
"sentry/scripts/templates.js",
"sentry/scripts/utils.js",
"sentry/scripts/collections.js",
"sentry/scripts/charts.js",
"sentry/scripts/views.js",
"sentry/scripts/app.js",
],
},
"sentry/scripts/legacy.min.js": {
"src": [
"sentry/scripts/sentry.core.js",
"sentry/scripts/sentry.charts.js",
"sentry/scripts/sentry.stream.js",
],
},
"sentry/scripts/lib.min.js": {
"src": [
"sentry/scripts/lib/jquery.js",
"sentry/scripts/lib/jquery-migrate.js",
"sentry/scripts/lib/jquery.animate-colors.js",
"sentry/scripts/lib/jquery.clippy.min.js",
"sentry/scripts/lib/jquery.cookie.js",
"sentry/scripts/lib/jquery.flot.js",
"sentry/scripts/lib/jquery.flot.resize.js",
"sentry/scripts/lib/jquery.flot.stack.js",
"sentry/scripts/lib/jquery.flot.time.js",
"sentry/scripts/lib/jquery.flot.dashes.js",
"sentry/scripts/lib/simple-slider.js",
"sentry/scripts/lib/json2.js",
"sentry/scripts/lib/underscore.js",
"sentry/scripts/lib/backbone.js",
"sentry/scripts/lib/select2/select2.js",
],
},
"sentry/scripts/bootstrap.min.js": {
"src": [
"sentry/bootstrap/js/bootstrap-transition.js",
"sentry/bootstrap/js/bootstrap-alert.js",
"sentry/bootstrap/js/bootstrap-button.js",
"sentry/bootstrap/js/bootstrap-carousel.js",
"sentry/bootstrap/js/bootstrap-collapse.js",
"sentry/bootstrap/js/bootstrap-dropdown.js",
"sentry/bootstrap/js/bootstrap-modal.js",
"sentry/bootstrap/js/bootstrap-tooltip.js",
"sentry/bootstrap/js/bootstrap-popover.js",
"sentry/bootstrap/js/bootstrap-scrollspy.js",
"sentry/bootstrap/js/bootstrap-tab.js",
"sentry/bootstrap/js/bootstrap-typeahead.js",
"sentry/bootstrap/js/bootstrap-affix.js",
"sentry/scripts/lib/bootstrap-datepicker.js"
],
},
"sentry/styles/global.min.css": {
"src": {
"sentry/less/sentry.less": "sentry/styles/sentry.css",
},
},
"sentry/styles/wall.min.css": {
"src": {
"sentry/less/wall.less": "sentry/styles/wall.css",
},
},
},
"postcompilers": {
"*.js": ["node_modules/uglify-js/bin/uglifyjs {input} --source-map-root={relroot}/ --source-map-url={name}.map{ext} --source-map={relpath}/{name}.map{ext} -o {output}"],
},
"preprocessors": {
"*.less": ["node_modules/less/bin/lessc {input} {output}"],
},
}
# Sentry and Raven configuration
SENTRY_PUBLIC = False
SENTRY_PROJECT = 1
SENTRY_CACHE_BACKEND = 'default'
SENTRY_FILTERS = (
'sentry.filters.StatusFilter',
)
SENTRY_KEY = None
# Absolute URL to the sentry root directory. Should not include a trailing slash.
SENTRY_URL_PREFIX = ''
# Allow access to Sentry without authentication.
SENTRY_PUBLIC = False
# Login url (defaults to LOGIN_URL)
SENTRY_LOGIN_URL = None
# Default project ID (for internal errors)
SENTRY_PROJECT = 1
# Only store a portion of all messages per unique group.
SENTRY_SAMPLE_DATA = True
# The following values control the sampling rates
SENTRY_SAMPLE_RATES = (
(50, 1),
(1000, 2),
(10000, 10),
(100000, 50),
(1000000, 300),
(10000000, 2000),
)
SENTRY_MAX_SAMPLE_RATE = 10000
SENTRY_SAMPLE_TIMES = (
(3600, 1),
(360, 10),
(60, 60),
)
SENTRY_MAX_SAMPLE_TIME = 10000
# Web Service
SENTRY_WEB_HOST = 'localhost'
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
'workers': 3,
'limit_request_line': 0, # required for raven-js
}
# UDP Service
SENTRY_UDP_HOST = 'localhost'
SENTRY_UDP_PORT = 9001
# Queue (Kombu)
SENTRY_QUEUE = {
'transport': 'kombu.transport.django.Transport',
}
SENTRY_ALLOWED_INTERFACES = set([
'sentry.interfaces.Exception',
'sentry.interfaces.Message',
'sentry.interfaces.Stacktrace',
'sentry.interfaces.Template',
'sentry.interfaces.Query',
'sentry.interfaces.Http',
'sentry.interfaces.User',
])
# Should users without 'sentry.add_project' permissions be allowed
# to create new projects
SENTRY_ALLOW_PROJECT_CREATION = False
# Should users without 'sentry.add_team' permissions be allowed
# to create new teams
SENTRY_ALLOW_TEAM_CREATION = False
# Should users without superuser permissions be allowed to
# make projects public
SENTRY_ALLOW_PUBLIC_PROJECTS = True
# Should users be allowed to register an account? If this is disabled
# accounts can only be created when someone is invited or added
# manually.
SENTRY_ALLOW_REGISTRATION = True
# Instructs Sentry to utilize its internal search indexer on all incoming
# events.
SENTRY_USE_SEARCH = True
# Enable trend results. These can be expensive and are calculated in real-time.
# When disabled they will be replaced w/ a default priority sort.
SENTRY_USE_TRENDING = True
# Default to not sending the Access-Control-Allow-Origin header on api/store
SENTRY_ALLOW_ORIGIN = None
# Enable scraping of javascript context for source code
SENTRY_SCRAPE_JAVASCRIPT_CONTEXT = True
# Redis connection information (see Nydus documentation)
SENTRY_REDIS_OPTIONS = {}
# Buffer backend to use
SENTRY_BUFFER = 'sentry.buffer.Buffer'
SENTRY_BUFFER_OPTIONS = {}
SENTRY_QUOTAS = 'sentry.quotas.Quota'
SENTRY_QUOTA_OPTIONS = {}
# The default value for project-level quotas
SENTRY_DEFAULT_MAX_EVENTS_PER_MINUTE = '90%'
# The maximum number of events per minute the system should accept.
SENTRY_SYSTEM_MAX_EVENTS_PER_MINUTE = 0
SENTRY_RAVEN_JS_URL = 'd3nslu0hdya83q.cloudfront.net/dist/1.0/raven.min.js'
# URI Prefixes for generating DSN URLs
# (Defaults to URL_PREFIX by default)
SENTRY_ENDPOINT = None
SENTRY_PUBLIC_ENDPOINT = None
# Early draft features. Not slated or public release yet.
SENTRY_ENABLE_EXPLORE_CODE = False
SENTRY_ENABLE_EXPLORE_USERS = True
# Configure celery
import djcelery
djcelery.setup_loader()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related utilities."""
import numpy as np
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest
def slice_arrays(arrays, indices, contiguous=True):
"""Slices batches out of provided arrays (workaround for eager tensors).
Unfortunately eager tensors don't have the same slicing behavior as
Numpy arrays (they follow the same slicing behavior as symbolic TF tensors),
hence we cannot use `generic_utils.slice_arrays` directly
and we have to implement this workaround based on `concat`. This has a
performance cost.
Args:
arrays: Single array or list of arrays.
indices: List of indices in the array that should be included in the output
batch.
contiguous: Boolean flag indicating whether the indices are contiguous.
Returns:
Slice of data (either single array or list of arrays).
"""
converted_to_list = False
if not isinstance(arrays, list):
converted_to_list = True
arrays = [arrays]
if any(tensor_util.is_tf_type(x) for x in arrays):
if not contiguous:
entries = [[x[i:i + 1] for i in indices] for x in arrays]
slices = [array_ops.concat(x, axis=0) for x in entries]
else:
slices = [x[indices[0]:indices[-1] + 1] for x in arrays]
else:
slices = generic_utils.slice_arrays(arrays, indices)
if converted_to_list:
slices = slices[0]
return slices
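# A minimal usage sketch (assumes eager tensors and `import tensorflow as tf`;
# the tensors themselves are made up for illustration):
#
#   features = tf.constant([[0.], [1.], [2.], [3.]])
#   labels = tf.constant([0, 1, 2, 3])
#   feature_batch, label_batch = slice_arrays([features, labels], [1, 2])
#   # each result holds rows 1..2 of the corresponding input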
def handle_partial_sample_weights(outputs, sample_weights, sample_weight_modes,
check_all_flat=False):
"""Adds 1.0 as sample weights for the outputs for which there is no weight.
Args:
outputs: List of model outputs.
sample_weights: List of sample weight inputs.
sample_weight_modes: List of sample weight modes or None.
check_all_flat: Ensure that inputs are not nested structures. This is not
a free check, so we may not want to run it eagerly every iteration.
Returns:
Tuple of sample weights, one sample weight for every output, and booleans
describing the raw sample weights.
"""
any_sample_weight = sample_weights is not None and any(
w is not None for w in sample_weights)
partial_sample_weight = any_sample_weight and any(
w is None for w in sample_weights)
if not any_sample_weight:
return None, any_sample_weight, partial_sample_weight
if not partial_sample_weight:
return sample_weights, any_sample_weight, partial_sample_weight
if check_all_flat:
nest.assert_same_structure(
list_to_tuple(sample_weights),
list_to_tuple(nest.flatten(sample_weights)))
nest.assert_same_structure(
list_to_tuple(outputs),
list_to_tuple(nest.flatten(outputs)))
if sample_weight_modes is not None:
nest.assert_same_structure(
sample_weight_modes, nest.flatten(sample_weight_modes))
new_sample_weights = []
for i, sw in enumerate(sample_weights):
if sw is None:
as_numpy = isinstance(outputs[i], np.ndarray)
output = outputs[i]
output_shape = output.shape if as_numpy else array_ops.shape(output)
is_temporal = (
sample_weight_modes is not None and
sample_weight_modes[i] == 'temporal')
sw_shape = (output_shape[0],
output_shape[1]) if is_temporal else (output_shape[0],)
new_sample_weights.append(
np.ones(sw_shape) if as_numpy else array_ops.ones(sw_shape))
else:
new_sample_weights.append(sw)
return (list_to_tuple(new_sample_weights),
any_sample_weight, partial_sample_weight)
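# Worked sketch (shapes and values are illustrative): with two outputs and a
# weight supplied only for the first one,
#
#   outputs = [np.zeros((4, 1)), np.zeros((4, 3))]
#   sample_weights = [np.array([1., 2., 1., 1.]), None]
#   weights, any_sw, partial_sw = handle_partial_sample_weights(
#       outputs, sample_weights, sample_weight_modes=None)
#
# ``weights`` is a tuple whose second entry is np.ones((4,)); ``any_sw`` and
# ``partial_sw`` are both True.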
class RespectCompiledTrainableState(object):
"""Set and restore trainable state if it has changed since compile.
The keras API guarantees that the value of each Layer's `trainable` property
at `Model.compile` time will be used when training that model. In order to
respect this requirement, it may be necessary to set the trainable value of
layers to their compile time values before beginning a training endpoint and
restore the values before returning from said endpoint. This scope checks if
any layer's trainable state has changed since Model compile, and performs this
set and un-set bookkeeping.
However, the trainable state of a layer changes quite infrequently, if ever,
for many kinds of workflows. Moreover, updating every layer in a model is an
expensive operation. As a result, we will only explicitly set and unset the
trainable state of a model if a trainable value has changed since compile.
"""
def __init__(self, model):
self._model = model
self._current_trainable_state = None
self._compiled_trainable_state = None
self._should_set_trainable = False
def __enter__(self):
self._current_trainable_state = self._model._get_trainable_state() # pylint: disable=protected-access
self._compiled_trainable_state = self._model._compiled_trainable_state # pylint: disable=protected-access
# Check to see if any layer's trainable state has changed since `compile`.
for layer, trainable in self._compiled_trainable_state.items():
if (layer in self._current_trainable_state and
trainable != self._current_trainable_state[layer]):
self._should_set_trainable = True
break
# If so, restore the model to its compiled state.
if self._should_set_trainable:
self._model._set_trainable_state(self._compiled_trainable_state) # pylint: disable=protected-access
def __exit__(self, type_arg, value_arg, traceback_arg):
# If we set the values to their compiled state in __enter__, we need to
# restore the original values before leaving the scope.
if self._should_set_trainable:
self._model._set_trainable_state(self._current_trainable_state) # pylint: disable=protected-access
return False # False values do not suppress exceptions
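# Minimal usage sketch (``model``, ``x`` and ``y`` are assumed to be a compiled
# Keras model and its training data): run a training step with the trainable
# flags the layers had at compile time, restoring any later changes afterwards.
#
#   with RespectCompiledTrainableState(model):
#       model.fit(x, y, epochs=1)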
# Allow use of methods not exposed to the user.
# pylint: disable=protected-access
def get_input_shape_and_dtype(layer):
"""Retrieves input shape and input dtype of layer if applicable.
Args:
layer: Layer (or model) instance.
Returns:
Tuple (input_shape, input_dtype). Both could be None if the layer
does not have a defined input shape.
Raises:
ValueError: in case an empty Sequential or Functional model is passed.
"""
def _is_graph_model(layer):
return ((hasattr(layer, '_is_graph_network') and layer._is_graph_network) or
layer.__class__.__name__ == 'Sequential')
# In case of nested models: recover the first layer
# of the deepest model to infer input shape and dtype.
# Subclassed Models may not have been built so can't be checked.
while _is_graph_model(layer):
if not layer.layers:
raise ValueError('An empty Model cannot be used as a Layer.')
layer = layer.layers[0]
if getattr(layer, '_batch_input_shape', None):
return layer._batch_input_shape, layer.dtype
return None, None
# pylint: enable=protected-access
def get_static_batch_size(layer):
"""Gets the static batch size of a Layer.
Args:
layer: a `Layer` instance.
Returns:
The static batch size of a Layer.
"""
batch_input_shape, _ = get_input_shape_and_dtype(layer)
if batch_input_shape is not None:
return tensor_shape.Dimension(batch_input_shape[0]).value
return None
def list_to_tuple(maybe_list):
"""Datasets will stack the list of tensor, so switch them to tuples."""
if isinstance(maybe_list, list):
return tuple(maybe_list)
return maybe_list
|
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Functions used to load commonly available datasets.
"""
import cPickle
import gzip
import logging
import numpy as np
import os
import sys
import tarfile
import urllib2
logger = logging.getLogger(__name__)
def _valid_path_append(path, *args):
"""
Helper to validate passed path directory and append any subsequent
filename arguments.
Arguments:
path (str): Initial filesystem path. Should expand to a valid
directory.
*args (list, optional): Any filename or path suffices to append to path
for returning.
Returns:
(list, str): path prepended list of files from args, or path alone if
no args specified.
Raises:
ValueError: if path is not a valid directory on this filesystem.
"""
full_path = os.path.expanduser(path)
res = []
if not os.path.exists(full_path):
os.makedirs(full_path)
if not os.path.isdir(full_path):
raise ValueError("path: {0} is not a valid directory".format(path))
for suffix_path in args:
res.append(os.path.join(full_path, suffix_path))
if len(res) == 0:
return path
elif len(res) == 1:
return res[0]
else:
return res
def fetch_dataset(url, sourcefile, destfile, totalsz):
"""
Download the file specified by the given URL.
Args:
url (str): Base URL of the file to be downloaded.
sourcefile (str): Name of the source file.
destfile (str): Path to the destination.
totalsz (int): Size of the file to be downloaded.
"""
cloudfile = urllib2.urlopen(os.path.join(url, sourcefile))
print("Downloading file: {}".format(destfile))
blockchar = u'\u2588' # character to display in progress bar
with open(destfile, 'wb') as f:
data_read = 0
chunksz = 1024**2
while 1:
data = cloudfile.read(chunksz)
if not data:
break
data_read = min(totalsz, data_read + chunksz)
progress_string = u'Download Progress |{:<50}| '.format(
blockchar * int(float(data_read) / totalsz * 50))
sys.stdout.write('\r')
sys.stdout.write(progress_string.encode('utf-8'))
sys.stdout.flush()
f.write(data)
print("Download Complete")
def load_mnist(path=".", normalize=True):
"""
Fetch the MNIST dataset and load it into memory.
Args:
path (str, optional): Local directory in which to cache the raw
dataset. Defaults to current directory.
normalize (bool, optional): whether to scale values between 0 and 1.
Defaults to True.
Returns:
tuple: Both training and test sets are returned.
"""
mnist = dataset_meta['mnist']
filepath = _valid_path_append(path, mnist['file'])
if not os.path.exists(filepath):
fetch_dataset(mnist['url'], mnist['file'], filepath, mnist['size'])
with gzip.open(filepath, 'rb') as mnist:
(X_train, y_train), (X_test, y_test) = cPickle.load(mnist)
X_train = X_train.reshape(-1, 784)
X_test = X_test.reshape(-1, 784)
if normalize:
X_train = X_train / 255.
X_test = X_test / 255.
return (X_train, y_train), (X_test, y_test), 10
def _compute_zca_transform(imgs, filter_bias=0.1):
"""
Compute the zca whitening transform matrix
"""
logger.info("Computing ZCA transform matrix")
meanX = np.mean(imgs, 0)
covX = np.cov(imgs.T)
# Regularize with filter_bias so small or zero eigenvalues stay invertible
# (the parameter was previously accepted but unused).
D, E = np.linalg.eigh(covX + filter_bias * np.eye(covX.shape[0]))
assert not np.isnan(D).any()
assert not np.isnan(E).any()
assert D.min() > 0
D = D ** -.5
W = np.dot(E, np.dot(np.diag(D), E.T))
return meanX, W
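# Editorial note (sketch): with D, E the eigen-decomposition of the covariance,
# W = E.dot(np.diag(D ** -0.5)).dot(E.T) is the ZCA whitening matrix, so rows of
# (imgs - meanX).dot(W) have approximately identity covariance. `zca_whiten`
# below applies exactly this transform, reusing the train-set statistics for
# the test set.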
def zca_whiten(train, test, cache=None):
"""
Use train set statistics to apply the ZCA whitening transform to
both train and test sets.
"""
if cache and os.path.isfile(cache):
with open(cache, 'rb') as f:
(meanX, W) = cPickle.load(f)
else:
meanX, W = _compute_zca_transform(train)
if cache:
logger.info("Caching ZCA transform matrix")
with open(cache, 'wb') as f:
cPickle.dump((meanX, W), f)
logger.info("Applying ZCA whitening transform")
train_w = np.dot(train - meanX, W)
test_w = np.dot(test - meanX, W)
return train_w, test_w
def global_contrast_normalize(X, scale=1., min_divisor=1e-8):
"""
Subtract mean and normalize by vector norm
"""
X = X - X.mean(axis=1)[:, np.newaxis]
normalizers = np.sqrt((X ** 2).sum(axis=1)) / scale
normalizers[normalizers < min_divisor] = 1.
X /= normalizers[:, np.newaxis]
return X
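# Editorial self-check sketch (not part of the original module): each row comes
# back zero-mean with L2 norm equal to `scale` whenever its norm exceeds
# `min_divisor`, e.g.:
#
#   X = np.arange(12, dtype=float).reshape(3, 4)
#   Xn = global_contrast_normalize(X, scale=1.)
#   assert np.allclose(Xn.mean(axis=1), 0.)
#   assert np.allclose(np.sqrt((Xn ** 2).sum(axis=1)), 1.)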
def load_cifar10(path=".", normalize=True, contrast_normalize=False, whiten=False):
"""
Fetch the CIFAR-10 dataset and load it into memory.
Args:
path (str, optional): Local directory in which to cache the raw
dataset. Defaults to current directory.
normalize (bool, optional): Whether to scale values between 0 and 1.
Defaults to True.
Returns:
tuple: Both training and test sets are returned.
"""
cifar = dataset_meta['cifar-10']
workdir, filepath = _valid_path_append(path, '', cifar['file'])
batchdir = os.path.join(workdir, 'cifar-10-batches-py')
if not os.path.exists(os.path.join(batchdir, 'data_batch_1')):
if not os.path.exists(filepath):
fetch_dataset(cifar['url'], cifar['file'], filepath, cifar['size'])
with tarfile.open(filepath, 'r:gz') as f:
f.extractall(workdir)
train_batches = [os.path.join(batchdir, 'data_batch_' + str(i)) for i in range(1, 6)]
Xlist, ylist = [], []
for batch in train_batches:
with open(batch, 'rb') as f:
d = cPickle.load(f)
Xlist.append(d['data'])
ylist.append(d['labels'])
X_train = np.vstack(Xlist)
y_train = np.vstack(ylist)
with open(os.path.join(batchdir, 'test_batch'), 'rb') as f:
d = cPickle.load(f)
X_test, y_test = d['data'], d['labels']
y_train = y_train.reshape(-1, 1)
y_test = np.array(y_test).reshape(-1, 1)
if contrast_normalize:
norm_scale = 55.0 # Goodfellow
X_train = global_contrast_normalize(X_train, scale=norm_scale)
X_test = global_contrast_normalize(X_test, scale=norm_scale)
if normalize:
X_train = X_train / 255.
X_test = X_test / 255.
if whiten:
zca_cache = os.path.join(workdir, 'cifar-10-zca-cache.pkl')
X_train, X_test = zca_whiten(X_train, X_test, cache=zca_cache)
return (X_train, y_train), (X_test, y_test), 10
def load_babi(path=".", task='qa1_single-supporting-fact', subset='en'):
"""
Fetch the Facebook bAbI dataset and load it to memory.
Args:
path (str, optional): Local directory in which to cache the raw
dataset. Defaults to current directory.
task (str): bAbI task to load
Returns:
tuple: training and test files are returned
"""
babi = dataset_meta['babi']
workdir, filepath = _valid_path_append(path, '', babi['file'])
if not os.path.exists(filepath):
fetch_dataset(babi['url'], babi['file'], filepath, babi['size'])
babi_dir_name = babi['file'].split('.')[0]
task = babi_dir_name + '/' + subset + '/' + task + '_{}.txt'
train_file = os.path.join(workdir, task.format('train'))
test_file = os.path.join(workdir, task.format('test'))
if not os.path.exists(train_file) or not os.path.exists(test_file):
with tarfile.open(filepath, 'r:gz') as f:
f.extractall(workdir)
return train_file, test_file
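# Editorial usage sketch (cache directory is an illustrative placeholder):
#
#   train_path, test_path = load_babi(path='~/data',
#                                     task='qa1_single-supporting-fact')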
def load_text(dataset, path="."):
"""
Fetch the specified dataset.
Args:
dataset (str): A key that may be used to retrieve metadata associated
with the dataset.
path (str, optional): Working directory in which to cache loaded data.
Defaults to current dir if not specified.
Returns:
str: Path to the downloaded dataset.
"""
text_meta = dataset_meta[dataset]
workdir, filepath = _valid_path_append(path, '', text_meta['file'])
if not os.path.exists(filepath):
fetch_dataset(text_meta['url'], text_meta['file'], filepath,
text_meta['size'])
if '.zip' in filepath:
import zipfile
zip_ref = zipfile.ZipFile(filepath)
zip_ref.extractall(workdir)
zip_ref.close()
filepath = filepath.split('.zip')[0]
return filepath
def load_ptb_train(path):
return load_text('ptb-train', path)
def load_ptb_valid(path):
return load_text('ptb-valid', path)
def load_ptb_test(path):
return load_text('ptb-test', path)
def load_hutter_prize(path):
return load_text('hutter-prize', path)
def load_shakespeare(path):
return load_text('shakespeare', path)
def load_flickr8k(path):
return load_text('flickr8k', path)
def load_flickr30k(path):
return load_text('flickr30k', path)
def load_coco(path):
return load_text('coco', path)
def load_i1kmeta(path):
return load_text('i1kmeta', path)
def load_imdb(path):
return load_text('imdb', path)
dataset_meta = {
'mnist': {
'size': 15296311,
'file': 'mnist.pkl.gz',
'url': 'https://s3.amazonaws.com/img-datasets',
'func': load_mnist
},
'cifar-10': {
'size': 170498071,
'file': 'cifar-10-python.tar.gz',
'url': 'http://www.cs.toronto.edu/~kriz',
'func': load_cifar10
},
'babi': {
'size': 11745123,
'file': 'tasks_1-20_v1-2.tar.gz',
'url': 'http://www.thespermwhale.com/jaseweston/babi',
'func': load_babi
},
'ptb-train': {
'size': 5101618,
'file': 'ptb.train.txt',
'url': 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data',
'func': load_ptb_train
},
'ptb-valid': {
'size': 399782,
'file': 'ptb.valid.txt',
'url': 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data',
'func': load_ptb_valid
},
'ptb-test': {
'size': 449945,
'file': 'ptb.test.txt',
'url': 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data',
'func': load_ptb_test
},
'hutter-prize': {
'size': 35012219,
'file': 'enwik8.zip',
'url': 'http://mattmahoney.net/dc',
'func': load_hutter_prize
},
'shakespeare': {
'size': 4573338,
'file': 'shakespeare_input.txt',
'url': 'http://cs.stanford.edu/people/karpathy/char-rnn',
'func': load_shakespeare
},
'flickr8k': {
'size': 49165563,
'file': 'flickr8k.zip',
'url': 'https://s3-us-west-1.amazonaws.com/neon-stockdatasets/image-caption',
'func': load_flickr8k
},
'flickr30k': {
'size': 195267563,
'file': 'flickr30k.zip',
'url': 'https://s3-us-west-1.amazonaws.com/neon-stockdatasets/image-caption',
'func': load_flickr30k
},
'coco': {
'size': 738051031,
'file': 'coco.zip',
'url': 'https://s3-us-west-1.amazonaws.com/neon-stockdatasets/image-caption',
'func': load_coco
},
'i1kmeta': {
'size': 758648,
'file': 'neon_ILSVRC2012_devmeta.zip',
'url': 'https://s3-us-west-1.amazonaws.com/neon-stockdatasets/imagenet',
'func': load_i1kmeta
},
'imdb': {
'size': 33213513,
'file': 'imdb.pkl',
'url': 'https://s3.amazonaws.com/text-datasets',
'func': load_imdb,
}
}
def load_dataset(name, path=".", **kwargs):
"""
Fetch the specified dataset.
Args:
name (str): A key that may be used to retrieve the function that
can be used to load the dataset.
path (str, optional): Local cache directory to load the dataset into.
Defaults to current working directory.
Returns:
tuple: Both training and test sets are returned. The return value
also contains the number of classes in the dataset.
"""
if name in dataset_meta:
if 'func' not in dataset_meta[name]:
raise ValueError('function not specified for loading %s' % name)
func = dataset_meta[name]['func']
else:
try:
dataset_module = __import__(name)
except ImportError:
raise ValueError('dataset handler not found: %s' % name)
func = dataset_module.load_data
return func(path, **kwargs)
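# Editorial usage sketch (cache directory is an illustrative placeholder):
#
#   (X_train, y_train), (X_test, y_test), nclass = load_dataset('mnist',
#                                                               path='~/data')
#   # Text datasets such as 'ptb-train' instead return the path to the file.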
|
|
#!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets to build artifacts."""
import os.path
import random
import string
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name,
dockerfile_dir,
shell_command,
environ={},
flake_retries=0,
timeout_retries=0,
timeout_seconds=30 * 60,
docker_base_image=None,
extra_docker_args=None):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
environ['RUN_COMMAND'] = shell_command
environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name
docker_args = []
for k, v in environ.items():
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {
'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
'OUTPUT_DIR': 'artifacts'
}
if docker_base_image is not None:
docker_env['DOCKER_BASE_IMAGE'] = docker_base_image
if extra_docker_args is not None:
docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
docker_args,
environ=docker_env,
shortname='build_artifact.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries)
return jobspec
def create_jobspec(name,
cmdline,
environ={},
shell=False,
flake_retries=0,
timeout_retries=0,
timeout_seconds=30 * 60,
use_workspace=False,
cpu_cost=1.0):
"""Creates jobspec."""
environ = environ.copy()
if use_workspace:
environ['WORKSPACE_NAME'] = 'workspace_%s' % name
environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name)
cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
] + cmdline
else:
environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name)
jobspec = jobset.JobSpec(
cmdline=cmdline,
environ=environ,
shortname='build_artifact.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell,
cpu_cost=cpu_cost)
return jobspec
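# Editorial usage sketch (the name, script and environment below are
# illustrative placeholders only):
#
#   spec = create_jobspec('example_artifact',
#                         ['tools/run_tests/artifacts/build_artifact_python.sh'],
#                         environ={'PYTHON': 'python3'},
#                         use_workspace=True)
#   # spec.shortname == 'build_artifact.example_artifact'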
_MACOS_COMPAT_FLAG = '-mmacosx-version-min=10.7'
_ARCH_FLAG_MAP = {'x86': '-m32', 'x64': '-m64'}
class PythonArtifact:
"""Builds Python artifacts."""
def __init__(self, platform, arch, py_version):
self.name = 'python_%s_%s_%s' % (platform, arch, py_version)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'python', platform, arch, py_version]
self.py_version = py_version
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
environ = {}
if self.platform == 'linux_extra':
# Raspberry Pi build
environ['PYTHON'] = '/usr/local/bin/python{}'.format(
self.py_version)
environ['PIP'] = '/usr/local/bin/pip{}'.format(self.py_version)
# https://github.com/resin-io-projects/armv7hf-debian-qemu/issues/9
# A QEMU bug causes submodule update to hang, so we copy directly
environ['RELATIVE_COPY_PATH'] = '.'
extra_args = ' --entrypoint=/usr/bin/qemu-arm-static '
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
'tools/run_tests/artifacts/build_artifact_python.sh',
environ=environ,
timeout_seconds=60 * 60 * 5,
docker_base_image='quay.io/grpc/raspbian_{}'.format(self.arch),
extra_docker_args=extra_args)
elif self.platform == 'linux':
if self.arch == 'x86':
environ['SETARCH_CMD'] = 'linux32'
# Inside the manylinux container, the python installations are located in
# special places...
environ['PYTHON'] = '/opt/python/{}/bin/python'.format(
self.py_version)
environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.py_version)
# Platform autodetection for the manylinux1 image breaks so we set the
# defines ourselves.
# TODO(atash) get better platform-detection support in core so we don't
# need to do this manually...
environ['CFLAGS'] = '-DGPR_MANYLINUX1=1'
environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE'
environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE'
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_python_manylinux_%s' %
self.arch,
'tools/run_tests/artifacts/build_artifact_python.sh',
environ=environ,
timeout_seconds=60 * 60,
docker_base_image='quay.io/pypa/manylinux1_i686'
if self.arch == 'x86' else 'quay.io/pypa/manylinux1_x86_64')
elif self.platform == 'windows':
if 'Python27' in self.py_version or 'Python34' in self.py_version:
environ['EXT_COMPILER'] = 'mingw32'
else:
environ['EXT_COMPILER'] = 'msvc'
# For some reason, the batch script %random% always runs with the same
# seed. We create a random temp-dir here
dir = ''.join(
random.choice(string.ascii_uppercase) for _ in range(10))
return create_jobspec(
self.name, [
'tools\\run_tests\\artifacts\\build_artifact_python.bat',
self.py_version, '32' if self.arch == 'x86' else '64'
],
environ=environ,
timeout_seconds=45 * 60,
use_workspace=True)
else:
environ['PYTHON'] = self.py_version
environ['SKIP_PIP_INSTALL'] = 'TRUE'
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_python.sh'],
environ=environ,
timeout_seconds=60 * 60 * 2,
use_workspace=True)
def __str__(self):
return self.name
class RubyArtifact:
"""Builds ruby native gem."""
def __init__(self, platform, arch):
self.name = 'ruby_native_gem_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'ruby', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
# Ruby build uses docker internally and docker cannot be nested.
# We are using a custom workspace instead.
return create_jobspec(
self.name, ['tools/run_tests/artifacts/build_artifact_ruby.sh'],
use_workspace=True,
timeout_seconds=45 * 60)
class CSharpExtArtifact:
"""Builds C# native extension library"""
def __init__(self, platform, arch):
self.name = 'csharp_ext_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'csharp', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'windows':
cmake_arch_option = 'Win32' if self.arch == 'x86' else self.arch
return create_jobspec(
self.name, [
'tools\\run_tests\\artifacts\\build_artifact_csharp.bat',
cmake_arch_option
],
use_workspace=True)
else:
environ = {
'CONFIG': 'opt',
'EMBED_OPENSSL': 'true',
'EMBED_ZLIB': 'true',
'CFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
'CXXFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
'LDFLAGS': ''
}
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_%s' % self.arch,
'tools/run_tests/artifacts/build_artifact_csharp.sh',
environ=environ)
else:
archflag = _ARCH_FLAG_MAP[self.arch]
environ['CFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
environ['CXXFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
environ['LDFLAGS'] += ' %s' % archflag
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_csharp.sh'],
environ=environ,
use_workspace=True)
def __str__(self):
return self.name
class PHPArtifact:
"""Builds PHP PECL package"""
def __init__(self, platform, arch):
self.name = 'php_pecl_package_{0}_{1}'.format(platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'php', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(
self.name, 'tools/dockerfile/grpc_artifact_linux_{}'.format(
self.arch),
'tools/run_tests/artifacts/build_artifact_php.sh')
else:
return create_jobspec(
self.name, ['tools/run_tests/artifacts/build_artifact_php.sh'],
use_workspace=True)
class ProtocArtifact:
"""Builds protoc and protoc-plugin artifacts"""
def __init__(self, platform, arch):
self.name = 'protoc_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'protoc', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform != 'windows':
cxxflags = '-DNDEBUG %s' % _ARCH_FLAG_MAP[self.arch]
ldflags = '%s' % _ARCH_FLAG_MAP[self.arch]
if self.platform != 'macos':
ldflags += ' -static-libgcc -static-libstdc++ -s'
environ = {
'CONFIG': 'opt',
'CXXFLAGS': cxxflags,
'LDFLAGS': ldflags,
'PROTOBUF_LDFLAGS_EXTRA': ldflags
}
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_protoc',
'tools/run_tests/artifacts/build_artifact_protoc.sh',
environ=environ)
else:
environ[
'CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_protoc.sh'],
environ=environ,
timeout_seconds=60 * 60,
use_workspace=True)
else:
generator = 'Visual Studio 14 2015 Win64' if self.arch == 'x64' else 'Visual Studio 14 2015'
return create_jobspec(
self.name,
['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'],
environ={'generator': generator},
use_workspace=True)
def __str__(self):
return self.name
def targets():
"""Gets list of supported targets"""
return ([
Cls(platform, arch)
for Cls in (CSharpExtArtifact, ProtocArtifact)
for platform in ('linux', 'macos', 'windows') for arch in ('x86', 'x64')
] + [
PythonArtifact('linux', 'x86', 'cp27-cp27m'),
PythonArtifact('linux', 'x86', 'cp27-cp27mu'),
PythonArtifact('linux', 'x86', 'cp34-cp34m'),
PythonArtifact('linux', 'x86', 'cp35-cp35m'),
PythonArtifact('linux', 'x86', 'cp36-cp36m'),
PythonArtifact('linux_extra', 'armv7', '2.7'),
PythonArtifact('linux_extra', 'armv7', '3.4'),
PythonArtifact('linux_extra', 'armv7', '3.5'),
PythonArtifact('linux_extra', 'armv7', '3.6'),
PythonArtifact('linux_extra', 'armv6', '2.7'),
PythonArtifact('linux_extra', 'armv6', '3.4'),
PythonArtifact('linux_extra', 'armv6', '3.5'),
PythonArtifact('linux_extra', 'armv6', '3.6'),
PythonArtifact('linux', 'x64', 'cp27-cp27m'),
PythonArtifact('linux', 'x64', 'cp27-cp27mu'),
PythonArtifact('linux', 'x64', 'cp34-cp34m'),
PythonArtifact('linux', 'x64', 'cp35-cp35m'),
PythonArtifact('linux', 'x64', 'cp36-cp36m'),
PythonArtifact('macos', 'x64', 'python2.7'),
PythonArtifact('macos', 'x64', 'python3.4'),
PythonArtifact('macos', 'x64', 'python3.5'),
PythonArtifact('macos', 'x64', 'python3.6'),
PythonArtifact('windows', 'x86', 'Python27_32bits'),
PythonArtifact('windows', 'x86', 'Python34_32bits'),
PythonArtifact('windows', 'x86', 'Python35_32bits'),
PythonArtifact('windows', 'x86', 'Python36_32bits'),
PythonArtifact('windows', 'x64', 'Python27'),
PythonArtifact('windows', 'x64', 'Python34'),
PythonArtifact('windows', 'x64', 'Python35'),
PythonArtifact('windows', 'x64', 'Python36'),
RubyArtifact('linux', 'x64'),
RubyArtifact('macos', 'x64'),
PHPArtifact('linux', 'x64'),
PHPArtifact('macos', 'x64')
])
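# Editorial sketch: callers of this module typically filter the returned
# targets by label and then build the corresponding jobspecs, e.g.:
#
#   python_jobs = [t.build_jobspec() for t in targets() if 'python' in t.labels]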
|
|
from abc import ABCMeta, abstractmethod
from pywps._compat import text_type, StringIO
import tempfile, os
from pywps.inout.literaltypes import LITERAL_DATA_TYPES
from pywps import OWS, OGCUNIT, NAMESPACES
from pywps.validator.mode import MODE
from pywps.validator.base import emptyvalidator
from pywps.validator import get_validator
from pywps.validator.literalvalidator import validate_anyvalue,\
validate_allowed_values
from pywps.exceptions import InvalidParameterValue
import base64
class SOURCE_TYPE:
MEMORY = 0
FILE = 1
STREAM = 2
DATA = 3
class DataTypeAbstract(object):
"""LiteralObject data_type abstract class
"""
__metaclass__ = ABCMeta
@abstractmethod
def convert(self, value):
return value
class IOHandler(object):
"""Basic IO class. Provides functions, to accept input data in file,
memory object and stream object and give them out in all three types
>>> # setting up
>>> import os
>>> from io import RawIOBase
>>> from io import FileIO
>>> import types
>>>
>>> ioh_file = IOHandler(workdir=tmp)
>>> assert isinstance(ioh_file, IOHandler)
>>>
>>> # Create test file input
>>> fileobj = open(os.path.join(tmp, 'myfile.txt'), 'w')
>>> fileobj.write('ASDF ASFADSF ASF ASF ASDF ASFASF')
>>> fileobj.close()
>>>
>>> # testing file object on input
>>> ioh_file.file = fileobj.name
>>> assert ioh_file.source_type == SOURCE_TYPE.FILE
>>> file = ioh_file.file
>>> stream = ioh_file.stream
>>>
>>> assert file == fileobj.name
>>> assert isinstance(stream, RawIOBase)
>>> # skipped assert isinstance(ioh_file.memory_object, POSH)
>>>
>>> # testing stream object on input
>>> ioh_stream = IOHandler(workdir=tmp)
>>> assert ioh_stream.workdir == tmp
>>> ioh_stream.stream = FileIO(fileobj.name,'r')
>>> assert ioh_stream.source_type == SOURCE_TYPE.STREAM
>>> file = ioh_stream.file
>>> stream = ioh_stream.stream
>>>
>>> assert open(file).read() == ioh_file.stream.read()
>>> assert isinstance(stream, RawIOBase)
>>> # skipped assert isinstance(ioh_stream.memory_object, POSH)
>>>
>>> # testing in memory object object on input
>>> # skipped ioh_mo = IOHandler(workdir=tmp)
>>> # skipped ioh_mo.memory_object = POSH
>>> # skipped assert ioh_mo.source_type == SOURCE_TYPE.MEMORY
>>> # skipped file = ioh_mo.file
>>> # skipped stream = ioh_mo.stream
>>> # skipped posh = ioh_mo.memory_object
>>> #
>>> # skipped assert open(file).read() == ioh_file.stream.read()
>>> # skipped assert isinstance(ioh_mo.stream, RawIOBase)
>>> # skipped assert isinstance(ioh_mo.memory_object, POSH)
"""
def __init__(self, workdir=None, mode=MODE.NONE):
self.source_type = None
self.source = None
self._tempfile = None
self._validated = False
self.workdir = workdir
self.valid_mode = mode
def _check_valid(self):
"""Validate this input usig given validator
"""
if not self._validated:
validate = self.validator
_valid = validate(self, self.valid_mode)
if not _valid:
raise InvalidParameterValue('Input data not valid using '
'mode %s' % (self.valid_mode))
else:
self._validated = True
def set_file(self, filename):
"""Set source as file name"""
self.source_type = SOURCE_TYPE.FILE
self.source = os.path.abspath(filename)
self._check_valid()
def set_workdir(self, workdirpath):
"""Set working temporary directory for files to be stored in"""
if workdirpath is not None and not os.path.exists(workdirpath):
os.makedirs(workdirpath)
self._workdir = workdirpath
def set_memory_object(self, memory_object):
"""Set source as in memory object"""
self.source_type = SOURCE_TYPE.MEMORY
self._check_valid()
def set_stream(self, stream):
"""Set source as stream object"""
self.source_type = SOURCE_TYPE.STREAM
self.source = stream
self._check_valid()
def set_data(self, data):
"""Set source as simple datatype e.g. string, number"""
self.source_type = SOURCE_TYPE.DATA
self.source = data
self._check_valid()
def set_base64(self, data):
"""Set data encoded in base64"""
self.data = base64.b64decode(data)
self._check_valid()
def get_file(self):
"""Get source as file name"""
if self.source_type == SOURCE_TYPE.FILE:
return self.source
elif self.source_type == SOURCE_TYPE.STREAM or\
self.source_type == SOURCE_TYPE.DATA:
if self._tempfile:
return self._tempfile
else:
(opening, stream_file_name) = tempfile.mkstemp(dir=self.workdir)
stream_file = open(stream_file_name, 'w')
if self.source_type == SOURCE_TYPE.STREAM:
stream_file.write(self.source.read())
else:
stream_file.write(self.source)
stream_file.close()
self._tempfile = str(stream_file_name)
return self._tempfile
def get_workdir(self):
"""Return working directory name
"""
return self._workdir
def get_memory_object(self):
"""Get source as memory object"""
raise Exception("setmemory_object not implemented, Soeren promissed to implement at WPS Workshop on 23rd of January 2014")
def get_stream(self):
"""Get source as stream object"""
if self.source_type == SOURCE_TYPE.FILE:
from io import FileIO
return FileIO(self.source, mode='r', closefd=True)
elif self.source_type == SOURCE_TYPE.STREAM:
return self.source
elif self.source_type == SOURCE_TYPE.DATA:
return StringIO(text_type(self.source))
def get_data(self):
"""Get source as simple data object"""
if self.source_type == SOURCE_TYPE.FILE:
file_handler = open(self.source, mode='r')
content = file_handler.read()
file_handler.close()
return content
elif self.source_type == SOURCE_TYPE.STREAM:
return self.source.read()
elif self.source_type == SOURCE_TYPE.DATA:
return self.source
@property
def validator(self):
"""Return the function suitable for validation
This method should be overridden by class children
:return: validating function
"""
return emptyvalidator
def get_base64(self):
return base64.b64encode(self.data)
# Properties
file = property(fget=get_file, fset=set_file)
memory_object = property(fget=get_memory_object, fset=set_memory_object)
stream = property(fget=get_stream, fset=set_stream)
data = property(fget=get_data, fset=set_data)
base64 = property(fget=get_base64, fset=set_base64)
workdir = property(fget=get_workdir, fset=set_workdir)
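# Editorial usage sketch (not part of the original module): the same value can
# be set as `data` and read back through any of the other properties.
#
#   handler = IOHandler(workdir=tempfile.mkdtemp())
#   handler.data = 'some literal content'
#   assert handler.source_type == SOURCE_TYPE.DATA
#   assert handler.stream.read() == 'some literal content'
#   assert open(handler.file).read() == 'some literal content'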
class SimpleHandler(IOHandler):
"""Data handler for Literal In- and Outputs
>>> class Int_type(object):
... @staticmethod
... def convert(value): return int(value)
>>>
>>> class MyValidator(object):
... @staticmethod
... def validate(inpt): return 0 < inpt.data < 3
>>>
>>> inpt = SimpleHandler(data_type = Int_type)
>>> inpt.validator = MyValidator
>>>
>>> inpt.data = 1
>>> inpt.validator.validate(inpt)
True
>>> inpt.data = 5
>>> inpt.validator.validate(inpt)
False
"""
def __init__(self, workdir=None, data_type=None, mode=MODE.NONE):
IOHandler.__init__(self, workdir=workdir, mode=mode)
self.data_type = data_type
def get_data(self):
return IOHandler.get_data(self)
def set_data(self, data):
"""Set data value. input data are converted into target format
"""
if self.data_type:
# TODO: check the DataTypeAbstract class; something is missing here
# check if it is a valid data_type
if self.data_type.lower() in LITERAL_DATA_TYPES:
if self.data_type.lower() == 'string':
data = text_type(data)
elif self.data_type.lower() == 'integer':
data = int(data)
elif self.data_type.lower() == 'float':
data = float(data)
elif self.data_type.lower() == 'boolean':
if data.lower() == 'true':
data = True
else:
data = False
#data = self.data_type.convert(data)
_valid = self.validator(self, self.valid_mode)
if not _valid:
raise InvalidParameterValue('Input data not valid using '
'mode %s' % (self.valid_mode))
IOHandler.set_data(self, data)
data = property(fget=get_data, fset=set_data)
class BasicIO:
"""Basic Input or Ouput class
"""
def __init__(self, identifier, title=None, abstract=None):
self.identifier = identifier
self.title = title
self.abstract = abstract
class BasicLiteral:
"""Basic literal input/output class
"""
def __init__(self, data_type=None, uoms=None):
if not data_type:
data_type = LITERAL_DATA_TYPES[2]
assert data_type in LITERAL_DATA_TYPES
self.data_type = data_type
# list of uoms
self.uoms = []
# current uom
self._uom = None
# add all uoms (upcasting to UOM)
if uoms is not None:
for uom in uoms:
if not isinstance(uom, UOM):
uom = UOM(uom)
self.uoms.append(uom)
if self.uoms:
# default/current uom
self.uom = self.uoms[0]
@property
def uom(self):
return self._uom
@uom.setter
def uom(self, uom):
self._uom = uom
class BasicComplex(object):
"""Basic complex input/output class
"""
def __init__(self, data_format=None, supported_formats=None):
self._data_format = None
self._supported_formats = None
if supported_formats:
self.supported_formats = supported_formats
if self.supported_formats:
# not an empty list, set the default/current format to the first
self.data_format = supported_formats[0]
def get_format(self, mime_type):
"""
:param mime_type: given mimetype
:return: Format
"""
for frmt in self.supported_formats:
if frmt.mime_type == mime_type:
return frmt
else:
return None
@property
def validator(self):
"""Return the proper validator for given data_format
"""
return self.data_format.validate
@property
def supported_formats(self):
return self._supported_formats
@supported_formats.setter
def supported_formats(self, supported_formats):
"""Setter of supported formats
"""
def set_validator(supported_format):
if not supported_format.validate or \
supported_format.validate == emptyvalidator:
supported_format.validate =\
get_validator(supported_format.mime_type)
return supported_format
self._supported_formats = list(map(set_validator, supported_formats))
@property
def data_format(self):
return self._data_format
@data_format.setter
def data_format(self, data_format):
"""self data_format setter
"""
if self._is_supported(data_format):
self._data_format = data_format
if not data_format.validate or\
data_format.validate == emptyvalidator:
data_format.validate = get_validator(data_format.mime_type)
else:
raise InvalidParameterValue("Requested format "
"%s, %s, %s not supported" %\
(data_format.mime_type,
data_format.encoding,
data_format.schema),
'mimeType')
def _is_supported(self, data_format):
if self.supported_formats:
for frmt in self.supported_formats:
if frmt.same_as(data_format):
return True
return False
class BasicBoundingBox(object):
"""Basic BoundingBox input/output class
"""
def __init__(self, crss=None, dimensions=2):
self.crss = crss or ['epsg:4326']
self.crs = self.crss[0]
self.dimensions = dimensions
self.ll = []
self.ur = []
class LiteralInput(BasicIO, BasicLiteral, SimpleHandler):
"""LiteralInput input abstract class
"""
def __init__(self, identifier, title=None, abstract=None,
data_type=None, workdir=None, allowed_values=None, uoms=None,
mode=MODE.NONE):
BasicIO.__init__(self, identifier, title, abstract)
BasicLiteral.__init__(self, data_type, uoms)
SimpleHandler.__init__(self, workdir, data_type, mode=mode)
self.allowed_values = allowed_values
self.any_value = self.allowed_values is None
@property
def validator(self):
"""Get validator for any value as well as allowed_values
"""
if self.any_value:
return validate_anyvalue
else:
return validate_allowed_values
class LiteralOutput(BasicIO, BasicLiteral, SimpleHandler):
"""Basic LiteralOutput class
"""
def __init__(self, identifier, title=None, abstract=None,
data_type=None, workdir=None, uoms=None, validate=None,
mode=MODE.NONE):
BasicIO.__init__(self, identifier, title, abstract)
BasicLiteral.__init__(self, data_type, uoms)
SimpleHandler.__init__(self, workdir=None, data_type=data_type,
mode=mode)
self._storage = None
@property
def storage(self):
return self._storage
@storage.setter
def storage(self, storage):
self._storage = storage
@property
def validator(self):
"""Get validator for any value as well as allowed_values
"""
return validate_anyvalue
class BBoxInput(BasicIO, BasicBoundingBox, IOHandler):
"""Basic Bounding box input abstract class
"""
def __init__(self, identifier, title=None, abstract=None, crss=None,
dimensions=None, workdir=None, mode=MODE.NONE):
BasicIO.__init__(self, identifier, title, abstract)
BasicBoundingBox.__init__(self, crss, dimensions)
IOHandler.__init__(self, workdir=None, mode=mode)
class BBoxOutput(BasicIO, BasicBoundingBox, SimpleHandler):
"""Basic BoundingBox output class
"""
def __init__(self, identifier, title=None, abstract=None, crss=None,
dimensions=None, workdir=None, mode=MODE.NONE):
BasicIO.__init__(self, identifier, title, abstract)
BasicBoundingBox.__init__(self, crss, dimensions)
SimpleHandler.__init__(self, workdir=None, mode=mode)
self._storage = None
@property
def storage(self):
return self._storage
@storage.setter
def storage(self, storage):
self._storage = storage
class ComplexInput(BasicIO, BasicComplex, IOHandler):
"""Complex input abstract class
>>> ci = ComplexInput()
>>> ci.validator = 1
>>> ci.validator
1
"""
def __init__(self, identifier, title=None, abstract=None,
workdir=None, data_format=None, supported_formats=None,
mode=MODE.NONE):
BasicIO.__init__(self, identifier, title, abstract)
IOHandler.__init__(self, workdir=workdir, mode=mode)
BasicComplex.__init__(self, data_format, supported_formats)
class ComplexOutput(BasicIO, BasicComplex, IOHandler):
"""Complex output abstract class
>>> # temporary configuration
>>> import ConfigParser
>>> from pywps.storage import *
>>> config = ConfigParser.RawConfigParser()
>>> config.add_section('FileStorage')
>>> config.set('FileStorage', 'target', './')
>>> config.add_section('server')
>>> config.set('server', 'outputurl', 'http://foo/bar/filestorage')
>>>
>>> # create temporary file
>>> tiff_file = open('file.tiff', 'w')
>>> tiff_file.write("AA")
>>> tiff_file.close()
>>>
>>> co = ComplexOutput()
>>> co.set_file('file.tiff')
>>> fs = FileStorage(config)
>>> co.storage = fs
>>>
>>> url = co.get_url() # get url, data are stored
>>>
>>> co.get_stream().read() # get data - nothing is stored
'AA'
"""
def __init__(self, identifier, title=None, abstract=None,
workdir=None, data_format=None, supported_formats=None,
mode=MODE.NONE):
BasicIO.__init__(self, identifier, title, abstract)
IOHandler.__init__(self, workdir=workdir, mode=mode)
BasicComplex.__init__(self, data_format, supported_formats)
self._storage = None
@property
def storage(self):
return self._storage
@storage.setter
def storage(self, storage):
self._storage = storage
def get_url(self):
"""Return URL pointing to data
"""
(outtype, storage, url) = self.storage.store(self)
return url
class UOM(object):
"""
:param uom: unit of measure
"""
def __init__(self, uom=''):
self.uom = uom
def describe_xml(self):
elem = OWS.UOM(
self.uom
)
elem.attrib['{%s}reference' % NAMESPACES['ows']] = OGCUNIT[self.uom]
return elem
def execute_attribute(self):
return OGCUNIT[self.uom]
if __name__ == "__main__":
import doctest
import os
from pywps.wpsserver import temp_dir
with temp_dir() as tmp:
os.chdir(tmp)
doctest.testmod()
|
|
""" Test module for the command-line interface of SLAM.
It imports the different functions used by the CLI and calls them directly with
various arguments, simulating a call from a shell.
Note: This module decreases the logging level to minimize the message flow to
the console during the test. It also replaces stdout by a StringIO, which enables
the capture of stdout in a string and allows us to compare it to a reference
output.
"""
import os
import sys
import argparse
import StringIO
import logging
from nose.tools import assert_raises
from slam.models import Pool, Host, Address, Property
from slam import generator
import slam_cli
def setup():
logging.basicConfig(level=logging.CRITICAL)
def test_parse_args():
ap = slam_cli.init_argparser()
ns = slam_cli.parse_args(ap, "-a list".split())
assert ns.action == "list"
assert not ns.random
ns = slam_cli.parse_args(ap, "-a create -pn foo -r -H slamserver".split())
assert ns.random
assert_raises(SystemExit, slam_cli.parse_args, ap, "".split())
assert_raises(SystemExit, slam_cli.parse_args, ap,
"--action list -a delete".split())
assert_raises(SystemExit, slam_cli.parse_args, ap, "-a wrong".split())
assert_raises(SystemExit, slam_cli.parse_args, ap,
"-a list -a create".split())
assert_raises(SystemExit, slam_cli.parse_args, ap, "--po poolname".split())
assert_raises(SystemExit, slam_cli.parse_args, ap,
"-pn poolname -pn bar".split())
def test_list():
saved_out = sys.stdout
ap = slam_cli.init_argparser()
args = slam_cli.parse_args(ap, "-a list -pn inexistant".split())
assert_raises(SystemExit, slam_cli.list_, args)
args = slam_cli.parse_args(ap, "-a list -H inexistant".split())
assert_raises(SystemExit, slam_cli.list_, args)
args = slam_cli.parse_args(ap, "-a list -A inexistant".split())
assert_raises(SystemExit, slam_cli.list_, args)
args = slam_cli.parse_args(ap,
"-a create -pn test1 -p 192.168.0.0/16".split())
slam_cli.create(args)
args = slam_cli.parse_args(ap, ("-a create -H host1 -pn test1 -m mac-1 "
+ "--alias alias1 --alias alias2 --serial srlnm "
+ "--inventory invnum --nodns").split())
slam_cli.create(args)
args = slam_cli.parse_args(ap,
"-a modify -A 192.168.0.0 --comment comment".split())
slam_cli.modify(args)
args = slam_cli.parse_args(ap, "-a list".split())
slam_cli.list_(args)
sys.stdout = StringIO.StringIO()
args = slam_cli.parse_args(ap, "-a list -pn test1".split())
slam_cli.list_(args)
assert(sys.stdout.getvalue() ==
'Pool: test1 (range: 192.168.0.0/16), 1/65536 (0%)\n' +
'192.168.0.0\t\thost1\n')
sys.stdout = StringIO.StringIO()
args = slam_cli.parse_args(ap, "-a list -H host1".split())
slam_cli.list_(args)
assert(sys.stdout.getvalue() == 'Host host1, mac: mac-1'
+ '\nSerial number: srlnm\nInventory number: invnum\n'
+ 'Alias: alias1, alias2\nNODNS\nAddress 192.168.0.0 (pool: test1)\n')
sys.stdout = StringIO.StringIO()
args = slam_cli.parse_args(ap, "-a list -A 192.168.0.0".split())
slam_cli.list_(args)
assert(sys.stdout.getvalue() ==
'Address: 192.168.0.0\n'
+ '\tPool: test1 (range: 192.168.0.0/16)\n'
+ '\tHost: host1\n'
+ '\tComment:\ncomment\n')
args = slam_cli.parse_args(ap, "-a setprop -pn test1 building=200".split())
slam_cli.set_(args)
args = slam_cli.parse_args(ap, "-a setprop -H host1 building=333".split())
slam_cli.set_(args)
sys.stdout = StringIO.StringIO()
args = slam_cli.parse_args(ap, "-a list building".split())
slam_cli.list_(args)
args = slam_cli.parse_args(ap, "-a list building=333".split())
slam_cli.list_(args)
args = slam_cli.parse_args(ap, "-a list building=200".split())
slam_cli.list_(args)
assert(sys.stdout.getvalue() ==
"Pool: test1 (range: 192.168.0.0/16)\n"
+ "Host: host1\n"
+ "Host: host1\n"
+ "Pool: test1 (range: 192.168.0.0/16)\n")
args = slam_cli.parse_args(ap, "-a create -g quatgen -o - quattor".split())
slam_cli.create(args)
arglist = "-a list -g".split()
arglist.append("")
args = slam_cli.parse_args(ap, arglist)
sys.stdout = StringIO.StringIO()
slam_cli.list_(args)
assert(sys.stdout.getvalue() ==
"quatgen (quatt), output file: \"-\"\n")
sys.stdout = saved_out
def test_list_generator():
saved_out = sys.stdout
ap = slam_cli.init_argparser()
generator.Config.objects.all().delete()
args = slam_cli.parse_args(ap, "-a create -g bindgen bind -o -".split())
slam_cli.create(args)
args = slam_cli.parse_args(ap,
("-a create -g quatgen quattor -o /tmp/out --header hdr --footer ftr "
+ "--checkfile checkf1 --checkfile checkf2").split())
slam_cli.create(args)
sys.stdout = StringIO.StringIO()
args = slam_cli.parse_args(ap, "-a list -g quatgen".split())
slam_cli.list_(args)
assert(sys.stdout.getvalue() == "quatt generator: quatgen\n"
+ ' output file: "/tmp/out"\n'
+ ' header file: "hdr"\n'
+ ' footer file: "ftr"\n'
+ ' check files: "checkf1, checkf2"\n')
sys.stdout = StringIO.StringIO()
arglist = "-a list -g".split()
arglist.append("")
args = slam_cli.parse_args(ap, arglist)
slam_cli.list_(args)
assert(sys.stdout.getvalue() == "bindgen (bind), output file: \"-\"\n"
+ "quatgen (quatt), output file: \"/tmp/out\"\n")
sys.stdout = saved_out
def test_create():
ap = slam_cli.init_argparser()
args = slam_cli.parse_args(ap,
"-a create -pn inexistant -H hostrandom".split())
assert_raises(SystemExit, slam_cli.create, args)
args = slam_cli.parse_args(ap, "-a create -pn testfail -p 192.168".split())
assert_raises(SystemExit, slam_cli.create, args)
args = slam_cli.parse_args(ap, "-a create -H newhost".split())
slam_cli.create(args)
assert Host.objects.filter(name="newhost").count() == 1
args = slam_cli.parse_args(ap, "-a create -pn test2 -p 10.0.0.0/8".split())
slam_cli.create(args)
assert Pool.objects.filter(name="test2").count() == 1
args = slam_cli.parse_args(ap, "-a create -H host2 -pn test2".split())
slam_cli.create(args)
assert Address.objects.filter(host__name="host2").count() == 1
args = slam_cli.parse_args(ap,
"-a get -H host2 -pn test2 -A 10.50.50.50".split())
slam_cli.get(args)
assert Address.objects.filter(host__name="host2").count() == 2
args = slam_cli.parse_args(ap, "-a get -H host2 -pn test2".split())
slam_cli.get(args)
assert Address.objects.filter(host__name="host2").count() == 3
assert Address.objects.filter(addr="10.100.10.100").count() == 0
args = slam_cli.parse_args(ap,
"-a create -pn test2 -A 10.100.10.100".split())
slam_cli.create(args)
assert Address.objects.filter(addr="10.100.10.100").count() == 1
args = slam_cli.parse_args(ap,
"-a create -pn testcat -p 4.5.0.0/16 -c server".split())
slam_cli.create(args)
assert Pool.objects.filter(category="server").count() == 1
args = slam_cli.parse_args(ap, "-a create -H hostcat -c server".split())
slam_cli.create(args)
assert Address.objects.get(host__name="hostcat").pool.category == "server"
args = slam_cli.parse_args(ap, "-a get -pn inexistant".split())
assert_raises(SystemExit, slam_cli.create, args)
args = slam_cli.parse_args(ap, "-a get".split())
assert_raises(SystemExit, slam_cli.create, args)
def test_delete():
ap = slam_cli.init_argparser()
args = slam_cli.parse_args(ap, "-a delete -pn inexistant".split())
assert_raises(SystemExit, slam_cli.delete, args)
return  # NOTE: the remaining delete scenarios below are currently skipped
args = slam_cli.parse_args(ap,
"-a create -pn test3 -p 172.16.0.0/12".split())
slam_cli.create(args)
assert Pool.objects.filter(name="test3").count() == 1
args = slam_cli.parse_args(ap, "-a delete -pn test3".split())
slam_cli.delete(args)
assert Pool.objects.filter(name="test3").count() == 0
args = slam_cli.parse_args(ap,
"-a create -pn test3 -p 172.16.0.0/12".split())
slam_cli.create(args)
args = slam_cli.parse_args(ap, "-a create -pn test3 -A 172.16.0.3".split())
slam_cli.create(args)
assert Address.objects.filter(addr="172.16.0.3").count() == 1
args = slam_cli.parse_args(ap, "-a delete -pn test3 -A 172.16.0.3".split())
slam_cli.delete(args)
assert Address.objects.filter(addr="172.16.0.3").count() == 0
args = slam_cli.parse_args(ap, "-a create -pn test3 -H host3-1".split())
slam_cli.create(args)
args = slam_cli.parse_args(ap, "-a get -pn test3 -H host3-1".split())
slam_cli.create(args)
assert Address.objects.filter(pool__name="test3").count() == 2
args = slam_cli.parse_args(ap, "-a delete -pn test3 -H host3-1".split())
slam_cli.delete(args)
assert Address.objects.filter(pool__name="test3").count() == 0
def test_generate():
saved_out = sys.stdout
ap = slam_cli.init_argparser()
args = slam_cli.parse_args(ap, "-a createconf -pn inexistant".split())
assert_raises(SystemExit, slam_cli.generate, args)
args = slam_cli.parse_args(ap, "-a create -pn test4 -p 1.2.3.0/24".split())
slam_cli.create(args)
args = slam_cli.parse_args(ap,
"-a create -pn test4 -H host4-1 -H host4-2".split())
slam_cli.create(args)
sys.stdout = StringIO.StringIO()
args = slam_cli.parse_args(ap, "-a createconf -pn test4 bind -o -".split())
slam_cli.generate(args)
saved_out.write("@@@@" + sys.stdout.getvalue() + "@@@")
assert(sys.stdout.getvalue() ==
"\n; This section will be automatically generated by SLAM any manual "
+ "change will\n; be overwritten on the next generation of this "
+ "file.\n; Pool test4 (range: 1.2.3.0/24)\n"
+ "host4-1\t1D\tIN\tA\t1.2.3.0\n"
+ "host4-2\t1D\tIN\tA\t1.2.3.1\n"
+ "; END of section automatically generated by SLAM\n")
args = slam_cli.parse_args(ap, "-a createconf bind -o /tmp".split())
assert_raises(SystemExit, slam_cli.generate, args)
sys.stdout = saved_out
def test_modify():
ap = slam_cli.init_argparser()
args = slam_cli.parse_args(ap,
"-a create -pn modifytest1 -p fe80::/64".split())
slam_cli.create(args)
assert(Pool.objects.filter(name="modifytest1").count() == 1
and Pool.objects.filter(name="modifytest2").count() == 0)
args = slam_cli.parse_args(ap,
"-a modify -pn modifytest1 modifytest2".split())
slam_cli.modify(args)
assert(Pool.objects.filter(name="modifytest1").count() == 0
and Pool.objects.filter(name="modifytest2").count() == 1)
args = slam_cli.parse_args(ap,
"-a create -pn modifytest2 -H modifyhost1".split())
slam_cli.create(args)
assert(Host.objects.filter(name="modifyhost1").count() == 1
and Host.objects.filter(name="modifyhost2").count() == 0)
args = slam_cli.parse_args(ap,
"-a modify -H modifyhost1 modifyhost2".split())
slam_cli.modify(args)
assert(Host.objects.filter(name="modifyhost1").count() == 0
and Host.objects.filter(name="modifyhost2").count() == 1)
args = slam_cli.parse_args(ap, "-a modify -H modifyhost2".split())
assert_raises(SystemExit, slam_cli.modify, args)
args = slam_cli.parse_args(ap, "-a modify -pn inexistant".split())
assert_raises(SystemExit, slam_cli.modify, args)
def test_property():
ap = slam_cli.init_argparser()
args = slam_cli.parse_args(ap,
"-a create -pn prop-pool -p 10.250.0.0/16".split())
slam_cli.create(args)
args = slam_cli.parse_args(ap,
"-a setprop -pn prop-pool prop1=val1".split())
slam_cli.set_(args)
prop = Property.objects.get(pool__name="prop-pool", name="prop1")
assert prop.value == "val1"
args = slam_cli.parse_args(ap,
"-a setprop -pn prop-pool prop1=val2".split())
slam_cli.set_(args)
prop = Property.objects.get(pool__name="prop-pool", name="prop1")
assert str(prop) == "prop1: val2"
args = slam_cli.parse_args(ap,
"-a create -pn prop-pool -H hostprop".split())
slam_cli.create(args)
args = slam_cli.parse_args(ap, "-a setprop -H hostprop prop3=val3".split())
slam_cli.set_(args)
prop = Property.objects.get(host__name="hostprop", name="prop3")
assert prop.value == "val3"
args = slam_cli.parse_args(ap, "-a setprop -H hostprop prop3=val4".split())
slam_cli.set_(args)
prop = Property.objects.get(host__name="hostprop", name="prop3")
assert prop.value == "val4"
assert Property.objects.filter(host__name="hostprop").count() == 1
args = slam_cli.parse_args(ap, "-a rmprop -H hostprop prop3".split())
slam_cli.set_(args)
assert Property.objects.filter(host__name="hostprop").count() == 0
assert Property.objects.filter(pool__name="prop-pool").count() == 1
args = slam_cli.parse_args(ap, "-a rmprop -pn prop-pool prop1".split())
slam_cli.set_(args)
assert Property.objects.filter(pool__name="prop-pool").count() == 0
args = slam_cli.parse_args(ap, "-a rmprop foo".split())
assert_raises(SystemExit, slam_cli.set_, args)
args = slam_cli.parse_args(ap, "-a rmprop -pn inexistant foo".split())
assert_raises(SystemExit, slam_cli.set_, args)
args = slam_cli.parse_args(ap, "-a rmprop -H inexistant foo".split())
assert_raises(SystemExit, slam_cli.set_, args)
args = slam_cli.parse_args(ap, "-a setprop -H whatever foo".split())
assert_raises(SystemExit, slam_cli.set_, args)
args = slam_cli.parse_args(ap, "-a setprop -pn inexistant foo=bar".split())
assert_raises(SystemExit, slam_cli.set_, args)
|
|
# -*- coding: utf-8 -*-
"""
"""
from __future__ import absolute_import
from ctypes import *
from itertools import count, takewhile
from pkg_resources import resource_filename
from shutil import copyfileobj
import tempfile
import pytest
from six.moves import map
from six import text_type
from libtcd.compat import bytes_
from libtcd.util import remove_if_exists
TEST_TCD = resource_filename('libtcd.tests', 'test.tcd')
@pytest.fixture
def test_tcdfile(request):
from libtcd._libtcd import open_tide_db, close_tide_db, ENCODING
from libtcd.compat import bytes_
# Copy the original to make sure it doesn't get mutated
tmpfp = tempfile.NamedTemporaryFile()
with open(TEST_TCD, "rb") as infp:
copyfileobj(infp, tmpfp)
tmpfp.flush()
def fin():
close_tide_db()
tmpfp.close()
request.addfinalizer(fin)
open_tide_db(bytes_(tmpfp.name, ENCODING))
return tmpfp.name
@pytest.fixture
def empty_tcdfile(request):
from libtcd._libtcd import (
create_tide_db, close_tide_db, ENCODING,
c_char_p, c_float32, c_float64, POINTER)
from libtcd.compat import bytes_
filename = tempfile.NamedTemporaryFile(delete=False).name
def fin():
close_tide_db()
remove_if_exists(filename)
request.addfinalizer(fin)
constituents = (c_char_p * 0)()
speeds = (c_float64 * 0)()
equilibriums = epochs = (POINTER(c_float32) * 0)()
create_tide_db(bytes_(filename, ENCODING), 0, constituents, speeds,
1970, 0, equilibriums, epochs)
return filename
@pytest.fixture(params=['test_tcdfile', 'empty_tcdfile'])
def any_tcdfile(request):
fixture = request.param
return request.getfuncargvalue(fixture)
def test_get_tide_db_header(test_tcdfile):
from libtcd._libtcd import get_tide_db_header
header = get_tide_db_header()
assert b'v2.2' in header.version
assert header.major_rev == 2
assert header.minor_rev == 2
assert header.number_of_records == 2
assert header.start_year == 1970
assert header.number_of_years == 68
@pytest.mark.parametrize("method,string0,contains", [
('get_level_units', b'Unknown', b'knots^2'),
('get_dir_units', b'Unknown', b'degrees'),
('get_restriction', b'Public Domain', b'DoD/DoD Contractors Only'),
('get_country', b'Unknown', b'United States'),
('get_legalese', b'NULL', None),
('get_datum', b'Unknown', b'Mean Lower Low Water'),
('get_tzfile', b'Unknown', b':America/Los_Angeles'),
])
def test_get_string(any_tcdfile, method, string0, contains):
from libtcd import _libtcd
getter = getattr(_libtcd, method)
assert getter(0) == string0
assert getter(-1) == b'Unknown'
if contains is not None:
strings = takewhile(lambda s: s != b'Unknown', map(getter, count(1)))
assert contains in strings
else:
assert getter(1) == b'Unknown'
@pytest.mark.parametrize("method,str,expected", [
('find_level_units', b'knots^2', 4),
('find_dir_units', b'degrees', 2),
('find_restriction', b'Non-commercial use only', 2),
('find_country', b'United States', 224),
('find_legalese', b'NULL', 0),
('find_datum', b'Mean Lower Low Water', 3),
('find_tzfile', b':America/Los_Angeles', 115),
])
def test_find_string(test_tcdfile, method, str, expected):
from libtcd import _libtcd
finder = getattr(_libtcd, method)
assert finder(str) == expected
assert finder(b'does not exist') == -1
@pytest.mark.parametrize(
"table", ['restriction', 'country', 'legalese', 'datum', 'tzfile'])
def test_add_string(any_tcdfile, table):
from libtcd import _libtcd
get = getattr(_libtcd, 'get_%s' % table)
add = getattr(_libtcd, 'add_%s' % table)
s = b'some string'
i = add(s)
assert i > 0
assert get(i) == s
j = add(s)
assert j != i
assert get(j) == s
@pytest.mark.parametrize(
"table", ['restriction', 'country', 'legalese', 'datum', 'tzfile'])
def test_find_or_add_string(any_tcdfile, table):
from libtcd import _libtcd
get = getattr(_libtcd, 'get_%s' % table)
find = getattr(_libtcd, 'find_%s' % table)
find_or_add = getattr(_libtcd, 'find_or_add_%s' % table)
s = b'does not exist'
assert find(s) == -1
i = find_or_add(s)
assert i > 0
assert get(i) == s
assert find(s) == i
assert find_or_add(s) == i
def test_level_units(any_tcdfile):
from libtcd._libtcd import get_level_units, get_tide_db_header
header = get_tide_db_header()
level_units = map(get_level_units, range(header.level_unit_types))
assert list(level_units) == [
b'Unknown', b'feet', b'meters', b'knots', b'knots^2']
def test_dir_units(any_tcdfile):
from libtcd._libtcd import get_dir_units, get_tide_db_header
header = get_tide_db_header()
dir_units = map(get_dir_units, range(header.dir_unit_types))
assert list(dir_units) == [b'Unknown', b'degrees true', b'degrees']
def test_get_partial_tide_record(test_tcdfile):
from libtcd._libtcd import get_partial_tide_record
header = get_partial_tide_record(0)
assert header.name.startswith(b'Seattle,')
assert get_partial_tide_record(42) is None
def test_get_next_partial_tide_record(test_tcdfile):
from libtcd._libtcd import (
get_partial_tide_record,
get_next_partial_tide_record,
)
headers = [get_partial_tide_record(0)]
next_header = get_next_partial_tide_record()
while next_header is not None:
headers.append(next_header)
next_header = get_next_partial_tide_record()
assert len(headers) == 2
def test_open_tide_db_failure(tmpdir):
from libtcd import _libtcd
missing_tcd = text_type(tmpdir.join('missing.tcd'))
bmissing_tcd = bytes_(missing_tcd, _libtcd.ENCODING)
with pytest.raises(_libtcd.Error) as excinfo:
_libtcd.open_tide_db(bmissing_tcd)
assert 'open_tide_db failed' in excinfo.exconly()
def test_create_tide_db_failure(tmpdir):
from libtcd import _libtcd
test_tcd = text_type(tmpdir.join('missing', 'test.tcd'))
btest_tcd = bytes_(test_tcd, _libtcd.ENCODING)
constituents = c_uint32(0)
constituent = pointer(c_char_p(b"Foo1"))
speed = (_libtcd.c_float64 * 1)(1.234)
start_year = c_int32(1970)
num_years = c_uint32(1)
x = (_libtcd.c_float32 * 1)(1.0)
equilibrium = (POINTER(_libtcd.c_float32) * 1)(x)
node_factor = (POINTER(_libtcd.c_float32) * 1)(x)
with pytest.raises(_libtcd.Error) as excinfo:
_libtcd.create_tide_db(btest_tcd,
constituents, constituent, speed,
start_year, num_years, equilibrium, node_factor)
assert 'create_tide_db failed' in excinfo.exconly()
def test_add_tide_record_failure(empty_tcdfile):
from libtcd import _libtcd
header = _libtcd.get_tide_db_header()
_libtcd.close_tide_db()
rec = _libtcd.TIDE_RECORD()
with pytest.raises(_libtcd.Error) as excinfo:
_libtcd.add_tide_record(pointer(rec), header)
assert 'add_tide_record failed' in excinfo.exconly()
def test_update_tide_record_failure(empty_tcdfile):
from libtcd import _libtcd
header = _libtcd.get_tide_db_header()
rec = _libtcd.TIDE_RECORD()
with pytest.raises(_libtcd.Error) as excinfo:
_libtcd.update_tide_record(c_int32(1), pointer(rec), header)
assert 'update_tide_record failed' in excinfo.exconly()
def test_delete_tide_record_failure(empty_tcdfile):
from libtcd import _libtcd
header = _libtcd.get_tide_db_header()
with pytest.raises(_libtcd.Error) as excinfo:
_libtcd.delete_tide_record(c_int32(0), header)
assert 'delete_tide_record failed' in excinfo.exconly()
|
|
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# [email protected]
################################################################################
""" Diagram module
Provides:
o Diagram - Container for information concerning the tracks to be
drawn in a diagram, and the interface for defining the
diagram (possibly split these functions in later version?)
For drawing capabilities, this module uses reportlab to draw and write
the diagram:
http://www.reportlab.com
For dealing with biological information, the package expects BioPython
objects - namely SeqRecord objects containing SeqFeature objects.
"""
#------------------------------------------------------------------------------
# IMPORTS
# ReportLab
from reportlab.graphics import renderPS, renderPDF, renderSVG
try:
from reportlab.graphics import renderPM
except ImportError:
#This is an optional part of ReportLab, so may not be installed.
renderPM=None
from reportlab.lib import pagesizes
# GenomeDiagram
from _LinearDrawer import LinearDrawer
from _CircularDrawer import CircularDrawer
from _Track import Track
# Builtins
import sys
from Bio.Graphics import _write
#------------------------------------------------------------------------------
# CLASSES
#------------------------------------------------------------
# Diagram
class Diagram(object):
""" Diagram
Provides:
Attributes:
o name String, identifier for the diagram
o tracks List of Track objects comprising the diagram
o format String, format of the diagram (circular/linear)
o pagesize String, the pagesize of output
o orientation String, the page orientation (landscape/portrait)
o x Float, the proportion of the page to take up with even
X margins
o y Float, the proportion of the page to take up with even
Y margins
o xl Float, the proportion of the page to take up with the
left X margin
o xr Float, the proportion of the page to take up with the
right X margin
o yt Float, the proportion of the page to take up with the
top Y margin
o yb Float, the proportion of the page to take up with the
bottom Y margin
o circle_core Float, the proportion of the available radius to leave
empty at the center of a circular diagram (0 to 1).
o start Int, the base/aa position to start the diagram at
o end Int, the base/aa position to end the diagram at
o tracklines Boolean, True if track guidelines are to be drawn
o fragments Int, for a linear diagram, the number of equal divisions
into which the sequence is divided
o fragment_size Float, the proportion of the space available to each
fragment that should be used in drawing
o track_size Float, the proportion of the space available to each
track that should be used in drawing
o circular Boolean, True if the genome/sequence to be drawn is, in
reality, circular.
Methods:
o __init__(self, name=None) Called on instantiation
o draw(self, format='circular', ...) Instructs the package to draw
the diagram
o write(self, filename='test1.ps', output='PS') Writes the drawn
diagram to a specified file, in a specified format.
o add_track(self, track, track_level) Adds a Track object to the
diagram, with instructions to place it at a particular level on
the diagram
o del_track(self, track_level) Removes the track that is to be drawn
at a particular level on the diagram
o get_tracks(self) Returns the list of Track objects to be drawn
contained in the diagram
o renumber_tracks(self, low=1) Renumbers all tracks consecutively,
optionally from a passed lowest number
o get_levels(self) Returns a list of levels currently occupied by
Track objects
o get_drawn_levels(self) Returns a list of levels currently occupied
by Track objects that will be shown in the drawn diagram (i.e.
are not hidden)
o range(self) Returns the lowest- and highest-numbered positions
contained within features in all tracks on the diagram as a tuple.
o __getitem__(self, key) Returns the track contained at the level of
the passed key
o __str__(self) Returns a formatted string describing the diagram
"""
def __init__(self, name=None, format='circular', pagesize='A3',
orientation='landscape', x=0.05, y=0.05, xl=None,
xr=None, yt=None, yb=None, start=None, end=None,
tracklines=False, fragments=10, fragment_size=0.9,
track_size=0.75, circular=True, circle_core=0.0):
""" __init__(self, name=None)
o name String describing the diagram
o format String: 'circular' or 'linear', depending on the sort of
diagram required
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating
tracks on the diagram
o fragments Int, for linear diagrams, the number of sections into
which to break the sequence being drawn
o fragment_size Float (0->1), for linear diagrams, describing
the proportion of space in a fragment to take
up with tracks
o track_size Float (0->1) describing the proportion of space
in a track to take up with sigils
o circular Boolean flag to indicate whether the sequence being
drawn is circular
"""
self.tracks = {} # Holds all Track objects, keyed by level
self.name = name # Description of the diagram
# Diagram page setup attributes
self.format = format
self.pagesize = pagesize
self.orientation = orientation
self.x = x
self.y = y
self.xl = xl
self.xr = xr
self.yt = yt
self.yb = yb
self.start = start
self.end = end
self.tracklines = tracklines
self.fragments = fragments
self.fragment_size = fragment_size
self.track_size = track_size
self.circular = circular
self.circle_core = circle_core
self.cross_track_links = []
def set_all_tracks(self, attr, value):
""" set_all_tracks(self, attr, value)
o attr An attribute of the Track class
o value The value to set that attribute
Set the passed attribute of all tracks in the set to the
passed value
"""
for track in self.tracks.values():
if hasattr(track, attr): # If the track has the attribute
if getattr(track, attr) != value:
setattr(track, attr, value) # set it to the passed value
def draw(self, format=None, pagesize=None, orientation=None,
x=None, y=None, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=None, fragments=None,
fragment_size=None, track_size=None, circular=None,
circle_core=None, cross_track_links=None):
"""Draw the diagram, with passed parameters overriding existing attributes.
"""
# Pass the parameters to the drawer objects that will build the
# diagrams. At the moment, we detect overrides with an 'or' against the
# instantiation arguments, but I suspect there's a neater way to do
# this.
if format == 'linear':
drawer = LinearDrawer(self, pagesize or self.pagesize,
orientation or self.orientation,
x or self.x, y or self.y, xl or self.xl,
xr or self.xr, yt or self.yt,
yb or self.yb, start or self.start,
end or self.end,
tracklines or self.tracklines,
fragments or self.fragments,
fragment_size or self.fragment_size,
track_size or self.track_size,
cross_track_links or self.cross_track_links)
else:
drawer = CircularDrawer(self, pagesize or self.pagesize,
orientation or self.orientation,
x or self.x, y or self.y, xl or self.xl,
xr or self.xr, yt or self.yt,
yb or self.yb, start or self.start,
end or self.end,
tracklines or self.tracklines,
track_size or self.track_size,
circular or self.circular,
circle_core or self.circle_core,
cross_track_links or self.cross_track_links)
drawer.draw() # Tell the drawer to complete the drawing
self.drawing = drawer.drawing # Get the completed drawing
def write(self, filename='test1.ps', output='PS', dpi=72):
""" write(self, filename='test1.ps', output='PS', dpi=72)
o filename String indicating the name of the output file,
or a handle to write to.
o output String indicating output format, one of PS, PDF,
SVG, or provided the ReportLab renderPM module is
installed, one of the bitmap formats JPG, BMP,
GIF, PNG or TIFF. The format can be given
in upper or lower case.
o dpi Resolution (dots per inch) for bitmap formats.
Write the completed drawing out to a file in a prescribed format
No return value.
"""
return _write(self.drawing, filename, output, dpi=dpi)
def write_to_string(self, output='PS', dpi=72):
""" write(self, output='PS')
o output String indicating output format, one of PS, PDF,
SVG, JPG, BMP, GIF, PNG or TIFF (as
specified for the write method).
o dpi Resolution (dots per inch) for bitmap formats.
Return the completed drawing as a string in a prescribed format
"""
#The ReportLab drawToString method, which this function used to call,
#just uses a cStringIO or StringIO handle with the drawToFile method.
#In order to put all our complicated file format specific code in one
#place we'll just use a StringIO handle here:
from StringIO import StringIO
handle = StringIO()
self.write(handle, output, dpi)
return handle.getvalue()
def add_track(self, track, track_level):
""" add_track(self, track, track_level)
o track Track object to draw
o track_level Int, the level at which the track will be drawn
(above an arbitrary baseline)
Add a pre-existing Track to the diagram at a given level
"""
if track is None:
raise ValueError("Must specify track")
if track_level not in self.tracks: # No track at that level
self.tracks[track_level] = track # so just add it
else: # Already a track there, so shunt all higher tracks up one
occupied_levels = self.get_levels() # Get list of occupied levels...
occupied_levels.sort() # ...sort it...
occupied_levels.reverse() # ...reverse it (highest first)
for val in occupied_levels:
# If track value >= that to be added
if val >= track_level:
self.tracks[val+1] = self.tracks[val] # ...increment by 1
self.tracks[track_level] = track # And put the new track in
self.tracks[track_level].track_level = track_level
def new_track(self, track_level, **args):
""" new_track(self, track_level) -> Track
o track_level Int, the level at which the track will be drawn
(above an arbitrary baseline)
Add a new Track to the diagram at a given level and returns it for
further user manipulation.
"""
newtrack = Track()
for key in args:
setattr(newtrack, key, args[key])
if track_level not in self.tracks: # No track at that level
self.tracks[track_level] = newtrack # so just add it
else: # Already a track there, so shunt all higher tracks up one
occupied_levels = self.get_levels() # Get list of occupied levels...
occupied_levels.sort() # ...sort it...
occupied_levels.reverse() # ...reverse (highest first)...
for val in occupied_levels:
if val >= track_level: # Track value >= that to be added
self.tracks[val+1] = self.tracks[val] # ..increment by 1
self.tracks[track_level] = newtrack # And put the new track in
self.tracks[track_level].track_level = track_level
return newtrack
def del_track(self, track_level):
""" del_track(self, track_level)
o track_level Int, the level of the track on the diagram to delete
Remove the track at the passed level from the diagram
"""
del self.tracks[track_level]
def get_tracks(self):
""" get_tracks(self) -> list
Returns a list of the tracks contained in the diagram
"""
return self.tracks.values()
def move_track(self, from_level, to_level):
""" move_track(self, from_level, to_level)
o from_level Int, the level at which the track to be moved is
found
o to_level Int, the level to move the track to
Moves a track from one level on the diagram to another
"""
aux = self.tracks[from_level]
del self.tracks[from_level]
self.add_track(aux, to_level)
def renumber_tracks(self, low=1, step=1):
""" renumber_tracks(self, low=1, step=1)
o low Int, the track number to start from
o step Int, the track interval for separation of tracks
Reassigns all the tracks to run consecutively from the lowest
value (low)
"""
track = low # Start numbering from here
levels = self.get_levels() #
conversion = {} # Holds new set of levels
for level in levels: # Starting at low...
conversion[track] = self.tracks[level] # Add old tracks to new set
conversion[track].track_level = track
track += step # step interval
self.tracks = conversion # Replace old set of levels with new set
def get_levels(self):
""" get_levels(self) -> [int, int, ...]
Return a sorted list of levels occupied by tracks in the diagram
"""
levels = self.tracks.keys()
levels.sort()
return levels
def get_drawn_levels(self):
""" get_drawn_levels(self) -> [int, int, ...]
Return a sorted list of levels occupied by tracks that are not
explicitly hidden
"""
drawn_levels = [key for key in self.tracks.keys() if \
not self.tracks[key].hide] # get list of shown levels
drawn_levels.sort()
return drawn_levels
def range(self):
""" range(self) -> (int, int)
Returns the lowest and highest base (or mark) numbers contained in
track features as a tuple
"""
lows, highs = [], []
for track in self.tracks.values(): # Get ranges for each track
low, high = track.range()
lows.append(low)
highs.append(high)
return (min(lows), max(highs)) # Return extremes from all tracks
def __getitem__(self, key):
""" __getitem__(self, key) -> Track
o key The id of a track in the diagram
Return the Track object with the passed id
"""
return self.tracks[key]
def __str__(self):
""" __str__(self) -> ""
Returns a formatted string with information about the diagram
"""
outstr = ["\n<%s: %s>" % (self.__class__, self.name)]
outstr.append("%d tracks" % len(self.tracks))
for level in self.get_levels():
outstr.append("Track %d: %s\n" % (level, self.tracks[level]))
outstr = '\n'.join(outstr)
return outstr
|
|
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic linux scsi subsystem and Multipath utilities.
Note, this is not iSCSI.
"""
import os
import re
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from cinder.brick import exception
from cinder.brick import executor
from cinder.i18n import _, _LW, _LE
from cinder.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
MULTIPATH_ERROR_REGEX = re.compile("\w{3} \d+ \d\d:\d\d:\d\d \|.*$")
MULTIPATH_WWID_REGEX = re.compile("\((?P<wwid>.+)\)")
class LinuxSCSI(executor.Executor):
def __init__(self, root_helper, execute=putils.execute,
*args, **kwargs):
super(LinuxSCSI, self).__init__(root_helper, execute,
*args, **kwargs)
def echo_scsi_command(self, path, content):
"""Used to echo strings to scsi subsystem."""
args = ["-a", path]
kwargs = dict(process_input=content,
run_as_root=True,
root_helper=self._root_helper)
self._execute('tee', *args, **kwargs)
def get_name_from_path(self, path):
"""Translates /dev/disk/by-path/ entry to /dev/sdX."""
name = os.path.realpath(path)
if name.startswith("/dev/"):
return name
else:
return None
def remove_scsi_device(self, device):
"""Removes a scsi device based upon /dev/sdX name."""
path = "/sys/block/%s/device/delete" % device.replace("/dev/", "")
if os.path.exists(path):
# flush any outstanding IO first
self.flush_device_io(device)
LOG.debug("Remove SCSI device(%s) with %s" % (device, path))
self.echo_scsi_command(path, "1")
def wait_for_volume_removal(self, volume_path):
"""This is used to ensure that volumes are gone."""
def _wait_for_volume_removal(volume_path):
LOG.debug("Waiting for SCSI mount point %s to be removed.",
volume_path)
if os.path.exists(volume_path):
if self.tries >= self.scan_attempts:
msg = _LE("Exceeded the number of attempts to detect "
"volume removal.")
LOG.error(msg)
raise exception.VolumePathNotRemoved(
volume_path=volume_path)
LOG.debug("%(path)s still exists, rescanning. Try number: "
"%(tries)s",
{'path': volume_path, 'tries': self.tries})
self.tries = self.tries + 1
else:
LOG.debug("SCSI mount point %s has been removed.", volume_path)
raise loopingcall.LoopingCallDone()
# Setup a loop here to give the kernel time
# to remove the volume from /dev/disk/by-path/
self.tries = 0
self.scan_attempts = 3
timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_volume_removal, volume_path)
timer.start(interval=2).wait()
def get_device_info(self, device):
(out, _err) = self._execute('sg_scan', device, run_as_root=True,
root_helper=self._root_helper)
dev_info = {'device': device, 'host': None,
'channel': None, 'id': None, 'lun': None}
if out:
line = out.strip()
line = line.replace(device + ": ", "")
info = line.split(" ")
for item in info:
if '=' in item:
pair = item.split('=')
dev_info[pair[0]] = pair[1]
elif 'scsi' in item:
dev_info['host'] = item.replace('scsi', '')
return dev_info
def remove_multipath_device(self, multipath_name):
"""This removes LUNs associated with a multipath device
and the multipath device itself.
"""
LOG.debug("remove multipath device %s" % multipath_name)
mpath_dev = self.find_multipath_device(multipath_name)
if mpath_dev:
devices = mpath_dev['devices']
LOG.debug("multipath LUNs to remove %s" % devices)
for device in devices:
self.remove_scsi_device(device['device'])
self.flush_multipath_device(mpath_dev['id'])
def flush_device_io(self, device):
"""This is used to flush any remaining IO in the buffers."""
try:
LOG.debug("Flushing IO for device %s" % device)
self._execute('blockdev', '--flushbufs', device, run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
msg = _("Failed to flush IO buffers prior to removing"
" device: (%(code)s)") % {'code': exc.exit_code}
LOG.warn(msg)
def flush_multipath_device(self, device):
try:
LOG.debug("Flush multipath device %s" % device)
self._execute('multipath', '-f', device, run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
def flush_multipath_devices(self):
try:
self._execute('multipath', '-F', run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
def find_multipath_device(self, device):
"""Find a multipath device associated with a LUN device name.
device can be either a /dev/sdX entry or a multipath id.
"""
mdev = None
devices = []
out = None
try:
(out, _err) = self._execute('multipath', '-l', device,
run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
return None
if out:
lines = out.strip()
lines = lines.split("\n")
lines = [line for line in lines
if not re.match(MULTIPATH_ERROR_REGEX, line)]
if lines:
# Use the device name, be it the WWID, mpathN or custom alias
# of a device to build the device path. This should be the
# first item on the first line of output from `multipath -l
# ${path}` or `multipath -l ${wwid}`.
mdev_name = lines[0].split(" ")[0]
mdev = '/dev/mapper/%s' % mdev_name
# Find the WWID for the LUN if we are using mpathN or aliases.
wwid_search = MULTIPATH_WWID_REGEX.search(lines[0])
if wwid_search is not None:
mdev_id = wwid_search.group('wwid')
else:
mdev_id = mdev_name
# Confirm that the device is present.
try:
os.stat(mdev)
except OSError:
LOG.warn(_LW("Couldn't find multipath device %s"), mdev)
return None
LOG.debug("Found multipath device = %(mdev)s"
% {'mdev': mdev})
device_lines = lines[3:]
for dev_line in device_lines:
if dev_line.find("policy") != -1:
continue
dev_line = dev_line.lstrip(' |-`')
dev_info = dev_line.split()
address = dev_info[0].split(":")
dev = {'device': '/dev/%s' % dev_info[1],
'host': address[0], 'channel': address[1],
'id': address[2], 'lun': address[3]
}
devices.append(dev)
if mdev is not None:
info = {"device": mdev,
"id": mdev_id,
"name": mdev_name,
"devices": devices}
return info
return None
|
|
import sublime
import sublime_plugin
import re
import os
import time
import ntpath
import glob
import errno
SETTINGS_FILE = 'FileConcatenator.sublime-settings'
MESSAGE_HEADER = 'Sublime File Concatenator\n===========================\n\n'
MSG_TYPE = {}
MSG_TYPE['INFO'] = 1
MSG_TYPE['WARNING'] = 2
MSG_TYPE['ERROR'] = 3
MSG_TYPE['FATAL'] = 4
#
# Concatenator
#
# Todo:
# "Brand" output-file to prevent bad overwrites?
# Only concat changed files? Somehow
# Add "Increment output file", up to x pieces of them. 0 = don't increment, always overwrite
# Warn about @(unknown_setting_key, value)
#
# BUGS
class ConcatenatorCommand(sublime_plugin.TextCommand):
re_template = re.compile(r'''
(?P<outermatch>
{{
(?P<namespace>\w+)
.
(?P<key>\w+)
}}
)
''', re.VERBOSE | re.IGNORECASE)
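# Illustrative placeholders matched by re_template (the template() method below
# defines which namespaces and keys are actually supported):
#   {{this.filename}}  {{source.fileroot}}  {{referer.realpath}}
#   {{system.date}}    {{system.platform}}  {{result.runtime}}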
# We'll use the exact same regex for matching @imports and @partofs to keep it consistent.
re_method = re.compile(r'''
(\r\n|\n)? # Match any linebreak zero or one time
( # Full replace part
([ \t]*) # Capture any whitespace or tab (indentation)
(?://|\#|\/\*)? # Non-capture any possible beginning of comment
(?:[ \t]*) # Non-capture any possible whitespace
@(import|partof|option|saveto) # Match method
(?:[ \t|url])* # Non-capture whitespace or 'url'
\(([^\)]+)\) # Match "(" and capture everything up until ")"
(?:\;|\*/)? # Non-capture ending comment, or end of line,
)
(\r\n|\n)? # Match any linebreak zero or one time
''', re.VERBOSE | re.IGNORECASE)
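# Illustrative directives matched by re_method (how each method is handled is
# defined in parse() below):
#   // @import('helpers.js')
#   /* @import(glob:modules/*.js) */            <- 'glob:' prefix enables glob matching
#   # @partof('main.js')
#   // @option('trim_output', 'true', 'true')   <- key, value, recursive
#   // @saveto('../dist/')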
# Instance variables
jit_settings_dict = {}
jit_rec_settings_dict = {}
log_list = []
log_list_types = {}
log_list_types[1] = 0 # Info
log_list_types[2] = 0 # Warning
log_list_types[3] = 0 # Error
log_list_types[4] = 0 # Fatal
# Function for resetting all instance variables
def reset_instance (self):
self.jit_settings_dict.clear()
del self.log_list[:]
self.log_list_types[1] = 0 # Info
self.log_list_types[2] = 0 # Warning
self.log_list_types[3] = 0 # Error
self.log_list_types[4] = 0 # Fatal
# The logging method used throughout the plugin
def log (self, msg_type, message, file_dict = 0):
log_entry = ''
if msg_type == MSG_TYPE['INFO']:
if file_dict:
log_entry += file_dict['filename'] + ', '
log_entry += 'INFO: '
log_entry += message
else:
log_entry += '\n'
if file_dict:
log_entry += file_dict['filename'] + ', '
if msg_type == MSG_TYPE['WARNING']:
log_entry += 'WARNING: '
elif msg_type == MSG_TYPE['ERROR']:
log_entry += 'ERROR: '
elif msg_type == MSG_TYPE['FATAL']:
log_entry += 'FATAL: '
log_entry += message
log_entry += '\n'
self.log_list.append(log_entry)
self.log_list_types[msg_type] += 1
def get_jit_setting (self, key, file_dict):
if not file_dict:
return
file_key = file_dict['realpath']
if file_key in self.jit_settings_dict and key in self.jit_settings_dict[file_key]:
return self.jit_settings_dict[file_key][key]
else:
for file_key in self.jit_rec_settings_dict:
if key in self.jit_rec_settings_dict[file_key]:
return self.jit_rec_settings_dict[file_key][key]
def push_jit_setting (self, key, value, recursive, file_dict = 0):
if not file_dict:
return
file_key = file_dict['realpath']
log_msgtype = MSG_TYPE['INFO']
log_message = ''
overwrote = False
if recursive:
if not file_key in self.jit_rec_settings_dict:
self.jit_rec_settings_dict[file_key] = {}
if key in self.jit_rec_settings_dict[file_key]:
overwrote = True
overwrote_val = self.jit_rec_settings_dict[file_key][key]
overwrote_rec = 'True'
self.jit_rec_settings_dict[file_key][key] = value
else:
if not file_key in self.jit_settings_dict:
self.jit_settings_dict[file_key] = {}
if key in self.jit_settings_dict[file_key]:
overwrote = True
overwrote_val = self.jit_settings_dict[file_key][key]
overwrote_rec = 'False'
self.jit_settings_dict[file_key][key] = value
if overwrote:
log_message = 'Overwrote JIT-setting {"' + key + '": "' + overwrote_val + '", recursive="' + overwrote_rec + '"}, {"' + key + '": "' + value + '", recursive="' + str(recursive) + '"}'
else:
log_message = 'Pushed JIT-setting {"' + key + '": "' + value + '", recursive="' + str(recursive) + '"}'
self.log(log_msgtype, log_message, file_dict)
def clear_jit_setting (self, key, recursive, file_dict = 0):
if not file_dict:
return
file_key = file_dict['realpath']
if not key == '*':
log_msgtype = MSG_TYPE['WARNING']
log_message = 'Tried to clear non-existing JIT-setting "' + key + '"'
else:
log_msgtype = MSG_TYPE['INFO']
log_message = ''
if recursive:
if file_key in self.jit_rec_settings_dict:
if key == '*':
self.jit_rec_settings_dict[file_key].clear()
log_message = 'Cleared all JIT-settings recursively'
elif key in self.jit_rec_settings_dict[file_key]:
log_message = 'Cleared recursive JIT-setting "' + key + '"'
self.jit_rec_settings_dict[file_key].pop(key, None)
else:
if file_key in self.jit_settings_dict:
if key == '*':
self.jit_settings_dict[file_key].clear()
log_message = 'Cleared all non-recursive JIT-settings'
elif key in self.jit_settings_dict[file_key]:
log_message = 'Cleared non-recursive JIT-setting "' + key + '"'
self.jit_settings_dict[file_key].pop(key, None)
if log_message:
self.log(log_msgtype, log_message, file_dict)
# A helper function to retrieve the behaviour of this plugin.
# Returns a JIT-setting if available, otherwise a global
def setting (self, file_dict, key, fallback_value = False):
jit_setting = self.get_jit_setting(key, file_dict)
if jit_setting is not None:
return jit_setting
return sublime.load_settings(SETTINGS_FILE).get(key, fallback_value)
# A helper function for formatting size
def format_bytes (self, bytes):
for unit in ['B', 'KB', 'MB', 'GB']:
if bytes < 1024.0:
return '%3.1f %s' % (bytes, unit)
bytes /= 1024.0
return '%3.1f %s' % (bytes, 'TB')
def parse_string_literals (self, string):
return (
string
.replace('\\\\', '{!~db~!}') # Temporarily rename escaped \\ backslashes
.replace('\\\'', "'") # Single quote (')
.replace('\\\"', '"') # Double quote (")
.replace('\\a', '\a') # ASCII Bell (BEL)
.replace('\\b', '\b') # ASCII Backspace (BS)
.replace('\\f', '\f') # ASCII Formfeed (FF)
.replace('\\n', '\n') # ASCII Linefeed (LF)
.replace('\\r', '\r') # ASCII Carriage Return (CR)
.replace('\\t', '\t') # ASCII Horizontal Tab (TAB)
.replace('\\v', '\v') # ASCII Vertical Tab (VT)
.replace('{!~db~!}', '\\') # Revert escaped backslashes
)
# Helper method to return the contents of a file
def file_get_contents(self, filepath, splitLines):
content = ''
try:
handle = open(filepath, 'r')
try:
if splitLines:
content = handle.readlines()
else:
content = handle.read()
finally:
handle.close()
except IOError:
# if exc.errno != errno.ENOENT: raise
# http://stackoverflow.com/questions/82831/how-do-i-check-if-a-file-exists-using-python
self.log(MSG_TYPE['FATAL'], 'Could not read file: ' + filepath)
return content
def get_path_info (self, path, working_dir = ''):
info = {}
# Convert to absolute path if needed
if not os.path.isabs(path):
info['dirname'] = os.path.abspath(ntpath.join(working_dir, path))
else:
info['dirname'] = path
info['dirname'] = ntpath.dirname(info['dirname']) # (C:\source)\file.js
info['filename'] = ntpath.basename(path) # C:\source\(file.js)
split = ntpath.splitext(info['filename'])
info['fileroot'] = split[0] # C:\source\(file).js
info['extension'] = split[1][1:] # C:\source\file.(js)
info['realpath'] = ntpath.join(info['dirname'], info['filename']) # (C:\source\file.js)
info['working_dir'] = working_dir # As specified in the argument. The directory to start from.
info['is_child'] = True
return info
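# Illustrative result (hypothetical Windows-style paths, since ntpath is used
# and os.path.abspath resolves against the current platform):
#   get_path_info('..\lib\util.js', 'C:\project\src') ->
#     {'dirname': 'C:\project\lib', 'filename': 'util.js', 'fileroot': 'util',
#      'extension': 'js', 'realpath': 'C:\project\lib\util.js',
#      'working_dir': 'C:\project\src', 'is_child': True}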
def template (self, file_dict, string, valueDict):
if string:
# Find all {{template_variables}} in the header/footer
for tpl_match in re.finditer(self.re_template, string):
value = False
outermatch = tpl_match.group('outermatch')
namespace = tpl_match.group('namespace')
key = tpl_match.group('key')
# (source/target/referer).*
if namespace == 'this' or namespace == 'source' or namespace == 'referer':
owner = valueDict[namespace]
if key in owner:
value = owner[key]
# Say that .lastmod_date and .lastmod_time exist in both the header and the footer.
# This means that we'll have to call os.path.getmtime 4 times.
# An alternative would be to cache os.path.getmtime temporarily for all files during the concatenation,
# but that would add more complexity to the plugin. I do not know the performance implications
# of calling getmtime repeatedly and, at the time of this writing, do not want to optimize prematurely.
elif key == 'filesize':
value = str(self.format_bytes(os.path.getsize(owner['realpath'])))
elif key == 'lastmod_date':
value = time.strftime(self.setting(file_dict, 'date_format'), time.gmtime(os.path.getmtime(owner['realpath'])))
elif key == 'lastmod_time':
value = time.strftime(self.setting(file_dict, 'time_format'), time.gmtime(os.path.getmtime(owner['realpath'])))
else:
self.log(MSG_TYPE['WARNING'], 'Unknown template key "' + key + '"', file_dict)
# system.*
elif namespace == 'system':
if key == 'time':
value = time.strftime(self.setting(file_dict, 'time_format'))
elif key == 'date':
value = time.strftime(self.setting(file_dict, 'date_format'))
elif key == 'platform':
value = sublime.platform()
elif key == 'arch':
value = sublime.arch()
elif key == 'version':
value = sublime.version()
else:
self.log(MSG_TYPE['WARNING'], 'Unknown template key "' + key + '"', file_dict)
# result.*
elif namespace == 'result':
owner = valueDict[namespace]
tmp = 0
display_x_files = 3
if key == 'num_referenced_files':
tmp = len(owner['referenced_file_dicts'])
value = str(tmp) + (' files' if tmp > 1 else ' file')
elif key == 'referenced_files_size':
value = self.format_bytes(owner['referenced_file_bytes'])
elif key == 'written_filenames':
tmp = len(owner['written_file_dicts'])
value = ', '.join(["'" + fdict['output_filename'] + "'" for fdict in owner['written_file_dicts'][:display_x_files]])
value += (' and ' + str(tmp - display_x_files) + ' more') if tmp > display_x_files else ''
elif key == 'referenced_filenames':
tmp = len(owner['referenced_file_dicts'])
value = ', '.join(["'" + fdict['filename'] + "'" for fdict in owner['referenced_file_dicts'][:display_x_files]])
value += (' and ' + str(tmp - display_x_files) + ' more') if tmp > display_x_files else ''
elif key == 'runtime':
value = "{0:.2f}".format(owner['runtime_end'] - owner['runtime_start'])
elif key == 'num_reused_files':
value = str(owner['num_reused_files'])
else:
self.log(MSG_TYPE['WARNING'], 'Unknown template key "' + key + '"', file_dict)
# ?.*
else:
self.log(MSG_TYPE['WARNING'], 'Unknown namespace "' + namespace + '"', file_dict)
# If we got a value, replace the outermatch ({{template_var}}) with the value
if not value == False:
string = string.replace(outermatch, value)
return self.parse_string_literals(string)
# Writes a file_dict to disc
def write (self, source_file_dict, target_file_dict, referer_file_dict, content, saveto_file_dict = False):
filename = saveto_file_dict['filename'] if saveto_file_dict else ''
dirname = saveto_file_dict['dirname'] if saveto_file_dict else target_file_dict['dirname']
output_filename = filename if filename else self.setting(target_file_dict, 'tpl_output_filename')
output_filename = self.template(target_file_dict, output_filename, {
'this': target_file_dict,
'source': source_file_dict,
'referer': referer_file_dict
})
# The absolute path to the output file
output_realpath = ntpath.join(dirname, output_filename)
# Safety net
if not saveto_file_dict and os.path.isfile(output_realpath) and target_file_dict['filename'] == output_filename:
return self.log(MSG_TYPE['FATAL'], 'A file already exists at the path specified and the name equals the original. I will not continue at the risk of overwriting the original.\n\nEvaluated filename:\n' + output_filename + '\n\nDirectory:\n' + target_file_dict['dirname'] + '\n\n' + 'Please look over your settings.', target_file_dict)
# If the source's filename equals the target's, this is the last write.
# Depending on current settings, trim the content.
if source_file_dict['filename'] == target_file_dict['filename'] and self.setting(target_file_dict, 'trim_output'):
content = content.strip()
# The w-flag will create a new file or overwrite an existing file.
with open(output_realpath, 'w') as handle:
handle.write(content)
target_file_dict['output_filename'] = output_filename
target_file_dict['output_dirname'] = dirname
target_file_dict['output_realpath'] = output_realpath
return target_file_dict
def parse (self, target_file_dict, referer_file_dict, callback, memo = False):
if not memo:
memo = {}
memo['runtime_start'] = time.time()
memo['written_file_dicts'] = []
memo['referenced_file_dicts'] = []
memo['referenced_file_bytes'] = 0
memo['partof_queue'] = []
memo['source_file_dict'] = target_file_dict
memo['missing_parents'] = []
memo['missing_children'] = []
memo['num_reused_files'] = 0
target_file_dict['is_child'] = False
referer_file_dict['is_child'] = False
# A file can be both a parent and child at the same time.
is_child = target_file_dict['is_child']
is_parent = False
target_content = self.file_get_contents(target_file_dict['realpath'], False)
target_matches = self.re_method.findall(target_content)
target_partofs = []
self.log(MSG_TYPE['INFO'], 'Started parsing ' + ('child' if is_child else 'parent'), target_file_dict)
# Reset saveto-variables. This can be filled via the @saveto
saveto_file_dicts = []
# Temporary file_dict holder (gets appended to saveto_file_dicts if successful)
saveto_file_dict = False
if len(target_matches) > 0:
for parent_match in target_matches:
beg_linebreak, fullmatch, indentation, method, value, end_linebreak = parent_match
# Clean the value from ' " and whitespaces
value = value.strip('\'" ')
# Users can prefix values with 'glob:' to activate globsearch
globsearch = value.startswith('glob:')
if (globsearch):
value = value[5:] # Remove the 'glob:'-prefix
# Handle 'partof', 'option' and 'saveto' methods
if method == 'partof' or method == 'option' or method == 'saveto':
# Save all partof's and parse them later, when all import's are done
if not is_child and method == 'partof':
memo['partof_queue'].append(parent_match)
# Handle @option
elif method == 'option':
option_split = value.split(',', 2)
if len(option_split) > 1:
option_key = option_split[0].strip('\'" ').lower()
option_val = option_split[1].strip('\'" ')
option_rec = option_split[2].strip('\'" ').lower() if len(option_split) > 2 else False
if option_rec == 'true' or option_rec == '1':
option_rec = True
else:
option_rec = False
if option_val.lower() == 'default':
self.clear_jit_setting(option_key, option_rec, target_file_dict)
else:
self.push_jit_setting(option_key, option_val, option_rec, target_file_dict)
else:
self.log(MSG_TYPE['WARNING'], 'Malformed @option method: "' + fullmatch + '"', target_file_dict)
# Handle @saveto
elif not is_child and method == 'saveto':
if len(value) > 0:
# If the value seems to have an extension, we'll assume the user wants us to write to a file
saveto_file = len(ntpath.splitext(value)[1]) > 1
saveto_file_dict = self.get_path_info(value if saveto_file else ntpath.join(value, 'tempname.ext'), target_file_dict['dirname'])
if not saveto_file:
saveto_file_dict['filename'] = ''
# If the evaluated path does not exist, ask the user if we should create it.
if not os.path.isdir(saveto_file_dict['dirname']):
if not sublime.ok_cancel_dialog(MESSAGE_HEADER + 'The path specified via @saveto in ' + target_file_dict['filename'] + ' does not exist. Do you want me to create it?\n\nPath specified:\n' + ntpath.dirname(value) + os.sep + '\n\nEvaluated:\n' + saveto_file_dict['dirname'] + os.sep):
saveto_file_dict = False
else:
try:
os.makedirs(saveto_file_dict['dirname'])
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(saveto_file_dict['dirname']):
pass
else:
self.log(MSG_TYPE['FATAL'], exc, target_file_dict)
saveto_file_dict = False
raise
# Append to lists of successful
if saveto_file_dict:
saveto_file_dicts.append(saveto_file_dict)
else:
self.log(MSG_TYPE['WARNING'], 'Malformed @saveto method: "' + fullmatch + '"', target_file_dict)
# Remove the fullmatch reference. Prioritize the preceding linebreak, then the succeeding
if end_linebreak:
target_content = target_content.replace(fullmatch + end_linebreak, '', 1)
else:
target_content = target_content.replace(beg_linebreak + fullmatch, '', 1)
# Handle the 'import' method
elif method == 'import':
child_file_dict = self.get_path_info(value, target_file_dict['dirname'])
# Skip if the file does not exist
if not globsearch and not os.path.isfile(child_file_dict['realpath']):
memo['missing_children'].append([child_file_dict, target_file_dict])
# Remove the fullmatch reference. Prioritize the preceding linebreak, then the succeeding
if end_linebreak:
target_content = target_content.replace(fullmatch + end_linebreak, '', 1)
else:
target_content = target_content.replace(beg_linebreak + fullmatch, '', 1)
continue
is_parent = True
child_content = ''
# Look through the "written_file_dicts"-list in the memo and check that we haven't already parsed and written this file to disc.
for already_written_dict in memo['written_file_dicts']:
if already_written_dict['realpath'] == child_file_dict['realpath']:
child_content = self.file_get_contents(already_written_dict['output_realpath'], False)
memo['num_reused_files'] += 1
break
else:
# Normalize the child_matches list.
# globsearch or not, we are gonna continue with a list of 0 or more matches
if globsearch:
child_matches = [self.get_path_info(filematch, target_file_dict['dirname']) for filematch in glob.glob(child_file_dict['realpath'])]
else:
child_matches = [child_file_dict]
if len(child_matches) > 0:
child_content = ''.join([self.parse(child_file_dict, target_file_dict, callback, memo) for child_file_dict in child_matches])
#
memo['referenced_file_dicts'].extend(child_matches)
memo['referenced_file_bytes'] += len(child_content)
# glob: can yield 0 results, therefore we have to check that we actually got any content
if child_content:
# Apply indentation
if len(indentation) > 0 and self.setting(target_file_dict, 'apply_intendation') == True:
child_content = indentation + child_content.replace('\n', '\n' + indentation)
# Stitch together the contents and replace them into the target_content
target_content = target_content.replace(fullmatch, child_content, 1)
else:
self.log(MSG_TYPE['INFO'], 'No methods found', target_file_dict)
# We handle parents and children almost exactly the same, but the user-supplied settings can differ.
# Instead of duplicating the code in the name of clarity, we'll build the setting and template keys dynamically.
write_to_disc = is_parent and (not is_child or self.setting(target_file_dict, 'write_nested_parents'))
trim_type = 'parents' if is_parent else 'children'
tpl_type = 'parent' if is_parent else 'child'
# Trim this file?
if self.setting(target_file_dict, 'trim_' + trim_type):
target_content = target_content.strip()
# Apply header/footer
values = {'this': target_file_dict, 'source': memo['source_file_dict'], 'referer': referer_file_dict}
header = self.setting(target_file_dict, 'tpl_' + tpl_type + '_header')
footer = self.setting(target_file_dict, 'tpl_' + tpl_type + '_footer')
header = header and self.template(target_file_dict, header, values)
footer = footer and self.template(target_file_dict, footer, values)
if header or footer:
target_content = header + target_content + footer
# Write the file(s), save the name in the file_dict and add it to the memo
if write_to_disc:
# If there is no "saveto"`s; pass False
if len(saveto_file_dicts) == 0:
memo['written_file_dicts'].append(self.write(memo['source_file_dict'], target_file_dict, referer_file_dict, target_content, False))
else:
# Loop through all save_to_file_dicts, writing each one.
while len(saveto_file_dicts) > 0:
memo['written_file_dicts'].append(self.write(memo['source_file_dict'], target_file_dict, referer_file_dict, target_content, saveto_file_dicts.pop(0)))
self.log(MSG_TYPE['INFO'], 'Finished parsing', target_file_dict)
# Clear JIT-settings
self.clear_jit_setting(key = '*', recursive = (not is_child), file_dict = target_file_dict)
last_file_to_write = not is_child and not len(memo['partof_queue'])
# Parse all the 'partof'-references
if not is_child:
if len(memo['partof_queue']) > 0:
while memo['partof_queue']:
# .pop() the first item from the queue, get the value (filepath) (@method(value)) at position 4 and clean it.
parent_file_dict = self.get_path_info(memo['partof_queue'].pop(0)[4].strip('\'" '), target_file_dict['dirname'])
parent_file_dict['is_child'] = False
# Skip if the file does not exist
if not os.path.isfile(parent_file_dict['realpath']):
memo['missing_parents'].append([parent_file_dict, target_file_dict])
else:
self.parse(parent_file_dict, target_file_dict, callback, memo)
else:
# End of the line, run the callback.
memo['runtime_end'] = time.time()
self.log(MSG_TYPE['INFO'], 'Parsing finished in ' + "{0:.2f}".format(memo['runtime_end'] - memo['runtime_start']) + ' seconds', target_file_dict)
callback(memo)
return target_content
def parser_callback (self, result):
num_missing_parents = len(result['missing_parents'])
num_missing_children = len(result['missing_children'])
# If one or more files could not be found, pop an error message
if num_missing_parents > 0 or num_missing_children > 0:
str_missing_parents = ''
str_missing_children = ''
# Build string for presenting missing parents
if num_missing_parents > 0:
str_missing_parents = 'Parents:\n' + ''.join([missing_file[0]['realpath'] + ', referer: ' + missing_file[1]['filename'] + '\n' for missing_file in result['missing_parents']]) + '\n\n'
# Build string for presenting missing children
if num_missing_children > 0:
str_missing_children = 'Children:\n' + ''.join([missing_file[0]['realpath'] + ', referer: '+ missing_file[1]['filename'] + '\n' for missing_file in result['missing_children']])
missing_message = MESSAGE_HEADER + str(num_missing_parents + num_missing_children) + ' referenced ' + ('files' if (num_missing_parents + num_missing_children) > 1 else 'file') + ' could not be found:\n\n'
# Notify user
sublime.error_message(missing_message + str_missing_parents + str_missing_children)
infos = self.log_list_types[MSG_TYPE['INFO']]
warnings = self.log_list_types[MSG_TYPE['WARNING']]
errors = self.log_list_types[MSG_TYPE['ERROR']]
fatals = self.log_list_types[MSG_TYPE['FATAL']]
message = MESSAGE_HEADER
message += 'The concatenation finished with '
message += str(warnings) + ' ' + ('warning' if warnings == 1 else 'warnings') + ', '
message += str(errors) + ' ' + ('error' if errors == 1 else 'errors') + ' and '
message += str(fatals) + ' ' + ('fatal error' if fatals == 1 else 'fatal errors') + '.\n\n'
message += '\n'.join(self.log_list)
if warnings or errors or fatals:
sublime.error_message(message)
elif self.setting(0, 'verbose') and infos:
sublime.message_dialog(message)
if len(result['written_file_dicts']) > 0:
# Set status message
status_message = self.setting(0, 'tpl_status_message')
if status_message:
status_message = self.template(0, status_message, {
'result': result
})
sublime.set_timeout(lambda: sublime.status_message(status_message), 0)
#
# The main run-method
# Executed from key-bindings, menu, save etc
#
def run (self, edit, targetFile = False, current_iteration = 0):
# 1) Has had some very intermittent trouble with instance variables not resetting properly, so we have to be quite rough here
self.reset_instance()
self.log(MSG_TYPE['INFO'], 'Initiating concatenation')
if targetFile == False:
targetFile = self.view.file_name()
# Generalized dictionary used throughout the plugin for file information
target_file_dict = self.get_path_info(ntpath.basename(targetFile), ntpath.dirname(targetFile))
# Get the ball rollin'
self.parse(target_file_dict, target_file_dict, self.parser_callback)
# See 1)
self.reset_instance()
#
# Event listener for post-save
#
class FileConcatenatorEventListener(sublime_plugin.EventListener):
def on_post_save(self, view):
settings = sublime.load_settings(SETTINGS_FILE)
# Should we run the concat on save?
if settings.get('run_on_save', False) == False:
return
# Is the current extension set to be ran on save?
if view.file_name().split('.')[-1] not in settings.get('run_on_save_extensions'):
return
sublime.active_window().run_command('concatenator')
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""_IOTensor"""
import tensorflow as tf
class _IOTensorMeta(property):
"""_IOTensorMeta is a decorator that is viewable to __repr__"""
class _IOTensorComponentFunction:
"""_IOTensorComponentFunction will translate call"""
def __init__(self, function, resource, component, shape, dtype):
self._function = function
self._resource = resource
self._component = component
self._length = shape[0]
self._shape = tf.TensorShape([None]).concatenate(shape[1:])
self._dtype = dtype
def __call__(self, start, stop):
start, stop, _ = slice(start, stop).indices(self._length)
return self._function(
self._resource,
start=start,
stop=stop,
component=self._component,
shape=self._shape,
dtype=self._dtype,
)
@property
def length(self):
return self._length
class _IOTensorIterablePartitionedFunction:
"""PartitionedFunction will translate call to cached Function call"""
# function: next call of the iterable
def __init__(self, function, shape):
self._function = function
self._partitions = []
self._length = None
self._slice_suffix_start = [0 for _ in shape[1:]]
self._slice_suffix_size = list(shape[1:])
def __call__(self, start, stop):
while self._length is None:
# if stop is not None then resolved partitions have to cover stop
# if stop is None then all partitions have to be resolved
if stop is not None:
if stop <= sum(e.shape[0] for e in self._partitions):
break
# resolve one step
partition = self._function()
if partition.shape[0] == 0:
self._length = sum(e.shape[0] for e in self._partitions)
else:
self._partitions.append(partition)
partitions_indices = (
tf.cumsum([e.shape[0] for e in self._partitions]).numpy().tolist()
)
self._partitions_start = list([0] + partitions_indices[:-1])
self._partitions_stop = partitions_indices
length = self._partitions_stop[-1]
index = slice(start, stop)
start, stop, _ = index.indices(length)
if start >= length:
raise IndexError("index %s is out of range" % index)
indices_start = tf.math.maximum(self._partitions_start, start)
indices_stop = tf.math.minimum(self._partitions_stop, stop)
indices_hit = tf.math.less(indices_start, indices_stop)
indices = tf.squeeze(tf.compat.v2.where(indices_hit), [1])
return self._partitions_read(indices_start, indices_stop, indices)
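# Worked example of the partition lookup above, assuming three resolved
# partitions with first-dimension sizes [4, 4, 2]:
#   _partitions_start = [0, 4, 8], _partitions_stop = [4, 8, 10]
#   __call__(start=3, stop=9):
#     indices_start = [3, 4, 8], indices_stop = [4, 8, 9], so all three hit;
#     _partitions_read slices partition0[3:4], partition1[0:4], partition2[0:1]
#     and concatenates them, i.e. exactly elements 3..8 of the logical tensor.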
@property
def length(self):
"""length"""
while self._length is None:
# resolve until length is available
partition = self._function()
if partition.shape[0] == 0:
self._length = sum(e.shape[0] for e in self._partitions)
else:
self._partitions.append(partition)
partitions_indices = (
tf.cumsum([e.shape[0] for e in self._partitions]).numpy().tolist()
)
self._partitions_start = list([0] + partitions_indices[:-1])
self._partitions_stop = partitions_indices
return self._length
def _partitions_read(self, indices_start, indices_stop, indices):
"""_partitions_read"""
items = []
# TODO: change to tf.while_loop
for index in indices:
slice_start = indices_start[index] - self._partitions_start[index]
slice_size = indices_stop[index] - indices_start[index]
slice_start = [slice_start] + self._slice_suffix_start
slice_size = [slice_size] + self._slice_suffix_size
item = tf.slice(self._partitions[index], slice_start, slice_size)
items.append(item)
return tf.concat(items, axis=0)
class _IOTensorPartitionedFunction:
"""PartitionedFunction will translate call to cached Function call"""
def __init__(self, func, partitions):
self._func = func
self._partitions = partitions
partitions_indices = tf.cumsum(partitions).numpy().tolist()
self._partitions_start = list([0] + partitions_indices[:-1])
self._partitions_stop = partitions_indices
self._tensors = [None for _ in partitions]
def __call__(self, resource, start, stop):
indices_start = tf.math.maximum(self._partitions_start, start)
indices_stop = tf.math.minimum(self._partitions_stop, stop)
indices_hit = tf.math.less(indices_start, indices_stop)
indices = tf.squeeze(tf.compat.v2.where(indices_hit), [1])
items = []
# TODO: change to tf.while_loop
for index in indices:
if self._tensors[index] is None:
self._tensors[index] = self._func(
resource,
self._partitions_start[index],
self._partitions_stop[index],
)
slice_start = indices_start[index] - self._partitions_start[index]
slice_size = indices_stop[index] - indices_start[index]
item = tf.slice(self._tensors[index], [slice_start], [slice_size])
items.append(item)
return tf.concat(items, axis=0)
class _IOTensor:
"""_IOTensor"""
def __init__(self, spec, internal=False):
if not internal:
raise ValueError(
"IOTensor constructor is private; please use one "
"of the factory methods instead (e.g., "
"IOTensor.from_tensor())"
)
self._spec = spec
super().__init__()
# =============================================================================
# Accessors
# =============================================================================
@property
def spec(self):
"""The `TensorSpec` of values in this tensor."""
return self._spec
# =============================================================================
# String Encoding
# =============================================================================
def __repr__(self):
meta = "".join(
[
f", {k}={repr(v.__get__(self))}"
for k, v in self.__class__.__dict__.items()
if isinstance(v, _IOTensorMeta)
]
)
return f"<{self.__class__.__name__}: spec={self.spec}{meta}>"
class BaseIOTensor(_IOTensor):
"""BaseIOTensor
A `BaseIOTensor` is a basic `IOTensor` with only one component.
It is associated with a `Tensor` of `shape` and `dtype`, with
data backed by IO. It is the building block for `IOTensor`.
For example, a `CSVIOTensor` consists of multiple `BaseIOTensor`
where each one is a column of the CSV.
All `IOTensor` types are either a subclass of `BaseIOTensor`,
or are a composite of a collection of `BaseIOTensor`.
The additional properties exposed by `BaseIOTensor` are `shape`
and `dtype` associated with counterparts in `Tensor`.
"""
def __init__(self, spec, function, internal=False):
# function used for dataset should not be partitioned.
self._function = function
super().__init__(spec, internal=internal)
# =============================================================================
# Accessors
# =============================================================================
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of the tensor."""
return self.spec.shape
@property
def dtype(self):
"""Returns the `dtype` of elements in the tensor."""
return self.spec.dtype
# =============================================================================
# Indexing & Slicing
# =============================================================================
def __getitem__(self, key):
"""Returns the specified piece of this IOTensor."""
# Find out the indices based on length and key,
# based on python slice()'s indices method:
index = key if isinstance(key, slice) else slice(key, key + 1)
items = self._function(start=index.start, stop=index.stop)
return tf.squeeze(items, axis=[0]) if items.shape[0] == 1 else items
def __len__(self):
"""Returns the total number of items of this IOTensor."""
return self._function.length
# =============================================================================
# Windowing
# =============================================================================
def window(self, size):
"""Returns the sliding window of this IOTensor."""
spec = tf.TensorSpec(
tf.TensorShape(self._function.length - size + 1).concatenate(size),
self.dtype,
)
class _Function:
"""_Function"""
def __init__(self, func, spec, size):
self._func = func
self._spec = spec
self._size = size
self._length = self._spec.shape[0]
def __call__(self, start, stop):
start, stop, _ = slice(start, stop).indices(self._length)
if start >= self._length:
raise IndexError("index %s is out of range" % slice(start, stop))
return tf.reshape(
tf.image.extract_patches(
tf.reshape(
self._func(start, stop + self._size - 1),
[1, 1, stop + self._size - 1 - start, 1],
),
sizes=[1, 1, self._size, 1],
strides=[1, 1, 1, 1],
rates=[1, 1, 1, 1],
padding="VALID",
),
self._spec.shape,
)
return BaseIOTensor(spec, _Function(self._function, spec, size), internal=True)
# =============================================================================
# Tensor Type Conversions
# =============================================================================
def to_tensor(self, **kwargs):
"""Converts this `IOTensor` into a `tf.Tensor`.
Example:
```python
```
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A `Tensor` with value obtained from this `IOTensor`.
"""
with tf.name_scope(kwargs.get("name", "IOToTensor")):
return self.__getitem__(slice(None, None))
class ScalarIOTensor(BaseIOTensor):
"""ScalarIOTensor
A `ScalarIOTensor` is an `IOTensor` from a scalar `Tensor`.
"""
def __init__(self, spec, tensor, internal=False):
tensor = tf.convert_to_tensor(tensor)
self._tensor = tensor
super().__init__(spec, None, internal=internal)
# =============================================================================
# Tensor Type Conversions
# =============================================================================
def to_tensor(self, **kwargs):
"""Converts this `IOTensor` into a `tf.Tensor`.
Example:
```python
```
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A `Tensor` with value obtained from this `IOTensor`.
"""
with tf.name_scope(kwargs.get("name", "IOToTensor")):
return self._tensor
class TensorIOTensor(BaseIOTensor):
"""TensorIOTensor
A `TensorIOTensor` is an `IOTensor` from a regular `Tensor`.
"""
def __init__(self, tensor, internal=False):
tensor = tf.convert_to_tensor(tensor)
class _Function:
"""_Function"""
def __init__(self, tensor):
self._tensor = tensor
self._base_start = [0 for _ in tensor.shape.as_list()]
self._base_size = [-1 for _ in tensor.shape.as_list()]
self._length = tensor.shape[0]
def __call__(self, start, stop):
start, stop, _ = slice(start, stop).indices(self._length)
if start >= self._length:
raise IndexError("index %s is out of range" % slice(start, stop))
slice_start = self._base_start
slice_size = self._base_size
slice_start[0] = start
slice_size[0] = stop - start
return tf.slice(self._tensor, slice_start, slice_size)
@property
def length(self):
return self._length
self._tensor = tensor
super().__init__(
tf.TensorSpec(tensor.shape, tensor.dtype),
_Function(tensor),
internal=internal,
)
# =============================================================================
# Tensor Type Conversions
# =============================================================================
def to_tensor(self, **kwargs):
"""Converts this `IOTensor` into a `tf.Tensor`.
Example:
```python
```
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A `Tensor` with value obtained from this `IOTensor`.
"""
with tf.name_scope(kwargs.get("name", "IOToTensor")):
return self._tensor
class _TableIOTensor(_IOTensor):
"""_TableIOTensor"""
def __init__(self, spec, columns, values, internal=False):
self._columns = columns
self._values = values
super().__init__(spec, internal=internal)
# =============================================================================
# Accessors
# =============================================================================
@property
def columns(self):
"""The names of columns"""
return self._columns
def __call__(self, column):
"""Return a BaseIOTensor with column named `column`"""
column_index = self.columns.index(next(e for e in self.columns if e == column))
return self._values[column_index]
class _CollectionIOTensor(_IOTensor):
"""_CollectionIOTensor
`CollectionIOTensor` is different from `TableIOTensor` in that each
component could have different shapes. While additional table-wide
operations are planned to be supported for `TableIOTensor` so that
the same operations could be applied to every column, there is no plan
to support the same in `CollectionIOTensor`. In other words,
`CollectionIOTensor` is only a dictionary with values consisting
of `BaseIOTensor`.
"""
def __init__(self, spec, keys, values, internal=False):
self._keys = keys
self._values = values
super().__init__(spec, internal=internal)
# =============================================================================
# Accessors
# =============================================================================
@property
def keys(self):
"""The names of columns"""
return self._keys
def __call__(self, key):
"""Return a BaseIOTensor with key named `key`"""
key_index = self.keys.index(next(e for e in self.keys if e == key))
return self._values[key_index]
class _SeriesIOTensor(_IOTensor):
"""_SeriesIOTensor"""
def __init__(self, spec, index, value, internal=False):
self._index = index
self._value = value
super().__init__(spec, internal=internal)
# =============================================================================
# Accessors
# =============================================================================
@property
def index(self):
"""The index column of the series"""
return self._index
@property
def value(self):
"""The value column of the series"""
return self._value
class _KeyValueIOTensor(_IOTensor):
"""_KeyValueIOTensor"""
def __init__(self, spec, function, iterable_init, iterable_next, internal=False):
self._function = function
self._iterable_init = iterable_init
self._iterable_next = iterable_next
super().__init__(spec, internal=internal)
# =============================================================================
# Iterator
# =============================================================================
def __iter__(self):
with tf.name_scope("KeyValueIOTensorIter"):
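            # Drain the underlying resource one element at a time; an empty
            # result from the next() op signals that the iterable is exhausted.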
resource = self._iterable_init()
while True:
value = self._iterable_next(resource)
if tf.shape(value)[0].numpy() == 0:
return
yield value[0]
# =============================================================================
# Indexing
# =============================================================================
def __getitem__(self, key):
"""Returns the specified piece of this IOTensor."""
return self._function(key)
|
|
"""
Examples:
  To smear an already smeared spectrum with a light yield of 200 to a
  light yield of 190, the following lines are required::
  >>> smearer = smear.EnergySmearLY()
>>> ly = smearer.calc_smear_ly(190., cur_ly=200.)
>>> smearer.set_resolution(ly)
>>> smeared_spec = smearer.weighted_smear(spectrum)
.. note:: Similar methods are available in all other smearing classes.
"""
import numpy
import itertools
import copy
class Smear(object):
""" The base class for smearing spectra.
Args:
name (string): The name of the smearing class.
Attributes:
        _name (string): name of the smearing class.
_num_sigma (float): The width of the window in terms of number of sigma
you wish to apply weights to.
"""
def __init__(self, name):
""" Initialise the Smear class by seeding the random number generator.
"""
numpy.random.seed()
self._name = name
self._num_sigma = 5.
def calc_gaussian(self, x, mean, sigma):
""" Calculates the value of a gaussian whose integral is equal to
one at position x with a given mean and sigma.
Args:
x : Position to calculate the gaussian
mean : Mean of the gaussian
sigma : Sigma of the gaussian
Returns:
float: Value of the gaussian at the given position
"""
return (numpy.exp(-(x - mean) ** 2 / (2 * sigma ** 2)) /
(sigma*numpy.sqrt(2 * numpy.pi)))
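    # Quick numerical check (illustrative sketch, not part of the original API):
    # for any Smear instance s,
    #   sum(s.calc_gaussian(x, 0., 1.) for x in numpy.arange(-5., 5., 0.01)) * 0.01
    # should be close to 1, confirming the unit normalisation.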
def get_bin_mean(self, low, bin, width):
""" Calculates the mean value of a bin.
Args:
low (float): The lower bound value of the parameter.
bin (int): The number of the bin you wish to calculate the mean of.
width (float): The width of the bin.
Returns:
float: The mean value of the bin.
"""
return low + (bin + 0.5)*width
def get_num_sigma(self):
""" Returns the width of the window in terms of number of sigma
you wish to apply weights to.
Returns:
float: The number of sigma.
"""
return self._num_sigma
def set_num_sigma(self, num_sigma):
""" Sets the width of the window in terms of number of sigma
you wish to apply weights to.
Args:
num_sigma (float): The number of sigma you wish to apply weights to.
Raises:
ValueError: If the number of sigma is zero or negative.
"""
if (num_sigma > 0.):
self._num_sigma = float(num_sigma)
else:
raise ValueError("%s is an invalid num_sigma. Value must be "
"greater than zero." % num_sigma)
def get_bounds(self, mean, sigma):
""" Calculates the boundaries you wish to apply the smearing
weights to.
Args:
mean (float): The mean value you are smearing.
sigma (float): The sigma of the gaussian you are using to smear.
Returns:
tuple: First value of the tuple is the lower bound. The second is
the upper bound.
"""
low = mean - self._num_sigma*sigma
high = mean + self._num_sigma*sigma
return low, high
class EnergySmearLY(Smear):
""" The class which smears energy. It accepts resolution in terms of light
yield (LY) in units of NHit per MeV.
Args:
poisson (bool): If True, use poisson smearing.
Attributes:
_light_yield (float): The light yield of the scintillator in NHits per
MeV.
_poisson_smear (Bool): True if poisson smearing is to be applied. False
if gaussian smearing is to be applied.
"""
def __init__(self, poisson=True):
""" Initialises the class.
"""
super(EnergySmearLY, self).__init__("energy_light_yield")
self._poisson_smear = poisson
self._light_yield = 200 # Nhit/MeV
self._log_factorial = {}
def calc_poisson_energy(self, x, lamb):
""" Calculates the value of a poisson whose integral is equal to
one at position x with a given lambda value.
Args:
x : Number of events
lamb : Lambda of the poisson
Returns:
float: The value of the poisson at the given position
"""
photons = int(x*self._light_yield)
expected = lamb*self._light_yield
if photons not in self._log_factorial:
self._log_factorial[photons] = (
numpy.sum(numpy.log(numpy.arange(1, (photons+1)))))
log_pois = (photons*numpy.log(expected) -
self._log_factorial[photons] -
expected)
return numpy.exp(log_pois)
def calc_smear_ly(self, new_ly, cur_ly=None):
"""Calculates the value of light yield (ly) required to smear a
data set which has already been smeared with a light yield of cur_ly
to achieve a smeared data set with a new light yield of new_ly.
Args:
new_ly (float): The value of light yield wanted for the smeared PDF.
cur_ly (float, optional): Current value of light yield the PDF
has been convolved with from the true value PDF.
Raises:
            ValueError: If new_ly is larger than cur_ly. Can't smear to
                higher light yields (smaller sigmas).
Returns:
float: The value of light yield needed to smear the current
PDF to obtain a new light yield: new_ly.
"""
if not cur_ly:
cur_ly = self.get_resolution()
if new_ly > cur_ly:
raise ValueError("New light yield must be smaller than the"
"current light yield. cur_ly: %s. new_ly: %s."
% (cur_ly, new_ly))
return new_ly*cur_ly/(cur_ly-new_ly)
def get_resolution(self):
""" Returns the light yield.
Returns:
float: The light yield.
"""
return self._light_yield
def get_sigma(self, energy):
""" Calculates sigma at a given energy.
Args:
energy (float): Energy value of data point(s)
Returns:
float: Sigma equivalent to sqrt(energy/_light_yield)
"""
return numpy.sqrt(energy/self._light_yield)
def set_resolution(self, light_yield):
""" Sets the light yield
Args:
light_yield (float): The value you wish to set the light yield to.
Raises:
ValueError: If the light yield is zero or negative.
"""
if light_yield > 0.:
self._light_yield = float(light_yield)
else:
raise ValueError("%s is an invalid light yield. Light yield "
"must be greater than zero.")
def weighted_smear(self, spectrum, par="energy_mc"):
""" Smears the energy of a :class:`spectra.Spectra` by
calculating a Gaussian PDF for each bin. Weights are then
applied to a window of width specified by the number of sigma
depending on the value of the Gaussian PDF at the mean of the
bin.
Args:
spectrum (:class:`spectra.Spectra`): Spectrum you wish to
smear.
par (string, optional): The name of the parameter you wish
to smear. The default is energy_mc.
Raises:
            IndexError: If par is not in the spectra config.
Returns:
:class:`spectra.Spectra`: The smeared spectrum
"""
if par not in spectrum.get_config().get_pars():
raise IndexError("%s is not a parameter in the spectrum" % par)
idx = spectrum.get_config().get_index(par)
bins = []
lows = []
widths = []
par_names = []
for par_name in spectrum.get_config().get_pars():
bins.append(range(spectrum.get_config().get_par(par_name)._bins))
lows.append(spectrum.get_config().get_par(par_name)._low)
widths.append(spectrum.get_config().get_par(par_name).get_width())
par_names.append(par_name)
smeared_spec = copy.copy(spectrum)
smeared_spec._name = spectrum._name + "_ly" + str(self._light_yield)
smeared_spec._data = numpy.zeros(spectrum._data.shape)
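        # Redistribute each filled bin over a +/- num_sigma window around its
        # mean, weighting the neighbouring bins by the smearing PDF and
        # normalising so the total content of the bin is conserved.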
for bin in itertools.product(*bins):
entries = spectrum._data[bin]
if entries:
data = {}
low = None
high = None
for i in range(len(bin)):
mean = self.get_bin_mean(lows[i], bin[i], widths[i])
if i == idx:
sigma = self.get_sigma(mean)
low, high = self.get_bounds(mean, sigma)
low = spectrum.get_config().get_par(
par).round(low - 0.5 * widths[i]) + 0.5 * widths[i]
high = spectrum.get_config().get_par(
par).round(high + 0.5 * widths[i]) + \
0.5 * widths[i]
if low < spectrum.get_config().get_par(par)._low:
low = spectrum.get_config().get_par(par)._low + \
0.5 * widths[i]
if high > spectrum.get_config().get_par(par)._high:
high = spectrum.get_config().get_par(par)._high - \
0.5 * widths[i]
weights = []
for energy in numpy.arange(low, high, widths[i]):
if self._poisson_smear is True:
weights.append(self.calc_poisson_energy(energy,
mean))
else:
weights.append(self.calc_gaussian(energy,
mean,
sigma))
else:
data[par_names[i]] = mean
total_weight = sum(weights)
i = 0
for energy in numpy.arange(low, high, widths[idx]):
data[par] = energy
smeared_spec.fill(weight=entries*weights[i]/total_weight,
**data)
i += 1
smeared_spec._raw_events = spectrum._raw_events
return smeared_spec
def random_smear(self, spectrum, par="energy_mc"):
""" Smears the energy of a :class:`spectra.Spectra` by
generating a number of random points from Gaussian PDF generated
        from that bin's mean value and the corresponding sigma. The number
of points generated is equivalent to the number of entries in that
bin.
Args:
spectrum (:class:`spectra.Spectra`): Spectrum you wish to
smear.
par (string, optional): The name of the parameter you wish to
smear. The default is energy_mc.
Raises:
            IndexError: If par is not in the spectra config.
Returns:
:class:`spectra.Spectra`: The smeared spectrum
"""
if par not in spectrum.get_config().get_pars():
raise IndexError("%s is not a parameter in the spectrum" % par)
idx = spectrum.get_config().get_index(par)
bins = []
lows = []
widths = []
par_names = []
for par_name in spectrum.get_config().get_pars():
bins.append(range(spectrum.get_config().get_par(par_name)._bins))
lows.append(spectrum.get_config().get_par(par_name)._low)
widths.append(spectrum.get_config().get_par(par_name).get_width())
par_names.append(par_name)
smeared_spec = copy.copy(spectrum)
smeared_spec._name = spectrum._name + "_ly" + str(self._light_yield)
smeared_spec._data = numpy.zeros(spectrum._data.shape)
for bin in itertools.product(*bins):
entries = int(spectrum._data[bin])
if entries:
data = {}
for i in range(len(bin)):
mean = self.get_bin_mean(lows[i], bin[i], widths[i])
if i == idx:
mean_e = mean
sigma = self.get_sigma(mean)
else:
data[par_names[i]] = mean
for i in range(entries):
if self._poisson_smear is True:
photons = (numpy.fabs
(numpy.random.poisson(mean_e *
self._light_yield)))
data[par] = photons / float(self._light_yield)
else:
data[par] = (numpy.fabs
(numpy.random.normal(mean_e, sigma)))
try:
smeared_spec.fill(**data)
except ValueError:
print "WARNING: Smeared energy out of bounds. Skipping"
smeared_spec._raw_events = spectrum._raw_events
return smeared_spec
class EnergySmearRes(Smear):
""" Allows you to smear directly by supplied energy resolution
(in :math:`\sqrt{MeV}`).
Inherits from :class:`Smear`
Args:
poisson (bool): If True, use poisson smearing.
Attributes:
        _resolution (float): Energy resolution in :math:`\sqrt{MeV}`
e.g. 0.05 for :math:`\sigma = 5\%/\sqrt{E[MeV]}`.
_poisson_smear (Bool): True if poisson smearing is to be applied.
False if gaussian smearing is to be applied.
"""
def __init__(self, poisson=True):
""" Initialise the class
"""
super(EnergySmearRes, self).__init__("energy_resolution")
self._poisson_smear = poisson
        self._light_yield = 200 # Nhit/MeV
        self._resolution = 0.05  # assumed default (5%/sqrt(E[MeV])); override via set_resolution()
        self._log_factorial = {}
def calc_poisson_energy(self, x, lamb):
""" Calculates the value of a poisson whose integral is equal to
one at position x with a given lambda value.
Args:
x : Number of events
lamb : Lambda of the poisson
Returns:
float: The value of the poisson at the given position
"""
photons = int(x*self._light_yield)
expected = lamb*self._light_yield
if photons not in self._log_factorial:
self._log_factorial[photons] = (
numpy.sum(numpy.log(numpy.arange(1, (photons+1)))))
log_pois = (photons*numpy.log(expected) -
self._log_factorial[photons] -
expected)
return numpy.exp(log_pois)
def calc_smear_resoluton(self, new_res, cur_res=None):
"""Calculates the value of resolution required to smear a data set
which has already been smeared with a resolution of cur_res to
achieve a new resolution of new_res.
Args:
new_res (float): The value of resolution wanted for the smeared PDF.
cur_res (float, optional): Current value of resolution the PDF
has been convolved with from the true value PDF.
Raises:
            ValueError: If new_res is smaller than cur_res. Can't smear to
higher resolutions (smaller sigmas)
Returns:
float: The value of resolution needed to smear the current
PDF to obtain a new resolution with sigma value new_res.
"""
if not cur_res:
cur_res = self.get_resolution()
if new_res < cur_res:
raise ValueError("New resolution must be larger than the"
"current resolution. cur_res: %s. new_res: %s."
% (cur_res, new_res))
return numpy.fabs(numpy.sqrt(new_res**2 - cur_res**2))
def get_resolution(self):
""" Get the energy resolution
Returns:
float: Energy resolution in :math:`\sqrt{MeV}`
e.g. 0.05 for :math:`\sigma = 5\%/\sqrt{E[MeV]}`
"""
return self._resolution
def get_sigma(self, energy):
""" Calculates sigma at a given energy.
Args:
energy (float): Energy value of data point(s)
Returns:
float: Sigma (MeV) equivalent to energy_resolution *
:math:`\sqrt{energy}`
"""
return self._resolution * numpy.power(energy, (1. / 2.))
def set_resolution(self, resolution):
""" Set the energy resolution in :math:`\sqrt{MeV}`
e.g. 0.05 for :math:`\sigma = 5\%/\sqrt{E[MeV]}`.
Args:
resolution (float): Energy resolution in :math:`\sqrt{MeV}`
e.g. 0.05 for :math:`\sigma = 5\%/\sqrt{E[MeV]}`.
Raises:
ValueError: If the resolution is not between 0 and 1.
"""
if (resolution > 0. and resolution < 1.):
self._resolution = resolution
else:
raise ValueError("%s is an invalid energy resolution. Value "
"must be between 0. and 1." % resolution)
def weighted_smear(self, spectrum, par="energy_mc"):
""" Smears the energy of a :class:`spectra.Spectra` by
calculating a Gaussian PDF for each bin. Weights are then applied
to a window of width specified by the number of sigma depending on
the value of the Gaussian PDF at the mean of the bin.
Args:
spectrum (:class:`spectra.Spectra`): Spectrum you wish to
smear.
par (string, optional): The name of the parameter you wish to
smear. The default is energy_mc.
Raises:
            IndexError: If par is not in the spectra config.
Returns:
:class:`spectra.Spectra`: The smeared spectrum
"""
if par not in spectrum.get_config().get_pars():
raise IndexError("%s is not a parameter in the spectrum" % par)
idx = spectrum.get_config().get_index(par)
bins = []
lows = []
widths = []
par_names = []
for par_name in spectrum.get_config().get_pars():
bins.append(range(spectrum.get_config().get_par(par_name)._bins))
lows.append(spectrum.get_config().get_par(par_name)._low)
widths.append(spectrum.get_config().get_par(par_name).get_width())
par_names.append(par_name)
smeared_spec = copy.copy(spectrum)
smeared_spec._name = (spectrum._name + "_" +
str(100. * self._resolution) + "%")
smeared_spec._data = numpy.zeros(spectrum._data.shape)
for bin in itertools.product(*bins):
entries = spectrum._data[bin]
if entries:
data = {}
low = None
high = None
for i in range(len(bin)):
mean = self.get_bin_mean(lows[i], bin[i], widths[i])
if i == idx:
sigma = self.get_sigma(mean)
low, high = self.get_bounds(mean, sigma)
low = spectrum.get_config().get_par(
par).round(low - 0.5 * widths[i]) + 0.5 * widths[i]
high = spectrum.get_config().get_par(
par).round(high + 0.5 * widths[i]) + \
0.5 * widths[i]
if low < spectrum.get_config().get_par(par)._low:
low = spectrum.get_config().get_par(par)._low + \
0.5 * widths[i]
if high > spectrum.get_config().get_par(par)._high:
high = spectrum.get_config().get_par(par)._high - \
0.5 * widths[i]
weights = []
for energy in numpy.arange(low, high, widths[i]):
if self._poisson_smear is True:
weights.append(self.calc_poisson_energy(energy,
mean))
else:
weights.append(self.calc_gaussian(energy,
mean,
sigma))
else:
data[par_names[i]] = mean
total_weight = sum(weights)
i = 0
for energy in numpy.arange(low, high, widths[idx]):
data[par] = energy
smeared_spec.fill(weight=entries*weights[i]/total_weight,
**data)
i += 1
smeared_spec._raw_events = spectrum._raw_events
return smeared_spec
def random_smear(self, spectrum, par="energy_mc"):
""" Smears the energy of a :class:`spectra.Spectra` by
generating a number of random points from Gaussian PDF
        generated from that bin's mean value and the corresponding
sigma. The number of points generated is equivalent to the
number of entries in that bin.
Args:
spectrum (:class:`spectra.Spectra`): Spectrum you wish to
smear.
par (string, optional): The name of the parameter you wish to
smear. The default is energy_mc.
Raises:
            IndexError: If par is not in the spectra config.
Returns:
:class:`spectra.Spectra`: The smeared spectrum
"""
if par not in spectrum.get_config().get_pars():
raise IndexError("%s is not a parameter in the spectrum" % par)
idx = spectrum.get_config().get_index(par)
bins = []
lows = []
widths = []
par_names = []
for par_name in spectrum.get_config().get_pars():
bins.append(range(spectrum.get_config().get_par(par_name)._bins))
lows.append(spectrum.get_config().get_par(par_name)._low)
widths.append(spectrum.get_config().get_par(par_name).get_width())
par_names.append(par_name)
smeared_spec = copy.copy(spectrum)
smeared_spec._name = (spectrum._name + "_" +
str(100. * self._resolution) + "%")
smeared_spec._data = numpy.zeros(spectrum._data.shape)
for bin in itertools.product(*bins):
entries = int(spectrum._data[bin])
if entries:
data = {}
for i in range(len(bin)):
mean = self.get_bin_mean(lows[i], bin[i], widths[i])
if i == idx:
mean_e = mean
sigma = self.get_sigma(mean)
else:
data[par_names[i]] = mean
for i in range(entries):
if self._poisson_smear is True:
photons = (numpy.fabs
(numpy.random.poisson(mean_e *
self._light_yield)))
data[par] = photons / float(self._light_yield)
else:
data[par] = (numpy.fabs
(numpy.random.normal(mean_e, sigma)))
try:
smeared_spec.fill(**data)
except ValueError:
print "WARNING: Smeared energy out of bounds. Skipping"
smeared_spec._raw_events = spectrum._raw_events
return smeared_spec
class RadialSmear(Smear):
""" The class which smears the radius. It accepts resolution in terms of
sigma in units of mm.
    Attributes:
        _resolution (float): The position resolution (mm).
"""
def __init__(self):
""" Initialises the class.
"""
super(RadialSmear, self).__init__("radial")
self._resolution = 100. # mm
def calc_smear_resoluton(self, new_res, cur_res=None):
"""Calculates the value of resolution required to smear a data set
which has already been smeared with a resolution of cur_res to
achieve a new resolution of new_res.
Args:
new_res (float): The value of resolution wanted for the smeared PDF.
cur_res (float, optional): Current value of resolution the PDF
has been convolved with from the true value PDF.
Raises:
            ValueError: If new_res is smaller than cur_res. Can't smear to
higher resolutions (smaller sigmas)
Returns:
float: The value of resolution needed to smear the current
PDF to obtain a new resolution: new_res.
"""
if not cur_res:
cur_res = self.get_resolution()
if new_res < cur_res:
raise ValueError("New resolution must be larger than the"
"current resolution. cur_res: %s. new_res: %s."
% (cur_res, new_res))
return numpy.fabs(numpy.sqrt(new_res**2 - cur_res**2))
def get_resolution(self):
"""Gets the position resolution.
Returns:
float: Position resolution.
"""
return self._resolution
def set_resolution(self, resolution):
"""Sets the position resolution:
Raises:
ValueError: If resolution is zero or less.
Args:
resolution (float): Position resolution in mm.
"""
if resolution > 0:
self._resolution = resolution
else:
raise ValueError("%s is an incorrect position resolutioin. Value "
"must be greater than zero." % resolution)
def get_sigma(self):
"""Sigma and resolution are equivalent for radial dimensions
currently. This function calls self.get_resolution()
Returns:
float: Sigma in mm equivalent to resolution
"""
return self.get_resolution()
def weighted_smear(self, spectrum, par="radial_mc"):
""" Smears the radius of a :class:`spectra.Spectra` by
calculating a Gaussian PDF for each bin. Weights are then
applied to a window of width specified by the number of sigma
depending on the value of the Gaussian PDF at the mean of the
bin.
Args:
spectrum (:class:`spectra.Spectra`): Spectrum you wish to
smear.
par (string, optional): The name of the parameter you wish to
smear. The default is radial_mc.
Raises:
            IndexError: If par is not in the spectra config.
Returns:
:class:`spectra.Spectra`: The smeared spectrum
"""
if par not in spectrum.get_config().get_pars():
raise IndexError("%s is not a parameter in the spectrum" % par)
idx = spectrum.get_config().get_index(par)
bins = []
lows = []
widths = []
par_names = []
for par_name in spectrum.get_config().get_pars():
bins.append(range(spectrum.get_config().get_par(par_name)._bins))
lows.append(spectrum.get_config().get_par(par_name)._low)
widths.append(spectrum.get_config().get_par(par_name).get_width())
par_names.append(par_name)
smeared_spec = copy.copy(spectrum)
        smeared_spec._name = (spectrum._name + "_" +
                              str(self._resolution) + "mm")
smeared_spec._data = numpy.zeros(spectrum._data.shape)
for bin in itertools.product(*bins):
entries = spectrum._data[bin]
if entries:
data = {}
low = None
high = None
for i in range(len(bin)):
mean = self.get_bin_mean(lows[i], bin[i], widths[i])
if i == idx:
sigma = self.get_sigma()
low, high = self.get_bounds(mean, sigma)
low = spectrum.get_config().get_par(
par).round(low - 0.5 * widths[i]) + 0.5 * widths[i]
high = spectrum.get_config().get_par(
par).round(high + 0.5 * widths[i]) + \
0.5 * widths[i]
if low < spectrum.get_config().get_par(par)._low:
low = spectrum.get_config().get_par(par)._low + \
0.5 * widths[i]
if high > spectrum.get_config().get_par(par)._high:
high = spectrum.get_config().get_par(par)._high - \
0.5 * widths[i]
weights = []
for radius in numpy.arange(low, high, widths[i]):
weights.append(self.calc_gaussian(radius,
mean,
sigma))
else:
data[par_names[i]] = mean
total_weight = sum(weights)
i = 0
for radius in numpy.arange(low, high, widths[idx]):
data[par] = radius
smeared_spec.fill(weight=entries*weights[i]/total_weight,
**data)
i += 1
smeared_spec._raw_events = spectrum._raw_events
return smeared_spec
def random_smear(self, spectrum, par="radial_mc"):
""" Smears the radius of a :class:`spectra.Spectra` by
generating a number of random points from Gaussian PDF
        generated from that bin's mean value and the corresponding
sigma. The number of points generated is equivalent to the
number of entries in that bin.
Args:
spectrum (:class:`spectra.Spectra`): Spectrum you wish to
smear.
par (string, optional): The name of the parameter you wish to
smear. The default is radial_mc.
Raises:
            IndexError: If par is not in the spectra config.
Returns:
:class:`spectra.Spectra`: The smeared spectrum
"""
if par not in spectrum.get_config().get_pars():
raise IndexError("%s is not a parameter in the spectrum" % par)
idx = spectrum.get_config().get_index(par)
bins = []
lows = []
widths = []
par_names = []
for par_name in spectrum.get_config().get_pars():
bins.append(range(spectrum.get_config().get_par(par_name)._bins))
lows.append(spectrum.get_config().get_par(par_name)._low)
widths.append(spectrum.get_config().get_par(par_name).get_width())
par_names.append(par_name)
smeared_spec = copy.copy(spectrum)
        smeared_spec._name = (spectrum._name + "_" +
                              str(self._resolution) + "mm")
smeared_spec._data = numpy.zeros(spectrum._data.shape)
for bin in itertools.product(*bins):
entries = spectrum._data[bin]
if entries:
data = {}
for i in range(len(bin)):
mean = self.get_bin_mean(lows[i], bin[i], widths[i])
if i == idx:
mean_r = mean
sigma = self.get_sigma()
else:
data[par_names[i]] = mean
for i in range(entries):
data[par] = numpy.fabs(numpy.random.normal(mean_r, sigma))
try:
smeared_spec.fill(**data)
except ValueError:
print "WARNING: Smeared radius out of bounds. Skipping"
smeared_spec._raw_events = spectrum._raw_events
return smeared_spec
|
|
## @file
# This file is for installed package information database operations
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
Dependency
'''
##
# Import Modules
#
from os.path import dirname
import os
import Logger.Log as Logger
from Logger import StringTable as ST
from Library.Parsing import GetWorkspacePackage
from Library.Parsing import GetWorkspaceModule
from Library.Parsing import GetPkgInfoFromDec
from Library.Misc import GetRelativePath
from Library import GlobalData
from Logger.ToolError import FatalError
from Logger.ToolError import EDK1_INF_ERROR
from Logger.ToolError import UNKNOWN_ERROR
(DEPEX_CHECK_SUCCESS, DEPEX_CHECK_MODULE_NOT_FOUND, \
DEPEX_CHECK_PACKAGE_NOT_FOUND, DEPEX_CHECK_DP_NOT_FOUND) = (0, 1, 2, 3)
## DependencyRules
#
# This class represents the dependency rule check mechanism
#
# @param object: Inherited from object class
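#
# A minimal usage sketch (object names below are illustrative assumptions):
#   Rules = DependencyRules(IpiDatabase)
#   Satisfied = Rules.CheckDpDepexSatisfied(DistributionObject)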
#
class DependencyRules(object):
def __init__(self, Datab, ToBeInstalledPkgList=None):
self.IpiDb = Datab
self.WsPkgList = GetWorkspacePackage()
self.WsModuleList = GetWorkspaceModule()
self.PkgsToBeDepend = [(PkgInfo[1], PkgInfo[2]) for PkgInfo in self.WsPkgList]
# Add package info from the DIST to be installed.
self.PkgsToBeDepend.extend(self.GenToBeInstalledPkgList(ToBeInstalledPkgList))
def GenToBeInstalledPkgList(self, ToBeInstalledPkgList):
if not ToBeInstalledPkgList:
return []
RtnList = []
for Dist in ToBeInstalledPkgList:
for Package in Dist.PackageSurfaceArea:
RtnList.append((Package[0], Package[1]))
return RtnList
## Check whether a module exists by checking the Guid+Version+Name+Path combination
#
# @param Guid: Guid of a module
# @param Version: Version of a module
# @param Name: Name of a module
# @param Path: Path of a module
# @return: True if module existed, else False
#
def CheckModuleExists(self, Guid, Version, Name, Path):
Logger.Verbose(ST.MSG_CHECK_MODULE_EXIST)
ModuleList = self.IpiDb.GetModInPackage(Guid, Version, Name, Path)
ModuleList.extend(self.IpiDb.GetStandaloneModule(Guid, Version, Name, Path))
Logger.Verbose(ST.MSG_CHECK_MODULE_EXIST_FINISH)
if len(ModuleList) > 0:
return True
else:
return False
## Check whether a module depex satisfied.
#
# @param ModuleObj: A module object
# @param DpObj: A distribution object
# @return: True if module depex satisfied
# False else
#
def CheckModuleDepexSatisfied(self, ModuleObj, DpObj=None):
Logger.Verbose(ST.MSG_CHECK_MODULE_DEPEX_START)
Result = True
Dep = None
if ModuleObj.GetPackageDependencyList():
Dep = ModuleObj.GetPackageDependencyList()[0]
for Dep in ModuleObj.GetPackageDependencyList():
#
# first check whether the dependency satisfied by current workspace
#
Exist = self.CheckPackageExists(Dep.GetGuid(), Dep.GetVersion())
#
# check whether satisfied by current distribution
#
if not Exist:
if DpObj is None:
Result = False
break
for GuidVerPair in DpObj.PackageSurfaceArea.keys():
if Dep.GetGuid() == GuidVerPair[0]:
if Dep.GetVersion() is None or \
len(Dep.GetVersion()) == 0:
Result = True
break
if Dep.GetVersion() == GuidVerPair[1]:
Result = True
break
else:
Result = False
break
if not Result:
Logger.Error("CheckModuleDepex", UNKNOWN_ERROR, \
ST.ERR_DEPENDENCY_NOT_MATCH % (ModuleObj.GetName(), \
Dep.GetPackageFilePath(), \
Dep.GetGuid(), \
Dep.GetVersion()))
return Result
## Check whether a package exists in a package list specified by PkgsToBeDepend.
#
# @param Guid: Guid of a package
# @param Version: Version of a package
# @return: True if package exist
# False else
#
def CheckPackageExists(self, Guid, Version):
Logger.Verbose(ST.MSG_CHECK_PACKAGE_START)
Found = False
for (PkgGuid, PkgVer) in self.PkgsToBeDepend:
if (PkgGuid == Guid):
#
# if version is not empty and not equal, then not match
#
if Version and (PkgVer != Version):
Found = False
break
else:
Found = True
break
else:
Found = False
Logger.Verbose(ST.MSG_CHECK_PACKAGE_FINISH)
return Found
## Check whether a package depex satisfied.
#
# @param PkgObj: A package object
# @param DpObj: A distribution object
# @return: True if package depex satisfied
# False else
#
def CheckPackageDepexSatisfied(self, PkgObj, DpObj=None):
ModuleDict = PkgObj.GetModuleDict()
for ModKey in ModuleDict.keys():
ModObj = ModuleDict[ModKey]
if self.CheckModuleDepexSatisfied(ModObj, DpObj):
continue
else:
return False
return True
## Check whether a DP exists.
#
# @param Guid: Guid of a Distribution
# @param Version: Version of a Distribution
# @return: True if Distribution exist
# False else
def CheckDpExists(self, Guid, Version):
Logger.Verbose(ST.MSG_CHECK_DP_START)
DpList = self.IpiDb.GetDp(Guid, Version)
if len(DpList) > 0:
Found = True
else:
Found = False
Logger.Verbose(ST.MSG_CHECK_DP_FINISH)
return Found
## Check whether a DP depex satisfied by current workspace for Install
#
# @param DpObj: A distribution object
# @return: True if distribution depex satisfied
# False else
#
def CheckInstallDpDepexSatisfied(self, DpObj):
return self.CheckDpDepexSatisfied(DpObj)
## Check whether multiple DP depex satisfied by current workspace for Install
#
# @param DpObjList: A distribution object list
# @return: True if distribution depex satisfied
# False else
#
def CheckTestInstallPdDepexSatisfied(self, DpObjList):
for DpObj in DpObjList:
if self.CheckDpDepexSatisfied(DpObj):
for PkgKey in DpObj.PackageSurfaceArea.keys():
PkgObj = DpObj.PackageSurfaceArea[PkgKey]
self.PkgsToBeDepend.append((PkgObj.Guid, PkgObj.Version))
else:
return False, DpObj
return True, DpObj
## Check whether a DP depex satisfied by current workspace
# (excluding the original distribution's packages to be replaced) for Replace
#
# @param DpObj: A distribution object
# @param OrigDpGuid: The original distribution's Guid
# @param OrigDpVersion: The original distribution's Version
#
def ReplaceCheckNewDpDepex(self, DpObj, OrigDpGuid, OrigDpVersion):
self.PkgsToBeDepend = [(PkgInfo[1], PkgInfo[2]) for PkgInfo in self.WsPkgList]
OrigDpPackageList = self.IpiDb.GetPackageListFromDp(OrigDpGuid, OrigDpVersion)
for OrigPkgInfo in OrigDpPackageList:
Guid, Version = OrigPkgInfo[0], OrigPkgInfo[1]
if (Guid, Version) in self.PkgsToBeDepend:
self.PkgsToBeDepend.remove((Guid, Version))
return self.CheckDpDepexSatisfied(DpObj)
## Check whether a DP depex satisfied by current workspace.
#
# @param DpObj: A distribution object
#
def CheckDpDepexSatisfied(self, DpObj):
for PkgKey in DpObj.PackageSurfaceArea.keys():
PkgObj = DpObj.PackageSurfaceArea[PkgKey]
if self.CheckPackageDepexSatisfied(PkgObj, DpObj):
continue
else:
return False
for ModKey in DpObj.ModuleSurfaceArea.keys():
ModObj = DpObj.ModuleSurfaceArea[ModKey]
if self.CheckModuleDepexSatisfied(ModObj, DpObj):
continue
else:
return False
return True
## Check whether a DP could be removed from current workspace.
#
# @param DpGuid: File's guid
# @param DpVersion: File's version
# @retval Removable: True if distribution could be removed, False else
# @retval DependModuleList: the list of modules that prevent the distribution from being removed
#
def CheckDpDepexForRemove(self, DpGuid, DpVersion):
Removable = True
DependModuleList = []
WsModuleList = self.WsModuleList
#
# remove modules that included in current DP
# List of item (FilePath)
DpModuleList = self.IpiDb.GetDpModuleList(DpGuid, DpVersion)
for Module in DpModuleList:
if Module in WsModuleList:
WsModuleList.remove(Module)
else:
Logger.Warn("UPT\n",
ST.ERR_MODULE_NOT_INSTALLED % Module)
#
# get packages in current Dp and find the install path
# List of item (PkgGuid, PkgVersion, InstallPath)
DpPackageList = self.IpiDb.GetPackageListFromDp(DpGuid, DpVersion)
DpPackagePathList = []
WorkSP = GlobalData.gWORKSPACE
for (PkgName, PkgGuid, PkgVersion, DecFile) in self.WsPkgList:
if PkgName:
pass
DecPath = dirname(DecFile)
if DecPath.find(WorkSP) > -1:
InstallPath = GetRelativePath(DecPath, WorkSP)
DecFileRelaPath = GetRelativePath(DecFile, WorkSP)
else:
InstallPath = DecPath
DecFileRelaPath = DecFile
if (PkgGuid, PkgVersion, InstallPath) in DpPackageList:
DpPackagePathList.append(DecFileRelaPath)
DpPackageList.remove((PkgGuid, PkgVersion, InstallPath))
#
# the left items in DpPackageList are the packages that installed but not found anymore
#
for (PkgGuid, PkgVersion, InstallPath) in DpPackageList:
Logger.Warn("UPT",
ST.WARN_INSTALLED_PACKAGE_NOT_FOUND%(PkgGuid, PkgVersion, InstallPath))
#
# check modules to see if has dependency on package of current DP
#
for Module in WsModuleList:
if (not VerifyRemoveModuleDep(Module, DpPackagePathList)):
Removable = False
DependModuleList.append(Module)
return (Removable, DependModuleList)
## Check whether a DP could be replaced by a distribution containing NewDpPkgList
# from current workspace.
#
# @param OrigDpGuid: original Dp's Guid
# @param OrigDpVersion: original Dp's version
# @param NewDpPkgList: a list of package information (Guid, Version) in new Dp
# @retval Replaceable: True if distribution could be replaced, False else
# @retval DependModuleList: the list of modules that prevent the distribution from being replaced
#
def CheckDpDepexForReplace(self, OrigDpGuid, OrigDpVersion, NewDpPkgList):
Replaceable = True
DependModuleList = []
WsModuleList = self.WsModuleList
#
# remove modules that included in current DP
# List of item (FilePath)
DpModuleList = self.IpiDb.GetDpModuleList(OrigDpGuid, OrigDpVersion)
for Module in DpModuleList:
if Module in WsModuleList:
WsModuleList.remove(Module)
else:
Logger.Warn("UPT\n",
ST.ERR_MODULE_NOT_INSTALLED % Module)
OtherPkgList = NewDpPkgList
#
# get packages in current Dp and find the install path
# List of item (PkgGuid, PkgVersion, InstallPath)
DpPackageList = self.IpiDb.GetPackageListFromDp(OrigDpGuid, OrigDpVersion)
DpPackagePathList = []
WorkSP = GlobalData.gWORKSPACE
for (PkgName, PkgGuid, PkgVersion, DecFile) in self.WsPkgList:
if PkgName:
pass
DecPath = dirname(DecFile)
if DecPath.find(WorkSP) > -1:
InstallPath = GetRelativePath(DecPath, WorkSP)
DecFileRelaPath = GetRelativePath(DecFile, WorkSP)
else:
InstallPath = DecPath
DecFileRelaPath = DecFile
if (PkgGuid, PkgVersion, InstallPath) in DpPackageList:
DpPackagePathList.append(DecFileRelaPath)
DpPackageList.remove((PkgGuid, PkgVersion, InstallPath))
else:
OtherPkgList.append((PkgGuid, PkgVersion))
#
# the left items in DpPackageList are the packages that installed but not found anymore
#
for (PkgGuid, PkgVersion, InstallPath) in DpPackageList:
Logger.Warn("UPT",
ST.WARN_INSTALLED_PACKAGE_NOT_FOUND%(PkgGuid, PkgVersion, InstallPath))
#
# check modules to see if it can be satisfied by package not belong to removed DP
#
for Module in WsModuleList:
if (not VerifyReplaceModuleDep(Module, DpPackagePathList, OtherPkgList)):
Replaceable = False
DependModuleList.append(Module)
return (Replaceable, DependModuleList)
## check whether module depends on packages in DpPackagePathList, return True
# if found, False else
#
# @param Path: a module path
# @param DpPackagePathList: a list of Package Paths
# @retval: False: module depends on package in DpPackagePathList
# True: module doesn't depend on package in DpPackagePathList
#
def VerifyRemoveModuleDep(Path, DpPackagePathList):
try:
for Item in GetPackagePath(Path):
if Item in DpPackagePathList:
DecPath = os.path.normpath(os.path.join(GlobalData.gWORKSPACE, Item))
Logger.Info(ST.MSG_MODULE_DEPEND_ON % (Path, DecPath))
return False
else:
return True
except FatalError as ErrCode:
if ErrCode.message == EDK1_INF_ERROR:
Logger.Warn("UPT",
ST.WRN_EDK1_INF_FOUND%Path)
return True
else:
return True
## GetPackagePath
#
# Get Dependency package path from an Inf file path
#
def GetPackagePath(InfPath):
PackagePath = []
if os.path.exists(InfPath):
FindSection = False
for Line in open(InfPath).readlines():
Line = Line.strip()
if not Line:
continue
if Line.startswith('#'):
continue
if Line.startswith('[Packages') and Line.endswith(']'):
FindSection = True
continue
if Line.startswith('[') and Line.endswith(']') and FindSection:
break
if FindSection:
PackagePath.append(os.path.normpath(Line))
return PackagePath
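# Illustrative sketch: for an INF file containing, say,
#   [Packages]
#     MdePkg/MdePkg.dec
#     MdeModulePkg/MdeModulePkg.dec
# GetPackagePath returns the normalized paths listed under the [Packages] section.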
## check whether module depends on packages in DpPackagePathList and can not be satisfied by OtherPkgList
#
# @param Path: a module path
# @param DpPackagePathList: a list of Package Paths
# @param OtherPkgList: a list of Package Information (Guid, Version)
# @retval: False: module depends on package in DpPackagePathList and can not be satisfied by OtherPkgList
# True: either module doesn't depend on DpPackagePathList or module depends on DpPackagePathList
# but can be satisfied by OtherPkgList
#
def VerifyReplaceModuleDep(Path, DpPackagePathList, OtherPkgList):
try:
for Item in GetPackagePath(Path):
if Item in DpPackagePathList:
DecPath = os.path.normpath(os.path.join(GlobalData.gWORKSPACE, Item))
Name, Guid, Version = GetPkgInfoFromDec(DecPath)
if (Guid, Version) not in OtherPkgList:
Logger.Info(ST.MSG_MODULE_DEPEND_ON % (Path, DecPath))
return False
else:
return True
except FatalError as ErrCode:
if ErrCode.message == EDK1_INF_ERROR:
Logger.Warn("UPT",
ST.WRN_EDK1_INF_FOUND%Path)
return True
else:
return True
|