import os
import re
import cx_Oracle
import collections
import datetime
import math
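# Ear-related diagnostic codes of interest (these appear to follow the VA rating schedule, e.g. 6100 for hearing impairment and 6260 for tinnitus)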
earDiagnosisCode = [6100,6200,6201,6202,6204,6205,6207,6209,6210,6211,6260]
#Primary query: grab all processed contentions, ordered by participant id and then claim date
SQL="select * \
from EAR_AGGREGATE_CONTENTION \
order by VET_ID,CLAIM_DATE"
class EarFeatureVector:
def __init__(self):
self.VET_ID = None
self.CLAIM_ID = None
self.DOB = 0
self.CLAIMANT_AGE = None
self.END_PRODUCT_CODE = None
self.RO_NUMBER = 0
self.CLAIM_DATE = None
self.PROFILE_DATE = None
self.PROMULGATION_DATE = None
self.RECENT_EAR_DATE = None
self.CONTENTION_COUNT = 0
self.EAR_CONTENTION_COUNT = 0
self.PRIOR_EAR_CDD = 0
self.QUANT_PRIOR_EAR_CDD = 0
self.CURR_EAR_CDD = 0
self.QUANT_EAR_CDD = 0
self.CLAIM_AGE = 0
self.EAR_CDD_AGE = 0
self.EAR_CLAIM_COUNT = 0
self.C2200 = 0
self.C2210 = 0
self.C2220 = 0
self.C3140 = 0
self.C3150 = 0
self.C4130 = 0
self.C4210 = 0
self.C4700 = 0
self.C4920 = 0
self.C5000 = 0
self.C5010 = 0
self.C5710 = 0
self.C6850 = 0
self.A6100 = 0
self.A6200 = 0
self.A6201 = 0
self.A6202 = 0
self.A6204 = 0
self.A6205 = 0
self.A6207 = 0
self.A6209 = 0
self.A6210 = 0
self.A6211 = 0
self.A6260 = 0
self.CONTENTION_LOSS = 0
self.CONTENTION_TINITU = 0
self.DECISION_LOSS = 0
self.DECISION_TINITU = 0
def __str__(self):
return str(vars(self))
class AggregateDecision:
def __init__(self,VET_ID,PROFILE_DATE,PROMULGATION_DATE,RECENT_EAR_DATE,CDD,EAR_CDD,A6100,A6200,A6201,A6202,A6204,A6205,A6207,A6209,A6210,A6211,A6260,TXT_LOSS,TXT_TINITU):
self.VET_ID = VET_ID
self.PROFILE_DATE = PROFILE_DATE
self.PROMULGATION_DATE = PROMULGATION_DATE
self.RECENT_EAR_DATE = RECENT_EAR_DATE
self.CDD = CDD
self.EAR_CDD = EAR_CDD
self.A6100 = A6100
self.A6200 = A6200
self.A6201 = A6201
self.A6202 = A6202
self.A6204 = A6204
self.A6205 = A6205
self.A6207 = A6207
self.A6209 = A6209
self.A6210 = A6210
self.A6211 = A6211
self.A6260 = A6260
self.TXT_LOSS = TXT_LOSS
self.TXT_TINITU = TXT_TINITU
def __str__(self):
return str(vars(self))
class AggregateContention:
def __init__(self,VET_ID,CLAIM_ID,DOB,END_PRODUCT_CODE,RO_NUMBER,CLAIM_DATE,MAX_PROFILE_DATE,CONTENTION_COUNT,EAR_CONTENTION_COUNT,C2200,C2210,C2220,C3140,C3150,C4130,C4210,C4700,C4920,C5000,C5010,C5710,C6850,TXT_LOSS,TXT_TINITU):
self.VET_ID = VET_ID
self.CLAIM_ID = CLAIM_ID
self.DOB = DOB
self.END_PRODUCT_CODE = END_PRODUCT_CODE
self.RO_NUMBER = RO_NUMBER
self.CLAIM_DATE = CLAIM_DATE
self.MAX_PROFILE_DATE = MAX_PROFILE_DATE
self.CONTENTION_COUNT = CONTENTION_COUNT
self.EAR_CONTENTION_COUNT = EAR_CONTENTION_COUNT
self.C2200 = C2200
self.C2210 = C2210
self.C2220 = C2220
self.C3140 = C3140
self.C3150 = C3150
self.C4130 = C4130
self.C4210 = C4210
self.C4700 = C4700
self.C4920 = C4920
self.C5000 = C5000
self.C5010 = C5010
self.C5710 = C5710
self.C6850 = C6850
self.TXT_LOSS = TXT_LOSS
self.TXT_TINITU = TXT_TINITU
def __str__(self):
return str(vars(self))
print(str(datetime.datetime.now()))
connection = cx_Oracle.connect('developer/[email protected]:1521/DEV.BCDSS')
writeCursor = connection.cursor()
writeCursor.prepare('INSERT INTO DEVELOPER.EAR_FEATURE_VECTOR (VET_ID, CLAIM_ID, CLAIMANT_AGE, DOB, END_PRODUCT_CODE, RO_NUMBER, CLAIM_DATE, PROFILE_DATE, PROMULGATION_DATE, RECENT_EAR_DATE, CONTENTION_COUNT, EAR_CONTENTION_COUNT, PRIOR_EAR_CDD, QUANT_PRIOR_EAR_CDD, CURR_EAR_CDD, QUANT_EAR_CDD, CLAIM_AGE, EAR_CDD_AGE, EAR_CLAIM_COUNT, \
A6100, A6200,A6201,A6202,A6204,A6205,A6207,A6209,A6210,A6211,A6260,C2200,C2210, C2220,C3140,C3150,C4130,C4210,C4700,C4920,C5000, C5010, C5710, C6850, \
CONTENTION_LOSS, CONTENTION_TINITU, DECISION_LOSS, DECISION_TINITU) \
VALUES (:VET_ID, :CLAIM_ID, :CLAIMANT_AGE, :DOB, :END_PRODUCT_CODE, :RO_NUMBER, :CLAIM_DATE, :PROFILE_DATE, :PROMULGATION_DATE, :RECENT_EAR_DATE, :CONTENTION_COUNT, :EAR_CONTENTION_COUNT, :PRIOR_EAR_CDD, :QUANT_PRIOR_EAR_CDD, :CURR_EAR_CDD, :QUANT_EAR_CDD, :CLAIM_AGE, :EAR_CDD_AGE, :EAR_CLAIM_COUNT, \
:A6100, :A6200, :A6201, :A6202, :A6204, :A6205, :A6207, :A6209, :A6210, :A6211, :A6260, :C2200, :C2210, :C2220, :C3140, :C3150, :C4130 , :C4210, :C4700, :C4920, :C5000, :C5010, :C5710, :C6850, \
:CONTENTION_LOSS, :CONTENTION_TINITU, :DECISION_LOSS, :DECISION_TINITU)')
#Query used to pull decisions prior to claim.
#We use a rating profiles promulgation date before the claim date and for the given participant.
priorDecisionCursor = connection.cursor()
priorDecisionCursor.prepare('SELECT * FROM \
(SELECT * \
FROM EAR_AGGREGATE_DECISION \
WHERE to_date(PROMULGATION_DATE) < :CLAIM_DATE and VET_ID = :VET_ID \
ORDER BY PROMULGATION_DATE DESC) WHERE ROWNUM =1')
#Query used to pull decisions for the claim.
#We use a rating profiles profile date equal to the claims most recent rating profile date and for the given participant.
currDecisionCursor = connection.cursor()
currDecisionCursor.prepare('SELECT * \
FROM EAR_AGGREGATE_DECISION \
WHERE to_date(PROFILE_DATE) = :MAX_PROFILE_DATE and VET_ID = :VET_ID and ROWNUM = 1')
cursor = connection.cursor()
cursor.execute(SQL)
earFeatureVector = None
aggregateDecision = None
earCDDChangeDate = None
counter = 0
currParticipant = -1
earClaimCount = 0
for row in cursor:
if counter == 1000:
connection.commit()
counter=0
aggregateContention = AggregateContention(row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10],row[11],row[12],row[13],row[14],row[15],row[16],row[17],row[18],row[19],row[20],row[21],row[22],row[23])
if currParticipant != aggregateContention.VET_ID:
currParticipant = aggregateContention.VET_ID #Reset participant id
earClaimCount = 0
priorDecisionCursor.execute(None, {'VET_ID' :aggregateContention.VET_ID, 'CLAIM_DATE' :aggregateContention.CLAIM_DATE.strftime('%d-%b-%y')}) #Note: cx_Oracle leaves rowcount at 0 for a SELECT until rows are fetched
aggregateDecision = None
prevEarCDD = -1
earCDDChangeDate = None
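# The prior-decision query returns at most one row (ROWNUM = 1); record its EAR_CDD and the promulgation date at which that value took effect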
for decisionRow in priorDecisionCursor:
aggregateDecision = AggregateDecision(decisionRow[0],decisionRow[1],decisionRow[2],decisionRow[3],decisionRow[4],decisionRow[5],decisionRow[6],decisionRow[7],decisionRow[8],decisionRow[9],decisionRow[10],decisionRow[11],decisionRow[12],decisionRow[13],decisionRow[14],decisionRow[15],decisionRow[16],decisionRow[17],decisionRow[18])
if prevEarCDD != aggregateDecision.EAR_CDD:
prevEarCDD = aggregateDecision.EAR_CDD
earCDDChangeDate = aggregateDecision.PROMULGATION_DATE
if aggregateDecision is None:
aggregateDecision = AggregateDecision(None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None)
currDecisionRow = None
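# Look up the decision tied to the claim's most recent rating profile date, if the claim has one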
if aggregateContention.MAX_PROFILE_DATE is not None:
currDecisionCursor.execute(None, {'VET_ID' :aggregateContention.VET_ID, 'MAX_PROFILE_DATE' :aggregateContention.MAX_PROFILE_DATE.strftime('%d-%b-%y')})
currDecisionRow = currDecisionCursor.fetchone()
if currDecisionRow is not None:
currAggregateDecision = AggregateDecision(currDecisionRow[0],currDecisionRow[1],currDecisionRow[2],currDecisionRow[3],currDecisionRow[4],currDecisionRow[5],currDecisionRow[6],currDecisionRow[7],currDecisionRow[8],currDecisionRow[9],currDecisionRow[10],currDecisionRow[11],currDecisionRow[12],currDecisionRow[13],currDecisionRow[14],currDecisionRow[15],currDecisionRow[16],currDecisionRow[17],currDecisionRow[18])
else :
currAggregateDecision = AggregateDecision(None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None)
earFeatureVector = EarFeatureVector()
earFeatureVector.VET_ID = aggregateContention.VET_ID
earFeatureVector.CLAIM_ID = aggregateContention.CLAIM_ID
if aggregateContention.DOB is None:
earFeatureVector.CLAIMANT_AGE = None
else:
earFeatureVector.CLAIMANT_AGE = int((((aggregateContention.CLAIM_DATE.year - aggregateContention.DOB) + 9)/10)) * 10 #Round the age up to the nearest 10: exactly 30 stays 30, 31-39 becomes 40
earFeatureVector.DOB = aggregateContention.DOB
earFeatureVector.END_PRODUCT_CODE = aggregateContention.END_PRODUCT_CODE
earFeatureVector.RO_NUMBER = aggregateContention.RO_NUMBER
earFeatureVector.CLAIM_DATE = aggregateContention.CLAIM_DATE
earFeatureVector.PROFILE_DATE = aggregateDecision.PROFILE_DATE
earFeatureVector.PROMULGATION_DATE = aggregateDecision.PROMULGATION_DATE
earFeatureVector.CONTENTION_COUNT = aggregateContention.CONTENTION_COUNT
earFeatureVector.EAR_CONTENTION_COUNT = aggregateContention.EAR_CONTENTION_COUNT
earFeatureVector.PRIOR_EAR_CDD = aggregateDecision.EAR_CDD
earFeatureVector.CURR_EAR_CDD = currAggregateDecision.EAR_CDD
earFeatureVector.CLAIM_AGE = int((datetime.datetime.now() - aggregateContention.CLAIM_DATE).days / 365.25) #Today minus claim date: this value changes with each run, so it should probably be dropped
earFeatureVector.RECENT_EAR_DATE = currAggregateDecision.RECENT_EAR_DATE #Most recent ear begin date from the previous profile. We use the begin date because the promulgation date keeps changing and does not accurately reflect when the condition was diagnosed
if aggregateDecision.EAR_CDD is None:
earFeatureVector.QUANT_PRIOR_EAR_CDD = None
else:
earFeatureVector.QUANT_PRIOR_EAR_CDD = int(math.ceil(aggregateDecision.EAR_CDD / 10.0)) * 10 #Quantize Prior CDD to the nearest 10
if currAggregateDecision.EAR_CDD is None:
earFeatureVector.QUANT_EAR_CDD = None
else:
earFeatureVector.QUANT_EAR_CDD = int(math.ceil(currAggregateDecision.EAR_CDD / 10.0)) * 10 #Quantize CDD to the nearest 10
if earCDDChangeDate is None:
earFeatureVector.EAR_CDD_AGE = None
else:
earFeatureVector.EAR_CDD_AGE = int((aggregateContention.CLAIM_DATE - earCDDChangeDate).days / 365.25) #Claim Date (newer) - Promulgation date of previous (or later) profile where ear cdd changed
earFeatureVector.C2200 = aggregateContention.C2200
earFeatureVector.C2210 = aggregateContention.C2210
earFeatureVector.C2220 = aggregateContention.C2220
earFeatureVector.C3140 = aggregateContention.C3140
earFeatureVector.C3150 = aggregateContention.C3150
earFeatureVector.C4130 = aggregateContention.C4130
earFeatureVector.C4210 = aggregateContention.C4210
earFeatureVector.C4700 = aggregateContention.C4700
earFeatureVector.C4920 = aggregateContention.C4920
earFeatureVector.C5000 = aggregateContention.C5000
earFeatureVector.C5010 = aggregateContention.C5010
earFeatureVector.C5710 = aggregateContention.C5710
earFeatureVector.C6850 = aggregateContention.C6850
earFeatureVector.A6100 = aggregateDecision.A6100
earFeatureVector.A6200 = aggregateDecision.A6200
earFeatureVector.A6201 = aggregateDecision.A6201
earFeatureVector.A6202 = aggregateDecision.A6202
earFeatureVector.A6204 = aggregateDecision.A6204
earFeatureVector.A6205 = aggregateDecision.A6205
earFeatureVector.A6207 = aggregateDecision.A6207
earFeatureVector.A6209 = aggregateDecision.A6209
earFeatureVector.A6210 = aggregateDecision.A6210
earFeatureVector.A6211 = aggregateDecision.A6211
earFeatureVector.A6260 = aggregateDecision.A6260
earFeatureVector.CONTENTION_LOSS = aggregateContention.TXT_LOSS
earFeatureVector.CONTENTION_TINITU = aggregateContention.TXT_TINITU
earFeatureVector.DECISION_LOSS = aggregateDecision.TXT_LOSS
earFeatureVector.DECISION_TINITU = aggregateDecision.TXT_TINITU
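# Running count, per participant, of claims where the ear CDD changed between the prior and current decision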
if earFeatureVector.PRIOR_EAR_CDD != earFeatureVector.CURR_EAR_CDD:
earClaimCount += 1
earFeatureVector.EAR_CLAIM_COUNT = earClaimCount
writeCursor.execute(None, {'VET_ID' :earFeatureVector.VET_ID, 'CLAIM_ID' :earFeatureVector.CLAIM_ID, 'CLAIMANT_AGE' :earFeatureVector.CLAIMANT_AGE, 'DOB' :earFeatureVector.DOB, 'END_PRODUCT_CODE' :earFeatureVector.END_PRODUCT_CODE, 'RO_NUMBER' :earFeatureVector.RO_NUMBER, 'CLAIM_DATE' :earFeatureVector.CLAIM_DATE, 'PROFILE_DATE' :earFeatureVector.PROFILE_DATE, 'PROMULGATION_DATE' :earFeatureVector.PROMULGATION_DATE, 'CONTENTION_COUNT' :earFeatureVector.CONTENTION_COUNT, 'EAR_CONTENTION_COUNT' :earFeatureVector.EAR_CONTENTION_COUNT,
'PRIOR_EAR_CDD' :earFeatureVector.PRIOR_EAR_CDD, 'QUANT_PRIOR_EAR_CDD' :earFeatureVector.QUANT_PRIOR_EAR_CDD, 'CURR_EAR_CDD' :earFeatureVector.CURR_EAR_CDD, 'QUANT_EAR_CDD' :earFeatureVector.QUANT_EAR_CDD, 'CLAIM_AGE' :earFeatureVector.CLAIM_AGE, 'EAR_CDD_AGE' :earFeatureVector.EAR_CDD_AGE, 'RECENT_EAR_DATE' :earFeatureVector.RECENT_EAR_DATE, 'EAR_CLAIM_COUNT' :earFeatureVector.EAR_CLAIM_COUNT,
'C2200' :earFeatureVector.C2200, 'C2210' :earFeatureVector.C2210, 'C2220' :earFeatureVector.C2220, 'C3140' :earFeatureVector.C3140, 'C3150' :earFeatureVector.C3150, 'C4130' :earFeatureVector.C4130, 'C4210' :earFeatureVector.C4210, 'C4700' :earFeatureVector.C4700, 'C4920' :earFeatureVector.C4920, 'C5000' :earFeatureVector.C5000, 'C5010' :earFeatureVector.C5010, 'C5710' :earFeatureVector.C5710, 'C6850' :earFeatureVector.C6850,
'A6100' :earFeatureVector.A6100, 'A6200' :earFeatureVector.A6200, 'A6201' :earFeatureVector.A6201, 'A6202' :earFeatureVector.A6202, 'A6204' :earFeatureVector.A6204, 'A6205' :earFeatureVector.A6205, 'A6207' :earFeatureVector.A6207, 'A6209' :earFeatureVector.A6209, 'A6210' :earFeatureVector.A6210, 'A6211' :earFeatureVector.A6211, 'A6260' :earFeatureVector.A6260,
'CONTENTION_LOSS' :earFeatureVector.CONTENTION_LOSS, 'CONTENTION_TINITU' :earFeatureVector.CONTENTION_TINITU, 'DECISION_LOSS' :earFeatureVector.DECISION_LOSS, 'DECISION_TINITU' :earFeatureVector.DECISION_TINITU})
counter += 1
connection.commit()
print(str(datetime.datetime.now()))
writeCursor.close()
priorDecisionCursor.close()
currDecisionCursor.close()
cursor.close()
connection.close()
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import operator
import os
import click
import yaml
from docutils import nodes
from docutils.statemachine import ViewList
from docutils.parsers.rst.directives import unchanged
from jinja2 import Environment, PackageLoader
from sphinx.directives import SphinxDirective as Directive
from sphinx.util.nodes import nested_parse_with_titles
from c7n.schema import (
ElementSchema, resource_vocabulary, generate as generate_schema)
from c7n.resources import load_resources
from c7n.provider import clouds
log = logging.getLogger('c7nsphinx')
def template_underline(value, under="="):
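# Jinja helper used by the templates to build an RST section underline as long as the title, e.g. 'Title' -> '====='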
return len(value) * under
def get_environment():
env = Environment(loader=PackageLoader('c7n_sphinxext', '_templates'))
env.globals['underline'] = template_underline
env.globals['ename'] = ElementSchema.name
env.globals['edoc'] = ElementSchema.doc
env.globals['eschema'] = CustodianSchema.render_schema
env.globals['render_resource'] = CustodianResource.render_resource
return env
class CustodianDirective(Directive):
has_content = True
required_arguments = 1
vocabulary = None
env = None
def _parse(self, rst_text, annotation):
result = ViewList()
for line in rst_text.split("\n"):
result.append(line, annotation)
node = nodes.paragraph()
node.document = self.state.document
nested_parse_with_titles(self.state, result, node)
return node.children
def _nodify(self, template_name, annotation, variables):
return self._parse(
self._render(template_name, variables), annotation)
@classmethod
def _render(cls, template_name, variables):
t = cls.env.get_template(template_name)
return t.render(**variables)
@classmethod
def resolve(cls, schema_path):
return ElementSchema.resolve(cls.vocabulary, schema_path)
class CustodianResource(CustodianDirective):
@classmethod
def render_resource(cls, resource_path):
resource_class = cls.resolve(resource_path)
provider_name, resource_name = resource_path.split('.', 1)
return cls._render('resource.rst',
variables=dict(
provider_name=provider_name,
resource_name="%s.%s" % (provider_name, resource_class.type),
filters=ElementSchema.elements(resource_class.filter_registry),
actions=ElementSchema.elements(resource_class.action_registry),
resource=resource_class))
class CustodianSchema(CustodianDirective):
option_spec = {'module': unchanged}
@classmethod
def render_schema(cls, el):
return cls._render(
'schema.rst',
{'schema_yaml': yaml.safe_dump(
ElementSchema.schema(cls.definitions, el),
default_flow_style=False)})
def run(self):
schema_path = self.arguments[0]
el = self.resolve(schema_path)
schema_yaml = yaml.safe_dump(
ElementSchema.schema(self.definitions, el), default_flow_style=False)
return self._nodify(
'schema.rst', '<c7n-schema>',
dict(name=schema_path, schema_yaml=schema_yaml))
INITIALIZED = False
def init():
global INITIALIZED
if INITIALIZED:
return
load_resources()
CustodianDirective.vocabulary = resource_vocabulary()
CustodianDirective.definitions = generate_schema()['definitions']
CustodianDirective.env = env = get_environment()
INITIALIZED = True
return env
def setup(app):
init()
app.add_directive_to_domain(
'py', 'c7n-schema', CustodianSchema)
app.add_directive_to_domain(
'py', 'c7n-resource', CustodianResource)
return {'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True}
@click.command()
@click.option('--provider', required=True)
@click.option('--output-dir', type=click.Path(), required=True)
@click.option('--group-by')
def main(provider, output_dir, group_by):
try:
_main(provider, output_dir, group_by)
except Exception:
import traceback, pdb, sys
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
def resource_file_name(output_dir, r):
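# e.g. a resource whose type is 'security group' maps to 'security-group.rst' under output_dir (illustrative type name)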
return os.path.join(
output_dir, ("%s.rst" % r.type).replace(' ', '-').lower())
def _main(provider, output_dir, group_by):
"""Generate RST docs for a given cloud provider's resources
"""
env = init()
logging.basicConfig(level=logging.INFO)
output_dir = os.path.abspath(output_dir)
provider_class = clouds[provider]
# group by will be provider specific, supports nested attributes
group_by = operator.attrgetter(group_by or "type")
files = []
groups = {}
for r in provider_class.resources.values():
group = group_by(r)
if not isinstance(group, list):
group = [group]
for g in group:
groups.setdefault(g, []).append(r)
# Create individual resources pages
for r in provider_class.resources.values():
rpath = resource_file_name(output_dir, r)
with open(rpath, 'w') as fh:
t = env.get_template('provider-resource.rst')
fh.write(t.render(
provider_name=provider,
resource=r))
# Create files for all groups
for key, group in sorted(groups.items()):
group = sorted(group, key=operator.attrgetter('type'))
rpath = os.path.join(
output_dir, ("group-%s.rst" % key).replace(' ', '-').lower())
with open(rpath, 'w') as fh:
t = env.get_template('provider-group.rst')
fh.write(t.render(
provider_name=provider,
key=key,
resource_files=[os.path.basename(
resource_file_name(output_dir, r)) for r in group],
resources=group))
files.append(os.path.basename(rpath))
# Write out common provider filters & actions
common_actions = {}
common_filters = {}
for r in provider_class.resources.values():
for f in ElementSchema.elements(r.filter_registry):
if not f.schema_alias:
continue
common_filters[ElementSchema.name(f)] = (f, r)
for a in ElementSchema.elements(r.action_registry):
if not a.schema_alias:
continue
common_actions[ElementSchema.name(a)] = (a, r)
fpath = os.path.join(
output_dir,
("%s-common-filters.rst" % provider_class.type.lower()))
with open(fpath, 'w') as fh:
t = env.get_template('provider-common-elements.rst')
fh.write(t.render(
provider_name=provider_class.display_name,
element_type='filters',
elements=[common_filters[k] for k in sorted(common_filters)]))
files.insert(0, os.path.basename(fpath))
fpath = os.path.join(
output_dir,
("%s-common-actions.rst" % provider_class.type.lower()))
with open(fpath, 'w') as fh:
t = env.get_template('provider-common-elements.rst')
fh.write(t.render(
provider_name=provider_class.display_name,
element_type='actions',
elements=[common_actions[k] for k in sorted(common_actions)]))
files.insert(0, os.path.basename(fpath))
log.info("%s Wrote %d resources groups", provider.title(), len(files))
# Write out the provider index
provider_path = os.path.join(output_dir, 'index.rst')
with open(provider_path, 'w') as fh:
log.info("Writing Provider Index to %s", provider_path)
t = env.get_template('provider-index.rst')
fh.write(t.render(provider_name=provider_class.display_name, files=files))
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class AtrousConv2DTest(tf.test.TestCase):
def _upsample_filters(self, filters, rate):
"""Upsamples the filters by a factor of rate along the spatial dimensions.
Args:
filters: [h, w, in_depth, out_depth]. Original filters.
rate: An int, specifying the upsampling rate.
Returns:
filters_up: [h_up, w_up, in_depth, out_depth]. Upsampled filters with
h_up = h + (h - 1) * (rate - 1)
w_up = w + (w - 1) * (rate - 1)
containing (rate - 1) zeros between consecutive filter values along
the filters' spatial dimensions.
"""
if rate == 1:
return filters
# [h, w, in_depth, out_depth] -> [in_depth, out_depth, h, w]
filters_up = np.transpose(filters, [2, 3, 0, 1])
ker = np.zeros([rate, rate])
ker[0, 0] = 1
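# The Kronecker product with 'ker' inserts (rate - 1) zeros after every tap; the slice trims the trailing all-zero rows/columns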
filters_up = np.kron(filters_up, ker)[:, :, :-(rate-1), :-(rate-1)]
# [in_depth, out_depth, h_up, w_up] -> [h_up, w_up, in_depth, out_depth]
filters_up = np.transpose(filters_up, [2, 3, 0, 1])
self.assertEqual(np.sum(filters), np.sum(filters_up))
return filters_up
def testAtrousConv2DForward(self):
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
# Input: [batch, height, width, input_depth]
height = 15
for width in [15, 16]: # Test both odd and even width.
x_shape = [2, height, width, 2]
x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
for kernel_height in range(1, 5):
for kernel_width in range(1, 5):
f_shape = [kernel_height, kernel_width, 2, 2]
f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)
for rate in range(1, 5):
f_up = self._upsample_filters(f, rate)
for padding in ["SAME", "VALID"]:
y1 = tf.nn.atrous_conv2d(x, f, rate, padding=padding)
y2 = tf.nn.conv2d(x, f_up, strides=[1, 1, 1, 1],
padding=padding)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-2,
atol=1e-2)
def testAtrousSequence(self):
"""Tests optimization of sequence of atrous convolutions.
Verifies that a sequence of `atrous_conv2d` operations with identical `rate`
parameters, 'SAME' `padding`, and `filters` with odd heights/widths:
net = atrous_conv2d(net, filters1, rate, padding="SAME")
net = atrous_conv2d(net, filters2, rate, padding="SAME")
...
net = atrous_conv2d(net, filtersK, rate, padding="SAME")
is equivalent to:
pad = ... # padding so that the input dims are multiples of rate
net = space_to_batch(net, paddings=pad, block_size=rate)
net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
...
net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
net = batch_to_space(net, crops=pad, block_size=rate)
"""
padding = "SAME" # The padding needs to be "SAME"
np.random.seed(1) # Make it reproducible.
with self.test_session():
# Input: [batch, height, width, input_depth]
for height in range(15, 17):
for width in range(15, 17):
x_shape = [3, height, width, 2]
x = np.random.random_sample(x_shape).astype(np.float32)
for kernel in [1, 3, 5]: # The kernel size needs to be odd.
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
f_shape = [kernel, kernel, 2, 2]
f = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)
for rate in range(2, 4):
# y1: three atrous_conv2d in a row.
y1 = tf.nn.atrous_conv2d(x, f, rate, padding=padding)
y1 = tf.nn.atrous_conv2d(y1, f, rate, padding=padding)
y1 = tf.nn.atrous_conv2d(y1, f, rate, padding=padding)
# y2: space_to_batch, three conv2d in a row, batch_to_space
pad_bottom = 0 if height % rate == 0 else rate - height % rate
pad_right = 0 if width % rate == 0 else rate - width % rate
pad = [[0, pad_bottom], [0, pad_right]]
y2 = tf.space_to_batch(x, paddings=pad, block_size=rate)
y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = tf.batch_to_space(y2, crops=pad, block_size=rate)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-2, atol=1e-2)
def testGradient(self):
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
# Input: [batch, height, width, input_depth]
x_shape = [2, 5, 6, 2]
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
f_shape = [3, 3, 2, 2]
# Output: [batch, height, width, output_depth]
y_shape = [2, 5, 6, 2]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float32)
f_val = np.random.random_sample(f_shape).astype(np.float32)
x = tf.constant(x_val, name="x", dtype=tf.float32)
f = tf.constant(f_val, name="f", dtype=tf.float32)
for rate in range(1, 4):
output = tf.nn.atrous_conv2d(x, f, rate=rate, padding="SAME")
err = tf.test.compute_gradient_error(
[x, f], [x_shape, f_shape], output, y_shape)
print("atrous_conv2d gradient err = %g " % err)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)
class Conv2DTransposeTest(tf.test.TestCase):
def testConv2DTransposeSingleStride(self):
with self.test_session():
strides = [1, 1, 1, 1]
# Input, output: [batch, height, width, depth]
x_shape = [2, 6, 4, 3]
y_shape = [2, 6, 4, 2]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
padding="SAME")
value = output.eval()
# We count the number of cells being added at the locations in the output.
# At the center, #cells=kernel_height * kernel_width
# At the corners, #cells=ceil(kernel_height/2) * ceil(kernel_width/2)
# At the borders, #cells=ceil(kernel_height/2)*kernel_width or
# kernel_height * ceil(kernel_width/2)
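# For this all-ones input (depth 3) and 3x3 all-ones filter that works out to: corners 2*2*3=12, edges 2*3*3=18, interior 3*3*3=27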
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[2]):
for h in xrange(y_shape[1]):
target = 4 * 3.0
h_in = h > 0 and h < y_shape[1] - 1
w_in = w > 0 and w < y_shape[2] - 1
if h_in and w_in:
target += 5 * 3.0
elif h_in or w_in:
target += 2 * 3.0
self.assertAllClose(target, value[n, h, w, k])
def testConv2DTransposeSame(self):
with self.test_session():
strides = [1, 2, 2, 1]
# Input, output: [batch, height, width, depth]
x_shape = [2, 6, 4, 3]
y_shape = [2, 12, 8, 2]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
padding="SAME")
value = output.eval()
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[2]):
for h in xrange(y_shape[1]):
target = 3.0
# We add a case for locations divisible by the stride.
h_in = h % strides[1] == 0 and h > 0 and h < y_shape[1] - 1
w_in = w % strides[2] == 0 and w > 0 and w < y_shape[2] - 1
if h_in and w_in:
target += 9.0
elif h_in or w_in:
target += 3.0
self.assertAllClose(target, value[n, h, w, k])
def testConv2DTransposeValid(self):
with self.test_session():
strides = [1, 2, 2, 1]
# Input, output: [batch, height, width, depth]
x_shape = [2, 6, 4, 3]
y_shape = [2, 13, 9, 2]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
padding="VALID")
value = output.eval()
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(pad, y_shape[2] - pad):
for h in xrange(pad, y_shape[1] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
h_in = h % strides[1] == 0 and h > pad and h < y_shape[1] - 1 - pad
w_in = w % strides[2] == 0 and w > pad and w < y_shape[2] - 1 - pad
if h_in and w_in:
target += 9.0
elif h_in or w_in:
target += 3.0
cache_values[n, h, w, k] = target
# copy values in the border
cache_values[n, :, 0, k] = cache_values[n, :, 1, k]
cache_values[n, :, -1, k] = cache_values[n, :, -2, k]
cache_values[n, 0, :, k] = cache_values[n, 1, :, k]
cache_values[n, -1, :, k] = cache_values[n, -2, :, k]
self.assertAllClose(cache_values, value)
def testGradient(self):
x_shape = [2, 6, 4, 3]
f_shape = [3, 3, 2, 3]
y_shape = [2, 12, 8, 2]
strides = [1, 2, 2, 1]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
f_val = np.random.random_sample(f_shape).astype(np.float64)
with self.test_session():
x = tf.constant(x_val, name="x", dtype=tf.float32)
f = tf.constant(f_val, name="f", dtype=tf.float32)
output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
padding="SAME")
err = tf.test.compute_gradient_error(
[x, f], [x_shape, f_shape], output, y_shape)
print("DeConv gradient err = %g " % err)
err_tolerance = 0.0005
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
tf.test.main()
import os
import redis
import connection
from scrapy.http import Request
from scrapy.spider import Spider
from unittest import TestCase
from .dupefilter import RFPDupeFilter
from .queue import SpiderQueue, SpiderPriorityQueue, SpiderStack
from .scheduler import Scheduler
# allow test settings from environment
REDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')
REDIS_PORT = int(os.environ.get('REDIS_PORT', 6379))
class DupeFilterTest(TestCase):
def setUp(self):
self.server = redis.Redis(REDIS_HOST, REDIS_PORT)
self.key = 'scrapy_redis:tests:dupefilter:'
self.df = RFPDupeFilter(self.server, self.key)
def tearDown(self):
self.server.delete(self.key)
def test_dupe_filter(self):
req = Request('http://example.com')
self.assertFalse(self.df.request_seen(req))
self.assertTrue(self.df.request_seen(req))
self.df.close('nothing')
class QueueTestMixin(object):
queue_cls = None
def setUp(self):
self.spider = Spider('myspider')
self.key = 'scrapy_redis:tests:%s:queue' % self.spider.name
self.server = redis.Redis(REDIS_HOST, REDIS_PORT)
self.q = self.queue_cls(self.server, Spider('myspider'), self.key)
def tearDown(self):
self.server.delete(self.key)
def test_clear(self):
self.assertEqual(len(self.q), 0)
for i in range(10):
# XXX: can't use the same url for all requests as SpiderPriorityQueue
# uses redis' set implementation and we would end up with only one
# request in the set, thus failing the test. It should be noted
# that when using SpiderPriorityQueue it acts as a request
# duplication filter whenever the serialized requests are the same.
# This might be unwanted on repetitive requests to the same page
# even with the dont_filter=True flag.
req = Request('http://example.com/?page=%s' % i)
self.q.push(req)
self.assertEqual(len(self.q), 10)
self.q.clear()
self.assertEqual(len(self.q), 0)
class SpiderQueueTest(QueueTestMixin, TestCase):
queue_cls = SpiderQueue
def test_queue(self):
req1 = Request('http://example.com/page1')
req2 = Request('http://example.com/page2')
self.q.push(req1)
self.q.push(req2)
out1 = self.q.pop()
out2 = self.q.pop()
self.assertEqual(out1.url, req1.url)
self.assertEqual(out2.url, req2.url)
class SpiderPriorityQueueTest(QueueTestMixin, TestCase):
queue_cls = SpiderPriorityQueue
def test_queue(self):
req1 = Request('http://example.com/page1', priority=100)
req2 = Request('http://example.com/page2', priority=50)
req3 = Request('http://example.com/page2', priority=200)
self.q.push(req1)
self.q.push(req2)
self.q.push(req3)
out1 = self.q.pop()
out2 = self.q.pop()
out3 = self.q.pop()
self.assertEqual(out1.url, req3.url)
self.assertEqual(out2.url, req1.url)
self.assertEqual(out3.url, req2.url)
class SpiderStackTest(QueueTestMixin, TestCase):
queue_cls = SpiderStack
def test_queue(self):
req1 = Request('http://example.com/page1')
req2 = Request('http://example.com/page2')
self.q.push(req1)
self.q.push(req2)
out1 = self.q.pop()
out2 = self.q.pop()
self.assertEqual(out1.url, req2.url)
self.assertEqual(out2.url, req1.url)
class SchedulerTest(TestCase):
def setUp(self):
self.server = redis.Redis(REDIS_HOST, REDIS_PORT)
self.key_prefix = 'scrapy_redis:tests:'
self.queue_key = self.key_prefix + '%(spider)s:requests'
self.dupefilter_key = self.key_prefix + '%(spider)s:dupefilter'
self.idle_before_close = 0
self.scheduler = Scheduler(self.server, False, self.queue_key,
SpiderQueue, self.dupefilter_key,
self.idle_before_close)
def tearDown(self):
for key in self.server.keys(self.key_prefix):
self.server.delete(key)
def test_scheduler(self):
# default no persist
self.assertFalse(self.scheduler.persist)
spider = Spider('myspider')
self.scheduler.open(spider)
self.assertEqual(len(self.scheduler), 0)
req = Request('http://example.com')
self.scheduler.enqueue_request(req)
self.assertTrue(self.scheduler.has_pending_requests())
self.assertEqual(len(self.scheduler), 1)
# dupefilter in action
self.scheduler.enqueue_request(req)
self.assertEqual(len(self.scheduler), 1)
out = self.scheduler.next_request()
self.assertEqual(out.url, req.url)
self.assertFalse(self.scheduler.has_pending_requests())
self.assertEqual(len(self.scheduler), 0)
self.scheduler.close('finish')
def test_scheduler_persistent(self):
messages = []
spider = Spider('myspider')
spider.log = lambda *args, **kwargs: messages.append([args, kwargs])
self.scheduler.persist = True
self.scheduler.open(spider)
self.assertEqual(messages, [])
self.scheduler.enqueue_request(Request('http://example.com/page1'))
self.scheduler.enqueue_request(Request('http://example.com/page2'))
self.assertTrue(self.scheduler.has_pending_requests())
self.scheduler.close('finish')
self.scheduler.open(spider)
self.assertEqual(messages, [
[('Resuming crawl (2 requests scheduled)',), {}],
])
self.assertEqual(len(self.scheduler), 2)
self.scheduler.persist = False
self.scheduler.close('finish')
self.assertEqual(len(self.scheduler), 0)
class ConnectionTest(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
# We can get a connection from just REDIS_URL.
def test_redis_url(self):
settings = dict(
REDIS_URL = 'redis://foo:bar@localhost:9001/42'
)
server = connection.from_settings(settings)
connect_args = server.connection_pool.connection_kwargs
self.assertEqual(connect_args['host'], 'localhost')
self.assertEqual(connect_args['port'], 9001)
self.assertEqual(connect_args['password'], 'bar')
self.assertEqual(connect_args['db'], 42)
# We can get a connection from REDIS_HOST/REDIS_PORT.
def test_redis_host_port(self):
settings = dict(
REDIS_HOST = 'localhost',
REDIS_PORT = 9001
)
server = connection.from_settings(settings)
connect_args = server.connection_pool.connection_kwargs
self.assertEqual(connect_args['host'], 'localhost')
self.assertEqual(connect_args['port'], 9001)
# REDIS_URL takes precedence over REDIS_HOST/REDIS_PORT.
def test_redis_url_precedence(self):
settings = dict(
REDIS_HOST = 'baz',
REDIS_PORT = 1337,
REDIS_URL = 'redis://foo:bar@localhost:9001/42'
)
server = connection.from_settings(settings)
connect_args = server.connection_pool.connection_kwargs
self.assertEqual(connect_args['host'], 'localhost')
self.assertEqual(connect_args['port'], 9001)
self.assertEqual(connect_args['password'], 'bar')
self.assertEqual(connect_args['db'], 42)
# We fallback to REDIS_HOST/REDIS_PORT if REDIS_URL is None.
def test_redis_host_port_fallback(self):
settings = dict(
REDIS_HOST = 'baz',
REDIS_PORT = 1337,
REDIS_URL = None
)
server = connection.from_settings(settings)
connect_args = server.connection_pool.connection_kwargs
self.assertEqual(connect_args['host'], 'baz')
self.assertEqual(connect_args['port'], 1337)
# We use default values for REDIS_HOST/REDIS_PORT.
def test_redis_default(self):
settings = dict()
server = connection.from_settings(settings)
connect_args = server.connection_pool.connection_kwargs
self.assertEqual(connect_args['host'], 'localhost')
self.assertEqual(connect_args['port'], 6379)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corporation
# Copyright 2015-2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Various utility functions that do not neatly fit into one category or another
import base64
import confluent.exceptions as cexc
import confluent.log as log
import hashlib
import netifaces
import os
import re
import socket
import ssl
import struct
import eventlet.green.subprocess as subprocess
def run(cmd):
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
retcode = process.poll()
if retcode:
raise subprocess.CalledProcessError(retcode, process.args, output=stdout)
return stdout, stderr
def stringify(instr):
# Normalize unicode and bytes to 'str', correcting for
# current python version
if isinstance(instr, bytes) and not isinstance(instr, str):
return instr.decode('utf-8', errors='replace')
elif not isinstance(instr, bytes) and not isinstance(instr, str):
return instr.encode('utf-8')
return instr
def list_interface_indexes():
# Getting the interface indexes in a portable manner
# would be better, but there's difficulty from a python perspective.
# For now be linux specific
try:
for iface in os.listdir('/sys/class/net/'):
if not os.path.exists('/sys/class/net/{0}/ifindex'.format(iface)):
continue
ifile = open('/sys/class/net/{0}/ifindex'.format(iface), 'r')
intidx = int(ifile.read())
ifile.close()
yield intidx
except (IOError, OSError):
# Probably situation is non-Linux, just do limited support for
# such platforms until other people come along
for iface in netifaces.interfaces():
addrinfo = netifaces.ifaddresses(iface).get(socket.AF_INET6, [])
for addr in addrinfo:
v6addr = addr.get('addr', '').partition('%')[2]
if v6addr:
yield(int(v6addr))
break
return
def list_ips():
# Used for getting addresses to indicate the multicast address
# as well as getting all the broadcast addresses
for iface in netifaces.interfaces():
addrs = netifaces.ifaddresses(iface)
if netifaces.AF_INET in addrs:
for addr in addrs[netifaces.AF_INET]:
yield addr
def randomstring(length=20):
"""Generate a random string of requested length
:param length: The number of characters to produce, defaults to 20
"""
chunksize = length // 4
if length % 4 > 0:
chunksize += 1
strval = base64.urlsafe_b64encode(os.urandom(chunksize * 3))
return stringify(strval[0:length])
def securerandomnumber(low=0, high=4294967295):
"""Return a random number within requested range
Note that this function will not return smaller than 0 nor larger
than 2^32-1 no matter what is requested.
The python random number facility does not provide characteristics
appropriate for secure rng, go to os.urandom
:param low: Smallest number to return (defaults to 0)
:param high: largest number to return (defaults to 2^32-1)
"""
number = -1
while number < low or number > high:
number = struct.unpack("I", os.urandom(4))[0]
return number
def monotonic_time():
"""Return a monotoc time value
In scenarios like timeouts and such, monotonic timing is preferred.
"""
# for now, just support POSIX systems
return os.times()[4]
def get_certificate_from_file(certfile):
cert = open(certfile, 'r').read()
inpemcert = False
prunedcert = ''
for line in cert.split('\n'):
if '-----BEGIN CERTIFICATE-----' in line:
inpemcert = True
if inpemcert:
prunedcert += line
if '-----END CERTIFICATE-----' in line:
break
return ssl.PEM_cert_to_DER_cert(prunedcert)
def get_fingerprint(certificate, algo='sha512'):
if algo == 'sha256':
return 'sha256$' + hashlib.sha256(certificate).hexdigest()
elif algo == 'sha512':
return 'sha512$' + hashlib.sha512(certificate).hexdigest()
raise Exception('Unsupported fingerprint algorithm ' + algo)
def cert_matches(fingerprint, certificate):
if not fingerprint or not certificate:
return False
algo, _, fp = fingerprint.partition('$')
newfp = None
if algo in ('sha512', 'sha256'):
newfp = get_fingerprint(certificate, algo)
return newfp and fingerprint == newfp
class TLSCertVerifier(object):
def __init__(self, configmanager, node, fieldname):
self.cfm = configmanager
self.node = node
self.fieldname = fieldname
def verify_cert(self, certificate):
storedprint = self.cfm.get_node_attributes(self.node, (self.fieldname,))
if (self.fieldname not in storedprint[self.node] or
storedprint[self.node][self.fieldname]['value'] == ''):
# no stored value, check policy for next action
newpolicy = self.cfm.get_node_attributes(self.node,
('pubkeys.addpolicy',))
if ('pubkeys.addpolicy' in newpolicy[self.node] and
'value' in newpolicy[self.node]['pubkeys.addpolicy'] and
newpolicy[self.node]['pubkeys.addpolicy']['value'] == 'manual'):
# manual policy means always raise unless a match is set
# manually
fingerprint = get_fingerprint(certificate, 'sha256')
raise cexc.PubkeyInvalid('New certificate detected',
certificate, fingerprint,
self.fieldname, 'newkey')
# since the policy is not manual, go ahead and add new key
# after logging to audit log
fingerprint = get_fingerprint(certificate, 'sha256')
auditlog = log.Logger('audit')
auditlog.log({'node': self.node, 'event': 'certautoadd',
'fingerprint': fingerprint})
self.cfm.set_node_attributes(
{self.node: {self.fieldname: fingerprint}})
return True
elif cert_matches(storedprint[self.node][self.fieldname]['value'],
certificate):
return True
fingerprint = get_fingerprint(certificate, 'sha256')
raise cexc.PubkeyInvalid(
'Mismatched certificate detected', certificate, fingerprint,
self.fieldname, 'mismatch')
numregex = re.compile('([0-9]+)')
def naturalize_string(key):
"""Analyzes string in a human way to enable natural sort
:param nodename: The node name to analyze
:returns: A structure that can be consumed by 'sorted'
"""
return [int(text) if text.isdigit() else text.lower()
for text in re.split(numregex, key)]
def natural_sort(iterable):
"""Return a sort using natural sort if possible
:param iterable:
:return:
"""
try:
return sorted(iterable, key=naturalize_string)
except TypeError:
# The natural sort attempt failed, fallback to ascii sort
return sorted(iterable)
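# Example: natural_sort(['node10', 'node2', 'node1']) -> ['node1', 'node2', 'node10']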
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
from tempest_lib import decorators
import testtools
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest.services.network import resources as net_resources
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
Floating_IP_tuple = collections.namedtuple('Floating_IP_tuple',
['floating_ip', 'server'])
class TestNetworkBasicOps(manager.NetworkScenarioTest):
"""
This smoke test suite assumes that Nova has been configured to
boot VMs with Neutron-managed networking, and attempts to
verify network connectivity as follows:
There are presumed to be two types of networks: tenant and
public. A tenant network may or may not be reachable from the
Tempest host. A public network is assumed to be reachable from
the Tempest host, and it should be possible to associate a public
('floating') IP address with a tenant ('fixed') IP address to
facilitate external connectivity to a potentially unroutable
tenant IP address.
This test suite can be configured to test network connectivity to
a VM via a tenant network, a public network, or both. If both
networking types are to be evaluated, tests that need to be
executed remotely on the VM (via ssh) will only be run against
one of the networks (to minimize test execution time).
Determine which types of networks to test as follows:
* Configure tenant network checks (via the
'tenant_networks_reachable' key) if the Tempest host should
have direct connectivity to tenant networks. This is likely to
be the case if Tempest is running on the same host as a
single-node devstack installation with IP namespaces disabled.
* Configure checks for a public network if a public network has
been configured prior to the test suite being run and if the
Tempest host should have connectivity to that public network.
Checking connectivity for a public network requires that a
value be provided for 'public_network_id'. A value can
optionally be provided for 'public_router_id' if tenants will
use a shared router to access a public network (as is likely to
be the case when IP namespaces are not enabled). If a value is
not provided for 'public_router_id', a router will be created
for each tenant and use the network identified by
'public_network_id' as its gateway.
"""
@classmethod
def check_preconditions(cls):
super(TestNetworkBasicOps, cls).check_preconditions()
if not (CONF.network.tenant_networks_reachable
or CONF.network.public_network_id):
msg = ('Either tenant_networks_reachable must be "true", or '
'public_network_id must be defined.')
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
for ext in ['router', 'security-group']:
if not test.is_extension_enabled(ext, 'network'):
msg = "%s extension not enabled." % ext
raise cls.skipException(msg)
# Create no network resources for these tests.
cls.set_network_resources()
super(TestNetworkBasicOps, cls).resource_setup()
def setUp(self):
super(TestNetworkBasicOps, self).setUp()
self.keypairs = {}
self.servers = []
def _setup_network_and_servers(self, **kwargs):
self.security_group = \
self._create_security_group(tenant_id=self.tenant_id)
self.network, self.subnet, self.router = self.create_networks(**kwargs)
self.check_networks()
name = data_utils.rand_name('server-smoke')
server = self._create_server(name, self.network)
self._check_tenant_network_connectivity()
floating_ip = self.create_floating_ip(server)
self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
def check_networks(self):
"""
Checks that we see the newly created network/subnet/router via
checking the result of list_[networks,routers,subnets]
"""
seen_nets = self._list_networks()
seen_names = [n['name'] for n in seen_nets]
seen_ids = [n['id'] for n in seen_nets]
self.assertIn(self.network.name, seen_names)
self.assertIn(self.network.id, seen_ids)
if self.subnet:
seen_subnets = self._list_subnets()
seen_net_ids = [n['network_id'] for n in seen_subnets]
seen_subnet_ids = [n['id'] for n in seen_subnets]
self.assertIn(self.network.id, seen_net_ids)
self.assertIn(self.subnet.id, seen_subnet_ids)
if self.router:
seen_routers = self._list_routers()
seen_router_ids = [n['id'] for n in seen_routers]
seen_router_names = [n['name'] for n in seen_routers]
self.assertIn(self.router.name,
seen_router_names)
self.assertIn(self.router.id,
seen_router_ids)
def _create_server(self, name, network):
keypair = self.create_keypair()
self.keypairs[keypair['name']] = keypair
security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'networks': [
{'uuid': network.id},
],
'key_name': keypair['name'],
'security_groups': security_groups,
}
server = self.create_server(name=name, create_kwargs=create_kwargs)
self.servers.append(server)
return server
def _get_server_key(self, server):
return self.keypairs[server['key_name']]['private_key']
def _check_tenant_network_connectivity(self):
ssh_login = CONF.compute.image_ssh_user
for server in self.servers:
# call the common method in the parent class
super(TestNetworkBasicOps, self).\
_check_tenant_network_connectivity(
server, ssh_login, self._get_server_key(server),
servers_for_debug=self.servers)
def check_public_network_connectivity(
self, should_connect=True, msg=None,
should_check_floating_ip_status=True):
"""Verifies connectivty to a VM via public network and floating IP,
and verifies floating IP has resource status is correct.
:param should_connect: bool. determines if connectivity check is
negative or positive.
:param msg: Failure message to add to Error message. Should describe
the place in the test scenario where the method was called,
to indicate the context of the failure
:param should_check_floating_ip_status: bool. should status of
floating_ip be checked or not
"""
ssh_login = CONF.compute.image_ssh_user
floating_ip, server = self.floating_ip_tuple
ip_address = floating_ip.floating_ip_address
private_key = None
floatingip_status = 'DOWN'
if should_connect:
private_key = self._get_server_key(server)
floatingip_status = 'ACTIVE'
# call the common method in the parent class
super(TestNetworkBasicOps, self).check_public_network_connectivity(
ip_address, ssh_login, private_key, should_connect, msg,
self.servers)
if should_check_floating_ip_status:
self.check_floating_ip_status(floating_ip, floatingip_status)
def _disassociate_floating_ips(self):
floating_ip, server = self.floating_ip_tuple
self._disassociate_floating_ip(floating_ip)
self.floating_ip_tuple = Floating_IP_tuple(
floating_ip, None)
def _reassociate_floating_ips(self):
floating_ip, server = self.floating_ip_tuple
name = data_utils.rand_name('new_server-smoke-')
# create a new server for the floating ip
server = self._create_server(name, self.network)
self._associate_floating_ip(floating_ip, server)
self.floating_ip_tuple = Floating_IP_tuple(
floating_ip, server)
def _create_new_network(self):
self.new_net = self._create_network(tenant_id=self.tenant_id)
self.new_subnet = self._create_subnet(
network=self.new_net,
gateway_ip=None)
def _hotplug_server(self):
old_floating_ip, server = self.floating_ip_tuple
ip_address = old_floating_ip.floating_ip_address
private_key = self._get_server_key(server)
ssh_client = self.get_remote_client(ip_address,
private_key=private_key)
old_nic_list = self._get_server_nics(ssh_client)
# get a port from a list of one item
port_list = self._list_ports(device_id=server['id'])
self.assertEqual(1, len(port_list))
old_port = port_list[0]
_, interface = self.interface_client.create_interface(
server=server['id'],
network_id=self.new_net.id)
self.addCleanup(self.network_client.wait_for_resource_deletion,
'port',
interface['port_id'])
self.addCleanup(self.delete_wrapper,
self.interface_client.delete_interface,
server['id'], interface['port_id'])
def check_ports():
self.new_port_list = [port for port in
self._list_ports(device_id=server['id'])
if port['id'] != old_port['id']]
return len(self.new_port_list) == 1
if not test.call_until_true(check_ports, CONF.network.build_timeout,
CONF.network.build_interval):
raise exceptions.TimeoutException(
"No new port attached to the server in time (%s sec)! "
"Old port: %s. Number of new ports: %d" % (
CONF.network.build_timeout, old_port,
len(self.new_port_list)))
new_port = net_resources.DeletablePort(client=self.network_client,
**self.new_port_list[0])
def check_new_nic():
new_nic_list = self._get_server_nics(ssh_client)
self.diff_list = [n for n in new_nic_list if n not in old_nic_list]
return len(self.diff_list) == 1
if not test.call_until_true(check_new_nic, CONF.network.build_timeout,
CONF.network.build_interval):
raise exceptions.TimeoutException("Interface not visible on the "
"guest after %s sec"
% CONF.network.build_timeout)
num, new_nic = self.diff_list[0]
ssh_client.assign_static_ip(nic=new_nic,
addr=new_port.fixed_ips[0]['ip_address'])
ssh_client.turn_nic_on(nic=new_nic)
def _get_server_nics(self, ssh_client):
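# Parse interface listing lines such as '2: eth0: ...' into (index, name) pairs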
reg = re.compile(r'(?P<num>\d+): (?P<nic_name>\w+):')
ipatxt = ssh_client.get_ip_list()
return reg.findall(ipatxt)
def _check_network_internal_connectivity(self, network):
"""
Via ssh, check the VM's internal connectivity:
- ping the internal gateway and the DHCP port, implying in-tenant connectivity
(we ping both because the L3 and DHCP agents might be on different nodes)
"""
floating_ip, server = self.floating_ip_tuple
# get internal ports' ips:
# get all network ports in the new network
internal_ips = (p['fixed_ips'][0]['ip_address'] for p in
self._list_ports(tenant_id=server['tenant_id'],
network_id=network.id)
if p['device_owner'].startswith('network'))
self._check_server_connectivity(floating_ip, internal_ips)
def _check_network_external_connectivity(self):
"""
ping public network default gateway to imply external connectivity
"""
if not CONF.network.public_network_id:
msg = 'public network not defined.'
LOG.info(msg)
return
subnet = self._list_subnets(
network_id=CONF.network.public_network_id)
self.assertEqual(1, len(subnet), "Found %d subnets" % len(subnet))
external_ips = [subnet[0]['gateway_ip']]
self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
external_ips)
def _check_server_connectivity(self, floating_ip, address_list):
ip_address = floating_ip.floating_ip_address
private_key = self._get_server_key(self.floating_ip_tuple.server)
ssh_source = self._ssh_to_server(ip_address, private_key)
for remote_ip in address_list:
try:
self.assertTrue(self._check_remote_connectivity(ssh_source,
remote_ip),
"Timed out waiting for %s to become "
"reachable" % remote_ip)
except Exception:
LOG.exception("Unable to access {dest} via ssh to "
"floating-ip {src}".format(dest=remote_ip,
src=floating_ip))
raise
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_network_basic_ops(self):
"""
For a freshly-booted VM with an IP address ("port") on a given
network:
- the Tempest host can ping the IP address. This implies, but
does not guarantee (see the ssh check that follows), that the
VM has been assigned the correct IP address and has
connectivity to the Tempest host.
- the Tempest host can perform key-based authentication to an
ssh server hosted at the IP address. This check guarantees
that the IP address is associated with the target VM.
- the Tempest host can ssh into the VM via the IP address and
successfully execute the following:
- ping an external IP address, implying external connectivity.
- ping an external hostname, implying that dns is correctly
configured.
- ping an internal IP address, implying connectivity to another
VM on the same network.
- detach the floating-ip from the VM and verify that it becomes
unreachable
- associate detached floating ip to a new VM and verify connectivity.
VMs are created with a unique keypair, so connectivity also asserts that
the floating IP is associated with the new VM instead of the old one.
Verifies that the floating IP status is updated correctly after each change.
"""
self._setup_network_and_servers()
self.check_public_network_connectivity(should_connect=True)
self._check_network_internal_connectivity(network=self.network)
self._check_network_external_connectivity()
self._disassociate_floating_ips()
self.check_public_network_connectivity(should_connect=False,
msg="after disassociate "
"floating ip")
self._reassociate_floating_ips()
self.check_public_network_connectivity(should_connect=True,
msg="after re-associate "
"floating ip")
@testtools.skipUnless(CONF.compute_feature_enabled.interface_attach,
'NIC hotplug not available')
@testtools.skipIf(CONF.network.port_vnic_type in ['direct', 'macvtap'],
'NIC hotplug not supported for '
'vnic_type direct or macvtap')
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_hotplug_nic(self):
"""
1. create a new network, with no gateway (to prevent overwriting VM's
gateway)
2. connect VM to new network
3. set static ip and bring new nic up
4. check VM can ping new network dhcp port
"""
self._setup_network_and_servers()
self.check_public_network_connectivity(should_connect=True)
self._create_new_network()
self._hotplug_server()
self._check_network_internal_connectivity(network=self.new_net)
@testtools.skipIf(CONF.baremetal.driver_enabled,
'Router state cannot be altered on a shared baremetal '
'network')
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_update_router_admin_state(self):
"""
1. Check public connectivity before updating
admin_state_up attribute of router to False
2. Check public connectivity after updating
admin_state_up attribute of router to False
3. Check public connectivity after updating
admin_state_up attribute of router to True
"""
self._setup_network_and_servers()
self.check_public_network_connectivity(
should_connect=True, msg="before updating "
"admin_state_up of router to False")
self._update_router_admin_state(self.router, False)
# TODO(alokmaurya): Remove should_check_floating_ip_status=False check
# once bug 1396310 is fixed
self.check_public_network_connectivity(
should_connect=False, msg="after updating "
"admin_state_up of router to False",
should_check_floating_ip_status=False)
self._update_router_admin_state(self.router, True)
self.check_public_network_connectivity(
should_connect=True, msg="after updating "
"admin_state_up of router to True")
def _check_dns_server(self, ssh_client, dns_servers):
servers = ssh_client.get_dns_servers()
self.assertEqual(set(dns_servers), set(servers),
'Looking for servers: {trgt_serv}. '
'Retrieved DNS nameservers: {act_serv} '
'From host: {host}.'
.format(host=ssh_client.ssh_client.host,
act_serv=servers,
trgt_serv=dns_servers))
@decorators.skip_because(bug="1412325")
@testtools.skipUnless(CONF.scenario.dhcp_client,
"DHCP client is not available.")
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_subnet_details(self):
"""Tests that subnet's extra configuration details are affecting
the VMs
NOTE: Neutron subnets push data to servers via dhcp-agent, so any
update in subnet requires server to actively renew its DHCP lease.
1. Configure subnet with dns nameserver
2. retrieve the VM's configured dns and verify it matches the one
configured for the subnet.
3. update subnet's dns
4. retrieve the VM's configured dns and verify it matches the new one
configured for the subnet.
TODO(yfried): add host_routes
any resolution check would be testing either:
* l3 forwarding (tested in test_network_basic_ops)
* Name resolution of an external DNS nameserver - out of scope for
Tempest
"""
# this test checks only updates (no actual resolution), so use
# arbitrary ip addresses as nameservers instead of parsing CONF
initial_dns_server = '1.2.3.4'
alt_dns_server = '9.8.7.6'
self._setup_network_and_servers(dns_nameservers=[initial_dns_server])
self.check_public_network_connectivity(should_connect=True)
floating_ip, server = self.floating_ip_tuple
ip_address = floating_ip.floating_ip_address
private_key = self._get_server_key(server)
ssh_client = self._ssh_to_server(ip_address, private_key)
self._check_dns_server(ssh_client, [initial_dns_server])
self.subnet.update(dns_nameservers=[alt_dns_server])
# asserts that Neutron DB has updated the nameservers
self.assertEqual([alt_dns_server], self.subnet.dns_nameservers,
"Failed to update subnet's nameservers")
# server needs to renew its dhcp lease in order to get the new dns
# definitions from subnet
ssh_client.renew_lease(fixed_ip=floating_ip['fixed_ip_address'])
self._check_dns_server(ssh_client, [alt_dns_server])
@testtools.skipIf(CONF.baremetal.driver_enabled,
'admin_state of instance ports cannot be altered '
'for baremetal nodes')
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_update_instance_port_admin_state(self):
"""
1. Check public connectivity before updating
admin_state_up attribute of instance port to False
2. Check public connectivity after updating
admin_state_up attribute of instance port to False
3. Check public connectivity after updating
admin_state_up attribute of instance port to True
"""
self._setup_network_and_servers()
floating_ip, server = self.floating_ip_tuple
server_id = server['id']
port_id = self._list_ports(device_id=server_id)[0]['id']
self.check_public_network_connectivity(
should_connect=True, msg="before updating "
"admin_state_up of instance port to False")
self.network_client.update_port(port_id, admin_state_up=False)
self.check_public_network_connectivity(
should_connect=False, msg="after updating "
"admin_state_up of instance port to False",
should_check_floating_ip_status=False)
self.network_client.update_port(port_id, admin_state_up=True)
self.check_public_network_connectivity(
should_connect=True, msg="after updating "
"admin_state_up of instance port to True")
|
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from diamond.collector import Collector
from cpu import CPUCollector
################################################################################
def find_metric(metric_list, metric_name):
# return every published metric whose name contains metric_name
return [m for m in metric_list if m["name"].find(metric_name) > -1]
class TestCPUCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('CPUCollector', {
'interval': 10,
'normalize': False
})
self.collector = CPUCollector(config, None)
def test_import(self):
self.assertTrue(CPUCollector)
@patch('__builtin__.open')
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_open_proc_stat(self, publish_mock, open_mock):
CPUCollector.PROC = '/proc/stat'
open_mock.return_value = StringIO('')
self.collector.collect()
open_mock.assert_called_once_with('/proc/stat')
@patch.object(Collector, 'publish')
def test_should_work_with_synthetic_data(self, publish_mock):
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
'cpu 100 200 300 400 500 0 0 0 0 0')))
patch_open.start()
self.collector.collect()
patch_open.stop()
self.assertPublishedMany(publish_mock, {})
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
'cpu 110 220 330 440 550 0 0 0 0 0')))
patch_open.start()
self.collector.collect()
patch_open.stop()
self.assertPublishedMany(publish_mock, {
'cpu.total.idle': 440,
'cpu.total.iowait': 550,
'cpu.total.nice': 220,
'cpu.total.system': 330,
'cpu.total.user': 110
})
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
CPUCollector.PROC = self.getFixturePath('proc_stat_1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
CPUCollector.PROC = self.getFixturePath('proc_stat_2')
self.collector.collect()
metrics = {
'cpu.total.idle': 3925832001,
'cpu.total.iowait': 575306,
'cpu.total.nice': 1104382,
'cpu.total.system': 8454154,
'cpu.total.user': 29055791
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_work_with_ec2_data(self, publish_mock):
self.collector.config['interval'] = 30
self.collector.config['xenfix'] = False
patch_open = patch('os.path.isdir', Mock(return_value=True))
patch_open.start()
CPUCollector.PROC = self.getFixturePath('ec2_stat_1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
CPUCollector.PROC = self.getFixturePath('ec2_stat_2')
self.collector.collect()
patch_open.stop()
metrics = {
'cpu.total.idle': 2806608501,
'cpu.total.iowait': 13567144,
'cpu.total.nice': 15545,
'cpu.total.system': 170762788,
'cpu.total.user': 243646997
}
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_total_metrics_enable_aggregation_false(self, publish_mock):
self.collector.config['enableAggregation'] = False
CPUCollector.PROC = self.getFixturePath('proc_stat_2')
self.collector.collect()
publishedMetrics = {
'cpu.total.nice': 1104382,
'cpu.total.irq': 3,
'cpu.total.softirq': 59032,
'cpu.total.user': 29055791
}
unpublishedMetrics = {
'cpu.total.user_mode': 30160173,
'cpu.total.irq_softirq': 59035
}
self.assertPublishedMany(publish_mock, publishedMetrics)
self.collector.collect()
self.assertUnpublishedMany(publish_mock, unpublishedMetrics)
@patch.object(Collector, 'publish')
def test_total_metrics_enable_aggregation_true(self, publish_mock):
self.collector.config['enableAggregation'] = True
CPUCollector.PROC = self.getFixturePath('proc_stat_2')
self.collector.collect()
publishedMetrics = {
'cpu.total.nice': 1104382,
'cpu.total.irq': 3,
'cpu.total.softirq': 59032,
'cpu.total.user': 29055791,
'cpu.total.user_mode': 30160173,
'cpu.total.irq_softirq': 59035
}
self.assertPublishedMany(publish_mock, publishedMetrics)
@patch.object(Collector, 'publish')
def test_total_metrics_enable_aggregation_true_blacklist(self, publish_mock):
self.collector.config['enableAggregation'] = True
CPUCollector.PROC = self.getFixturePath('proc_stat_2')
self.collector.collect()
publishedMetrics = {
'cpu.total.nice': 1104382,
'cpu.total.irq': 3,
'cpu.total.softirq': 59032,
'cpu.total.user': 29055791,
'cpu.total.user_mode': 30160173,
'cpu.total.irq_softirq': 59035
}
self.assertPublishedMany(publish_mock, publishedMetrics)
@patch.object(Collector, 'publish')
def test_core_metrics_enable_aggregation_false(self, publish_mock):
self.collector.config['enableAggregation'] = False
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
'cpu0 110 220 330 440 550 660 770 0 0 0')))
patch_open.start()
self.collector.collect()
patch_open.stop()
publishedMetrics = {
'cpu.nice': 220,
'cpu.irq': 660,
'cpu.softirq': 770,
'cpu.user': 110
}
self.assertPublishedMany(publish_mock, publishedMetrics)
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
'cpu0 110 220 330 440 550 660 770 0 0 0')))
patch_open.start()
self.collector.collect()
patch_open.stop()
unpublishedMetrics = {
'cpu.user_mode': 330,
'cpu.irq_softirq': 1430
}
self.assertUnpublishedMany(publish_mock, unpublishedMetrics)
@patch.object(Collector, 'publish')
def test_core_metrics_enable_aggregation_true(self, publish_mock):
self.collector.config['enableAggregation'] = True
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
'cpu0 110 220 330 440 550 660 770 0 0 0')))
patch_open.start()
self.collector.collect()
patch_open.stop()
publishedMetrics = {
'cpu.nice': 220,
'cpu.irq': 660,
'cpu.softirq': 770,
'cpu.user': 110,
'cpu.user_mode': 330,
'cpu.irq_softirq': 1430
}
self.assertPublishedMany(publish_mock, publishedMetrics)
class TestCPUCollectorNormalize(CollectorTestCase):
def setUp(self):
config = get_collector_config('CPUCollector', {
'interval': 1,
'normalize': True,
})
self.collector = CPUCollector(config, None)
self.num_cpu = 2
# first measurement
self.input_base = {
'user': 100,
'nice': 200,
'system': 300,
'idle': 400,
}
# second measurement
self.input_next = {
'user': 110,
'nice': 220,
'system': 330,
'idle': 440,
}
self.expected = {
'cpu.total.user': 110,
'cpu.total.nice': 220,
'cpu.total.system': 330,
'cpu.total.idle': 440,
}
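# with normalize=True the collector is expected to divide the totals by
# the number of CPUs (2 here), so these values are half of self.expected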
self.expected2 = {
'cpu.total.user': 55,
'cpu.total.nice': 110,
'cpu.total.system': 165,
'cpu.total.idle': 220,
}
# convert an input dict with values to a string that might come from
# /proc/stat
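# e.g. input_dict_to_proc_string('0', self.input_base) ->
# 'cpu0 100 200 300 400 0 0 0 0 0 0'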
def input_dict_to_proc_string(self, cpu_id, dict_):
return ("cpu%s %i %i %i %i 0 0 0 0 0 0" %
(cpu_id,
dict_['user'],
dict_['nice'],
dict_['system'],
dict_['idle'],
)
)
@patch.object(Collector, 'publish')
def test_should_work_proc_stat(self, publish_mock):
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
"\n".join([self.input_dict_to_proc_string('', self.input_base),
self.input_dict_to_proc_string('0', self.input_base),
self.input_dict_to_proc_string('1', self.input_base),
])
)))
patch_open.start()
self.collector.collect()
patch_open.stop()
self.assertPublishedMany(publish_mock, {})
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
"\n".join([self.input_dict_to_proc_string('', self.input_next),
self.input_dict_to_proc_string('0', self.input_next),
self.input_dict_to_proc_string('1', self.input_next),
])
)))
patch_open.start()
self.collector.collect()
patch_open.stop()
self.assertPublishedMany(publish_mock, self.expected)
@patch.object(Collector, 'publish')
@patch('cpu.os')
@patch('cpu.psutil')
def test_should_work_psutil(self, psutil_mock, os_mock, publish_mock):
os_mock.access.return_value = False
total = Mock(**self.input_base)
cpu_time = [Mock(**self.input_base),
Mock(**self.input_base),
]
psutil_mock.cpu_times.side_effect = [cpu_time, total]
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
total = Mock(**self.input_next)
cpu_time = [Mock(**self.input_next),
Mock(**self.input_next),
]
psutil_mock.cpu_times.side_effect = [cpu_time, total]
self.collector.collect()
self.assertPublishedMany(publish_mock, self.expected2)
class TestCPUCollectorDimensions(CollectorTestCase):
def setUp(self):
config = get_collector_config('CPUCollector', {
'interval': 10,
'normalize': False
})
self.collector = CPUCollector(config, None)
@patch.object(Collector, 'flush')
def test_core_dimension_core_metrics(self, publish_mock):
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
'cpu0 110 220 330 440 550 660 770 0 0 0')))
patch_open.start()
self.collector.collect()
patch_open.stop()
for metric_name in ['cpu.user', 'cpu.idle', 'cpu.nice', 'cpu.softirq']:
metrics = find_metric(self.collector.payload, metric_name)
self.assertEqual(len(metrics), 1)
self.assertIn('core', metrics[0]['dimensions'])
@patch.object(Collector, 'flush')
def test_core_dimension_total_metrics(self, publish_mock):
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
'cpu 110 220 330 440 550 660 770 0 0 0')))
patch_open.start()
self.collector.collect()
patch_open.stop()
for metric_name in ['cpu.total.user', 'cpu.total.idle', 'cpu.total.nice', 'cpu.total.softirq']:
metrics = find_metric(self.collector.payload, metric_name)
self.assertEqual(len(metrics), 1)
self.assertNotIn('core', metrics[0]['dimensions'])
@patch.object(Collector, 'flush')
def test_core_dimension_core_metrics_aggregated(self, publish_mock):
self.collector.config['enableAggregation'] = True
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
'cpu0 110 220 330 440 550 660 770 0 0 0')))
patch_open.start()
self.collector.collect()
patch_open.stop()
for metric_name in ['cpu.user_mode', 'cpu.idle', 'cpu.nice', 'cpu.irq_softirq']:
metrics = find_metric(self.collector.payload, metric_name)
self.assertEqual(len(metrics), 1)
self.assertIn('core', metrics[0]['dimensions'])
@patch.object(Collector, 'flush')
def test_core_dimension_total_metrics_aggregated(self, publish_mock):
self.collector.config['enableAggregation'] = True
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
'cpu 110 220 330 440 550 660 770 0 0 0')))
patch_open.start()
self.collector.collect()
patch_open.stop()
for metric_name in ['cpu.total.user_mode', 'cpu.total.idle', 'cpu.total.nice', 'cpu.total.irq_softirq']:
metrics = find_metric(self.collector.payload, metric_name)
self.assertEqual(len(metrics), 1)
self.assertNotIn('core', metrics[0]['dimensions'])
################################################################################
if __name__ == "__main__":
unittest.main()
|
|
# How to Launch:
# Example: <validation-tests>/v2/tests/upgrade/rancher_upgrade_test.py
# -b 0.37.0 -t 0.37.1 -s 104.197.121.156 -u aruneli
# base version has no significance now, but target version should be a
# valid rancher server version
# server option (-s): IP Address of the rancher server
# username is the username with which you ssh to your GCE instance
from common_fixtures import * # NOQA
import argparse
import os
import paramiko
import requests
import sys
import time  # used by the ssh retry loop and the post-upgrade wait
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-b', help='base version')
parser.add_argument('-t', help='target version')
parser.add_argument('-s', help='server node')
parser.add_argument('-u', help='ssh username of rancher server host')
parser.add_argument('-script',
help='provide the script or give "/" for all tests')
args = parser.parse_args()
tmp_dir = os.path.join(root_dir, 'tmp')
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
logger.info("tmp_dir is: %s", tmp_dir)
core_dir = os.path.join(root_dir, 'cattlevalidationtest', 'core')
logger.info("core_dir is: %s", core_dir)
core_target_checkedout_dir = os.path.join(
tmp_dir, 'rancher-tests', 'tests', 'validation_v2',
'cattlevalidationtest', 'core')
if not os.path.exists(core_target_checkedout_dir):
os.makedirs(core_target_checkedout_dir)
logger.info("core_target_checkedout_dir : %s", core_target_checkedout_dir)
core_target_dir = os.path.join(root_dir, 'cattlevalidationtest',
'core_target')
if not os.path.exists(core_target_dir):
os.makedirs(core_target_dir)
logger.info("core_target_dir is: %s", core_target_dir)
upgrade_test(args.b, args.t, args.s, args.u, tmp_dir, core_dir,
core_target_dir, core_target_checkedout_dir, args.script)
def upgrade_test(base, target, servernode, username, tmp_dir, core_dir,
core_target_dir, core_target_checkedout_dir, script_to_test):
logger.info("CREATING SERVICES NOW IN BASE SETUP...")
# create_cmd = "py.test " + core_dir + "/ -v -m create -s"
create_cmd = "py.test -s --junit-xml=results_create.xml " + core_dir + script_to_test+" -v -m create -s"
logger.info("create command is: %s", create_cmd)
os.system(create_cmd)
upgrade_rancher_server(base, target, servernode, username)
# the line below is temporary until we start having real tagged versions
os.system(("cp -r " + core_dir + "/*.py " + tmp_dir))
# os.system("git clone -b master --single-branch "
# "https://github.com/aruneli/rancher-tests.git")
logger.info("COPYING TARGET LIBRARIES in core_target folder...")
os.system(("cp -r " + tmp_dir + "/test_*.py " + core_target_dir))
os.system(("cp -r " + tmp_dir + "/common_fixtures.py " + core_target_dir))
logger.info("VALIDATING UPGRADED SETUP NOW WITH TARGET")
# validate_cmd = "py.test " + core_target_dir + "/ -v -m validate -s"
validate_cmd = \
"py.test -s --junit-xml=results_validate.xml " + core_target_dir + script_to_test + " -v -m validate -s"
logger.info(validate_cmd)
os.system(validate_cmd)
logger.info("VALIDATION COMPLETE")
os.system("rm -rf " + tmp_dir + "/*")
os.system("rm -rf " + core_target_dir + "/*")
def upgrade_rancher_server(base, target, servernode, username):
logger.info("UPGRADING RANCHER SERVER TO TARGET")
i = 1
#
# Try to connect to the host.
# Retry a few times if it fails.
#
while True:
logger.info("Trying to connect to %s (%i/30)", servernode, i)
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
privatekeyfile = os.path.expanduser('~/.ssh/google_compute_engine')
mykey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
ssh.connect(servernode, username=username, pkey=mykey)
logger.info("Connected to %s", servernode)
break
except paramiko.AuthenticationException:
logger.info("Authentication failed when connecting to %s",
servernode)
sys.exit(1)
except:
logger.info("Could not SSH to %s, waiting for it to start",
servernode)
i += 1
time.sleep(2)
# If we could not connect within time limit
if i == 30:
logger.info("Could not connect to %s. Giving up. "
"Please check private key file.", servernode)
ssh.close()
sys.exit(1)
try:
cmd = "sudo docker ps"
# Send the command (non-blocking)
logger.info("command being executed %s:", cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
logger.info(response)
except:
logger.info("Execution of cmd %s failed", cmd)
try:
cmd = "sudo docker stop $(sudo docker ps -q | awk '{print $1}')"
# Send the command (non-blocking)
logger.info("command being executed %s:", cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
server_container_id = stdout.readlines()[0].strip("\n")
logger.info(server_container_id)
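# extract the version tag of the image the old server container was
# running, e.g. '0.37.0' from 'rancher/server:v0.37.0'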
cmd = "sudo docker ps -a | awk ' NR>1 {print $2}' | cut -d \: -f 2" \
" | cut -d \\v -f 2"
# Send the command (non-blocking)
logger.info("command being executed %s:", cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
tag_of_previous_rancher_server = stdout.readlines()[0].strip("\n")
logger.info(tag_of_previous_rancher_server)
cmd = "sudo docker create --volumes-from " + server_container_id + \
" --name rancher-data rancher/server:v"\
+ tag_of_previous_rancher_server
logger.info("command being executed %s:", cmd)
# Send the command (non-blocking)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
logger.info(response)
cmd = "sudo docker pull rancher/server:v" + target
logger.info("command being executed %s:", cmd)
# Send the command (non-blocking)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
logger.info(response)
cmd = "sudo docker run -d --volumes-from rancher-data " \
"--restart=always -p 8080:8080 rancher/server:v" + target
logger.info("command being executed %s:", cmd)
# Send the command (non-blocking)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
logger.info(response)
cmd = "sudo docker ps | awk ' NR>1 {print $2}' | cut -d \: -f 2| " \
"cut -d \\v -f 2"
logger.info("command being executed %s:", cmd)
# Send the command (non-blocking)
stdin, stdout, stderr = ssh.exec_command(cmd)
tag_of_rancher_version_after_upgrade = \
stdout.readlines()[0].strip("\n")
logger.info("tag_of_rancher_version_after_upgrade is: %s",
tag_of_rancher_version_after_upgrade)
cmd = "sudo docker ps | awk ' NR>1 {print $8}' "
logger.info("command being executed %s:", cmd)
# Send the command (non-blocking)
stdin, stdout, stderr = ssh.exec_command(cmd)
state_of_rancher_server_container_after_upgrade = \
stdout.readlines()[0].strip("\n")
logger.info("state_of_rancher_server_container_after_upgrade is: %s",
state_of_rancher_server_container_after_upgrade)
time.sleep(90)
if tag_of_rancher_version_after_upgrade == target and \
state_of_rancher_server_container_after_upgrade == "Up":
server = 'http://' + servernode + ":8080"
if requests.get(server).status_code == 200:
logger.info(
"UPGRADE RANCHER SERVER TO TARGET COMPLETE AND SUCCESSFUL")
except:
logger.info("Execution of cmd %s failed", cmd)
ssh.close()
if __name__ == '__main__':
main()
|
|
from __future__ import absolute_import
import six
from hashlib import sha1
from sentry.utils.compat.mock import patch
from uuid import uuid4
from sentry.models import (
add_group_to_inbox,
Activity,
Commit,
CommitAuthor,
Group,
GroupAssignee,
GroupInbox,
GroupInboxReason,
GroupLink,
GroupStatus,
GroupSubscription,
OrganizationMember,
Release,
Repository,
UserEmail,
UserOption,
)
from sentry.testutils import TestCase
class ResolveGroupResolutionsTest(TestCase):
@patch("sentry.tasks.clear_expired_resolutions.clear_expired_resolutions.delay")
def test_simple(self, mock_delay):
release = Release.objects.create(version="a", organization_id=self.project.organization_id)
release.add_project(self.project)
mock_delay.assert_called_once_with(release_id=release.id)
class ResolvedInCommitTest(TestCase):
def assertResolvedFromCommit(self, group, commit):
assert GroupLink.objects.filter(
group_id=group.id, linked_type=GroupLink.LinkedType.commit, linked_id=commit.id
).exists()
assert Group.objects.filter(
id=group.id, status=GroupStatus.RESOLVED, resolved_at__isnull=False
).exists()
assert not GroupInbox.objects.filter(group=group).exists()
def assertNotResolvedFromCommit(self, group, commit):
assert not GroupLink.objects.filter(
group_id=group.id, linked_type=GroupLink.LinkedType.commit, linked_id=commit.id
).exists()
assert not Group.objects.filter(id=group.id, status=GroupStatus.RESOLVED).exists()
assert GroupInbox.objects.filter(group=group).exists()
# TODO(dcramer): pull out short ID matching and expand regexp tests
def test_simple_no_author(self):
group = self.create_group()
add_group_to_inbox(group, GroupInboxReason.MANUAL)
repo = Repository.objects.create(name="example", organization_id=self.group.organization.id)
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=group.organization.id,
message=u"Foo Biz\n\nFixes {}".format(group.qualified_short_id),
)
self.assertResolvedFromCommit(group, commit)
def test_updating_commit(self):
group = self.create_group()
add_group_to_inbox(group, GroupInboxReason.MANUAL)
repo = Repository.objects.create(name="example", organization_id=self.group.organization.id)
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=group.organization.id,
)
self.assertNotResolvedFromCommit(group, commit)
commit.message = u"Foo Biz\n\nFixes {}".format(group.qualified_short_id)
commit.save()
self.assertResolvedFromCommit(group, commit)
def test_updating_commit_with_existing_grouplink(self):
group = self.create_group()
add_group_to_inbox(group, GroupInboxReason.MANUAL)
repo = Repository.objects.create(name="example", organization_id=self.group.organization.id)
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=group.organization.id,
message=u"Foo Biz\n\nFixes {}".format(group.qualified_short_id),
)
self.assertResolvedFromCommit(group, commit)
commit.message = u"Foo Bar Biz\n\nFixes {}".format(group.qualified_short_id)
commit.save()
self.assertResolvedFromCommit(group, commit)
def test_removes_group_link_when_message_changes(self):
group = self.create_group()
add_group_to_inbox(group, GroupInboxReason.MANUAL)
repo = Repository.objects.create(name="example", organization_id=self.group.organization.id)
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=group.organization.id,
message=u"Foo Biz\n\nFixes {}".format(group.qualified_short_id),
)
self.assertResolvedFromCommit(group, commit)
commit.message = "no groups here"
commit.save()
add_group_to_inbox(group, GroupInboxReason.MANUAL)
self.assertNotResolvedFromCommit(group, commit)
def test_no_matching_group(self):
repo = Repository.objects.create(name="example", organization_id=self.organization.id)
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=self.organization.id,
message=u"Foo Biz\n\nFixes {}-12F".format(self.project.slug.upper()),
)
assert not GroupLink.objects.filter(
linked_type=GroupLink.LinkedType.commit, linked_id=commit.id
).exists()
def test_matching_author_with_assignment(self):
group = self.create_group()
add_group_to_inbox(group, GroupInboxReason.MANUAL)
user = self.create_user(name="Foo Bar", email="[email protected]", is_active=True)
email = UserEmail.get_primary_email(user=user)
email.is_verified = True
email.save()
repo = Repository.objects.create(name="example", organization_id=self.group.organization.id)
OrganizationMember.objects.create(organization=group.project.organization, user=user)
UserOption.objects.set_value(user=user, key="self_assign_issue", value="1")
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
organization_id=group.organization.id,
repository_id=repo.id,
message=u"Foo Biz\n\nFixes {}".format(group.qualified_short_id),
author=CommitAuthor.objects.create(
organization_id=group.organization.id, name=user.name, email=user.email
),
)
self.assertResolvedFromCommit(group, commit)
assert GroupAssignee.objects.filter(group=group, user=user).exists()
assert Activity.objects.filter(
project=group.project, group=group, type=Activity.ASSIGNED, user=user
)[0].data == {
"assignee": six.text_type(user.id),
"assigneeEmail": user.email,
"assigneeType": "user",
}
assert GroupSubscription.objects.filter(group=group, user=user).exists()
def test_matching_author_without_assignment(self):
group = self.create_group()
add_group_to_inbox(group, GroupInboxReason.MANUAL)
user = self.create_user(name="Foo Bar", email="[email protected]", is_active=True)
email = UserEmail.get_primary_email(user=user)
email.is_verified = True
email.save()
repo = Repository.objects.create(name="example", organization_id=self.group.organization.id)
OrganizationMember.objects.create(organization=group.project.organization, user=user)
UserOption.objects.set_value(user=user, key="self_assign_issue", value="0")
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
organization_id=group.organization.id,
repository_id=repo.id,
message=u"Foo Biz\n\nFixes {}".format(group.qualified_short_id),
author=CommitAuthor.objects.create(
organization_id=group.organization.id, name=user.name, email=user.email
),
)
self.assertResolvedFromCommit(group, commit)
assert not Activity.objects.filter(
project=group.project, group=group, type=Activity.ASSIGNED, user=user
).exists()
assert GroupSubscription.objects.filter(group=group, user=user).exists()
|
|
"""Collection for ExperimentSet and ExperimentSetReplicate."""
from pyramid.threadlocal import get_current_request
from pyramid.view import view_config
from snovault import (
calculated_property,
collection,
load_schema,
AfterModified,
BeforeModified
)
from snovault.calculated import calculate_properties
from snovault.validators import (
validate_item_content_post,
validate_item_content_put,
validate_item_content_patch,
validate_item_content_in_place,
no_validate_item_content_post,
no_validate_item_content_put,
no_validate_item_content_patch
)
from snovault.crud_views import (
collection_add,
item_edit,
)
from snovault.util import debug_log
from .base import (
Item,
lab_award_attribution_embed_list,
get_item_or_none
)
from .dependencies import DependencyEmbedder
import datetime
@collection(
name='experiment-sets',
unique_key='accession',
properties={
'title': 'Experiment Sets',
'description': 'Listing Experiment Sets',
})
class ExperimentSet(Item):
"""The experiment set class."""
item_type = 'experiment_set'
schema = load_schema('encoded:schemas/experiment_set.json')
name_key = "accession"
rev = {
'publications_using': ('Publication', 'exp_sets_used_in_pub'),
'publications_produced': ('Publication', 'exp_sets_prod_in_pub'),
}
aggregated_items = {
"badges": [
"messages",
"badge.commendation",
"badge.warning",
"badge.uuid",
"badge.@id",
"badge.badge_icon",
"badge.description"
]
}
embedded_list = Item.embedded_list + lab_award_attribution_embed_list + [
# Badge linkTo
"badges.badge.title",
"badges.badge.commendation",
"badges.badge.warning",
"badges.badge.badge_classification",
"badges.badge.description",
"badges.badge.badge_icon",
"badges.messages",
# Publication linkTo
"produced_in_pub.ID",
"produced_in_pub.title",
"produced_in_pub.url",
"produced_in_pub.abstract",
"produced_in_pub.journal",
"produced_in_pub.authors",
"produced_in_pub.short_attribution",
"produced_in_pub.date_published",
# Publication linkTo
"publications_of_set.ID",
"publications_of_set.title",
"publications_of_set.abstract",
"publications_of_set.journal",
"publications_of_set.authors",
"publications_of_set.date_published",
# Experiment linkTo
"experiments_in_set.@type",
"experiments_in_set.accession",
"experiments_in_set.status",
"experiments_in_set.dbxrefs",
"experiments_in_set.external_references.*",
# ExperimentType linkTo
"experiments_in_set.experiment_type.title",
"experiments_in_set.experiment_type.display_title",
"experiments_in_set.experiment_type.assay_classification",
"experiments_in_set.experiment_type.assay_subclassification",
"experiments_in_set.experiment_type.assay_subclass_short",
"experiments_in_set.experiment_type.experiment_category",
"experiments_in_set.experiment_type.other_tags",
# object field
"experiments_in_set.experiment_categorizer.field",
"experiments_in_set.experiment_categorizer.value",
"experiments_in_set.experiment_categorizer.combined",
# Badges linkTo
"experiments_in_set.badges.badge.title",
"experiments_in_set.badges.badge.commendation",
"experiments_in_set.badges.badge.warning",
"experiments_in_set.badges.badge.badge_classification",
"experiments_in_set.badges.badge.badge_icon",
"experiments_in_set.badges.badge.description",
"experiments_in_set.badges.messages",
# Biosample linkTo
"experiments_in_set.biosample.accession",
"experiments_in_set.biosample.treatments_summary", # calc prop covered by display title
"experiments_in_set.biosample.modifications_summary", # calc prop covered by display_title
"experiments_in_set.biosample.biosource_summary", # requires additional embedding
"experiments_in_set.biosample.biosample_type",
"experiments_in_set.biosample.biosample_category",
"experiments_in_set.biosample.cell_culture_details.in_vitro_differentiated", # needed for biosource_summary calc prop
# XXX: this field needs to be refactored to work with invalidation scope -Will
"experiments_in_set.biosample.tissue_organ_info.organ_system",
"experiments_in_set.biosample.tissue_organ_info.tissue_source",
# Biosource linkTo
"experiments_in_set.biosample.biosource.biosource_type",
"experiments_in_set.biosample.biosource.cell_line_tier",
"experiments_in_set.biosample.biosource.override_biosource_name",
"experiments_in_set.biosample.biosource.override_organism_name", # do we need this?
# OntologyTerm linkTo
"experiments_in_set.biosample.biosource.cell_line.term_id",
"experiments_in_set.biosample.biosource.cell_line.term_name",
"experiments_in_set.biosample.biosource.cell_line.preferred_name",
"experiments_in_set.biosample.biosource.cell_line.slim_terms",
"experiments_in_set.biosample.biosource.cell_line.synonyms",
# OntologyTerm linkTo
"experiments_in_set.biosample.biosource.tissue.term_id",
"experiments_in_set.biosample.biosource.tissue.term_name",
"experiments_in_set.biosample.biosource.tissue.preferred_name",
"experiments_in_set.biosample.biosource.tissue.slim_terms",
"experiments_in_set.biosample.biosource.tissue.synonyms",
# Organism linkTo
"experiments_in_set.biosample.biosource.organism.name", # calc prop
"experiments_in_set.biosample.biosource.organism.scientific_name",
# OntologyTerm linkTo
"experiments_in_set.biosample.cell_culture_details.tissue.preferred_name",
"experiments_in_set.biosample.cell_culture_details.tissue.term_id",
"experiments_in_set.biosample.cell_culture_details.tissue.term_name",
"experiments_in_set.biosample.cell_culture_details.tissue.slim_terms",
"experiments_in_set.biosample.cell_culture_details.tissue.synonyms",
# Modification linkTo
"experiments_in_set.biosample.modifications.modification_name", # calc prop
"experiments_in_set.biosample.modifications.modification_type",
"experiments_in_set.biosample.modifications.genomic_change",
"experiments_in_set.biosample.modifications.override_modification_name",
# BioFeature linkTo
"experiments_in_set.biosample.modifications.target_of_mod.feature_type",
"experiments_in_set.biosample.modifications.target_of_mod.preferred_label",
"experiments_in_set.biosample.modifications.target_of_mod.cellular_structure",
"experiments_in_set.biosample.modifications.target_of_mod.organism_name",
"experiments_in_set.biosample.modifications.target_of_mod.relevant_genes",
"experiments_in_set.biosample.modifications.target_of_mod.feature_mods",
"experiments_in_set.biosample.modifications.target_of_mod.genome_location",
# Treatment linkTo
"experiments_in_set.biosample.treatments.treatment_type",
"experiments_in_set.biosample.treatments.description",
"experiments_in_set.biosample.treatments.chemical",
"experiments_in_set.biosample.treatments.biological_agent",
"experiments_in_set.biosample.treatments.duration",
"experiments_in_set.biosample.treatments.duration_units",
"experiments_in_set.biosample.treatments.concentration",
"experiments_in_set.biosample.treatments.concentration_units",
"experiments_in_set.biosample.treatments.temperature",
# Construct linkTo
"experiments_in_set.biosample.treatments.constructs.name",
# Badges linkTo
"experiments_in_set.biosample.badges.badge.title",
"experiments_in_set.biosample.badges.badge.commendation",
"experiments_in_set.biosample.badges.badge.warning",
"experiments_in_set.biosample.badges.badge.badge_classification",
"experiments_in_set.biosample.badges.badge.badge_icon",
"experiments_in_set.biosample.badges.badge.description",
"experiments_in_set.biosample.badges.messages",
# Enzyme linkTo
"experiments_in_set.digestion_enzyme.name",
# File linkTo
"experiments_in_set.filesets.files_in_set.accession",
# Last modified
"experiments_in_set.last_modified.date_modified",
# Files - For common embeds (href, file_format, etc.) we could programmatically get rid of a bunch of similar lines - e.g.:
# for f in ['href', 'accession', 'file_size', ...]:
# ExperimentSet.embedded_list.append("experiments_in_set.files." + f)
# ExperimentSet.embedded_list.append("experiments_in_set.processed_files." + f) ...
# File linkTo
"experiments_in_set.files",
"experiments_in_set.files.href",
"experiments_in_set.files.accession",
"experiments_in_set.files.uuid",
"experiments_in_set.files.file_size",
"experiments_in_set.files.upload_key",
"experiments_in_set.files.md5sum",
"experiments_in_set.files.file_type",
"experiments_in_set.files.file_type_detailed",
"experiments_in_set.files.file_classification",
"experiments_in_set.files.paired_end",
"experiments_in_set.files.status",
"experiments_in_set.files.notes_to_tsv",
"experiments_in_set.files.dbxrefs",
"experiments_in_set.files.external_references.*",
"experiments_in_set.files.open_data_url",
"experiments_in_set.files.contributing_labs.display_title",
"experiments_in_set.files.lab.display_title",
"experiments_in_set.files.track_and_facet_info.*",
# MicroscopeConfiguration linkTo
"experiments_in_set.files.microscope_configuration.title",
"experiments_in_set.files.microscope_configuration.microscope.Name",
# FileFormat linkTo
"experiments_in_set.files.file_format.file_format",
# File linkTo
"experiments_in_set.files.extra_files",
"experiments_in_set.files.extra_files.href",
"experiments_in_set.files.extra_files.file_size",
"experiments_in_set.files.extra_files.md5sum",
"experiments_in_set.files.extra_files.use_for",
# FileFormat linkTo
"experiments_in_set.files.extra_files.file_format.file_format",
# QualityMetric linkTo
"experiments_in_set.files.quality_metric.Total Sequences",
"experiments_in_set.files.quality_metric.Sequence length",
"experiments_in_set.files.quality_metric.url",
"experiments_in_set.files.quality_metric.overall_quality_status",
"experiments_in_set.files.quality_metric.quality_metric_summary.*", # This may not yet be enabled on raw files.
# Badge linkTo
"experiments_in_set.files.badges.badge.title",
"experiments_in_set.files.badges.badge.commendation",
"experiments_in_set.files.badges.badge.warning",
"experiments_in_set.files.badges.badge.badge_classification",
"experiments_in_set.files.badges.badge.badge_icon",
"experiments_in_set.files.badges.badge.description",
"experiments_in_set.files.badges.messages",
# Lab linkTos
"experiments_in_set.files.contributing_labs.name",
"experiments_in_set.files.lab.name",
"processed_files.lab.name",
"processed_files.contributing_labs.name",
"experiments_in_set.files.related_files.relationship_type",
# File linkTo
"experiments_in_set.files.related_files.file.accession",
"experiments_in_set.files.related_files.file.paired_end",
"experiments_in_set.files.related_files.file.file_type",
# ProcessedFile linkTo
"processed_files.href",
"processed_files.accession",
"processed_files.uuid",
"processed_files.file_size",
"processed_files.upload_key",
"processed_files.file_format",
"processed_files.file_classification",
"processed_files.file_type",
"processed_files.file_type_detailed",
"processed_files.status",
"processed_files.external_references.*",
"processed_files.md5sum",
"processed_files.higlass_uid",
"processed_files.genome_assembly",
"processed_files.notes_to_tsv",
# File linkTo
"processed_files.extra_files.href",
"processed_files.extra_files.file_size",
"processed_files.extra_files.md5sum",
"processed_files.extra_files.use_for",
# FileFormat linkTo
"processed_files.extra_files.file_format.file_format",
"processed_files.last_modified.date_modified",
# StaticSection linkTo
"processed_files.static_content.location",
"processed_files.static_content.description",
"processed_files.static_content.content.@type",
# "processed_files.quality_metric.Total reads",
# "processed_files.quality_metric.Total Sequences",
# "processed_files.quality_metric.Sequence length",
"processed_files.quality_metric.url",
"processed_files.quality_metric.overall_quality_status",
"processed_files.quality_metric.quality_metric_summary.*",
"processed_files.quality_metric.Total reads",
"processed_files.quality_metric.qc_list.value.Total reads",
"processed_files.quality_metric.quality_metric_summary.*",
"processed_files.notes_to_tsv",
"processed_files.open_data_url",
"processed_files.track_and_facet_info.*",
# FileProcessed linkTo
"experiments_in_set.processed_files.href",
"experiments_in_set.processed_files.accession",
"experiments_in_set.processed_files.uuid",
"experiments_in_set.processed_files.file_size",
"experiments_in_set.processed_files.upload_key",
"experiments_in_set.processed_files.file_format",
"experiments_in_set.processed_files.file_classification",
"experiments_in_set.processed_files.file_type",
"experiments_in_set.processed_files.file_type_detailed",
"experiments_in_set.processed_files.status",
"experiments_in_set.processed_files.external_references.*",
"experiments_in_set.processed_files.md5sum",
"experiments_in_set.processed_files.higlass_uid",
"experiments_in_set.processed_files.genome_assembly",
"experiments_in_set.processed_files.notes_to_tsv",
# File linkTo
"experiments_in_set.processed_files.extra_files.href",
"experiments_in_set.processed_files.extra_files.file_size",
"experiments_in_set.processed_files.extra_files.md5sum",
"experiments_in_set.processed_files.extra_files.use_for",
# FileFormat linkTo
"experiments_in_set.processed_files.extra_files.file_format.file_format",
# QualityMetric linkTo
"experiments_in_set.processed_files.quality_metric.url",
"experiments_in_set.processed_files.quality_metric.overall_quality_status",
"experiments_in_set.processed_files.quality_metric.quality_metric_summary.*",
"experiments_in_set.processed_files.quality_metric.Total reads",
"experiments_in_set.processed_files.quality_metric.qc_list.value.Total reads",
# File linkTo
"experiments_in_set.processed_files.related_files.relationship_type",
"experiments_in_set.processed_files.related_files.file.accession",
"experiments_in_set.processed_files.related_files.file.file_type",
# StaticSection linkTo
"experiments_in_set.processed_files.static_content.location",
"experiments_in_set.processed_files.static_content.description",
"experiments_in_set.processed_files.static_content.content.@type", # Should only pull in @id, uuid, & display_title
"experiments_in_set.processed_files.last_modified.date_modified",
"experiments_in_set.processed_files.contributing_labs.name",
"experiments_in_set.processed_files.lab.name",
"experiments_in_set.processed_files.notes_to_tsv",
"experiments_in_set.processed_files.open_data_url",
"experiments_in_set.processed_files.contributing_labs.display_title",
"experiments_in_set.processed_files.lab.display_title",
"experiments_in_set.processed_files.track_and_facet_info.*",
"other_processed_files.files.accession",
"other_processed_files.files.file_type_detailed",
"other_processed_files.files.file_format",
"other_processed_files.files.file_size",
"other_processed_files.files.higlass_uid",
"other_processed_files.files.genome_assembly",
"other_processed_files.files.href",
"other_processed_files.files.status",
"other_processed_files.files.md5sum",
"other_processed_files.files.open_data_url",
"other_processed_files.files.contributing_labs.display_title",
"other_processed_files.files.lab.display_title",
"other_processed_files.files.track_and_facet_info.*",
"other_processed_files.files.last_modified.date_modified",
"other_processed_files.files.quality_metric.url",
"other_processed_files.files.quality_metric.overall_quality_status",
"other_processed_files.files.quality_metric.quality_metric_summary.*",
"other_processed_files.files.notes_to_tsv",
# Lab linkTo
"other_processed_files.files.contributing_labs.name",
"other_processed_files.files.lab.name",
# higlass view config linkTO
"other_processed_files.higlass_view_config.description",
"other_processed_files.higlass_view_config.last_modified.date_modified",
"experiments_in_set.other_processed_files.files.href",
"experiments_in_set.other_processed_files.title",
"experiments_in_set.other_processed_files.description",
"experiments_in_set.other_processed_files.type",
"experiments_in_set.other_processed_files.files.accession",
"experiments_in_set.other_processed_files.files.file_type_detailed",
"experiments_in_set.other_processed_files.files.file_size",
"experiments_in_set.other_processed_files.files.higlass_uid",
"experiments_in_set.other_processed_files.files.genome_assembly",
"experiments_in_set.other_processed_files.files.status",
"experiments_in_set.other_processed_files.files.md5sum",
"experiments_in_set.other_processed_files.files.last_modified.date_modified",
"experiments_in_set.other_processed_files.files.quality_metric.url",
"experiments_in_set.other_processed_files.files.quality_metric.overall_quality_status",
"experiments_in_set.other_processed_files.files.quality_metric.quality_metric_summary.*",
"experiments_in_set.other_processed_files.files.notes_to_tsv",
"experiments_in_set.other_processed_files.files.open_data_url",
"experiments_in_set.other_processed_files.files.contributing_labs.display_title",
"experiments_in_set.other_processed_files.files.lab.display_title",
"experiments_in_set.other_processed_files.files.track_and_facet_info.*",
# FileFormat linkTo
"experiments_in_set.other_processed_files.files.file_format.file_format",
# Lab linkTo
"experiments_in_set.other_processed_files.files.contributing_labs.name",
"experiments_in_set.other_processed_files.files.lab.name",
# File linkTo
"experiments_in_set.reference_files.accession",
"experiments_in_set.reference_files.file_classification",
"experiments_in_set.reference_files.file_type_detailed",
"experiments_in_set.reference_files.file_type",
"experiments_in_set.reference_files.file_size",
"experiments_in_set.reference_files.href",
"experiments_in_set.reference_files.status",
"experiments_in_set.reference_files.md5sum",
"experiments_in_set.reference_files.lab.name",
"experiments_in_set.reference_files.contributing_labs.name",
# FileFormat linkTo
"experiments_in_set.reference_files.file_format.file_format",
'sample_image.caption',
'sample_image.microscopy_file.accession',
'sample_image.microscopy_file.omerolink',
'sample_image.attachment.href',
'sample_image.attachment.type',
'sample_image.attachment.md5sum',
'sample_image.attachment.download',
'sample_image.attachment.width',
'sample_image.attachment.height'
]
@calculated_property(schema={
"title": "Produced in Publication",
"description": "The Publication in which this Experiment Set was produced.",
"type": "string",
"linkTo": "Publication"
})
def produced_in_pub(self, request):
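# return the @id of the most recent publication that produced this set,
# preferring date_released and falling back to date_created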
pub_paths = self.rev_link_atids(request, 'publications_produced')
pubs = [request.embed('/', path, '@@object') for path in pub_paths]
if pubs:
return sorted(pubs, key=lambda pub: pub.get('date_released', pub['date_created']),
reverse=True)[0].get('@id')
@calculated_property(schema={
"title": "Publications Using",
"description": "Publications using this Experiment Set",
"type": "array",
"items": {
"title": "Publication",
"type": "string",
"linkTo": "Publication"
}
})
def pubs_using(self, request):
return self.rev_link_atids(request, 'publications_using')
@calculated_property(schema={
"title": "Publications",
"description": "Publications associated with this Experiment Set.",
"type": "array",
"items": {
"title": "Publication",
"type": "string",
"linkTo": "Publication"
}
})
def publications_of_set(self, request):
pubs_produced = self.rev_link_atids(request, 'publications_produced')
pubs_using = self.rev_link_atids(request, 'publications_using')
return list(set(pubs_produced + pubs_using))
@calculated_property(schema={
"title": "Number of Experiments",
"description": "The number of Experiments in this Experiment Set.",
"type": "integer"
})
def number_of_experiments(self, request, experiments_in_set=None):
if experiments_in_set:
return len(experiments_in_set)
def _build_experiment_set_replicate_embedded_list():
""" Helper function intended to be used to create the embedded list for Replicate Experiment Sets.
All types should implement a function like this going forward.
"""
imaging_path_embeds = DependencyEmbedder.embed_for_type(
base_path='imaging_paths.path',
t='imaging_path',
additional_embeds=['imaging_rounds', 'experiment_type.title'])
imaging_path_target_embeds = DependencyEmbedder.embed_defaults_for_type(
base_path='imaging_paths.path.target',
t='bio_feature')
return (
ExperimentSet.embedded_list + imaging_path_embeds + imaging_path_target_embeds + [
'replicate_exps.replicate_exp.accession',
]
)
@collection(
name='experiment-set-replicates',
unique_key='accession',
properties={
'title': 'Replicate Experiment Sets',
'description': 'Experiment set covering biological and technical experiments',
})
class ExperimentSetReplicate(ExperimentSet):
"""The experiment set class for replicate experiments."""
base_types = ['ExperimentSet'] + Item.base_types
item_type = 'experiment_set_replicate'
schema = load_schema('encoded:schemas/experiment_set_replicate.json')
name_key = "accession"
embedded_list = _build_experiment_set_replicate_embedded_list()
def _update(self, properties, sheets=None):
all_experiments = [exp['replicate_exp'] for exp in properties.get('replicate_exps', [])]
properties['experiments_in_set'] = all_experiments
super(ExperimentSetReplicate, self)._update(properties, sheets)
@calculated_property(schema={
"title": "Imaging Paths",
"type": "array",
"items": {
"title": "Imaging path",
"type": "object",
"properties": {
"path": {
"title": "Imaging Path",
"type": "string",
"linkTo": "ImagingPath"
},
"channel": {
"title": "Imaging channnel",
"description": "channel info, ie. ch01, ch02...",
"type": "string",
"pattern": "^(ch\\d\\d)$"
}
}
}
})
def imaging_paths(self, request, experiments_in_set=None):
if not experiments_in_set:
return None
# We presume all experiments in a set have the exact same imaging paths,
# so we grab them from the first experiment. If that is not the case, it is
# a data issue; perhaps we should have a foursight check to assert this?
first_experiment_id = experiments_in_set[0] # replicate_exps[0]['replicate_exp']
if '/experiments-mic/' not in first_experiment_id:
# We only need to check Microscopy Experiments
return None
first_experiment_obj = get_item_or_none(request, first_experiment_id, frame='raw')
if not first_experiment_obj: # Not yet in DB?
return None
return first_experiment_obj.get('imaging_paths')
class Collection(Item.Collection):
pass
def validate_experiment_set_replicate_experiments(context, request):
'''
Validates that each replicate_exps.replicate_exp in context (ExperimentSetReplicate Item)
is unique within the ExperimentSetReplicate.
'''
data = request.json
replicate_exp_objects = data.get('replicate_exps', [])
have_seen_exps = set()
any_failures = False
for replicate_idx, replicate_exp_object in enumerate(replicate_exp_objects):
experiment = replicate_exp_object.get('replicate_exp')
if experiment in have_seen_exps:
request.errors.add(
'body', 'ExperimentSet: non-unique exps',
'Duplicate experiment "' + experiment + '" defined in replicate_exps[' + str(replicate_idx) + ']'
)
any_failures = True
continue
have_seen_exps.add(experiment)
if not any_failures:
request.validated.update({})
@view_config(context=ExperimentSetReplicate.Collection, permission='add', request_method='POST',
validators=[validate_item_content_post, validate_experiment_set_replicate_experiments])
@view_config(context=ExperimentSetReplicate.Collection, permission='add_unvalidated',
request_method='POST', validators=[no_validate_item_content_post],
request_param=['validate=false'])
@debug_log
def experiment_set_replicate_add(context, request, render=None):
return collection_add(context, request, render)
@view_config(context=ExperimentSetReplicate, permission='edit', request_method='PUT',
validators=[validate_item_content_put, validate_experiment_set_replicate_experiments])
@view_config(context=ExperimentSetReplicate, permission='edit', request_method='PATCH',
validators=[validate_item_content_patch, validate_experiment_set_replicate_experiments])
@view_config(context=ExperimentSetReplicate, permission='edit_unvalidated', request_method='PUT',
validators=[no_validate_item_content_put],
request_param=['validate=false'])
@view_config(context=ExperimentSetReplicate, permission='edit_unvalidated', request_method='PATCH',
validators=[no_validate_item_content_patch],
request_param=['validate=false'])
@view_config(context=ExperimentSetReplicate, permission='index', request_method='GET',
validators=[validate_item_content_in_place, validate_experiment_set_replicate_experiments],
request_param=['check_only=true'])
@debug_log
def experiment_set_replicate_edit(context, request, render=None):
return item_edit(context, request, render)
|
|
from __future__ import print_function
import gc
import sys
import time
import unittest
from nose.plugins.skip import SkipTest
import numpy
from six import itervalues
from theano import function
from theano.gof import vm
from theano.gof import OpWiseCLinker
from six.moves import xrange
from theano.compile import Mode
from theano import tensor
from theano.ifelse import ifelse
import theano
class TestCallbacks(unittest.TestCase):
"""
Test the VM_Linker's callback argument, which can be useful for debugging.
"""
def setUp(self):
self.n_callbacks = {}
def callback(self, node, thunk, storage_map, compute_map):
key = node.op.__class__.__name__
self.n_callbacks.setdefault(key, 0)
self.n_callbacks[key] += 1
def test_callback(self):
a, b, c = tensor.scalars('abc')
f = function([a, b, c], (a + b) + c,
mode=Mode(
optimizer=None,
linker=vm.VM_Linker(callback=self.callback)))
f(1, 2, 3)
assert sum(self.n_callbacks.values()) == len(f.maker.fgraph.toposort())
f(1, 2, 3)
assert (sum(self.n_callbacks.values()) ==
len(f.maker.fgraph.toposort()) * 2)
def test_callback_with_ifelse(self):
a, b, c = tensor.scalars('abc')
f = function([a, b, c], ifelse(a, 2 * b, 2 * c),
mode=Mode(
optimizer=None,
linker=vm.VM_Linker(callback=self.callback)))
f(1, 2, 3)
assert self.n_callbacks['IfElse'] == 2
def test_c_thunks():
a = tensor.scalars('a')
b, c = tensor.vectors('bc')
cases = [False]
if theano.config.cxx:
cases.append(True)
for c_thunks in cases:
f = function([a, b, c], ifelse(a, a * b, b * c),
mode=Mode(
optimizer=None,
linker=vm.VM_Linker(c_thunks=c_thunks,
use_cloop=False)))
f(1, [2], [3, 2])
from nose.tools import assert_raises
assert_raises(ValueError, f, 0, [2], [3, 4])
assert any([hasattr(t, 'cthunk') for t in f.fn.thunks]) == c_thunks
def test_speed():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
def build_graph(x, depth=5):
z = x
for d in range(depth):
z = (z + z)
return z
def numpy_version(x, depth):
z = x
for d in xrange(depth):
z = (z + z)
return z
def time_numpy():
steps_a = 5
steps_b = 100
x = numpy.asarray([2.0, 3.0], dtype=theano.config.floatX)
numpy_version(x, steps_a)
t0 = time.time()
numpy_version(x, steps_a)  # run the computation being timed
t1 = time.time()
t2 = time.time()
numpy_version(x, steps_b)  # run the computation being timed
t3 = time.time()
t_a = t1 - t0
t_b = t3 - t2
print("%s takes %f s/Kop" % (
'numpy',
(1000 * (t_b - t_a) / (steps_b - steps_a))))
def time_linker(name, linker):
steps_a = 5
steps_b = 100
x = tensor.vector()
a = build_graph(x, steps_a)
b = build_graph(x, steps_b)
f_a = function([x], a,
mode=Mode(optimizer=None, linker=linker()))
f_b = function([x], b,
mode=Mode(optimizer=None, linker=linker()))
f_a([2.0, 3.0])
t0 = time.time()
f_a([2.0, 3.0])
t1 = time.time()
f_b([2.0, 3.0])
t2 = time.time()
f_b([2.0, 3.0])
t3 = time.time()
t_a = t1 - t0
t_b = t3 - t2
print("%s takes %f s/Kop" % (
name,
(1000 * (t_b - t_a) / (steps_b - steps_a))))
time_linker('c|py', OpWiseCLinker)
time_linker('vmLinker', vm.VM_Linker)
time_linker('vmLinker_nogc', lambda: vm.VM_Linker(allow_gc=False))
if theano.config.cxx:
time_linker('vmLinker_CLOOP', lambda: vm.VM_Linker(allow_gc=False,
use_cloop=True))
time_numpy()
def test_speed_lazy():
def build_graph(x, depth=5):
z = x
for d in range(depth):
z = ifelse(z[0] > 0, -z, z)
return z
def time_linker(name, linker):
steps_a = 10
steps_b = 100
x = tensor.vector()
a = build_graph(x, steps_a)
b = build_graph(x, steps_b)
f_a = function([x], a,
mode=Mode(optimizer=None,
linker=linker()))
f_b = function([x], b,
mode=Mode(optimizer=None,
linker=linker()))
f_a([2.0])
t0 = time.time()
f_a([2.0])
t1 = time.time()
f_b([2.0])
t2 = time.time()
f_b([2.0])
t3 = time.time()
t_a = t1 - t0
t_b = t3 - t2
print("%s takes %f s/Kop" % (
name,
(1000 * (t_b - t_a) / (steps_b - steps_a))))
time_linker('vmLinker', vm.VM_Linker)
time_linker('vmLinker_nogc', lambda: vm.VM_Linker(allow_gc=False))
if theano.config.cxx:
time_linker('vmLinker_C', lambda: vm.VM_Linker(allow_gc=False,
use_cloop=True))
def test_allow_gc_cvm():
mode = theano.config.mode
if mode in ['DEBUG_MODE', 'DebugMode']:
mode = "FAST_RUN"
v = theano.tensor.vector()
f = theano.function([v], v + 1, mode=mode)
f([1])
n = list(f.maker.fgraph.apply_nodes)[0].outputs[0]
assert f.fn.storage_map[n][0] is None
assert f.fn.allow_gc is True
f.fn.allow_gc = False
assert f.fn.allow_gc is False
f([1])
assert f.fn.storage_map[n][0] is not None
f.fn.allow_gc = True
assert f.fn.allow_gc is True
f([1])
assert f.fn.storage_map[n][0] is None
run_memory_usage_tests = False
if run_memory_usage_tests:
# these are not normal unit tests, do not run them as part of standard
# suite. I ran them while looking at top, and stopped when memory usage
# was stable.
def test_leak2():
import theano.sandbox.cuda as cuda
for i in xrange(1000000):
n = numpy.asarray([2.3, 4.5], dtype='f')
c = sys.getrefcount(n)
a = cuda.CudaNdarray(n)
a.sum()
assert c == sys.getrefcount(n)
# This is to confuse flake8
a = a
del a
if not i % 1000:
print('.', end=' ')
print(gc.collect(), end=' ')
print(gc.collect())
sys.stdout.flush()
def test_no_leak_many_graphs():
# Verify no memory leaks when creating and deleting a lot of functions
# This isn't really a unit test, you have to run it and look at top to
# see if there's a leak
for i in xrange(10000):
x = tensor.vector()
z = x
for d in range(10):
z = tensor.sin(-z + 1)
f = function([x], z, mode=Mode(optimizer=None, linker='cvm'))
if not i % 100:
print(gc.collect())
sys.stdout.flush()
gc.collect()
if 1:
f([2.0])
f([3.0])
f([4.0])
f([5.0])
def test_no_leak_many_call_lazy():
# Verify no memory leaks when calling a function a lot of times
# This isn't really a unit test, you have to run it and look at top to
# see if there's a leak
def build_graph(x, depth=5):
z = x
for d in range(depth):
z = ifelse(z.mean() > 0.5, -z, z)
return z
def time_linker(name, linker):
steps_a = 10
x = tensor.dvector()
a = build_graph(x, steps_a)
f_a = function([x], a,
mode=Mode(optimizer=None,
linker=linker()))
inp = numpy.random.rand(1000000)
for i in xrange(100):
f_a(inp)
if 0: # this doesn't seem to work, prints 0 for everything
import resource
pre = resource.getrusage(resource.RUSAGE_SELF)
post = resource.getrusage(resource.RUSAGE_SELF)
print(pre.ru_ixrss, post.ru_ixrss)
print(pre.ru_idrss, post.ru_idrss)
print(pre.ru_maxrss, post.ru_maxrss)
print(1)
time_linker('vmLinker_C',
lambda: vm.VM_Linker(allow_gc=False, use_cloop=True))
print(2)
time_linker('vmLinker',
lambda: vm.VM_Linker(allow_gc=False, use_cloop=False))
def test_no_leak_many_call_nonlazy():
# Verify no memory leaks when calling a function a lot of times
# This isn't really a unit test, you have to run it and look at top to
# see if there's a leak.
def build_graph(x, depth=5):
z = x
for d in range(depth):
z = tensor.sin(-z + 1)
return z
def time_linker(name, linker):
steps_a = 10
x = tensor.dvector()
a = build_graph(x, steps_a)
f_a = function([x], a,
mode=Mode(optimizer=None,
linker=linker()))
inp = numpy.random.rand(1000000)
for i in xrange(500):
f_a(inp)
print(1)
time_linker('vmLinker_C',
lambda: vm.VM_Linker(allow_gc=False, use_cloop=True))
print(2)
time_linker('vmLinker',
lambda: vm.VM_Linker(allow_gc=False, use_cloop=False))
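# RunOnce is a small helper op for test_vm_gc below: it copies its input but
# asserts that perform() runs at most once, so the test can detect a VM that
# wrongly re-executes a thunk whose output is reused.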
class RunOnce(theano.Op):
__props__ = ("nb_run",)
def __init__(self):
self.nb_run = 0
def make_node(self, x):
return theano.Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
assert self.nb_run == 0
self.nb_run += 1
outputs[0][0] = inputs[0].copy()
def test_vm_gc():
"""This already caused a bug in the trunk of Theano.
The bug was introduced in the trunk on July 5th, 2012 and fixed on
July 30th.
"""
x = theano.tensor.vector()
p = RunOnce()(x)
mode = theano.Mode(linker=theano.gof.vm.VM_Linker(lazy=True))
f = theano.function([theano.In(x, mutable=True)], [p + 1, p + 2],
mode=mode)
f([1, 2, 3])
p = RunOnce()(x)
pp = p + p
f = theano.function([x], [pp + pp],
mode=mode)
f([1, 2, 3])
def test_reallocation():
x = tensor.scalar('x')
y = tensor.scalar('y')
z = tensor.tanh(3 * x + y) + tensor.cosh(x + 5 * y)
# The functionality is currently implemented only for the non-lazy, non-C VM.
for l in [vm.VM_Linker(allow_gc=False, lazy=False, use_cloop=False),
vm.VM_Linker(allow_gc=True, lazy=False, use_cloop=False)]:
m = theano.compile.get_mode(theano.Mode(linker=l))
m = m.excluding('fusion', 'inplace')
f = theano.function([x, y], z, name="test_reduce_memory",
mode=m)
output = f(1, 2)
assert output
storage_map = f.fn.storage_map
def check_storage(storage_map):
from theano.tensor.var import TensorConstant
for i in storage_map:
if not isinstance(i, TensorConstant):
keys_copy = list(storage_map.keys())[:]
keys_copy.remove(i)
for o in keys_copy:
if (storage_map[i][0] and
storage_map[i][0] is storage_map[o][0]):
return [True, storage_map[o][0]]
return [False, None]
assert check_storage(storage_map)[0]
assert len(set(id(v) for v in
itervalues(storage_map))) < len(storage_map)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Functional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.autograph.core import ag_ctx as autograph_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import lazy_loader
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
np_arrays = lazy_loader.LazyLoader(
"np_arrays", globals(),
"tensorflow.python.ops.numpy_ops.np_arrays")
@tf_export(v1=["map_fn"])
@deprecation.deprecated_args(None, "Use fn_output_signature instead", "dtype")
def map_fn(fn,
elems,
dtype=None,
parallel_iterations=None,
back_prop=True,
swap_memory=False,
infer_shape=True,
name=None,
fn_output_signature=None):
"""Transforms `elems` by applying `fn` to each element unstacked on axis 0.
See also `tf.scan`.
`map_fn` unstacks `elems` on axis 0 to obtain a sequence of elements;
calls `fn` to transform each element; and then stacks the transformed
values back together.
#### Mapping functions with single-Tensor inputs and outputs
If `elems` is a single tensor and `fn`'s signature is `tf.Tensor->tf.Tensor`,
then `map_fn(fn, elems)` is equivalent to
`tf.stack([fn(elem) for elem in tf.unstack(elems)])`. E.g.:
>>> tf.map_fn(fn=lambda t: tf.range(t, t + 3), elems=tf.constant([3, 5, 2]))
<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[3, 4, 5],
[5, 6, 7],
[2, 3, 4]], dtype=int32)>
`map_fn(fn, elems).shape = [elems.shape[0]] + fn(elems[0]).shape`.
#### Mapping functions with multi-arity inputs and outputs
`map_fn` also supports functions with multi-arity inputs and outputs:
* If `elems` is a tuple (or nested structure) of tensors, then those tensors
must all have the same outer-dimension size (`num_elems`); and `fn` is
used to transform each tuple (or structure) of corresponding slices from
`elems`. E.g., if `elems` is a tuple `(t1, t2, t3)`, then `fn` is used to
transform each tuple of slices `(t1[i], t2[i], t3[i])`
(where `0 <= i < num_elems`).
* If `fn` returns a tuple (or nested structure) of tensors, then the
result is formed by stacking corresponding elements from those structures.
#### Specifying `fn`'s output signature
If `fn`'s input and output signatures are different, then the output
signature must be specified using `fn_output_signature`. (The input and
output signatures differ if their structures, dtypes, or tensor types do
not match.) E.g.:
>>> tf.map_fn(fn=tf.strings.length, # input & output have different dtypes
... elems=tf.constant(["hello", "moon"]),
... fn_output_signature=tf.int32)
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([5, 4], dtype=int32)>
>>> tf.map_fn(fn=tf.strings.join, # input & output have different structures
... elems=[tf.constant(['The', 'A']), tf.constant(['Dog', 'Cat'])],
... fn_output_signature=tf.string)
<tf.Tensor: shape=(2,), dtype=string,
numpy=array([b'TheDog', b'ACat'], dtype=object)>
`fn_output_signature` can be specified using any of the following:
* A `tf.DType` or `tf.TensorSpec` (to describe a `tf.Tensor`)
* A `tf.RaggedTensorSpec` (to describe a `tf.RaggedTensor`)
* A `tf.SparseTensorSpec` (to describe a `tf.sparse.SparseTensor`)
* A (possibly nested) tuple, list, or dict containing the above types.
#### RaggedTensors
`map_fn` supports `tf.RaggedTensor` inputs and outputs. In particular:
* If `elems` is a `RaggedTensor`, then `fn` will be called with each
row of that ragged tensor.
* If `elems` has only one ragged dimension, then the values passed to
`fn` will be `tf.Tensor`s.
* If `elems` has multiple ragged dimensions, then the values passed to
`fn` will be `tf.RaggedTensor`s with one fewer ragged dimension.
* If the result of `map_fn` should be a `RaggedTensor`, then use a
`tf.RaggedTensorSpec` to specify `fn_output_signature`.
* If `fn` returns `tf.Tensor`s with varying sizes, then use a
`tf.RaggedTensorSpec` with `ragged_rank=0` to combine them into a
single ragged tensor (which will have ragged_rank=1).
* If `fn` returns `tf.RaggedTensor`s, then use a `tf.RaggedTensorSpec`
with the same `ragged_rank`.
>>> # Example: RaggedTensor input
>>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]])
>>> tf.map_fn(tf.reduce_sum, rt, fn_output_signature=tf.int32)
<tf.Tensor: shape=(4,), dtype=int32, numpy=array([6, 0, 9, 6], dtype=int32)>
>>> # Example: RaggedTensor output
>>> elems = tf.constant([3, 5, 0, 2])
>>> tf.map_fn(tf.range, elems,
... fn_output_signature=tf.RaggedTensorSpec(shape=[None],
... dtype=tf.int32))
<tf.RaggedTensor [[0, 1, 2], [0, 1, 2, 3, 4], [], [0, 1]]>
Note: `map_fn` should only be used if you need to map a function over the
*rows* of a `RaggedTensor`. If you wish to map a function over the
individual values, then you should use:
* `tf.ragged.map_flat_values(fn, rt)`
(if fn is expressible as TensorFlow ops)
* `rt.with_flat_values(map_fn(fn, rt.flat_values))`
(otherwise)
E.g.:
>>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]])
>>> tf.ragged.map_flat_values(lambda x: x + 2, rt)
<tf.RaggedTensor [[3, 4, 5], [], [6, 7], [8]]>
#### SparseTensors
`map_fn` supports `tf.sparse.SparseTensor` inputs and outputs. In particular:
* If `elems` is a `SparseTensor`, then `fn` will be called with each row
of that sparse tensor. In particular, the value passed to `fn` will be a
`tf.sparse.SparseTensor` with one fewer dimension than `elems`.
* If the result of `map_fn` should be a `SparseTensor`, then use a
`tf.SparseTensorSpec` to specify `fn_output_signature`. The individual
`SparseTensor`s returned by `fn` will be stacked into a single
`SparseTensor` with one more dimension.
>>> # Example: SparseTensor input
>>> st = tf.sparse.SparseTensor([[0, 0], [2, 0], [2, 1]], [2, 3, 4], [4, 4])
>>> tf.map_fn(tf.sparse.reduce_sum, st, fn_output_signature=tf.int32)
<tf.Tensor: shape=(4,), dtype=int32, numpy=array([2, 0, 7, 0], dtype=int32)>
>>> # Example: SparseTensor output
>>> tf.sparse.to_dense(
... tf.map_fn(tf.sparse.eye, tf.constant([2, 3]),
... fn_output_signature=tf.SparseTensorSpec(None, tf.float32)))
<tf.Tensor: shape=(2, 3, 3), dtype=float32, numpy=
array([[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]],
[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]]], dtype=float32)>
Note: `map_fn` should only be used if you need to map a function over the
*rows* of a `SparseTensor`. If you wish to map a function over the nonzero
values, then you should use:
* If the function is expressible as TensorFlow ops, use:
```python
tf.sparse.SparseTensor(st.indices, fn(st.values), st.dense_shape)
```
* Otherwise, use:
```python
tf.sparse.SparseTensor(st.indices, tf.map_fn(fn, st.values),
st.dense_shape)
```
#### `map_fn` vs. vectorized operations
`map_fn` will apply the operations used by `fn` to each element of `elems`,
resulting in `O(elems.shape[0])` total operations. This is somewhat
mitigated by the fact that `map_fn` can process elements in parallel.
However, a transform expressed using `map_fn` is still typically less
efficient than an equivalent transform expressed using vectorized operations.
`map_fn` should typically only be used if one of the following is true:
* It is difficult or expensive to express the desired transform with
vectorized operations.
* `fn` creates large intermediate values, so an equivalent vectorized
transform would take too much memory.
* Processing elements in parallel is more efficient than an equivalent
vectorized transform.
* Efficiency of the transform is not critical, and using `map_fn` is
more readable.
E.g., the example given above that maps `fn=lambda t: tf.range(t, t + 3)`
across `elems` could be rewritten more efficiently using vectorized ops:
>>> elems = tf.constant([3, 5, 2])
>>> tf.range(3) + tf.expand_dims(elems, 1)
<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[3, 4, 5],
[5, 6, 7],
[2, 3, 4]], dtype=int32)>
In some cases, `tf.vectorized_map` can be used to automatically convert a
function to a vectorized equivalent.
#### Eager execution
When executing eagerly, `map_fn` does not execute in parallel even if
`parallel_iterations` is set to a value > 1. You can still get the
performance benefits of running a function in parallel by using the
`tf.function` decorator:
>>> fn=lambda t: tf.range(t, t + 3)
>>> @tf.function
... def func(elems):
... return tf.map_fn(fn, elems, parallel_iterations=3)
>>> func(tf.constant([3, 5, 2]))
<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[3, 4, 5],
[5, 6, 7],
[2, 3, 4]], dtype=int32)>
Note: if you use the `tf.function` decorator, any non-TensorFlow Python
code that you may have written in your function won't get executed. See
`tf.function` for more details. The recommendation would be to debug without
`tf.function` but switch to it to get performance benefits of running `map_fn`
in parallel.
Args:
fn: The callable to be performed. It accepts one argument, which will have
the same (possibly nested) structure as `elems`. Its output must have the
same structure as `fn_output_signature` if one is provided; otherwise it
must have the same structure as `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unstacked along their first dimension. `fn` will be applied to the
nested sequence of the resulting slices. `elems` may include ragged and
sparse tensors. `elems` must consist of at least one tensor.
dtype: Deprecated: Equivalent to `fn_output_signature`.
parallel_iterations: (optional) The number of iterations allowed to run in
parallel. When graph building, the default value is 10. While executing
eagerly, the default value is set to 1.
back_prop: (optional) False disables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
infer_shape: (optional) False disables tests for consistent output shapes.
name: (optional) Name prefix for the returned tensors.
fn_output_signature: The output signature of `fn`. Must be specified if
`fn`'s input and output signatures are different (i.e., if their
structures, dtypes, or tensor types do not match).
`fn_output_signature` can be specified using any of the following:
* A `tf.DType` or `tf.TensorSpec` (to describe a `tf.Tensor`)
* A `tf.RaggedTensorSpec` (to describe a `tf.RaggedTensor`)
* A `tf.SparseTensorSpec` (to describe a `tf.sparse.SparseTensor`)
* A (possibly nested) tuple, list, or dict containing the above types.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor stacks the
results of applying `fn` to tensors unstacked from `elems` along the first
dimension, from first to last. The result may include ragged and sparse
tensors.
Raises:
TypeError: if `fn` is not callable or the structure of the output of
`fn` and `fn_output_signature` do not match.
ValueError: if the lengths of the output of `fn` and `fn_output_signature`
do not match, or if `elems` does not contain any tensor.
Examples:
>>> elems = np.array([1, 2, 3, 4, 5, 6])
>>> tf.map_fn(lambda x: x * x, elems)
<tf.Tensor: shape=(6,), dtype=int64, numpy=array([ 1, 4, 9, 16, 25, 36])>
>>> elems = (np.array([1, 2, 3]), np.array([-1, 1, -1]))
>>> tf.map_fn(lambda x: x[0] * x[1], elems, fn_output_signature=tf.int64)
<tf.Tensor: shape=(3,), dtype=int64, numpy=array([-1, 2, -3])>
>>> elems = np.array([1, 2, 3])
>>> tf.map_fn(lambda x: (x, -x), elems,
... fn_output_signature=(tf.int64, tf.int64))
(<tf.Tensor: shape=(3,), dtype=int64, numpy=array([1, 2, 3])>,
<tf.Tensor: shape=(3,), dtype=int64, numpy=array([-1, -2, -3])>)
"""
# This function uses a `while_loop` to call `fn` on each value of the input
# tensor(s) (unstacked on dimension 0). The following sequence of variables
# are used to transform the input tensor(s) (`elems`) into the output
# tensor(s) (`result`):
#
# - Preparing and unstacking input values for the while_loop:
# - elems: The input tensor(s) to map_fn. May include composite tensors.
# - elems_flat: Flattened list of tensors from elems (using nest.flatten)
# May include composite tensors.
# - elems_batchable: Concatenation of "batchable tensor lists" for each
# tensor in elems_flat. This "boxes" composite tensors
# into sliceable tf.Tensor objects. For more info see:
# TensorSpec._to_batched_tensor_list
# - elems_batchable_ta: List of TensorArrays used to unstack each Tensor
# in elems_batchable into elems_value_batchable.
#
# - Calling `fn` on each unstacked value in the body of the while_loop:
# - elems_value_batchable: Single unstacked value from elems_batchable.
# - elems_value_flat: Single unstacked value from elems_flat,
# constructed from elems_value_batchable (using
# TensorSpec._from_tensor_list).
# - elems_value: Single unstacked value from elems (the input to fn).
# - result_value: Result of calling `fn(elems_value)`. May contain
# composite tensors.
# - result_value_flat: Flattened list of tensors from result_value.
# May contain composite tensors.
# - result_value_batchable: Concatenation of batchable tensor lists for
# each tensor in result_value_flat
# (using TensorSpec._to_tensor_list).
#
# - Collecting and stacking output values from the while_loop:
# - result_batchable_ta: List of TensorArrays used to stack each tensor
# in result_value_batchable into result_batchable.
# - result_batchable: Stacked tensors from result_batchable_ta.
# - result_flat: Flat list of tensors for the result, constructed from
# result_batchable (using TensorSpec._from_tensor_list).
# - result: Structured result value packed from result_flat
# (using nest.pack_sequence_as).
if fn_output_signature is None:
fn_output_signature = dtype
if not callable(fn):
raise TypeError("fn must be callable.")
in_graph_mode = not context.executing_eagerly()
# Set the default number of parallel_iterations depending on graph/eager mode.
if in_graph_mode and not parallel_iterations:
parallel_iterations = 10
elif not in_graph_mode and not parallel_iterations:
parallel_iterations = 1
elif not in_graph_mode and parallel_iterations > 1:
logging.log_first_n(
logging.WARN, "Setting parallel_iterations > 1 has no "
"effect when executing eagerly. Consider calling map_fn"
" with tf.function to execute fn in "
"parallel.", 1)
parallel_iterations = 1
# Flatten the input tensors, and get the TypeSpec for each one.
elems_flat = nest.flatten(elems)
# Check in case this is an empty list
if len(elems_flat) == 0:
raise ValueError(
"elems must be a Tensor or (possibly nested) sequence of Tensors. "
"Got {}, which does not contain any Tensors.".format(elems))
elems_flat_signature = [type_spec.type_spec_from_value(e) for e in elems_flat]
elems_unflatten = lambda x: nest.pack_sequence_as(elems, x)
# Flatten fn's output signature.
if fn_output_signature is None:
# If fn_output_signature was not specified, then assume that it matches the
# input signature.
result_flat_signature = [
_most_general_compatible_type(s)._unbatch() # pylint: disable=protected-access
for s in elems_flat_signature
]
result_unflatten = elems_unflatten
else:
result_flat_signature = [
_dtype_to_spec(d) for d in nest.flatten(fn_output_signature)
]
result_unflatten = lambda x: nest.pack_sequence_as(fn_output_signature, x)
with ops.name_scope(name, "map", elems_flat):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
elems_flat = [
ops.convert_to_tensor_or_composite(t, name="elem") for t in elems_flat
]
# Check that inputs are not scalars.
first_elem = elems_flat[0]
if isinstance(first_elem, np_arrays.ndarray):
first_elem = first_elem.data
elems_static_shape = first_elem.shape
if elems_static_shape.ndims is not None and elems_static_shape.ndims < 1:
if len(elems_flat) == 1:
raise ValueError("elems must be a 1+ dimensional Tensor, not a scalar")
else:
raise ValueError(
"elements in elems must be 1+ dimensional Tensors, not scalars"
)
# Box any composite tensors into tensor lists.
elems_batchable = _elems_flat_to_batchable(elems_flat)
# Find the number of iterations, n. (may be known statically.)
n_static = tensor_shape.Dimension(
tensor_shape.dimension_value(
elems_batchable[0].get_shape().with_rank_at_least(1)[0]))
for tensor in elems_batchable[1:]:
n_static.merge_with(
tensor_shape.Dimension(
tensor_shape.dimension_value(
tensor.get_shape().with_rank_at_least(1)[0])))
n = n_static.value or array_ops.shape(elems_batchable[0])[0]
# Convert elems to tensor array.
# TODO(edloper): Should we set infer_shape=False for composite tensors?
elems_batchable_ta = [
tensor_array_ops.TensorArray(
dtype=t.dtype, size=n, dynamic_size=False, infer_shape=True)
for t in elems_batchable
]
# Unpack elements
elems_batchable_ta = [
ta.unstack(t) for (ta, t) in zip(elems_batchable_ta, elems_batchable)
]
i = constant_op.constant(0)
# Prepare result tensor array.
# TODO(edloper): Should we set infer_shape=False for composite tensors?
result_batchable_tensor_spec = (
_result_flat_signature_to_batchable_tensor_spec(result_flat_signature))
result_batchable_ta = []
for spec in result_batchable_tensor_spec:
result_batchable_ta.append(
tensor_array_ops.TensorArray(
dtype=spec.dtype, size=n, dynamic_size=False,
infer_shape=infer_shape, element_shape=spec.shape))
def compute(i, tas):
"""The loop body of map_fn.
Args:
i: the loop counter
tas: the flat TensorArray accumulator list
Returns:
(i + 1, tas): the updated counter + updated TensorArrays
Raises:
TypeError: if fn_output_signature and result_value structure don't match
ValueError: if fn_output_signature and result_value lengths don't match
"""
elems_value_batchable = [ta.read(i) for ta in elems_batchable_ta]
elems_value_flat = _elems_value_batchable_to_flat(elems_value_batchable,
elems_flat_signature)
elems_value = elems_unflatten(elems_value_flat)
ag_ctx = autograph_ctx.control_status_ctx()
autographed_fn = autograph.tf_convert(fn, ag_ctx)
result_value = autographed_fn(elems_value)
nest.assert_same_structure(fn_output_signature or elems, result_value)
result_value_flat = nest.flatten(result_value)
result_value_batchable = _result_value_flat_to_batchable(
result_value_flat, result_flat_signature)
tas = [
ta.write(i, value) for (ta, value) in zip(tas, result_value_batchable)
]
return (i + 1, tas)
_, r_a = control_flow_ops.while_loop(
lambda i, _: i < n,
compute, (i, result_batchable_ta),
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
result_batchable = [r.stack() for r in r_a]
# Update each output tensor w/ static shape info about the outer dimension.
for r in result_batchable:
r.set_shape(tensor_shape.TensorShape(n_static).concatenate(
r.get_shape()[1:]))
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
result_flat = _result_batchable_to_flat(result_batchable,
result_flat_signature,
n_static)
result = result_unflatten(result_flat)
return result
def _dtype_to_spec(d):
if not isinstance(d, type_spec.TypeSpec):
d = tensor_spec.TensorSpec(None, d)
return d
def _most_general_compatible_type(spec):
"""Returns the most general TypeSpec compatible with `spec`."""
# TODO(edloper): Consider adding most_general_compatible_type to TypeSpec API
if isinstance(spec, tensor_spec.TensorSpec):
return tensor_spec.TensorSpec(None, spec.dtype)
elif isinstance(spec, ragged_tensor.RaggedTensorSpec):
# pylint: disable=protected-access
return ragged_tensor.RaggedTensorSpec(None, spec._dtype, spec._ragged_rank,
spec._row_splits_dtype)
elif isinstance(spec, sparse_tensor.SparseTensorSpec):
# pylint: disable=protected-access
return sparse_tensor.SparseTensorSpec(None, spec.dtype)
else:
return spec
def _result_flat_signature_to_batchable_tensor_spec(result_flat_signature):
"""Converts result_flat_signature -> result_batchable_tensor_specs."""
tensor_specs = []
for spec in result_flat_signature:
if not isinstance(spec, type_spec.BatchableTypeSpec):
raise TypeError("map_fn can not generate %s outputs" % (spec,))
tensor_specs.extend(spec._flat_tensor_specs) # pylint: disable=protected-access
return tensor_specs
def _elems_flat_to_batchable(elems_flat):
"""Converts elems_flat -> elems_batchable."""
elems_batchable = []
for elems_tensor in elems_flat:
spec = type_spec.type_spec_from_value(elems_tensor)
if not isinstance(spec, type_spec.BatchableTypeSpec):
raise TypeError("map_fn can not consume %s inputs: got %r" %
(spec, elems_tensor))
# pylint: disable=protected-access
elems_batchable.extend(spec._to_batched_tensor_list(elems_tensor))
return elems_batchable
def _elems_value_batchable_to_flat(elems_value_batchable, elems_flat_signature):
"""Converts elems_value_batchable -> elems_value_flat."""
elems_value_flat = []
i = 0
for spec in elems_flat_signature:
# pylint: disable=protected-access
spec = spec._unbatch()
tensor_list = elems_value_batchable[i:i + len(spec._flat_tensor_specs)]
elems_value_flat.append(spec._from_compatible_tensor_list(tensor_list))
i += len(tensor_list)
assert i == len(elems_value_batchable)
return elems_value_flat
def _result_value_flat_to_batchable(result_value_flat, result_flat_signature):
"""Converts result_value_flat -> result_value_batchable."""
result_value_batchable = []
for (r_value, r_spec) in zip(result_value_flat, result_flat_signature):
if isinstance(r_spec, tensor_spec.TensorSpec):
result_value_batchable.append(r_value)
else:
if not r_spec.is_compatible_with(r_value):
raise ValueError(
"Error in map_fn:\n Expected `fn` to return a:\n %s\n"
" But it returned a:\n %s\n (value=%s)\n"
" To fix, update the `fn_output_signature` (or `dtype`) "
"argument to `map_fn`." %
(r_spec, type_spec.type_spec_from_value(r_value), r_value))
result_value_batchable.extend(r_spec._to_tensor_list(r_value)) # pylint: disable=protected-access
return result_value_batchable
def _result_batchable_to_flat(result_batchable, result_flat_signature,
batch_size):
"""Converts result_batchable -> result_flat."""
result_flat = []
i = 0
for spec in result_flat_signature:
# pylint: disable=protected-access
num_tensors = len(spec._flat_tensor_specs)
result_flat.append(
spec._batch(batch_size)._from_compatible_tensor_list(
result_batchable[i:i + num_tensors]))
i += num_tensors
assert i == len(result_batchable)
return result_flat
@tf_export("map_fn", v1=[])
@deprecation.deprecated_arg_values(
None,
"""back_prop=False is deprecated. Consider using tf.stop_gradient instead.
Instead of:
results = tf.map_fn(fn, elems, back_prop=False)
Use:
results = tf.nest.map_structure(tf.stop_gradient, tf.map_fn(fn, elems))""",
warn_once=True,
back_prop=False)
@deprecation.deprecated_args(None, "Use fn_output_signature instead", "dtype")
def map_fn_v2(fn,
elems,
dtype=None,
parallel_iterations=None,
back_prop=True,
swap_memory=False,
infer_shape=True,
name=None,
fn_output_signature=None):
"""Transform `elems` by applying `fn` to each element unstacked on axis 0."""
if fn_output_signature is None:
fn_output_signature = dtype
return map_fn(
fn=fn,
elems=elems,
fn_output_signature=fn_output_signature,
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
infer_shape=infer_shape,
name=name)
# Docstring for v2 is the same as v1, except that back_prop is deprecated.
map_fn_v2.__doc__ = re.sub(
r"( back_prop: \(optional\) )(.*)",
r"\1Deprecated: prefer using `tf.stop_gradient` instead. \2",
map_fn.__doc__)
assert "prefer using `tf.stop_gradient` instead" in map_fn_v2.__doc__
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# QTPyLib: Quantitative Trading Python Library
# https://github.com/ranaroussi/qtpylib
#
# Copyright 2016-2018 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import time
import sys
import numpy as np
import pandas as pd
import pymysql
from pymysql.constants.CLIENT import MULTI_STATEMENTS
from ezibpy import ezIBpy
from ezibpy.utils import contract_expiry_from_symbol
from qtpylib import tools
from qtpylib.blotter import (
load_blotter_args, get_symbol_id,
mysql_insert_tick, mysql_insert_bar
)
_IB_HISTORY_DOWNLOADED = False
# =============================================
# check min, python version
if sys.version_info < (3, 4):
raise SystemError("QTPyLib requires Python version >= 3.4")
# =============================================
tools.createLogger(__name__) # .setLevel(logging.DEBUG)
# =============================================
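# ezIBpy invokes this callback for every IB message; we only care about the
# "historical data download finished" event, which flips the module-level flag
# that get_data_ib() polls below.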
def ibCallback(caller, msg, **kwargs):
global _IB_HISTORY_DOWNLOADED
if caller == "handleHistoricalData":
if kwargs["completed"]:
_IB_HISTORY_DOWNLOADED = True
# print(kwargs)
def get_data_ib(instrument, start, resolution="1 min",
blotter=None, output_path=None):
"""
Downloads historical data from Interactive Brokers
:Parameters:
instrument : mixed
IB contract tuple / string (same as that given to strategy)
start : str
Backtest start date (YYYY-MM-DD [HH:MM:SS[.MS]])
:Optional:
resolution : str
1/5/15/30 secs, 1/2/3/5/15/30 min (default 1min), 1 hour, 1 day
blotter : str
Store MySQL server used by this Blotter (default is "auto detect")
output_path : str
Path to where the resulting CSV should be saved (optional)
:Returns:
data : pd.DataFrame
Pandas DataFrame in a QTPyLib-compatible format and timezone
"""
global _IB_HISTORY_DOWNLOADED
_IB_HISTORY_DOWNLOADED = False
# load blotter settings
blotter_args = load_blotter_args(
blotter, logger=logging.getLogger(__name__))
# create contract string (no need for connection)
ibConn = ezIBpy()
ibConn.ibCallback = ibCallback
if not ibConn.connected:
ibConn.connect(clientId=997,
port=int(blotter_args['ibport']),
host=str(blotter_args['ibserver']))
# generate a valid ib tuple
instrument = tools.create_ib_tuple(instrument)
contract_string = ibConn.contractString(instrument)
contract = ibConn.createContract(instrument)
ibConn.requestHistoricalData(contracts=[contract],
data="TRADES", resolution=resolution,
lookback=tools.ib_duration_str(start),
rth=False)
while not _IB_HISTORY_DOWNLOADED:
time.sleep(1)
ibConn.disconnect()
data = ibConn.historicalData[contract_string]
data['datetime'] = data.index
return prepare_data(instrument, data, output_path=output_path)
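# Hypothetical usage sketch (assumes a running Blotter and a connected TWS/IB
# Gateway; the symbol and dates are illustrative, not taken from the original):
#
#     bars = get_data_ib("AAPL", start="2018-01-01",
#                        resolution="1 min", output_path=".")
#     print(bars.tail())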
# =============================================
# data preparation methods
# =============================================
_BARS_COLSMAP = {
'open': 'open',
'high': 'high',
'low': 'low',
'close': 'close',
'volume': 'volume',
'opt_price': 'opt_price',
'opt_underlying': 'opt_underlying',
'opt_dividend': 'opt_dividend',
'opt_volume': 'opt_volume',
'opt_iv': 'opt_iv',
'opt_oi': 'opt_oi',
'opt_delta': 'opt_delta',
'opt_gamma': 'opt_gamma',
'opt_vega': 'opt_vega',
'opt_theta': 'opt_theta'
}
_TICKS_COLSMAP = {
'bid': 'bid',
'bidsize': 'bidsize',
'ask': 'ask',
'asksize': 'asksize',
'last': 'last',
'lastsize': 'lastsize',
'opt_price': 'opt_price',
'opt_underlying': 'opt_underlying',
'opt_dividend': 'opt_dividend',
'opt_volume': 'opt_volume',
'opt_iv': 'opt_iv',
'opt_oi': 'opt_oi',
'opt_delta': 'opt_delta',
'opt_gamma': 'opt_gamma',
'opt_vega': 'opt_vega',
'opt_theta': 'opt_theta'
}
# ---------------------------------------------
def validate_columns(df, kind="BAR", raise_errors=True):
global _TICKS_COLSMAP, _BARS_COLSMAP
# validate columns
if "asset_class" not in df.columns:
if raise_errors:
raise ValueError('Column asset_class not found')
return False
is_option = "OPT" in list(df['asset_class'].unique())
colsmap = _TICKS_COLSMAP if kind == "TICK" else _BARS_COLSMAP
for el in colsmap:
col = colsmap[el]
if col not in df.columns:
if "opt_" in col and is_option:
if raise_errors:
raise ValueError('Column %s not found' % el)
return False
elif "opt_" not in col and not is_option:
if raise_errors:
raise ValueError('Column %s not found' % el)
return False
return True
# ---------------------------------------------
def prepare_data(instrument, data, output_path=None,
index=None, colsmap=None, kind="BAR", resample="1T"):
"""
Converts given DataFrame to a QTPyLib-compatible format and timezone
:Parameters:
instrument : mixed
IB contract tuple / string (same as that given to strategy)
data : pd.DataFrame
Pandas DataFrame with that instrument's market data
output_path : str
Path to where the resulting CSV should be saved (optional)
index : pd.Series
Pandas Series that will be used for df's index (optional)
colsmap : dict
Dict for mapping df's columns to those used by QTPyLib
(default assumes same naming convention as QTPyLib's)
kind : str
Is this ``BAR`` or ``TICK`` data
resample : str
Pandas resolution (defaults to 1min/1T)
:Returns:
data : pd.DataFrame
Pandas DataFrame in a QTPyLib-compatible format and timezone
"""
global _TICKS_COLSMAP, _BARS_COLSMAP
# work on copy
df = data.copy()
# ezibpy's csv?
if set(df.columns) == set([
'datetime', 'C', 'H', 'L', 'O', 'OI', 'V', 'WAP']):
df.rename(columns={
'datetime': 'datetime',
'O': 'open',
'H': 'high',
'L': 'low',
'C': 'close',
'OI': 'volume',
}, inplace=True)
df.index = pd.to_datetime(df['datetime'])
df.index = df.index.tz_localize(tools.get_timezone()).tz_convert("UTC")
index = None
# lower case columns
df.columns = map(str.lower, df.columns)
# set index
if index is None:
index = df.index
# set defaults columns
if not isinstance(colsmap, dict):
colsmap = {}
_colsmap = _TICKS_COLSMAP if kind == "TICK" else _BARS_COLSMAP
for el in _colsmap:
if el not in colsmap:
colsmap[el] = _colsmap[el]
# generate a valid ib tuple
instrument = tools.create_ib_tuple(instrument)
# create contract string (no need for connection)
ibConn = ezIBpy()
contract_string = ibConn.contractString(instrument)
asset_class = tools.gen_asset_class(contract_string)
symbol_group = tools.gen_symbol_group(contract_string)
# add symbol data
df.loc[:, 'symbol'] = contract_string
df.loc[:, 'symbol_group'] = symbol_group
df.loc[:, 'asset_class'] = asset_class
# validate columns
valid_cols = validate_columns(df, kind)
if not valid_cols:
raise ValueError('Invalid Column list')
# rename columns to map
df.rename(columns=colsmap, inplace=True)
# force option columns on options
if asset_class == "OPT":
df = tools.force_options_columns(df)
# remove all other columns
known_cols = list(colsmap.values()) + \
['symbol', 'symbol_group', 'asset_class', 'expiry']
for col in df.columns:
if col not in known_cols:
df.drop(col, axis=1, inplace=True)
# set UTC index
df.index = pd.to_datetime(index)
df = tools.set_timezone(df, "UTC")
df.index.rename("datetime", inplace=True)
# resample
if resample and kind == "BAR":
df = tools.resample(df, resolution=resample, tz="UTC")
# add expiry
df.loc[:, 'expiry'] = np.nan
if asset_class in ("FUT", "OPT", "FOP"):
df.loc[:, 'expiry'] = contract_expiry_from_symbol(contract_string)
# save csv
if output_path is not None:
output_path = output_path[
:-1] if output_path.endswith('/') else output_path
df.to_csv("%s/%s.%s.csv" % (output_path, contract_string, kind))
# return df
return df
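# Hypothetical usage sketch (file name and symbol are illustrative
# assumptions): convert a raw 1-minute bar CSV into QTPyLib's format.
#
#     raw = pd.read_csv("aapl_1min.csv", index_col="datetime", parse_dates=True)
#     bars = prepare_data("AAPL", raw, kind="BAR", resample="1T",
#                         output_path=".")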
# ---------------------------------------------
def store_data(df, blotter=None, kind="BAR"):
"""
Store QTPyLib-compatible bar/tick data in the Blotter's MySQL database.
TWS/GW data is required for determining futures/options expiration
:Parameters:
df : pd.DataFrame
Tick/Bar data
:Optional:
blotter : str
Store MySQL server used by this Blotter (default is "auto detect")
kind : str
Is this ``BAR`` or ``TICK`` data
"""
# validate columns
valid_cols = validate_columns(df, kind)
if not valid_cols:
raise ValueError('Invalid Column list')
# load blotter settings
blotter_args = load_blotter_args(
blotter, logger=logging.getLogger(__name__))
# blotter not running
if blotter_args is None:
raise Exception("Cannot connect to running Blotter.")
# cannot continue
if blotter_args['dbskip']:
raise Exception("Cannot continue. Blotter running with --dbskip")
# connect to mysql using blotter's settings
dbconn = pymysql.connect(
client_flag=MULTI_STATEMENTS,
host=str(blotter_args['dbhost']),
port=int(blotter_args['dbport']),
user=str(blotter_args['dbuser']),
passwd=str(blotter_args['dbpass']),
db=str(blotter_args['dbname']),
autocommit=True
)
dbcurr = dbconn.cursor()
# loop through symbols and save in db
for symbol in list(df['symbol'].unique()):
data = df[df['symbol'] == symbol]
symbol_id = get_symbol_id(symbol, dbconn, dbcurr)
# prepare columns for insert
data.loc[:, 'timestamp'] = data.index.strftime('%Y-%m-%d %H:%M:%S')
data.loc[:, 'symbol_id'] = symbol_id
# insert row by row to handle greeks
data = data.to_dict(orient="records")
if kind == "BAR":
for _, row in enumerate(data):
mysql_insert_bar(row, symbol_id, dbcurr)
else:
for _, row in enumerate(data):
mysql_insert_tick(row, symbol_id, dbcurr)
try:
dbconn.commit()
except Exception as e:
return False
return True
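# Hypothetical usage sketch (assumes a running Blotter started without
# --dbskip so its MySQL settings can be auto-detected; `bars` is a DataFrame
# produced by prepare_data() above):
#
#     store_data(bars, kind="BAR")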
# =============================================
# data analyze methods
# =============================================
def analyze_portfolio(file):
""" analyze portfolio (TBD) """
pass
|
|
from six import BytesIO
from vcr.filters import (
remove_headers,
replace_headers,
remove_query_parameters,
replace_query_parameters,
remove_post_data_parameters,
replace_post_data_parameters,
decode_response,
)
from vcr.compat import mock
from vcr.request import Request
import gzip
import json
import zlib
def test_replace_headers():
# This tests all of:
# 1. keeping a header
# 2. removing a header
# 3. replacing a header
# 4. replacing a header using a callable
# 5. removing a header using a callable
# 6. replacing a header that doesn't exist
headers = {"one": ["keep"], "two": ["lose"], "three": ["change"], "four": ["shout"], "five": ["whisper"]}
request = Request("GET", "http://google.com", "", headers)
replace_headers(
request,
[
("two", None),
("three", "tada"),
("four", lambda key, value, request: value.upper()),
("five", lambda key, value, request: None),
("six", "doesntexist"),
],
)
assert request.headers == {"one": "keep", "three": "tada", "four": "SHOUT"}
def test_replace_headers_empty():
headers = {"hello": "goodbye", "secret": "header"}
request = Request("GET", "http://google.com", "", headers)
replace_headers(request, [])
assert request.headers == headers
def test_replace_headers_callable():
# This goes beyond test_replace_headers() to ensure that the callable
# receives the expected arguments.
headers = {"hey": "there"}
request = Request("GET", "http://google.com", "", headers)
callme = mock.Mock(return_value="ho")
replace_headers(request, [("hey", callme)])
assert request.headers == {"hey": "ho"}
assert callme.call_args == ((), {"request": request, "key": "hey", "value": "there"})
def test_remove_headers():
# Test the backward-compatible API wrapper.
headers = {"hello": ["goodbye"], "secret": ["header"]}
request = Request("GET", "http://google.com", "", headers)
remove_headers(request, ["secret"])
assert request.headers == {"hello": "goodbye"}
def test_replace_query_parameters():
# This tests all of:
# 1. keeping a parameter
# 2. removing a parameter
# 3. replacing a parameter
# 4. replacing a parameter using a callable
# 5. removing a parameter using a callable
# 6. replacing a parameter that doesn't exist
uri = "http://g.com/?one=keep&two=lose&three=change&four=shout&five=whisper"
request = Request("GET", uri, "", {})
replace_query_parameters(
request,
[
("two", None),
("three", "tada"),
("four", lambda key, value, request: value.upper()),
("five", lambda key, value, request: None),
("six", "doesntexist"),
],
)
assert request.query == [("four", "SHOUT"), ("one", "keep"), ("three", "tada")]
def test_remove_all_query_parameters():
uri = "http://g.com/?q=cowboys&w=1"
request = Request("GET", uri, "", {})
replace_query_parameters(request, [("w", None), ("q", None)])
assert request.uri == "http://g.com/"
def test_replace_query_parameters_callable():
# This goes beyond test_replace_query_parameters() to ensure that the
# callable receives the expected arguments.
uri = "http://g.com/?hey=there"
request = Request("GET", uri, "", {})
callme = mock.Mock(return_value="ho")
replace_query_parameters(request, [("hey", callme)])
assert request.uri == "http://g.com/?hey=ho"
assert callme.call_args == ((), {"request": request, "key": "hey", "value": "there"})
def test_remove_query_parameters():
# Test the backward-compatible API wrapper.
uri = "http://g.com/?q=cowboys&w=1"
request = Request("GET", uri, "", {})
remove_query_parameters(request, ["w"])
assert request.uri == "http://g.com/?q=cowboys"
def test_replace_post_data_parameters():
# This tests all of:
# 1. keeping a parameter
# 2. removing a parameter
# 3. replacing a parameter
# 4. replacing a parameter using a callable
# 5. removing a parameter using a callable
# 6. replacing a parameter that doesn't exist
body = b"one=keep&two=lose&three=change&four=shout&five=whisper"
request = Request("POST", "http://google.com", body, {})
replace_post_data_parameters(
request,
[
("two", None),
("three", "tada"),
("four", lambda key, value, request: value.upper()),
("five", lambda key, value, request: None),
("six", "doesntexist"),
],
)
assert request.body == b"one=keep&three=tada&four=SHOUT"
def test_replace_post_data_parameters_empty_body():
# This test ensures replace_post_data_parameters doesn't raise an exception when the body is empty.
body = None
request = Request("POST", "http://google.com", body, {})
replace_post_data_parameters(
request,
[
("two", None),
("three", "tada"),
("four", lambda key, value, request: value.upper()),
("five", lambda key, value, request: None),
("six", "doesntexist"),
],
)
assert request.body is None
def test_remove_post_data_parameters():
# Test the backward-compatible API wrapper.
body = b"id=secret&foo=bar"
request = Request("POST", "http://google.com", body, {})
remove_post_data_parameters(request, ["id"])
assert request.body == b"foo=bar"
def test_preserve_multiple_post_data_parameters():
body = b"id=secret&foo=bar&foo=baz"
request = Request("POST", "http://google.com", body, {})
replace_post_data_parameters(request, [("id", None)])
assert request.body == b"foo=bar&foo=baz"
def test_remove_all_post_data_parameters():
body = b"id=secret&foo=bar"
request = Request("POST", "http://google.com", body, {})
replace_post_data_parameters(request, [("id", None), ("foo", None)])
assert request.body == b""
def test_replace_json_post_data_parameters():
# This tests all of:
# 1. keeping a parameter
# 2. removing a parameter
# 3. replacing a parameter
# 4. replacing a parameter using a callable
# 5. removing a parameter using a callable
# 6. replacing a parameter that doesn't exist
body = b'{"one": "keep", "two": "lose", "three": "change", "four": "shout", "five": "whisper"}'
request = Request("POST", "http://google.com", body, {})
request.headers["Content-Type"] = "application/json"
replace_post_data_parameters(
request,
[
("two", None),
("three", "tada"),
("four", lambda key, value, request: value.upper()),
("five", lambda key, value, request: None),
("six", "doesntexist"),
],
)
request_data = json.loads(request.body.decode("utf-8"))
expected_data = json.loads('{"one": "keep", "three": "tada", "four": "SHOUT"}')
assert request_data == expected_data
def test_remove_json_post_data_parameters():
# Test the backward-compatible API wrapper.
body = b'{"id": "secret", "foo": "bar", "baz": "qux"}'
request = Request("POST", "http://google.com", body, {})
request.headers["Content-Type"] = "application/json"
remove_post_data_parameters(request, ["id"])
request_body_json = json.loads(request.body.decode("utf-8"))
expected_json = json.loads(b'{"foo": "bar", "baz": "qux"}'.decode("utf-8"))
assert request_body_json == expected_json
def test_remove_all_json_post_data_parameters():
body = b'{"id": "secret", "foo": "bar"}'
request = Request("POST", "http://google.com", body, {})
request.headers["Content-Type"] = "application/json"
replace_post_data_parameters(request, [("id", None), ("foo", None)])
assert request.body == b"{}"
def test_decode_response_uncompressed():
recorded_response = {
"status": {"message": "OK", "code": 200},
"headers": {
"content-length": ["10806"],
"date": ["Fri, 24 Oct 2014 18:35:37 GMT"],
"content-type": ["text/html; charset=utf-8"],
},
"body": {"string": b""},
}
assert decode_response(recorded_response) == recorded_response
def test_decode_response_deflate():
body = b"deflate message"
deflate_response = {
"body": {"string": zlib.compress(body)},
"headers": {
"access-control-allow-credentials": ["true"],
"access-control-allow-origin": ["*"],
"connection": ["keep-alive"],
"content-encoding": ["deflate"],
"content-length": ["177"],
"content-type": ["application/json"],
"date": ["Wed, 02 Dec 2015 19:44:32 GMT"],
"server": ["nginx"],
},
"status": {"code": 200, "message": "OK"},
}
decoded_response = decode_response(deflate_response)
assert decoded_response["body"]["string"] == body
assert decoded_response["headers"]["content-length"] == [str(len(body))]
def test_decode_response_gzip():
body = b"gzip message"
buf = BytesIO()
f = gzip.GzipFile("a", fileobj=buf, mode="wb")
f.write(body)
f.close()
compressed_body = buf.getvalue()
buf.close()
gzip_response = {
"body": {"string": compressed_body},
"headers": {
"access-control-allow-credentials": ["true"],
"access-control-allow-origin": ["*"],
"connection": ["keep-alive"],
"content-encoding": ["gzip"],
"content-length": ["177"],
"content-type": ["application/json"],
"date": ["Wed, 02 Dec 2015 19:44:32 GMT"],
"server": ["nginx"],
},
"status": {"code": 200, "message": "OK"},
}
decoded_response = decode_response(gzip_response)
assert decoded_response["body"]["string"] == body
assert decoded_response["headers"]["content-length"] == [str(len(body))]
|
|
from datetime import datetime
import pytz
from django.conf import settings
from django.db import connection
from django.db.models import DateField, DateTimeField, IntegerField, TimeField
from django.db.models.functions import (
Extract, ExtractDay, ExtractHour, ExtractMinute, ExtractMonth,
ExtractSecond, ExtractWeek, ExtractWeekDay, ExtractYear, Trunc, TruncDate,
TruncDay, TruncHour, TruncMinute, TruncMonth, TruncSecond, TruncTime,
TruncYear,
)
from django.test import TestCase, override_settings
from django.utils import timezone
from .models import DTModel
def microsecond_support(value):
return value if connection.features.supports_microsecond_precision else value.replace(microsecond=0)
def truncate_to(value, kind, tzinfo=None):
# Convert to target timezone before truncation
if tzinfo is not None:
value = value.astimezone(tzinfo)
def truncate(value, kind):
if kind == 'second':
return value.replace(microsecond=0)
if kind == 'minute':
return value.replace(second=0, microsecond=0)
if kind == 'hour':
return value.replace(minute=0, second=0, microsecond=0)
if kind == 'day':
if isinstance(value, datetime):
return value.replace(hour=0, minute=0, second=0, microsecond=0)
return value
if kind == 'month':
if isinstance(value, datetime):
return value.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
return value.replace(day=1)
# otherwise, truncate to year
if isinstance(value, datetime):
return value.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
return value.replace(month=1, day=1)
value = truncate(value, kind)
if tzinfo is not None:
# If there was a daylight saving transition, then reset the timezone.
value = timezone.make_aware(value.replace(tzinfo=None), tzinfo)
return value
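# For example (illustrative, not used directly by the tests below):
#     truncate_to(datetime(2015, 6, 15, 14, 30, 50, 321), 'month')
# returns datetime(2015, 6, 1, 0, 0); when a tzinfo is given, the value is
# converted to that timezone first and re-localized after truncation.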
@override_settings(USE_TZ=False)
class DateFunctionTests(TestCase):
def create_model(self, start_datetime, end_datetime):
return DTModel.objects.create(
name=start_datetime.isoformat(),
start_datetime=start_datetime, end_datetime=end_datetime,
start_date=start_datetime.date(), end_date=end_datetime.date(),
start_time=start_datetime.time(), end_time=end_datetime.time(),
duration=(end_datetime - start_datetime),
)
def test_extract_year_exact_lookup(self):
"""
Extract year uses a BETWEEN filter to compare the year to allow indexes
to be used.
"""
start_datetime = datetime(2015, 6, 15, 14, 10)
end_datetime = datetime(2016, 6, 15, 14, 10)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
qs = DTModel.objects.filter(start_datetime__year__exact=2015)
self.assertEqual(qs.count(), 1)
query_string = str(qs.query).lower()
self.assertEqual(query_string.count(' between '), 1)
self.assertEqual(query_string.count('extract'), 0)
# exact is implied and should be the same
qs = DTModel.objects.filter(start_datetime__year=2015)
self.assertEqual(qs.count(), 1)
query_string = str(qs.query).lower()
self.assertEqual(query_string.count(' between '), 1)
self.assertEqual(query_string.count('extract'), 0)
# date and datetime fields should behave the same
qs = DTModel.objects.filter(start_date__year=2015)
self.assertEqual(qs.count(), 1)
query_string = str(qs.query).lower()
self.assertEqual(query_string.count(' between '), 1)
self.assertEqual(query_string.count('extract'), 0)
def test_extract_year_greaterthan_lookup(self):
start_datetime = datetime(2015, 6, 15, 14, 10)
end_datetime = datetime(2016, 6, 15, 14, 10)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
qs = DTModel.objects.filter(start_datetime__year__gt=2015)
self.assertEqual(qs.count(), 1)
self.assertEqual(str(qs.query).lower().count('extract'), 0)
qs = DTModel.objects.filter(start_datetime__year__gte=2015)
self.assertEqual(qs.count(), 2)
self.assertEqual(str(qs.query).lower().count('extract'), 0)
def test_extract_year_lessthan_lookup(self):
start_datetime = datetime(2015, 6, 15, 14, 10)
end_datetime = datetime(2016, 6, 15, 14, 10)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
qs = DTModel.objects.filter(start_datetime__year__lt=2016)
self.assertEqual(qs.count(), 1)
self.assertEqual(str(qs.query).count('extract'), 0)
qs = DTModel.objects.filter(start_datetime__year__lte=2016)
self.assertEqual(qs.count(), 2)
self.assertEqual(str(qs.query).count('extract'), 0)
def test_extract_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
with self.assertRaisesMessage(ValueError, 'lookup_name must be provided'):
Extract('start_datetime')
msg = 'Extract input expression must be DateField, DateTimeField, or TimeField.'
with self.assertRaisesMessage(ValueError, msg):
list(DTModel.objects.annotate(extracted=Extract('name', 'hour')))
with self.assertRaisesMessage(
ValueError, "Cannot extract time component 'second' from DateField 'start_date'."):
list(DTModel.objects.annotate(extracted=Extract('start_date', 'second')))
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'year')).order_by('start_datetime'),
[(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'month')).order_by('start_datetime'),
[(start_datetime, start_datetime.month), (end_datetime, end_datetime.month)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'day')).order_by('start_datetime'),
[(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'week')).order_by('start_datetime'),
[(start_datetime, 25), (end_datetime, 24)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'week_day')).order_by('start_datetime'),
[
(start_datetime, (start_datetime.isoweekday() % 7) + 1),
(end_datetime, (end_datetime.isoweekday() % 7) + 1)
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'hour')).order_by('start_datetime'),
[(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'minute')).order_by('start_datetime'),
[(start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'second')).order_by('start_datetime'),
[(start_datetime, start_datetime.second), (end_datetime, end_datetime.second)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__year=Extract('start_datetime', 'year')).count(), 2)
self.assertEqual(DTModel.objects.filter(start_datetime__hour=Extract('start_datetime', 'hour')).count(), 2)
self.assertEqual(DTModel.objects.filter(start_date__month=Extract('start_date', 'month')).count(), 2)
self.assertEqual(DTModel.objects.filter(start_time__hour=Extract('start_time', 'hour')).count(), 2)
def test_extract_year_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractYear('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractYear('start_date')).order_by('start_datetime'),
[(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__year=ExtractYear('start_datetime')).count(), 2)
def test_extract_month_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractMonth('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.month), (end_datetime, end_datetime.month)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractMonth('start_date')).order_by('start_datetime'),
[(start_datetime, start_datetime.month), (end_datetime, end_datetime.month)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__month=ExtractMonth('start_datetime')).count(), 2)
def test_extract_day_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractDay('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractDay('start_date')).order_by('start_datetime'),
[(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__day=ExtractDay('start_datetime')).count(), 2)
def test_extract_week_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractWeek('start_datetime')).order_by('start_datetime'),
[(start_datetime, 25), (end_datetime, 24)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractWeek('start_date')).order_by('start_datetime'),
[(start_datetime, 25), (end_datetime, 24)],
lambda m: (m.start_datetime, m.extracted)
)
# both dates are from the same week.
self.assertEqual(DTModel.objects.filter(start_datetime__week=ExtractWeek('start_datetime')).count(), 2)
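    # ISO-8601 week numbering: week 1 is the week containing the year's first
    # Thursday, so late-December dates can fall in week 1 of the following year
    # and a year can have a week 53, as exercised below.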
def test_extract_week_func_boundaries(self):
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
        week_52_day_2014 = microsecond_support(datetime(2014, 12, 27, 13, 0))  # Saturday
week_1_day_2014_2015 = microsecond_support(datetime(2014, 12, 31, 13, 0)) # Wednesday
week_53_day_2015 = microsecond_support(datetime(2015, 12, 31, 13, 0)) # Thursday
if settings.USE_TZ:
week_1_day_2014_2015 = timezone.make_aware(week_1_day_2014_2015, is_dst=False)
week_52_day_2014 = timezone.make_aware(week_52_day_2014, is_dst=False)
week_53_day_2015 = timezone.make_aware(week_53_day_2015, is_dst=False)
days = [week_52_day_2014, week_1_day_2014_2015, week_53_day_2015]
self.create_model(week_53_day_2015, end_datetime)
self.create_model(week_52_day_2014, end_datetime)
self.create_model(week_1_day_2014_2015, end_datetime)
qs = DTModel.objects.filter(start_datetime__in=days).annotate(
extracted=ExtractWeek('start_datetime'),
).order_by('start_datetime')
self.assertQuerysetEqual(qs, [
(week_52_day_2014, 52),
(week_1_day_2014_2015, 1),
(week_53_day_2015, 53),
], lambda m: (m.start_datetime, m.extracted))
def test_extract_weekday_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractWeekDay('start_datetime')).order_by('start_datetime'),
[
(start_datetime, (start_datetime.isoweekday() % 7) + 1),
(end_datetime, (end_datetime.isoweekday() % 7) + 1),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractWeekDay('start_date')).order_by('start_datetime'),
[
(start_datetime, (start_datetime.isoweekday() % 7) + 1),
(end_datetime, (end_datetime.isoweekday() % 7) + 1),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__week_day=ExtractWeekDay('start_datetime')).count(), 2)
def test_extract_hour_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractHour('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractHour('start_time')).order_by('start_datetime'),
[(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__hour=ExtractHour('start_datetime')).count(), 2)
def test_extract_minute_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractMinute('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractMinute('start_time')).order_by('start_datetime'),
[(start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__minute=ExtractMinute('start_datetime')).count(), 2)
def test_extract_second_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractSecond('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.second), (end_datetime, end_datetime.second)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractSecond('start_time')).order_by('start_datetime'),
[(start_datetime, start_datetime.second), (end_datetime, end_datetime.second)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__second=ExtractSecond('start_datetime')).count(), 2)
def test_trunc_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
msg = 'output_field must be either DateField, TimeField, or DateTimeField'
with self.assertRaisesMessage(ValueError, msg):
list(DTModel.objects.annotate(truncated=Trunc('start_datetime', 'year', output_field=IntegerField())))
with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."):
list(DTModel.objects.annotate(truncated=Trunc('name', 'year', output_field=DateTimeField())))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_date', 'second')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_time', 'month')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_date', 'month', output_field=DateTimeField())))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_time', 'second', output_field=DateTimeField())))
def test_datetime_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_datetime', kind, output_field=DateTimeField())
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, kind)),
(end_datetime, truncate_to(end_datetime, kind))
],
lambda m: (m.start_datetime, m.truncated)
)
def test_date_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_date', kind, output_field=DateField())
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.date(), kind)),
(end_datetime, truncate_to(end_datetime.date(), kind))
],
lambda m: (m.start_datetime, m.truncated)
)
def test_time_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_time', kind, output_field=TimeField())
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.time(), kind)),
(end_datetime, truncate_to(end_datetime.time(), kind))
],
lambda m: (m.start_datetime, m.truncated)
)
test_date_kind('year')
test_date_kind('month')
test_date_kind('day')
test_time_kind('hour')
test_time_kind('minute')
test_time_kind('second')
test_datetime_kind('year')
test_datetime_kind('month')
test_datetime_kind('day')
test_datetime_kind('hour')
test_datetime_kind('minute')
test_datetime_kind('second')
qs = DTModel.objects.filter(start_datetime__date=Trunc('start_datetime', 'day', output_field=DateField()))
self.assertEqual(qs.count(), 2)
def test_trunc_year_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = truncate_to(microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123)), 'year')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncYear('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'year')),
(end_datetime, truncate_to(end_datetime, 'year')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncYear('start_date')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.date(), 'year')),
(end_datetime, truncate_to(end_datetime.date(), 'year')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncYear('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncYear('start_time')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncYear('start_time', output_field=TimeField())))
def test_trunc_month_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = truncate_to(microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123)), 'month')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncMonth('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'month')),
(end_datetime, truncate_to(end_datetime, 'month')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncMonth('start_date')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.date(), 'month')),
(end_datetime, truncate_to(end_datetime.date(), 'month')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncMonth('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncMonth('start_time')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncMonth('start_time', output_field=TimeField())))
def test_trunc_date_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncDate('start_datetime')).order_by('start_datetime'),
[
(start_datetime, start_datetime.date()),
(end_datetime, end_datetime.date()),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__date=TruncDate('start_datetime')).count(), 2)
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateField"):
list(DTModel.objects.annotate(truncated=TruncDate('start_time')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateField"):
list(DTModel.objects.annotate(truncated=TruncDate('start_time', output_field=TimeField())))
def test_trunc_time_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncTime('start_datetime')).order_by('start_datetime'),
[
(start_datetime, start_datetime.time()),
(end_datetime, end_datetime.time()),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__time=TruncTime('start_datetime')).count(), 2)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"):
list(DTModel.objects.annotate(truncated=TruncTime('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"):
list(DTModel.objects.annotate(truncated=TruncTime('start_date', output_field=DateField())))
def test_trunc_day_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = truncate_to(microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123)), 'day')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncDay('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'day')),
(end_datetime, truncate_to(end_datetime, 'day')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncDay('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncDay('start_time')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncDay('start_time', output_field=TimeField())))
def test_trunc_hour_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = truncate_to(microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123)), 'hour')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncHour('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'hour')),
(end_datetime, truncate_to(end_datetime, 'hour')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncHour('start_time')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.time(), 'hour')),
(end_datetime, truncate_to(end_datetime.time(), 'hour')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncHour('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncHour('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncHour('start_date', output_field=DateField())))
def test_trunc_minute_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = truncate_to(microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123)), 'minute')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncMinute('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'minute')),
(end_datetime, truncate_to(end_datetime, 'minute')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncMinute('start_time')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.time(), 'minute')),
(end_datetime, truncate_to(end_datetime.time(), 'minute')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncMinute('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncMinute('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncMinute('start_date', output_field=DateField())))
def test_trunc_second_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = truncate_to(microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123)), 'second')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncSecond('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'second')),
(end_datetime, truncate_to(end_datetime, 'second'))
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncSecond('start_time')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.time(), 'second')),
(end_datetime, truncate_to(end_datetime.time(), 'second'))
],
lambda m: (m.start_datetime, m.extracted)
)
result = 1 if connection.features.supports_microsecond_precision else 2
self.assertEqual(DTModel.objects.filter(start_datetime=TruncSecond('start_datetime')).count(), result)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncSecond('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncSecond('start_date', output_field=DateField())))
@override_settings(USE_TZ=True, TIME_ZONE='UTC')
class DateFunctionWithTimeZoneTests(DateFunctionTests):
def test_extract_func_with_timezone(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 23, 30, 1, 321))
end_datetime = microsecond_support(datetime(2015, 6, 16, 13, 11, 27, 123))
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
melb = pytz.timezone('Australia/Melbourne')
qs = DTModel.objects.annotate(
day=Extract('start_datetime', 'day'),
day_melb=Extract('start_datetime', 'day', tzinfo=melb),
week=Extract('start_datetime', 'week', tzinfo=melb),
weekday=ExtractWeekDay('start_datetime'),
weekday_melb=ExtractWeekDay('start_datetime', tzinfo=melb),
hour=ExtractHour('start_datetime'),
hour_melb=ExtractHour('start_datetime', tzinfo=melb),
).order_by('start_datetime')
utc_model = qs.get()
self.assertEqual(utc_model.day, 15)
self.assertEqual(utc_model.day_melb, 16)
self.assertEqual(utc_model.week, 25)
self.assertEqual(utc_model.weekday, 2)
self.assertEqual(utc_model.weekday_melb, 3)
self.assertEqual(utc_model.hour, 23)
self.assertEqual(utc_model.hour_melb, 9)
with timezone.override(melb):
melb_model = qs.get()
self.assertEqual(melb_model.day, 16)
self.assertEqual(melb_model.day_melb, 16)
self.assertEqual(melb_model.week, 25)
self.assertEqual(melb_model.weekday, 3)
self.assertEqual(melb_model.weekday_melb, 3)
self.assertEqual(melb_model.hour, 9)
self.assertEqual(melb_model.hour_melb, 9)
def test_extract_func_explicit_timezone_priority(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 23, 30, 1, 321))
end_datetime = microsecond_support(datetime(2015, 6, 16, 13, 11, 27, 123))
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
melb = pytz.timezone('Australia/Melbourne')
with timezone.override(melb):
model = DTModel.objects.annotate(
day_melb=Extract('start_datetime', 'day'),
day_utc=Extract('start_datetime', 'day', tzinfo=timezone.utc),
).order_by('start_datetime').get()
self.assertEqual(model.day_melb, 16)
self.assertEqual(model.day_utc, 15)
def test_trunc_timezone_applied_before_truncation(self):
start_datetime = microsecond_support(datetime(2016, 1, 1, 1, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
melb = pytz.timezone('Australia/Melbourne')
pacific = pytz.timezone('US/Pacific')
model = DTModel.objects.annotate(
melb_year=TruncYear('start_datetime', tzinfo=melb),
pacific_year=TruncYear('start_datetime', tzinfo=pacific),
).order_by('start_datetime').get()
self.assertEqual(model.start_datetime, start_datetime)
self.assertEqual(model.melb_year, truncate_to(start_datetime, 'year', melb))
self.assertEqual(model.pacific_year, truncate_to(start_datetime, 'year', pacific))
self.assertEqual(model.start_datetime.year, 2016)
self.assertEqual(model.melb_year.year, 2016)
self.assertEqual(model.pacific_year.year, 2015)
def test_trunc_func_with_timezone(self):
"""
If the truncated datetime transitions to a different offset (daylight
saving) then the returned value will have that new timezone/offset.
"""
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
melb = pytz.timezone('Australia/Melbourne')
def test_datetime_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_datetime', kind, output_field=DateTimeField(), tzinfo=melb)
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.astimezone(melb), kind, melb)),
(end_datetime, truncate_to(end_datetime.astimezone(melb), kind, melb))
],
lambda m: (m.start_datetime, m.truncated)
)
def test_date_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_date', kind, output_field=DateField(), tzinfo=melb)
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.date(), kind)),
(end_datetime, truncate_to(end_datetime.date(), kind))
],
lambda m: (m.start_datetime, m.truncated)
)
def test_time_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_time', kind, output_field=TimeField(), tzinfo=melb)
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.time(), kind)),
(end_datetime, truncate_to(end_datetime.time(), kind))
],
lambda m: (m.start_datetime, m.truncated)
)
test_date_kind('year')
test_date_kind('month')
test_date_kind('day')
test_time_kind('hour')
test_time_kind('minute')
test_time_kind('second')
test_datetime_kind('year')
test_datetime_kind('month')
test_datetime_kind('day')
test_datetime_kind('hour')
test_datetime_kind('minute')
test_datetime_kind('second')
qs = DTModel.objects.filter(start_datetime__date=Trunc('start_datetime', 'day', output_field=DateField()))
self.assertEqual(qs.count(), 2)
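# Illustrative sketch (not part of the test suite): the week_day numbering the
# assertions above rely on. Django's Extract(..., 'week_day') / ExtractWeekDay
# return 1 (Sunday) through 7 (Saturday), while Python's datetime.isoweekday()
# returns 1 (Monday) through 7 (Sunday), hence the "(isoweekday() % 7) + 1"
# expression used for the expected values (assumes the module-level datetime
# import used throughout these tests).
def _example_django_week_day(dt):
    """Map datetime.isoweekday() onto Django's 1=Sunday..7=Saturday numbering."""
    return (dt.isoweekday() % 7) + 1
# e.g. _example_django_week_day(datetime(2015, 6, 15)) == 2, since 2015-06-15 is a Monday.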
|
|
# -*- coding: utf-8 -*-
from django import forms
from django.contrib import admin
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_unicode
from django.forms.models import fields_for_model
from django.utils.translation import ugettext_lazy as _
from django.utils.text import capfirst
from rollyourown.seo.utils import get_seo_content_types
from rollyourown.seo.systemviews import get_seo_views
# TODO Use groups as fieldsets
# Variants without sites support
class PathMetadataAdmin(admin.ModelAdmin):
list_display = ('_path',)
class ModelInstanceMetadataAdmin(admin.ModelAdmin):
list_display = ('_path', '_content_type', '_object_id')
class ModelMetadataAdmin(admin.ModelAdmin):
list_display = ('_content_type',)
class ViewMetadataAdmin(admin.ModelAdmin):
list_display = ('_view', )
# Variants with sites support
class SitePathMetadataAdmin(admin.ModelAdmin):
list_display = ('_path', '_site')
list_filter = ('_site',)
class SiteModelInstanceMetadataAdmin(admin.ModelAdmin):
list_display = ('_path', '_content_type', '_object_id', '_site')
list_filter = ('_site', '_content_type')
class SiteModelMetadataAdmin(admin.ModelAdmin):
list_display = ('_content_type', '_site')
list_filter = ('_site',)
class SiteViewMetadataAdmin(admin.ModelAdmin):
list_display = ('_view', '_site')
list_filter = ('_site',)
def register_seo_admin(admin_site, metadata_class):
if metadata_class._meta.use_sites:
path_admin = SitePathMetadataAdmin
model_instance_admin = SiteModelInstanceMetadataAdmin
model_admin = SiteModelMetadataAdmin
view_admin = SiteViewMetadataAdmin
else:
path_admin = PathMetadataAdmin
model_instance_admin = ModelInstanceMetadataAdmin
model_admin = ModelMetadataAdmin
view_admin = ViewMetadataAdmin
class ModelAdmin(model_admin):
form = get_model_form(metadata_class)
class ViewAdmin(view_admin):
form = get_view_form(metadata_class)
class PathAdmin(path_admin):
form = get_path_form(metadata_class)
class ModelInstanceAdmin(model_instance_admin):
pass
_register_admin(admin_site, metadata_class._meta.get_model('path'), PathAdmin)
_register_admin(admin_site, metadata_class._meta.get_model('modelinstance'), ModelInstanceAdmin)
_register_admin(admin_site, metadata_class._meta.get_model('model'), ModelAdmin)
_register_admin(admin_site, metadata_class._meta.get_model('view'), ViewAdmin)
def _register_admin(admin_site, model, admin_class):
""" Register model in the admin, ignoring any previously registered models.
Alternatively it could be used in the future to replace a previously
registered model.
"""
try:
admin_site.register(model, admin_class)
except admin.sites.AlreadyRegistered:
pass
class MetadataFormset(generic.BaseGenericInlineFormSet):
def _construct_form(self, i, **kwargs):
""" Override the method to change the form attribute empty_permitted """
form = super(MetadataFormset, self)._construct_form(i, **kwargs)
# Monkey patch the form to always force a save.
        # It's unfortunate, but necessary because we always want an instance.
        # The effect on performance shouldn't be too great, because there is
        # only ever one metadata instance attached.
form.empty_permitted = False
form.has_changed = lambda: True
# Set a marker on this object to prevent automatic metadata creation
# This is seen by the post_save handler, which then skips this instance.
if self.instance:
self.instance.__seo_metadata_handled = True
return form
def get_inline(metadata_class):
attrs = {
'max_num': 1,
'extra': 1,
'model': metadata_class._meta.get_model('modelinstance'),
'ct_field': "_content_type",
'ct_fk_field': "_object_id",
'formset': MetadataFormset,
}
return type('MetadataInline', (generic.GenericStackedInline,), attrs)
def get_model_form(metadata_class):
model_class = metadata_class._meta.get_model('model')
# Restrict content type choices to the models set in seo_models
content_types = get_seo_content_types(metadata_class._meta.seo_models)
content_type_choices = [(x._get_pk_val(), smart_unicode(x)) for x in ContentType.objects.filter(id__in=content_types)]
# Get a list of fields, with _content_type at the start
important_fields = ['_content_type'] + core_choice_fields(metadata_class)
_fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()
class ModelMetadataForm(forms.ModelForm):
_content_type = forms.ChoiceField(label=capfirst(_("model")), choices=content_type_choices)
class Meta:
model = model_class
fields = _fields
def clean__content_type(self):
value = self.cleaned_data['_content_type']
try:
return ContentType.objects.get(pk=int(value))
except (ContentType.DoesNotExist, ValueError):
raise forms.ValidationError("Invalid ContentType")
return ModelMetadataForm
def get_path_form(metadata_class):
model_class = metadata_class._meta.get_model('path')
    # Get a list of fields, with _path at the start
important_fields = ['_path'] + core_choice_fields(metadata_class)
_fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()
class ModelMetadataForm(forms.ModelForm):
class Meta:
model = model_class
fields = _fields
return ModelMetadataForm
def get_view_form(metadata_class):
model_class = metadata_class._meta.get_model('view')
    # Restrict the view choices to the system views known to this metadata class
view_choices = [(key, " ".join(key.split("_"))) for key in get_seo_views(metadata_class)]
view_choices.insert(0, ("", "---------"))
# Get a list of fields, with _view at the start
important_fields = ['_view'] + core_choice_fields(metadata_class)
_fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()
class ModelMetadataForm(forms.ModelForm):
_view = forms.ChoiceField(label=capfirst(_("view")), choices=view_choices, required=False)
class Meta:
model = model_class
fields = _fields
return ModelMetadataForm
def core_choice_fields(metadata_class):
""" If the 'optional' core fields (_site and _language) are required,
list them here.
"""
fields = []
if metadata_class._meta.use_sites:
fields.append('_site')
if metadata_class._meta.use_i18n:
fields.append('_language')
return fields
def _monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site):
""" Monkey patch the inline onto the given admin_class instance. """
if model in metadata_class._meta.seo_models:
# *Not* adding to the class attribute "inlines", as this will affect
# all instances from this class. Explicitly adding to instance attribute.
admin_class_instance.__dict__['inlines'] = admin_class_instance.inlines + [inline_class]
        # Because we've missed the original registration, we need to repeat the
        # actions that were performed then (at admin class instantiation).
inline_instance = inline_class(admin_class_instance.model, admin_site)
admin_class_instance.inline_instances.append(inline_instance)
def _with_inline(func, admin_site, metadata_class, inline_class):
""" Decorator for register function that adds an appropriate inline."""
def register(model_or_iterable, admin_class=None, **options):
# Call the (bound) function we were given.
# We have to assume it will be bound to admin_site
func(model_or_iterable, admin_class, **options)
_monkey_inline(model_or_iterable, admin_site._registry[model_or_iterable], metadata_class, inline_class, admin_site)
return register
def auto_register_inlines(admin_site, metadata_class):
""" This is a questionable function that automatically adds our metadata
inline to all relevant models in the site.
"""
inline_class = get_inline(metadata_class)
for model, admin_class_instance in admin_site._registry.items():
_monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site)
# Monkey patch the register method to automatically add an inline for this site.
# _with_inline() is a decorator that wraps the register function with the same injection code
# used above (_monkey_inline).
admin_site.register = _with_inline(admin_site.register, admin_site, metadata_class, inline_class)
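# Illustrative usage sketch (hypothetical helper, not called anywhere in this
# module): how the two public entry points above are typically wired together
# for a concrete seo.Metadata subclass.
def example_seo_admin_setup(admin_site, metadata_class):
    """Register the metadata admin pages and inject inlines into existing ModelAdmins."""
    # Admin pages for the path / model instance / model / view metadata variants.
    register_seo_admin(admin_site, metadata_class)
    # Add a metadata inline to every registered ModelAdmin whose model is listed
    # in seo_models, and patch admin_site.register so later registrations get it too.
    auto_register_inlines(admin_site, metadata_class)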
|
|
"""Support for WeMo switches."""
import asyncio
from datetime import datetime, timedelta
import logging
import async_timeout
from pywemo import discovery
import requests
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import STATE_OFF, STATE_ON, STATE_STANDBY, STATE_UNKNOWN
from homeassistant.exceptions import PlatformNotReady
from homeassistant.util import convert
from . import SUBSCRIPTION_REGISTRY
from .const import DOMAIN
SCAN_INTERVAL = timedelta(seconds=10)
_LOGGER = logging.getLogger(__name__)
ATTR_SENSOR_STATE = "sensor_state"
ATTR_SWITCH_MODE = "switch_mode"
ATTR_CURRENT_STATE_DETAIL = "state_detail"
ATTR_COFFEMAKER_MODE = "coffeemaker_mode"
MAKER_SWITCH_MOMENTARY = "momentary"
MAKER_SWITCH_TOGGLE = "toggle"
WEMO_ON = 1
WEMO_OFF = 0
WEMO_STANDBY = 8
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up discovered WeMo switches."""
if discovery_info is not None:
location = discovery_info["ssdp_description"]
mac = discovery_info["mac_address"]
try:
device = discovery.device_from_description(location, mac)
except (
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
) as err:
_LOGGER.error("Unable to access %s (%s)", location, err)
raise PlatformNotReady
if device:
add_entities([WemoSwitch(device)])
class WemoSwitch(SwitchDevice):
"""Representation of a WeMo switch."""
def __init__(self, device):
"""Initialize the WeMo switch."""
self.wemo = device
self.insight_params = None
self.maker_params = None
self.coffeemaker_mode = None
self._state = None
self._mode_string = None
self._available = True
self._update_lock = None
self._model_name = self.wemo.model_name
self._name = self.wemo.name
self._serialnumber = self.wemo.serialnumber
def _subscription_callback(self, _device, _type, _params):
"""Update the state by the Wemo device."""
_LOGGER.info("Subscription update for %s", self.name)
updated = self.wemo.subscription_update(_type, _params)
self.hass.add_job(self._async_locked_subscription_callback(not updated))
async def _async_locked_subscription_callback(self, force_update):
"""Handle an update from a subscription."""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
await self._async_locked_update(force_update)
self.async_schedule_update_ha_state()
@property
def unique_id(self):
"""Return the ID of this WeMo switch."""
return self._serialnumber
@property
def name(self):
"""Return the name of the switch if any."""
return self._name
@property
def device_info(self):
"""Return the device info."""
return {"name": self._name, "identifiers": {(DOMAIN, self._serialnumber)}}
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {}
if self.maker_params:
# Is the maker sensor on or off.
if self.maker_params["hassensor"]:
# Note a state of 1 matches the WeMo app 'not triggered'!
if self.maker_params["sensorstate"]:
attr[ATTR_SENSOR_STATE] = STATE_OFF
else:
attr[ATTR_SENSOR_STATE] = STATE_ON
# Is the maker switch configured as toggle(0) or momentary (1).
if self.maker_params["switchmode"]:
attr[ATTR_SWITCH_MODE] = MAKER_SWITCH_MOMENTARY
else:
attr[ATTR_SWITCH_MODE] = MAKER_SWITCH_TOGGLE
if self.insight_params or (self.coffeemaker_mode is not None):
attr[ATTR_CURRENT_STATE_DETAIL] = self.detail_state
if self.insight_params:
attr["on_latest_time"] = WemoSwitch.as_uptime(self.insight_params["onfor"])
attr["on_today_time"] = WemoSwitch.as_uptime(self.insight_params["ontoday"])
attr["on_total_time"] = WemoSwitch.as_uptime(self.insight_params["ontotal"])
attr["power_threshold_w"] = (
convert(self.insight_params["powerthreshold"], float, 0.0) / 1000.0
)
if self.coffeemaker_mode is not None:
attr[ATTR_COFFEMAKER_MODE] = self.coffeemaker_mode
return attr
@staticmethod
def as_uptime(_seconds):
"""Format seconds into uptime string in the format: 00d 00h 00m 00s."""
uptime = datetime(1, 1, 1) + timedelta(seconds=_seconds)
return "{:0>2d}d {:0>2d}h {:0>2d}m {:0>2d}s".format(
uptime.day - 1, uptime.hour, uptime.minute, uptime.second
)
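    # Example: as_uptime(3905) returns "00d 01h 05m 05s" (1 hour, 5 minutes, 5 seconds).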
@property
def current_power_w(self):
"""Return the current power usage in W."""
if self.insight_params:
return convert(self.insight_params["currentpower"], float, 0.0) / 1000.0
@property
def today_energy_kwh(self):
"""Return the today total energy usage in kWh."""
if self.insight_params:
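            # "todaymw" appears to be reported in milliwatt-minutes (an assumption
            # inferred from this conversion): /1000/1000 gives kilowatt-minutes,
            # /60 gives kilowatt-hours.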
miliwatts = convert(self.insight_params["todaymw"], float, 0.0)
return round(miliwatts / (1000.0 * 1000.0 * 60), 2)
@property
def detail_state(self):
"""Return the state of the device."""
if self.coffeemaker_mode is not None:
return self._mode_string
if self.insight_params:
standby_state = int(self.insight_params["state"])
if standby_state == WEMO_ON:
return STATE_ON
if standby_state == WEMO_OFF:
return STATE_OFF
if standby_state == WEMO_STANDBY:
return STATE_STANDBY
return STATE_UNKNOWN
@property
def is_on(self):
"""Return true if switch is on. Standby is on."""
return self._state
@property
def available(self):
"""Return true if switch is available."""
return self._available
@property
def icon(self):
"""Return the icon of device based on its type."""
if self._model_name == "CoffeeMaker":
return "mdi:coffee"
return None
def turn_on(self, **kwargs):
"""Turn the switch on."""
self.wemo.on()
def turn_off(self, **kwargs):
"""Turn the switch off."""
self.wemo.off()
async def async_added_to_hass(self):
"""Wemo switch added to HASS."""
# Define inside async context so we know our event loop
self._update_lock = asyncio.Lock()
registry = SUBSCRIPTION_REGISTRY
await self.hass.async_add_job(registry.register, self.wemo)
registry.on(self.wemo, None, self._subscription_callback)
async def async_update(self):
"""Update WeMo state.
Wemo has an aggressive retry logic that sometimes can take over a
minute to return. If we don't get a state after 5 seconds, assume the
Wemo switch is unreachable. If update goes through, it will be made
available again.
"""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
try:
with async_timeout.timeout(5):
await asyncio.shield(self._async_locked_update(True))
except asyncio.TimeoutError:
_LOGGER.warning("Lost connection to %s", self.name)
self._available = False
async def _async_locked_update(self, force_update):
"""Try updating within an async lock."""
async with self._update_lock:
await self.hass.async_add_job(self._update, force_update)
def _update(self, force_update):
"""Update the device state."""
try:
self._state = self.wemo.get_state(force_update)
if self._model_name == "Insight":
self.insight_params = self.wemo.insight_params
self.insight_params["standby_state"] = self.wemo.get_standby_state
elif self._model_name == "Maker":
self.maker_params = self.wemo.maker_params
elif self._model_name == "CoffeeMaker":
self.coffeemaker_mode = self.wemo.mode
self._mode_string = self.wemo.mode_string
if not self._available:
_LOGGER.info("Reconnected to %s", self.name)
self._available = True
except AttributeError as err:
_LOGGER.warning("Could not update status for %s (%s)", self.name, err)
self._available = False
|
|
from multiprocessing import Process
from threading import Timer
import json
import os
import time
import argparse
from adb import ADBException, ADBConnection
from utils import get_monitoring_methods, extract_method_classes
from jdwp import JDWPConnection, JDWPHelper
def monitor_func(device_id, apk_path_list, droidbot_out_dir,
trace_comparator_out_dir, output_dir,
timeout, tracing_interval, interval, is_emulator):
"""
test apks on the assigned vm/device
"""
for apk_path in apk_path_list:
apk_label = apk_path.split("/")[-1][:-len(".apk")]
full_output_dir = "%s/%s/" % (output_dir, device_id)
if os.system("mkdir -p %s" % full_output_dir):
print "failed mkdir -p %s" % full_output_dir
continue
app_droidbot_out_dir = "%s/%s" % (droidbot_out_dir, apk_label)
# get package name by dumpsys_package file name
package_name = [x for x in os.walk(app_droidbot_out_dir).next()[2]
if x.startswith("dumpsys")][0][len("dumpsys_package_"):-len(".txt")]
# get event sequences by events/event_*.json
droidbot_events = []
event_paths = sorted([os.path.join(app_droidbot_out_dir, "events", x) for x in
os.walk(os.path.join(app_droidbot_out_dir, "events")).next()[2]
if x.endswith(".json")])
# skip the first HOME KEY event
for event_path in event_paths[1:]:
with open(event_path, "r") as event_file:
droidbot_events.append(json.load(event_file)["event"])
# get monitoring method list from trace_comparator_output_dir
# tid: methods
monitoring_methods_list = []
comparator_result_paths = ["%s/%s" % (trace_comparator_out_dir, x)
for x in os.walk(trace_comparator_out_dir).next()[2]
if x.startswith(apk_label)]
comparator_result_labels = sorted([x.split("/")[-1][:-len(".json")] for x in comparator_result_paths])
for comparator_result_path in comparator_result_paths:
with open(comparator_result_path, "r") as comparator_result_file:
comparator_result = json.load(comparator_result_file)
unmatched_idx = "emulator" if is_emulator else "real_device"
matched_idx = "emu_api" if is_emulator else "real_api"
monitoring_methods = set()
for thread_info in comparator_result["unmatched_threads"][unmatched_idx]:
unmatched_methods = get_monitoring_methods(thread_info["api"])
monitoring_methods = monitoring_methods.union(unmatched_methods)
for thread_info in comparator_result["matched_threads"]:
if thread_info[matched_idx] is not None:
matched_methods = get_monitoring_methods(thread_info[matched_idx])
monitoring_methods = monitoring_methods.union(matched_methods)
monitoring_methods_list.append(monitoring_methods)
        # initialize the ADB connection
adb = ADBConnection(device_id)
# install the app, set debug-app and start app
adb.run_cmd("install -r -g %s" % apk_path)
adb.shell("am set-debug-app -w %s" % package_name)
adb.shell("am start %s" % droidbot_events[0]["intent"].split()[-1])
# adb forward
app_pid = adb.get_app_pid(package_name)
print "%s pid=%d" % (package_name, app_pid)
port = 7335 if is_emulator else 7336
adb.forward(app_pid, port)
# jdwp init & attach
jdwp = JDWPConnection("localhost", port)
jdwp.start()
jdwp_helper = JDWPHelper(jdwp)
trace_result = []
for event_idx, monitoring_methods in enumerate(monitoring_methods_list):
print droidbot_events[event_idx]
# suspend vm for configuration
jdwp_helper.VirtualMachine_Suspend()
# prepare classes to listen, and listen to them
class_list = extract_method_classes(monitoring_methods)
jdwp_helper.update_class_method_info_by_class_names(class_list)
event_ids = []
for class_pattern in class_list:
ent = jdwp_helper.EventRequest_Set_METHOD_ENTRY(class_pattern)
ext = jdwp_helper.EventRequest_Set_METHOD_EXIT_WITH_RETURN_VALUE(class_pattern)
pre = jdwp_helper.EventRequest_Set_CLASS_PREPARE(class_pattern)
event_ids.append(ent)
event_ids.append(ext)
event_ids.append(pre)
# start sampling
jdwp.plug()
jdwp_helper.VirtualMachine_Resume()
            # fire the event; the first event (the app-start intent) was already sent above
if event_idx > 0:
droidbot_event = droidbot_events[event_idx]
if droidbot_event["event_type"] == "touch":
adb.shell("input tap %d %d" % (droidbot_event["x"], droidbot_event["y"]))
elif droidbot_event["event_type"] == "intent":
adb.shell(droidbot_event["intent"])
elif droidbot_event["event_type"] == "key":
adb.shell("input keyevent %s" % droidbot_event["name"])
time.sleep(tracing_interval)
# stop sampling
jdwp.unplug()
time.sleep(interval)
# clear plugs
for event_id in event_ids:
jdwp_helper.EventRequest_Clear(event_id[0], event_id[1])
parsed_result = jdwp_helper.parse_cmd_packets(jdwp.get_cmd_packets())
#print monitoring_methods
#print parsed_result[0]
trace_result.append([x for x in parsed_result if x["classMethodName"] in monitoring_methods])
with open("%s/%s.json" % (full_output_dir, comparator_result_labels[event_idx]), "w") as trace_result_file:
json.dump({
"package_name": package_name,
"monitor_result": trace_result[event_idx]
}, trace_result_file, indent=2)
# jdwp un-attach
jdwp.stop()
# uninstall the app
adb.run_cmd(["uninstall", package_name])
def run(config_json_path):
"""
parse config file
assign work to multiple phases
"""
config_json = json.load(open(os.path.abspath(config_json_path), "r"))
real_device_droidbot_out_dir = os.path.abspath(config_json["real_device_droidbot_out_dir"])
emulator_droidbot_out_dir = os.path.abspath(config_json["emulator_droidbot_out_dir"])
trace_comparator_out_dir = os.path.abspath(config_json["trace_comparator_out_dir"])
output_dir = os.path.abspath(config_json["output_dir"])
real_device_id = config_json["real_device_id"]
emulator_id = config_json["emulator_id"]
apk_dir = os.path.abspath(config_json["apk_dir"])
apk_path_list = ["%s/%s" % (apk_dir, x) for x in
[x for x in os.walk(apk_dir).next()[2] if x.endswith("apk")]]
timeout = config_json["timeout"]
tracing_interval = config_json["tracing_interval"]
interval = config_json["interval"]
# start monitors
real_device_monitor = Process(target=monitor_func, args=(
real_device_id, apk_path_list, real_device_droidbot_out_dir,
trace_comparator_out_dir, output_dir,
timeout, tracing_interval, interval, False))
emulator_monitor = Process(target=monitor_func, args=(
emulator_id, apk_path_list, emulator_droidbot_out_dir,
trace_comparator_out_dir, output_dir,
timeout, tracing_interval, interval, True))
real_device_monitor.start()
emulator_monitor.start()
real_device_monitor.join()
emulator_monitor.join()
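# Illustrative example of trace_monitor_config.json (keys taken from run() above;
# all values are placeholders):
# {
#     "real_device_droidbot_out_dir": "/path/to/droidbot_out_real",
#     "emulator_droidbot_out_dir": "/path/to/droidbot_out_emu",
#     "trace_comparator_out_dir": "/path/to/trace_comparator_out",
#     "output_dir": "/path/to/trace_monitor_out",
#     "real_device_id": "<adb serial of the real device>",
#     "emulator_id": "emulator-5554",
#     "apk_dir": "/path/to/apks",
#     "timeout": 300,
#     "tracing_interval": 5,
#     "interval": 2
# }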
def parse_args():
"""
parse command line input
"""
parser = argparse.ArgumentParser(description="Monitor trace details including return values")
parser.add_argument("-c", action="store", dest="config_json_path",
required=True, help="path/to/trace_monitor_config.json")
options = parser.parse_args()
return options
def main():
"""
the main function
"""
opts = parse_args()
run(opts.config_json_path)
return
if __name__ == "__main__":
main()
|
|
import re
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class ChunkedReader(object):
def __init__(self, req, unreader):
self.req = req
self.parser = self.parse_chunked(unreader)
self.buf = StringIO()
def read(self, size=None):
if size == 0:
return ""
if size < 0:
size = None
if not self.parser:
return self.buf.getvalue()
while size is None or self.buf.tell() < size:
try:
self.buf.write(self.parser.next())
except StopIteration:
self.parser = None
break
if size is None or self.buf.tell() < size:
ret = self.buf.getvalue()
self.buf.truncate(0)
return ret
data = self.buf.getvalue()
ret, rest = data[:size], data[size:]
self.buf.truncate(0)
self.buf.write(rest)
return ret
def parse_trailers(self, unreader, data):
buf = StringIO()
buf.write(data)
idx = buf.getvalue().find("\r\n\r\n")
done = buf.getvalue()[:2] == "\r\n"
while idx < 0 and not done:
self.get_data(unreader, buf)
idx = buf.getvalue().find("\r\n\r\n")
done = buf.getvalue()[:2] == "\r\n"
if done:
unreader.unread(buf.getvalue()[2:])
return ""
self.req.trailers = self.req.parse_headers(buf.getvalue()[:idx])
unreader.unread(buf.getvalue()[idx+4:])
def parse_chunked(self, unreader):
(size, rest) = self.parse_chunk_size(unreader)
while size > 0:
while size > len(rest):
size -= len(rest)
yield rest
rest = unreader.read()
if not rest:
raise NoMoreData()
yield rest[:size]
# Remove \r\n after chunk
rest = rest[size:]
while len(rest) < 2:
rest += unreader.read()
if rest[:2] != '\r\n':
raise ChunkMissingTerminator(rest[:2])
(size, rest) = self.parse_chunk_size(unreader, data=rest[2:])
def parse_chunk_size(self, unreader, data=None):
buf = StringIO()
if data is not None:
buf.write(data)
idx = buf.getvalue().find("\r\n")
while idx < 0:
self.get_data(unreader, buf)
idx = buf.getvalue().find("\r\n")
data = buf.getvalue()
line, rest_chunk = data[:idx], data[idx+2:]
chunk_size = line.split(";", 1)[0].strip()
try:
chunk_size = int(chunk_size, 16)
except ValueError:
raise InvalidChunkSize(chunk_size)
if chunk_size == 0:
self.parse_trailers(unreader, rest_chunk)
return (0, None)
return (chunk_size, rest_chunk)
def get_data(self, unreader, buf):
data = unreader.read()
if not data:
raise NoMoreData()
buf.write(data)
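# Illustrative sketch (not used by the reader classes): build a chunked-encoded
# body by hand, showing the framing that parse_chunked()/parse_chunk_size()
# consume: "<hex size>\r\n<data>\r\n" per chunk, a zero-sized chunk to finish,
# then optional trailer headers handled by parse_trailers().
def _example_encode_chunked(chunks):
    parts = []
    for chunk in chunks:
        parts.append("%x\r\n%s\r\n" % (len(chunk), chunk))
    parts.append("0\r\n\r\n")
    return "".join(parts)
# _example_encode_chunked(["Wiki", "pedia"]) == "4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"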
class LengthReader(object):
def __init__(self, unreader, length):
self.unreader = unreader
self.length = length
def read(self, size=None):
if size is not None and not isinstance(size, (int, long)):
raise TypeError("size must be an integral type")
if size == 0 or self.length <= 0:
return ""
if size < 0 or size is None:
size = self.length
buf = StringIO()
data = self.unreader.read()
while data:
buf.write(data)
if buf.tell() >= size:
break
data = self.unreader.read()
buf = buf.getvalue()
ret, rest = buf[:size], buf[size:]
self.unreader.unread(rest)
self.length -= size
return ret
class EOFReader(object):
def __init__(self, unreader):
self.unreader = unreader
self.buf = StringIO()
self.finished = False
def read(self, size=None):
if size == 0 or self.finished:
return ""
if size < 0:
size = None
data = self.unreader.read()
while data:
            self.buf.write(data)
            if size is not None and self.buf.tell() > size:
                data = self.buf.getvalue()
ret, rest = data[:size], data[size:]
self.buf.truncate(0)
self.buf.write(rest)
return ret
data = self.unreader.read()
self.finished = True
ret = self.buf.getvalue()
self.buf.truncate(0)
return ret
class Body(object):
def __init__(self, reader):
self.reader = StringIO(reader)
self.buf = StringIO()
def __iter__(self):
return self
def next(self):
ret = self.readline()
if not ret:
raise StopIteration()
return ret
def read(self, size=None):
if size == 0:
return ''
if size is not None and not isinstance(size, (int, long)):
raise TypeError("size must be an integral type")
if size < 0:
ret = self.reader.read()
return ret
if size is not None and size < self.buf.tell():
data = self.buf.getvalue()
ret, rest = data[:size], data[size:]
self.buf.truncate(0)
self.buf.write(rest)
return ret
if size > 0:
size -= self.buf.tell()
else:
size = None
        if size is None:
ret = self.buf.getvalue() + self.reader.read()
else:
ret = self.buf.getvalue() + self.reader.read(size)
self.buf.truncate(0)
return ret
def readline(self, size=None):
if size == 0:
return ""
if size < 0:
size = None
idx = -1
while idx < 0:
data = self.reader.read(1024)
if not len(data):
break
self.buf.write(data)
if size is not None and self.buf.tell() > size:
break
idx = self.buf.getvalue().find("\r\n")
if idx < 0 and size is not None:
idx = size
elif idx < 0:
idx = self.buf.tell()
data = self.buf.getvalue()
ret, rest = data[:idx], data[idx:]
self.buf.truncate(0)
self.buf.write(rest)
return ret
def readlines(self, size=None):
ret = []
data = self.read()
while len(data):
pos = data.find("\n")
if pos < 0:
ret.append(data)
data = ""
else:
line, data = data[:pos+1], data[pos+1:]
ret.append(line)
return ret
|
|
import sys
import os
import argparse
import json
import pickle
import functools
from itertools import chain
from . import delphin
from . import config
from . import gram
"""typediff.py
Author: Ned Letcher
https://github.com/ned2/typediff
Typediff is a tool to allow you to quickly explore the types used in
the processing of input by DELPH-IN grammars.
"""
HELP = """Usage:
$ typediff [options] GRAMMAR_NAME pos_sent1 pos_sent2 ... @ neg_sent1 neg_sent2 ...
Options:
The remainder of the options are only relevant to the command line mode:
-d
Operate in difference mode (default).
-i
Operate in intersection mode.
-u
Operate in union mode.
--count N
The number of trees ACE is limited to returning.
--profiles
--fragments
Include fragment readings (only supported by ERG currently).
--all
Take types from all of the parses returned by ACE instead of just the best.
--supers
Include the super types in the output.
--raw
Don't sort and colorize the list of types.
"""
# TODO
# move help text into argparse
# update various config files to reflect LOGONROOT variable
def argparser():
argparser = argparse.ArgumentParser()
argparser.add_argument("grammar", metavar="GRAMMAR NAME")
argparser.add_argument("--count", default=10)
argparser.add_argument("--all", action='store_true')
argparser.add_argument("--tagger")
argparser.add_argument("--fragments", action='store_true')
argparser.add_argument("--supers", action='store_true')
argparser.add_argument("--profiles", action='store_true')
argparser.add_argument("--raw", action='store_true')
group = argparser.add_mutually_exclusive_group(required=False)
group.add_argument("-i", action='store_true')
group.add_argument("-d", action='store_true')
group.add_argument("-u", action='store_true')
argparser.add_argument("sentences", nargs=argparse.REMAINDER)
return argparser
class ColorText(object):
WHITE = '\033[97m'
CYAN = '\033[96m'
PURPLE = '\033[95m'
BLUE = '\033[94m'
YELLOW = '\033[93m'
GREEN = '\033[92m'
RED = '\033[91m'
BLACK = '\033[90m'
END = '\033[0m'
def __init__(self, text, color):
self.text = text
self.color = getattr(self, color.upper())
def __str__(self):
return ''.join((self.color, self.text, self.END))
def pretty_print_types(types, hierarchy):
"""
Print the type list to the terminal, sorting and colorizing as
specified by the TYPES variable.
"""
def descendants(s):
if s == 'other':
return []
else:
return set(t.name for t in hierarchy[s].descendants())
kinds = [(descendants(t), col) for t, _rgba, col in config.TYPES]
def keyfunc(t):
for i, x in enumerate(kinds):
if t.lstrip('^') in kinds[i][0]:
return i
return 1000
types.sort(key=keyfunc)
output = []
for t in types:
for ds, col in kinds:
if t.lstrip('^') in ds:
output.append(str(ColorText(t, col)))
break
else:
output.append(t)
return '\n'.join(output)
def compare_types(pos_types, neg_types, arg):
if arg.d:
types = pos_types - neg_types
elif arg.i:
# currently only works when there are pos and neg items
types = set.intersection(pos_types, neg_types)
else:
types = pos_types | neg_types
return types
@functools.lru_cache(maxsize=32)
def get_hierarchy(grammar):
return delphin.load_hierarchy(grammar.types_path)
@functools.lru_cache(maxsize=32)
def load_descendants(grammar):
hierarchy = get_hierarchy(grammar)
desc_func = lambda x: set(t.name for t in hierarchy[x].descendants())
kinds = [name for name, _rgba, _col in config.TYPES if name != 'other']
descendants = {}
for kind in kinds:
for t in desc_func(kind):
descendants[t] = kind
return descendants
def type_data():
return {t:{'rank':i+1, 'col':rgba}
for i, (t, rgba, _col) in enumerate(config.TYPES)}
def typediff_web(pos_items, neg_items, opts):
data = {
'pos-items' : pos_items,
'neg-items' : neg_items,
'descendants' : load_descendants(opts.grammar) if opts.desc else False,
'typeData': type_data(),
'grammar': opts.grammar.alias,
'treebank': opts.treebank,
}
if opts.supers:
hierarchy = get_hierarchy(opts.grammar)
for item in chain(pos_items, neg_items):
item.load_supers(hierarchy)
return data
def typediff(pos_items, neg_items, opts):
"""pos_items and neg_items are lists of either Fragment or Reading objects"""
# currently assuming that the Reading objects are only coming from gold
    # profiles, therefore only one per item. otherwise we'd need to be using a
    # list of Reading objects or probably could be defining a ProfileItem
# class that emulates the relevant interface to Fragment
tfunc = lambda x:x.types.keys() if opts.all else x.best.types.keys()
pos_types = set(chain.from_iterable(tfunc(x) for x in pos_items))
neg_types = set(chain.from_iterable(tfunc(x) for x in neg_items))
if len(pos_types) + len(neg_types) > 1:
typelist = list(compare_types(pos_types, neg_types, opts))
else:
typelist = list(max(pos_types, neg_types))
if opts.raw:
return '\n'.join(typelist)
hierarchy = delphin.load_hierarchy(opts.grammar.types_path)
if opts.supers:
        for group in (pos_items, neg_items):
            for item in group:
                item.load_supers(hierarchy)
        sfunc = lambda x:x.supers
        pos_supers = set(chain.from_iterable(sfunc(x) for x in pos_items))
        neg_supers = set(chain.from_iterable(sfunc(x) for x in neg_items))
supers = compare_types(pos_supers, neg_supers, opts)
typelist.extend('^'+t for t in supers)
return pretty_print_types(typelist, hierarchy)
def process_sentences(inputs, opts):
def process(sentence):
return delphin.Fragment(
sentence,
opts.grammar,
fragments=opts.fragments,
count=opts.count,
tnt=opts.get('tnt', False),
dat_path=opts.grammar.dat_path,
ace_path=config.ACEBIN,
typifier=config.TYPIFIERBIN,
logpath=config.LOGPATH
)
return [process(i) for i in inputs]
def process_profiles(queries, opts):
# assume queries is a string of the form: PROFILE_PATH:opt_tsql_query
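    # e.g. (illustrative) 'path/to/gold/profile' or 'path/to/gold/profile:i-length < 10'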
sep = ':'
items = []
# support both list of queries and single query
if isinstance(queries, str):
queries = [queries]
for query in queries:
if query.find(sep) >= 0:
path, condition = query.split(':')
condition = None if condition == '' else condition
else:
path = query
condition = None
items.extend(process_gold_profile(
path,
condition=condition,
grammar=opts.grammar,
))
return items
def process_gold_profile(path, condition=None, grammar=None):
return delphin.get_profile_results(
[path],
gold=True,
grammar=grammar,
condition=condition,
typifier=config.TYPIFIERBIN
)
def main():
arg = argparser().parse_args()
arg.grammar = gram.get_grammar(arg.grammar)
if '@' in arg.sentences and not (arg.u or arg.i or arg.d):
arg.d = True
pos, neg = [], []
# assign the inputs into pos and neg lists accordingly
stype = pos
for s in arg.sentences:
if s =='@':
stype = neg
else:
stype.append(s)
process_func = process_profiles if arg.profiles else process_sentences
pos_items = process_func(pos, arg)
neg_items = process_func(neg, arg)
result = typediff(pos_items, neg_items, arg)
print(result)
if __name__ == "__main__":
main()
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.extensions import portbindings
from neutron.plugins.ml2 import driver_api as api
from neutron.tests import base
NETWORK_ID = "fake_network"
PORT_ID = "fake_port"
class FakeNetworkContext(api.NetworkContext):
def __init__(self, segments):
self._network_segments = segments
@property
def current(self):
return {'id': NETWORK_ID}
@property
def original(self):
return None
@property
def network_segments(self):
return self._network_segments
class FakePortContext(api.PortContext):
def __init__(self, agent_type, agents, segments,
vnic_type=portbindings.VNIC_NORMAL):
self._agent_type = agent_type
self._agents = agents
self._network_context = FakeNetworkContext(segments)
self._bound_vnic_type = vnic_type
self._bound_segment_id = None
self._bound_vif_type = None
self._bound_vif_details = None
@property
def current(self):
return {'id': PORT_ID,
'binding:vnic_type': self._bound_vnic_type}
@property
def original(self):
return None
@property
def network(self):
return self._network_context
@property
def bound_segment(self):
if self._bound_segment_id:
for segment in self._network_context.network_segments:
if segment[api.ID] == self._bound_segment_id:
return segment
def host_agents(self, agent_type):
if agent_type == self._agent_type:
return self._agents
else:
return []
def set_binding(self, segment_id, vif_type, vif_details):
self._bound_segment_id = segment_id
self._bound_vif_type = vif_type
self._bound_vif_details = vif_details
class AgentMechanismBaseTestCase(base.BaseTestCase):
    # The following must be overridden for the specific mechanism
    # driver being tested (see the illustrative sketch after this class):
VIF_TYPE = None
CAP_PORT_FILTER = None
AGENT_TYPE = None
AGENTS = None
AGENTS_DEAD = None
AGENTS_BAD = None
def _check_unbound(self, context):
self.assertIsNone(context._bound_segment_id)
self.assertIsNone(context._bound_vif_type)
self.assertIsNone(context._bound_vif_details)
def _check_bound(self, context, segment):
self.assertEqual(context._bound_segment_id, segment[api.ID])
self.assertEqual(context._bound_vif_type, self.VIF_TYPE)
vif_details = context._bound_vif_details
self.assertIsNotNone(vif_details)
self.assertEqual(vif_details[portbindings.CAP_PORT_FILTER],
self.CAP_PORT_FILTER)
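# Illustrative sketch of a concrete subclass (hypothetical driver and agent
# fixtures; the real values come from the mechanism driver under test):
#
#   class OpenvswitchMechanismBaseTestCase(AgentMechanismBaseTestCase):
#       VIF_TYPE = portbindings.VIF_TYPE_OVS
#       CAP_PORT_FILTER = True
#       AGENT_TYPE = 'Open vSwitch agent'
#       AGENTS = [{'alive': True, 'configurations': {}}]
#       AGENTS_DEAD = [{'alive': False, 'configurations': {}}]
#       AGENTS_BAD = [{'alive': True, 'configurations': None}]
#
#       def setUp(self):
#           super(OpenvswitchMechanismBaseTestCase, self).setUp()
#           self.driver = mech_openvswitch.OpenvswitchMechanismDriver()
#           self.driver.initialize()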
class AgentMechanismGenericTestCase(AgentMechanismBaseTestCase):
UNKNOWN_TYPE_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'}]
def test_unknown_type(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.UNKNOWN_TYPE_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
class AgentMechanismLocalTestCase(AgentMechanismBaseTestCase):
LOCAL_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'},
{api.ID: 'local_segment_id',
api.NETWORK_TYPE: 'local'}]
def test_type_local(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.LOCAL_SEGMENTS)
self.driver.bind_port(context)
self._check_bound(context, self.LOCAL_SEGMENTS[1])
self.assertTrue(self.driver.validate_port_binding(context))
self.driver.unbind_port(context)
def test_type_local_dead(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_DEAD,
self.LOCAL_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
class AgentMechanismFlatTestCase(AgentMechanismBaseTestCase):
FLAT_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'},
{api.ID: 'flat_segment_id',
api.NETWORK_TYPE: 'flat',
api.PHYSICAL_NETWORK: 'fake_physical_network'}]
def test_type_flat(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.FLAT_SEGMENTS)
self.driver.bind_port(context)
self._check_bound(context, self.FLAT_SEGMENTS[1])
self.assertTrue(self.driver.validate_port_binding(context))
self.driver.unbind_port(context)
def test_type_flat_bad(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.FLAT_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
class AgentMechanismVlanTestCase(AgentMechanismBaseTestCase):
VLAN_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'},
{api.ID: 'vlan_segment_id',
api.NETWORK_TYPE: 'vlan',
api.PHYSICAL_NETWORK: 'fake_physical_network',
api.SEGMENTATION_ID: 1234}]
def test_type_vlan(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.VLAN_SEGMENTS)
self.driver.bind_port(context)
self._check_bound(context, self.VLAN_SEGMENTS[1])
self.assertTrue(self.driver.validate_port_binding(context))
self.driver.unbind_port(context)
def test_type_vlan_bad(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.VLAN_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
class AgentMechanismGreTestCase(AgentMechanismBaseTestCase):
GRE_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'},
{api.ID: 'gre_segment_id',
api.NETWORK_TYPE: 'gre',
api.SEGMENTATION_ID: 1234}]
def test_type_gre(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.GRE_SEGMENTS)
self.driver.bind_port(context)
self._check_bound(context, self.GRE_SEGMENTS[1])
self.assertTrue(self.driver.validate_port_binding(context))
self.driver.unbind_port(context)
def test_type_gre_bad(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.GRE_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import logging
import os
from collections import defaultdict, Mapping
import third_party.json_schema_compiler.json_parse as json_parse
import third_party.json_schema_compiler.model as model
import third_party.json_schema_compiler.idl_schema as idl_schema
import third_party.json_schema_compiler.idl_parser as idl_parser
def _RemoveNoDocs(item):
if json_parse.IsDict(item):
if item.get('nodoc', False):
return True
for key, value in item.items():
if _RemoveNoDocs(value):
del item[key]
elif type(item) == list:
to_remove = []
for i in item:
if _RemoveNoDocs(i):
to_remove.append(i)
for i in to_remove:
item.remove(i)
return False
def _DetectInlineableTypes(schema):
"""Look for documents that are only referenced once and mark them as inline.
Actual inlining is done by _InlineDocs.
"""
if not schema.get('types'):
return
ignore = frozenset(('value', 'choices'))
refcounts = defaultdict(int)
# Use an explicit stack instead of recursion.
stack = [schema]
while stack:
node = stack.pop()
if isinstance(node, list):
stack.extend(node)
elif isinstance(node, Mapping):
if '$ref' in node:
refcounts[node['$ref']] += 1
stack.extend(v for k, v in node.iteritems() if k not in ignore)
for type_ in schema['types']:
    if 'noinline_doc' not in type_:
if refcounts[type_['id']] == 1:
type_['inline_doc'] = True
def _InlineDocs(schema):
"""Replace '$ref's that refer to inline_docs with the json for those docs.
"""
types = schema.get('types')
if types is None:
return
inline_docs = {}
types_without_inline_doc = []
# Gather the types with inline_doc.
for type_ in types:
if type_.get('inline_doc'):
inline_docs[type_['id']] = type_
for k in ('description', 'id', 'inline_doc'):
type_.pop(k, None)
else:
types_without_inline_doc.append(type_)
schema['types'] = types_without_inline_doc
def apply_inline(node):
if isinstance(node, list):
for i in node:
apply_inline(i)
elif isinstance(node, Mapping):
ref = node.get('$ref')
if ref and ref in inline_docs:
node.update(inline_docs[ref])
del node['$ref']
for k, v in node.iteritems():
apply_inline(v)
apply_inline(schema)
def _CreateId(node, prefix):
if node.parent is not None and not isinstance(node.parent, model.Namespace):
return '-'.join([prefix, node.parent.simple_name, node.simple_name])
return '-'.join([prefix, node.simple_name])
def _FormatValue(value):
"""Inserts commas every three digits for integer values. It is magic.
"""
s = str(value)
return ','.join([s[max(0, i - 3):i] for i in range(len(s), 0, -3)][::-1])
class _JSCModel(object):
"""Uses a Model from the JSON Schema Compiler and generates a dict that
a Handlebar template can use for a data source.
"""
def __init__(self, json, ref_resolver, disable_refs, idl=False):
self._ref_resolver = ref_resolver
self._disable_refs = disable_refs
clean_json = copy.deepcopy(json)
if _RemoveNoDocs(clean_json):
self._namespace = None
else:
if idl:
_DetectInlineableTypes(clean_json)
_InlineDocs(clean_json)
self._namespace = model.Namespace(clean_json, clean_json['namespace'])
def _FormatDescription(self, description):
if self._disable_refs:
return description
return self._ref_resolver.ResolveAllLinks(description,
namespace=self._namespace.name)
def _GetLink(self, link):
if self._disable_refs:
type_name = link.split('.', 1)[-1]
return { 'href': '#type-%s' % type_name, 'text': link, 'name': link }
return self._ref_resolver.SafeGetLink(link, namespace=self._namespace.name)
def ToDict(self):
if self._namespace is None:
return {}
return {
'name': self._namespace.name,
'types': self._GenerateTypes(self._namespace.types.values()),
'functions': self._GenerateFunctions(self._namespace.functions),
'events': self._GenerateEvents(self._namespace.events),
'properties': self._GenerateProperties(self._namespace.properties)
}
def _GenerateTypes(self, types):
return [self._GenerateType(t) for t in types]
def _GenerateType(self, type_):
type_dict = {
'name': type_.simple_name,
'description': self._FormatDescription(type_.description),
'properties': self._GenerateProperties(type_.properties),
'functions': self._GenerateFunctions(type_.functions),
'events': self._GenerateEvents(type_.events),
'id': _CreateId(type_, 'type')
}
self._RenderTypeInformation(type_, type_dict)
return type_dict
def _GenerateFunctions(self, functions):
return [self._GenerateFunction(f) for f in functions.values()]
def _GenerateFunction(self, function):
function_dict = {
'name': function.simple_name,
'description': self._FormatDescription(function.description),
'callback': self._GenerateCallback(function.callback),
'parameters': [],
'returns': None,
'id': _CreateId(function, 'method')
}
if (function.parent is not None and
not isinstance(function.parent, model.Namespace)):
function_dict['parent_name'] = function.parent.simple_name
if function.returns:
function_dict['returns'] = self._GenerateType(function.returns)
for param in function.params:
function_dict['parameters'].append(self._GenerateProperty(param))
if function.callback is not None:
# Show the callback as an extra parameter.
function_dict['parameters'].append(
self._GenerateCallbackProperty(function.callback))
if len(function_dict['parameters']) > 0:
function_dict['parameters'][-1]['last'] = True
return function_dict
def _GenerateEvents(self, events):
return [self._GenerateEvent(e) for e in events.values()]
def _GenerateEvent(self, event):
event_dict = {
'name': event.simple_name,
'description': self._FormatDescription(event.description),
'parameters': [self._GenerateProperty(p) for p in event.params],
'callback': self._GenerateCallback(event.callback),
'filters': [self._GenerateProperty(f) for f in event.filters],
'conditions': [self._GetLink(condition)
for condition in event.conditions],
'actions': [self._GetLink(action) for action in event.actions],
'supportsRules': event.supports_rules,
'id': _CreateId(event, 'event')
}
if (event.parent is not None and
not isinstance(event.parent, model.Namespace)):
event_dict['parent_name'] = event.parent.simple_name
if event.callback is not None:
# Show the callback as an extra parameter.
event_dict['parameters'].append(
self._GenerateCallbackProperty(event.callback))
if len(event_dict['parameters']) > 0:
event_dict['parameters'][-1]['last'] = True
return event_dict
def _GenerateCallback(self, callback):
if not callback:
return None
callback_dict = {
'name': callback.simple_name,
'simple_type': {'simple_type': 'function'},
'optional': callback.optional,
'parameters': []
}
for param in callback.params:
callback_dict['parameters'].append(self._GenerateProperty(param))
if (len(callback_dict['parameters']) > 0):
callback_dict['parameters'][-1]['last'] = True
return callback_dict
def _GenerateProperties(self, properties):
return [self._GenerateProperty(v) for v in properties.values()]
def _GenerateProperty(self, property_):
if not hasattr(property_, 'type_'):
for d in dir(property_):
if not d.startswith('_'):
print ('%s -> %s' % (d, getattr(property_, d)))
type_ = property_.type_
# Make sure we generate property info for arrays, too.
# TODO(kalman): what about choices?
if type_.property_type == model.PropertyType.ARRAY:
properties = type_.item_type.properties
else:
properties = type_.properties
property_dict = {
'name': property_.simple_name,
'optional': property_.optional,
'description': self._FormatDescription(property_.description),
'properties': self._GenerateProperties(type_.properties),
'functions': self._GenerateFunctions(type_.functions),
'parameters': [],
'returns': None,
'id': _CreateId(property_, 'property')
}
if type_.property_type == model.PropertyType.FUNCTION:
function = type_.function
for param in function.params:
property_dict['parameters'].append(self._GenerateProperty(param))
if function.returns:
property_dict['returns'] = self._GenerateType(function.returns)
if (property_.parent is not None and
not isinstance(property_.parent, model.Namespace)):
property_dict['parent_name'] = property_.parent.simple_name
value = property_.value
if value is not None:
if isinstance(value, int):
property_dict['value'] = _FormatValue(value)
else:
property_dict['value'] = value
else:
self._RenderTypeInformation(type_, property_dict)
return property_dict
def _GenerateCallbackProperty(self, callback):
property_dict = {
'name': callback.simple_name,
'description': self._FormatDescription(callback.description),
'optional': callback.optional,
'id': _CreateId(callback, 'property'),
'simple_type': 'function',
}
if (callback.parent is not None and
not isinstance(callback.parent, model.Namespace)):
property_dict['parent_name'] = callback.parent.simple_name
return property_dict
def _RenderTypeInformation(self, type_, dst_dict):
dst_dict['is_object'] = type_.property_type == model.PropertyType.OBJECT
if type_.property_type == model.PropertyType.CHOICES:
dst_dict['choices'] = self._GenerateTypes(type_.choices)
# We keep track of which == last for knowing when to add "or" between
# choices in templates.
if len(dst_dict['choices']) > 0:
dst_dict['choices'][-1]['last'] = True
elif type_.property_type == model.PropertyType.REF:
dst_dict['link'] = self._GetLink(type_.ref_type)
elif type_.property_type == model.PropertyType.ARRAY:
dst_dict['array'] = self._GenerateType(type_.item_type)
elif type_.property_type == model.PropertyType.ENUM:
dst_dict['enum_values'] = []
for enum_value in type_.enum_values:
dst_dict['enum_values'].append({'name': enum_value})
if len(dst_dict['enum_values']) > 0:
dst_dict['enum_values'][-1]['last'] = True
elif type_.instance_of is not None:
dst_dict['simple_type'] = type_.instance_of.lower()
else:
dst_dict['simple_type'] = type_.property_type.name.lower()
class _LazySamplesGetter(object):
"""This class is needed so that an extensions API page does not have to fetch
the apps samples page and vice versa.
"""
def __init__(self, api_name, samples):
self._api_name = api_name
self._samples = samples
def get(self, key):
return self._samples.FilterSamples(key, self._api_name)
class APIDataSource(object):
"""This class fetches and loads JSON APIs from the FileSystem passed in with
|compiled_fs_factory|, so the APIs can be plugged into templates.
"""
class Factory(object):
def __init__(self, compiled_fs_factory, base_path):
def create_compiled_fs(fn, category):
return compiled_fs_factory.Create(fn, APIDataSource, category=category)
self._permissions_cache = create_compiled_fs(self._LoadPermissions,
'permissions')
self._json_cache = create_compiled_fs(
lambda api_name, api: self._LoadJsonAPI(api, False),
'json')
self._idl_cache = create_compiled_fs(
lambda api_name, api: self._LoadIdlAPI(api, False),
'idl')
# These caches are used if an APIDataSource does not want to resolve the
# $refs in an API. This is needed to prevent infinite recursion in
# ReferenceResolver.
self._json_cache_no_refs = create_compiled_fs(
lambda api_name, api: self._LoadJsonAPI(api, True),
'json-no-refs')
self._idl_cache_no_refs = create_compiled_fs(
lambda api_name, api: self._LoadIdlAPI(api, True),
'idl-no-refs')
self._idl_names_cache = create_compiled_fs(self._GetIDLNames, 'idl-names')
self._names_cache = create_compiled_fs(self._GetAllNames, 'names')
self._base_path = base_path
# These must be set later via the SetFooDataSourceFactory methods.
self._ref_resolver_factory = None
self._samples_data_source_factory = None
def SetSamplesDataSourceFactory(self, samples_data_source_factory):
self._samples_data_source_factory = samples_data_source_factory
def SetReferenceResolverFactory(self, ref_resolver_factory):
self._ref_resolver_factory = ref_resolver_factory
def Create(self, request, disable_refs=False):
"""Create an APIDataSource. |disable_refs| specifies whether $ref's in
APIs being processed by the |ToDict| method of _JSCModel follows $ref's
in the API. This prevents endless recursion in ReferenceResolver.
"""
if self._samples_data_source_factory is None:
# Only error if there is a request, which means this APIDataSource is
# actually being used to render a page.
if request is not None:
logging.error('SamplesDataSource.Factory was never set in '
'APIDataSource.Factory.')
samples = None
else:
samples = self._samples_data_source_factory.Create(request)
if not disable_refs and self._ref_resolver_factory is None:
logging.error('ReferenceResolver.Factory was never set in '
'APIDataSource.Factory.')
return APIDataSource(self._permissions_cache,
self._json_cache,
self._idl_cache,
self._json_cache_no_refs,
self._idl_cache_no_refs,
self._names_cache,
self._idl_names_cache,
self._base_path,
samples,
disable_refs)
def _LoadPermissions(self, file_name, json_str):
return json_parse.Parse(json_str)
def _LoadJsonAPI(self, api, disable_refs):
return _JSCModel(
json_parse.Parse(api)[0],
self._ref_resolver_factory.Create() if not disable_refs else None,
disable_refs).ToDict()
def _LoadIdlAPI(self, api, disable_refs):
idl = idl_parser.IDLParser().ParseData(api)
return _JSCModel(
idl_schema.IDLSchema(idl).process()[0],
self._ref_resolver_factory.Create() if not disable_refs else None,
disable_refs,
idl=True).ToDict()
def _GetIDLNames(self, base_dir, apis):
return self._GetExtNames(apis, ['idl'])
def _GetAllNames(self, base_dir, apis):
return self._GetExtNames(apis, ['json', 'idl'])
def _GetExtNames(self, apis, exts):
return [model.UnixName(os.path.splitext(api)[0]) for api in apis
if os.path.splitext(api)[1][1:] in exts]
def __init__(self,
permissions_cache,
json_cache,
idl_cache,
json_cache_no_refs,
idl_cache_no_refs,
names_cache,
idl_names_cache,
base_path,
samples,
disable_refs):
self._base_path = base_path
self._permissions_cache = permissions_cache
self._json_cache = json_cache
self._idl_cache = idl_cache
self._json_cache_no_refs = json_cache_no_refs
self._idl_cache_no_refs = idl_cache_no_refs
self._names_cache = names_cache
self._idl_names_cache = idl_names_cache
self._samples = samples
self._disable_refs = disable_refs
def _GetFeatureFile(self, filename):
perms = self._permissions_cache.GetFromFile('%s/%s' %
(self._base_path, filename))
return dict((model.UnixName(k), v) for k, v in perms.iteritems())
def _GetFeatureData(self, path):
# Remove 'experimental_' from path name to match the keys in
# _permissions_features.json.
path = model.UnixName(path.replace('experimental_', ''))
for filename in ['_permission_features.json', '_manifest_features.json']:
feature_data = self._GetFeatureFile(filename).get(path, None)
if feature_data is not None:
break
# There are specific cases in which the feature is actually a list of
# features where only one needs to match; but currently these are only
# used to whitelist features for specific extension IDs. Filter those out.
if isinstance(feature_data, list):
feature_list = feature_data
feature_data = None
for single_feature in feature_list:
if 'whitelist' in single_feature:
continue
if feature_data is not None:
# Note: if you are seeing the exception below, add more heuristics as
# required to form a single feature.
raise ValueError('Multiple potential features match %s. I can\'t '
'decide which one to use. Please help!' % path)
feature_data = single_feature
if feature_data and feature_data['channel'] in ('trunk', 'dev', 'beta'):
feature_data[feature_data['channel']] = True
return feature_data
def _GenerateHandlebarContext(self, handlebar_dict, path):
handlebar_dict['permissions'] = self._GetFeatureData(path)
handlebar_dict['samples'] = _LazySamplesGetter(path, self._samples)
return handlebar_dict
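  # _GetAsSubdirectory maps a unix-style API name onto its subdirectory form;
  # for example (illustrative), 'app_window' becomes 'app/window' and
  # 'experimental_app_window' becomes 'app/experimental_window'.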
def _GetAsSubdirectory(self, name):
if name.startswith('experimental_'):
parts = name[len('experimental_'):].split('_', 1)
parts[1] = 'experimental_%s' % parts[1]
return '/'.join(parts)
return name.replace('_', '/', 1)
def get(self, key):
if key.endswith('.html') or key.endswith('.json') or key.endswith('.idl'):
path, ext = os.path.splitext(key)
else:
path = key
unix_name = model.UnixName(path)
idl_names = self._idl_names_cache.GetFromFileListing(self._base_path)
names = self._names_cache.GetFromFileListing(self._base_path)
if unix_name not in names and self._GetAsSubdirectory(unix_name) in names:
unix_name = self._GetAsSubdirectory(unix_name)
if self._disable_refs:
cache, ext = (
(self._idl_cache_no_refs, '.idl') if (unix_name in idl_names) else
(self._json_cache_no_refs, '.json'))
else:
cache, ext = ((self._idl_cache, '.idl') if (unix_name in idl_names) else
(self._json_cache, '.json'))
return self._GenerateHandlebarContext(
cache.GetFromFile('%s/%s%s' % (self._base_path, unix_name, ext)),
path)
|
|
#!/usr/bin/env python
"""
Automatic building of SeqAn apps and releases.
"""
import subprocess
import optparse
import os.path
import sys
import shutil
import tempfile
# The SVN command to use.
SVN_BINARY='svn'
# The CMake command to use.
CMAKE_BINARY='cmake'
# The default value for the SVN tags.
DEFAULT_TAGS_URL='http://svn.seqan.de/seqan/tags'
# The default value for the SVN trunk.
DEFAULT_TRUNK_URL='http://svn.seqan.de/seqan/trunk'
# The default minimal revision that tags must have.
DEFAULT_START_REVISION=13708
# The path to the package repository.
DEFAULT_PACKAGE_DB='.'
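# Example invocation (illustrative; the script name, URLs, and paths are
# hypothetical and depend on the local setup):
#   python build_pkgs.py --os Linux --word-sizes 32,64 \
#       --package-db /var/www/packages --tmp-dir /tmp/seqan-build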
class MinisculeSvnWrapper(object):
"""Minimal SVN wrapper."""
def ls(self, url):
"""Execute 'svn ls ${url}'."""
print >>sys.stderr, 'Executing "%s %s %s"' % (SVN_BINARY, 'ls -v', url)
popen = subprocess.Popen([SVN_BINARY, 'ls', '-v', url],
stdout=subprocess.PIPE)
out_data, err_data = popen.communicate()
if popen.returncode != 0:
print >>sys.stderr, 'ERROR during SVN call.'
return 1
lines = out_data.splitlines()
revs_tags = [(int(line.split()[0]), line.split()[-1]) for line in lines]
res = []
for rev, tag in revs_tags:
if tag == './':
continue # Skip dot-slash.
tag2 = tag[:-1]
res.append((rev, tag2))
return res
def co(self, url, dest_dir):
"""Execute 'svn co ${url} ${dest_dir}'."""
print >>sys.stderr, 'Executing "%s %s %s %s"' % (SVN_BINARY, 'co', url, dest_dir)
popen = subprocess.Popen([SVN_BINARY, 'co', url, dest_dir])
popen.wait()
return popen.returncode
class Package(object):
"""Represent a package with a given name, version, OS and architeture."""
def __init__(self, name, version, os, word_size, pkg_format):
self.name = name
self.version = version
self.os = os
self.word_size = word_size
SYS_NAMES = {'Windows': {'32': 'win32-i686', '64': 'win64-x86_64'},
'Linux': {'32': 'Linux-i686', '64': 'Linux-x86_64'},
'Mac': {'32': 'Darwin-i686', '64': 'Darwin-x86_64'}}
self.system_name = SYS_NAMES[os][word_size]
self.pkg_format = pkg_format
def fileName(self):
if self.name == 'seqan-library':
return '%s-%s.%s' % (self.name, self.version, self.pkg_format)
else:
return '%s-%s-%s.%s' % (self.name, self.version, self.system_name,
self.pkg_format)
class BuildStep(object):
"""Management of one build step."""
def __init__(self, path, name, version, os, word_size, pkg_formats,
svn_url, make_args, options, tmp_dir=None):
self.base_path = path
self.name = name
self.version = version # TODO(holtgrew): Unused, overwritten below.
self.major_version = int(version.split('.')[0])
self.minor_version = int(version.split('.')[1])
self.patch_version = 0
if len(version.split('.')) > 2:
self.patch_version = int(version.split('.')[2])
self.version = '%d.%d.%d' % (self.major_version, self.minor_version, self.patch_version)
self.os = os
self.word_size = word_size
self.pkg_formats = pkg_formats
if name == 'seqan':
self.packages = [Package(name + suffix, self.version, os, word_size, f)
for f in pkg_formats for suffix in ['-apps', '-library']]
else:
self.packages = [Package(name, self.version, os, word_size, f)
for f in pkg_formats]
self.svn_url = svn_url
self.make_args = make_args
self.options = options
# If set then this is used instead of a random name in TMPDIR.
self.tmp_dir = tmp_dir
def buildNeeded(self):
"""Returns whether one of the package files is missing."""
for p in self.packages:
package_path = os.path.join(self.base_path, p.name, p.fileName())
if 'x86' in package_path and 'x86_64' not in package_path: # fix processor name
package_path = package_path.replace('x86', 'x86_64')
if 'win32' in package_path or 'win64' in package_path: # fix OS name
package_path = package_path.replace('win32', 'Windows').replace('win64', 'Windows')
if 'Darwin' in package_path: # fix OS name
package_path = package_path.replace('Darwin', 'Mac')
if not os.path.exists(package_path):
if self.options.verbosity >= 1:
print >>sys.stderr, 'File %s does not exist yet.' % package_path
return True
elif self.options.verbosity >= 1:
print >>sys.stderr, 'File %s exists.' % package_path
return False
def copyArchives(self, build_dir):
"""Copy built packages to base_path directory."""
for p in self.packages:
from_ = os.path.join(build_dir, p.fileName())
if os.path.exists(from_):
to = os.path.join(self.base_path, p.name, os.path.basename(from_))
if not os.path.exists(os.path.dirname(to)): # Create directory if necessary.
os.makedirs(os.path.dirname(to))
print >>sys.stderr, "Copying %s => %s" % (from_, to)
if 'x86' in to and 'x86_64' not in to: # fix processor name
to = to.replace('x86', 'x86_64')
if 'win32' in to or 'win64' in to: # fix OS name
to = to.replace('win32', 'Windows').replace('win64', 'Windows')
if 'Darwin' in to: # fix OS name
to = to.replace('Darwin', 'Mac')
shutil.copyfile(from_, to)
else:
print >>sys.stderr, '%s does not exist (not fatal)' % from_
def buildSeqAnRelease(self, checkout_dir, build_dir):
"""Build SeqAn release: Apps and library build."""
# Build seqan-apps.
#
# Create build directory.
print >>sys.stderr, 'Creating build directory %s' % (build_dir,)
os.mkdir(build_dir)
# Execute CMake.
cmake_args = [CMAKE_BINARY, checkout_dir,
'-DSEQAN_BUILD_SYSTEM=SEQAN_RELEASE_APPS']
# Use appropriate CMake flags for OS and processor.
if self.word_size == '32':
cmake_args.append('-DSEQAN_SYSTEM_PROCESSOR=i686')
if self.os != 'Windows':
cmake_args.append('-DCMAKE_CXX_FLAGS=-m32')
else:
cmake_args += ['-G', 'Visual Studio 10']
else: # self.word_size == '64'
if self.os == 'Windows':
cmake_args += ['-G', 'Visual Studio 10 Win64']
print >>sys.stderr, 'Executing CMake: "%s"' % (' '.join(cmake_args),)
popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy())
out_data, err_data = popen.communicate()
if popen.returncode != 0:
            print >>sys.stderr, 'ERROR during CMake call.'
print out_data
print err_data
return 1
# Execute Make.
cmake_args = [CMAKE_BINARY, '--build', build_dir, '--target', 'package'] + self.make_args
print >>sys.stderr, 'Building with CMake: "%s"' % (' '.join(cmake_args),)
popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy())
out_data, err_data = popen.communicate()
if popen.returncode != 0:
print >>sys.stderr, 'ERROR during make call.'
print out_data
print err_data
return 1
# Copy over the archives.
self.copyArchives(build_dir)
# Remove build directory.
print >>sys.stderr, 'Removing build directory %s' % build_dir
shutil.rmtree(build_dir)
# Build seqan-library.
#
# Create build directory.
if not os.path.exists(build_dir):
print >>sys.stderr, "Creating build directory %s" % (build_dir,)
os.mkdir(build_dir)
# Execute CMake.
cmake_args = [CMAKE_BINARY, checkout_dir,
"-DSEQAN_BUILD_SYSTEM=SEQAN_RELEASE_LIBRARY"]
print >>sys.stderr, 'Executing CMake: "%s"' % (' '.join(cmake_args),)
popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy())
out_data, err_data = popen.communicate()
if popen.returncode != 0:
            print >>sys.stderr, 'ERROR during CMake call.'
print out_data
print err_data
return 1
# Build Docs
cmake_args = [CMAKE_BINARY, '--build', build_dir, '--target', 'docs'] + self.make_args
print >>sys.stderr, 'Building with CMake: "%s"' % (' '.join(cmake_args),)
popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy())
out_data, err_data = popen.communicate()
if popen.returncode != 0:
print >>sys.stderr, 'ERROR during make docs call.'
print out_data
print err_data
# Execute Make.
cmake_args = [CMAKE_BINARY, '--build', build_dir, '--target', 'package'] + self.make_args
print >>sys.stderr, 'Building with CMake: "%s"' % (' '.join(cmake_args),)
popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy())
out_data, err_data = popen.communicate()
if popen.returncode != 0:
print >>sys.stderr, 'ERROR during make call.'
print out_data
print err_data
return 1
self.copyArchives(build_dir)
# Remove build directory.
print >>sys.stderr, 'Removing build directory %s' % build_dir
shutil.rmtree(build_dir)
def buildApp(self, checkout_dir, build_dir):
"""Build an application."""
# Create build directory.
print >>sys.stderr, "Creating build directory %s" % (build_dir,)
if not os.path.exists(build_dir):
os.mkdir(build_dir)
# Execute CMake.
cmake_args = [CMAKE_BINARY, checkout_dir,# '-G', 'Visual Studio 10',
"-DCMAKE_BUILD_TYPE=Release",
"-DSEQAN_BUILD_SYSTEM=APP:%s" % self.name,
"-DSEQAN_APP_VERSION=%d.%d.%d" %
(self.major_version, self.minor_version, self.patch_version)]
# Use appropriate CMake flags for OS and processor.
if self.word_size == '32':
cmake_args.append('-DSEQAN_SYSTEM_PROCESSOR=i686')
if self.os != 'Windows':
cmake_args.append('-DCMAKE_CXX_FLAGS=-m32')
else:
cmake_args += ['-G', 'Visual Studio 10']
else: # self.word_size == '64'
cmake_args.append('-DSEQAN_SYSTEM_PROCESSOR=x86_64')
if self.os == 'Windows':
cmake_args += ['-G', 'Visual Studio 10 Win64']
print >>sys.stderr, 'Executing CMake: "%s"' % (' '.join(cmake_args),)
#for key in sorted(os.environ.keys()):
# print key, ': ', os.environ[key]
popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy())
out_data, err_data = popen.communicate()
if popen.returncode != 0:
            print >>sys.stderr, 'ERROR during CMake call.'
print out_data
print err_data
return 1
# Build and package project.
make_args = [CMAKE_BINARY, '--build', build_dir, '--target', 'package', '--config', 'Release']
if self.options.verbosity > 1:
make_args.insert(1, 'VERBOSE=1')
print >>sys.stderr, 'Building with CMake: "%s"' % (' '.join(make_args),)
popen = subprocess.Popen(make_args, cwd=build_dir)
out_data, err_data = popen.communicate()
if popen.returncode != 0:
print >>sys.stderr, 'ERROR during make call.'
print out_data
print err_data
return 1
# Copy out archives.
self.copyArchives(build_dir)
# Remove build directory.
print >>sys.stderr, 'Removing build directory %s' % build_dir
shutil.rmtree(build_dir)
def tmpDir(self):
print 'self.tmp_dir = %s' % self.tmp_dir
if self.tmp_dir:
if not os.path.exists(self.tmp_dir):
os.makedirs(self.tmp_dir)
return self.tmp_dir
else:
return tempfile.mkdtemp()
def execute(self):
"""Execute build step."""
# Create temporary directory.
tmp_dir = self.tmpDir()
print >>sys.stderr, 'Temporary directory is %s' % (tmp_dir,)
# Create SVN checkout in temporary directory.
checkout_dir = os.path.join(tmp_dir, os.path.basename(self.svn_url))
print >>sys.stderr, 'Creating checkout in %s' % checkout_dir
svn = MinisculeSvnWrapper()
svn.co(self.svn_url, checkout_dir)
# Create build directory.
suffix = '-build-%s-%s' % (self.os, self.word_size)
build_dir = os.path.join(tmp_dir, os.path.basename(self.svn_url) + suffix)
if os.path.exists(build_dir):
print >>sys.stderr, 'Removing build directory %s' % (build_dir,)
shutil.rmtree(build_dir)
# Perform the build. We have to separate between app and whole SeqAn releases.
if self.name == 'seqan':
self.buildSeqAnRelease(checkout_dir, build_dir)
else:
self.buildApp(checkout_dir, build_dir)
if not self.options.keep_co_dir:
print >>sys.stderr, 'Removing checkout directory %s' % (checkout_dir,)
shutil.rmtree(checkout_dir)
# Remove temporary directory again.
if not self.tmp_dir and not self.options.keep_tmp_dir:
            # Only remove if not explicitly given and not forced to keep.
print >>sys.stderr, 'Removing temporary directory %s' % (tmp_dir,)
shutil.rmtree(tmp_dir)
def workTags(options):
"""Run the individual steps for tags."""
# Get the revisions and tag names.
svn = MinisculeSvnWrapper()
revs_tags = [(rev, tag) for (rev, tag) in svn.ls(options.tags_url)
if rev >= options.start_revision and
'-' in tag]
    # Enumerate all packages that may need to be built.
print 'revs_tags = %s' % revs_tags
print 'word_sizes = %s' % options.word_sizes
for rev, tag in revs_tags:
name, version = tag.rsplit('-', 1)
for word_size in options.word_sizes.split(','):
# Create build step for this package name.
pkg_formats = options.package_formats.split(',')
svn_url = options.tags_url + '/' + tag
build_step = BuildStep(options.package_db, name, version, options.os,
word_size, pkg_formats, svn_url,
options.make_args.split(), options, options.tmp_dir)
# Check whether we need to build this.
if not build_step.buildNeeded():
continue # Skip
# Execute build step.
build_step.execute()
return 0
def workTrunk(options):
"""Run the individual steps for the trunk with fake tag name."""
# Get the revisions and tag names.
svn = MinisculeSvnWrapper()
    # Enumerate all packages that may need to be built.
print 'fake tag = %s' % options.build_trunk_as
print 'word_sizes = %s' % options.word_sizes
name, version = options.build_trunk_as.rsplit('-', 1)
for word_size in options.word_sizes.split(','):
# Create build step for this package name.
pkg_formats = options.package_formats.split(',')
svn_url = options.trunk_url
build_step = BuildStep(options.package_db, name, version, options.os,
word_size, pkg_formats, svn_url,
options.make_args.split(), options, options.tmp_dir)
# Check whether we need to build this.
if not build_step.buildNeeded():
continue # Skip
# Execute build step.
build_step.execute()
return 0
def work(options):
"""Run the steps."""
if not options.build_trunk_as:
return workTags(options)
else:
return workTrunk(options)
def main():
"""Program entry point."""
# Parse Arguments.
parser = optparse.OptionParser()
parser.add_option('-t', '--tags-url', dest='tags_url',
default=DEFAULT_TAGS_URL,
help='This URL is searched for tags.', metavar='URL')
parser.add_option('--trunk-url', dest='trunk_url',
default=DEFAULT_TRUNK_URL,
help='This URL is searched for trunk.', metavar='URL')
parser.add_option('--package-db', dest='package_db', type='string',
default=DEFAULT_PACKAGE_DB,
                      help='Path to the directory with the packages.')
parser.add_option('-s', '--start-revision', dest='start_revision',
default=DEFAULT_START_REVISION,
type='int', help='Ignore all tags with smaller revision.')
parser.add_option('-v', dest='verbosity', action='count', default=1,
help='Increase verbosity.')
parser.add_option('--package-formats', dest='package_formats',
default='tar.bz2,zip',
help='Expect the following packages to be created.')
parser.add_option('--os', dest='os', help='Expect the packages to be created for this OS.',
default='Linux')
parser.add_option('--word-sizes', dest='word_sizes', default='32,64',
help='Build binaries with the given word sizes')
parser.add_option('--make-args', dest='make_args', type="string", default='',
help='Arguments for make.')
parser.add_option('--tmp-dir', dest='tmp_dir', type='string', default=None,
help='Temporary directory to use. Use this to reuse the same checkout.')
parser.add_option('--build-trunk-as', dest='build_trunk_as', type='string', default=None,
help='Build current trunk with this string as a tag name.')
parser.add_option('--keep-tmp-dir', dest='keep_tmp_dir', default=False,
action='store_true', help='Keep temporary directory.')
parser.add_option('--keep-co-dir', dest='keep_co_dir', default=False,
action='store_true', help='Keep checkout directory.')
parser.epilog = ('The program will use the environment variable TMPDIR as '
'the directory for temporary files.')
options, args = parser.parse_args()
if args:
parser.error('No arguments expected!')
return 1
# Fire up work.
print >>sys.stderr, 'Running SeqAn Auto Builder'
return work(options)
|
|
#!/usr/bin/env python
#
# Setup script for Review Board.
#
# A big thanks to Django project for some of the fixes used in here for
# MacOS X and data files installation.
import os
import subprocess
import sys
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
from setuptools.command.egg_info import egg_info
from distutils.command.install_data import install_data
from distutils.command.install import INSTALL_SCHEMES
from distutils.core import Command
from reviewboard import (get_package_version,
VERSION,
django_version)
# Make sure this is a version of Python we are compatible with. This should
# prevent people on older versions from unintentionally trying to install
# the source tarball, and failing.
if sys.hexversion < 0x02050000:
sys.stderr.write(
'Review Board %s is incompatible with your version of Python.\n'
'Please install Review Board 1.6.x or upgrade Python to at least '
'2.6.x (preferably 2.7).\n' % get_package_version())
sys.exit(1)
elif sys.hexversion < 0x02060500:
sys.stderr.write(
'Review Board %s is incompatible with your version of Python.\n'
'Please install Review Board 1.7.x or upgrade Python to at least '
'2.6.5 (preferably 2.7).\n' % get_package_version())
sys.exit(1)
# Make sure we're actually in the directory containing setup.py.
root_dir = os.path.dirname(__file__)
if root_dir != "":
os.chdir(root_dir)
# Tell distutils to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
class osx_install_data(install_data):
# On MacOS, the platform-specific lib dir is
# /System/Library/Framework/Python/.../
# which is wrong. Python 2.5 supplied with MacOS 10.5 has an
# Apple-specific fix for this in distutils.command.install_data#306. It
# fixes install_lib but not install_data, which is why we roll our own
# install_data class.
def finalize_options(self):
# By the time finalize_options is called, install.install_lib is
# set to the fixed directory, so we set the installdir to install_lib.
# The install_data class uses ('install_data', 'install_dir') instead.
self.set_undefined_options('install', ('install_lib', 'install_dir'))
install_data.finalize_options(self)
class BuildEggInfo(egg_info):
def run(self):
if ('sdist' in sys.argv or
'bdist_egg' in sys.argv or
'install' in sys.argv):
self.run_command('build_media')
self.run_command('build_i18n')
egg_info.run(self)
class BuildMedia(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
retcode = subprocess.call([
sys.executable, 'contrib/internal/build-media.py'])
if retcode != 0:
raise RuntimeError('Failed to build media files')
class BuildI18n(Command):
description = 'Compile message catalogs to .mo'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
retcode = subprocess.call([
sys.executable, 'contrib/internal/build-i18n.py'])
if retcode != 0:
raise RuntimeError('Failed to build i18n files')
cmdclasses = {
'install_data': install_data,
'egg_info': BuildEggInfo,
'build_media': BuildMedia,
'build_i18n': BuildI18n,
}
if sys.platform == "darwin":
cmdclasses['install_data'] = osx_install_data
PACKAGE_NAME = 'ReviewBoard'
download_url = 'http://downloads.reviewboard.org/releases/%s/%s.%s/' % \
(PACKAGE_NAME, VERSION[0], VERSION[1])
# Build the reviewboard package.
setup(name=PACKAGE_NAME,
version=get_package_version(),
license="MIT",
description="Review Board, a web-based code review tool",
url="https://www.reviewboard.org/",
download_url=download_url,
author="The Review Board Project",
author_email="[email protected]",
maintainer="Christian Hammond",
maintainer_email="[email protected]",
packages=find_packages(),
entry_points={
'console_scripts': [
'rb-site = reviewboard.cmdline.rbsite:main',
'rbssh = reviewboard.cmdline.rbssh:main',
],
'reviewboard.hosting_services': [
'beanstalk = reviewboard.hostingsvcs.beanstalk:Beanstalk',
'bitbucket = reviewboard.hostingsvcs.bitbucket:Bitbucket',
'bugzilla = reviewboard.hostingsvcs.bugzilla:Bugzilla',
'codebasehq = reviewboard.hostingsvcs.codebasehq:CodebaseHQ',
'fedorahosted = '
'reviewboard.hostingsvcs.fedorahosted:FedoraHosted',
'fogbugz = reviewboard.hostingsvcs.fogbugz:FogBugz',
'github = reviewboard.hostingsvcs.github:GitHub',
'gitlab = reviewboard.hostingsvcs.gitlab:GitLab',
'gitorious = reviewboard.hostingsvcs.gitorious:Gitorious',
'googlecode = reviewboard.hostingsvcs.googlecode:GoogleCode',
'jira = reviewboard.hostingsvcs.jira:JIRA',
'kiln = reviewboard.hostingsvcs.kiln:Kiln',
'rbgateway = reviewboard.hostingsvcs.rbgateway:ReviewBoardGateway',
'redmine = reviewboard.hostingsvcs.redmine:Redmine',
'sourceforge = reviewboard.hostingsvcs.sourceforge:SourceForge',
'trac = reviewboard.hostingsvcs.trac:Trac',
'unfuddle = reviewboard.hostingsvcs.unfuddle:Unfuddle',
'versionone = reviewboard.hostingsvcs.versionone:VersionOne',
],
'reviewboard.scmtools': [
'bzr = reviewboard.scmtools.bzr:BZRTool',
'clearcase = reviewboard.scmtools.clearcase:ClearCaseTool',
'cvs = reviewboard.scmtools.cvs:CVSTool',
'git = reviewboard.scmtools.git:GitTool',
'hg = reviewboard.scmtools.hg:HgTool',
'perforce = reviewboard.scmtools.perforce:PerforceTool',
'plastic = reviewboard.scmtools.plastic:PlasticTool',
'svn = reviewboard.scmtools.svn:SVNTool',
],
'reviewboard.auth_backends': [
'ad = reviewboard.accounts.backends:ActiveDirectoryBackend',
'ldap = reviewboard.accounts.backends:LDAPBackend',
'nis = reviewboard.accounts.backends:NISBackend',
'x509 = reviewboard.accounts.backends:X509Backend',
'digest = reviewboard.accounts.backends:HTTPDigestBackend',
],
},
cmdclass=cmdclasses,
install_requires=[
django_version,
'django_evolution>=0.7.5,<=0.7.999',
'django-haystack>=2.3.1',
'django-multiselectfield',
'Djblets>=0.9rc1,<=0.9.999',
'docutils',
'markdown>=2.4.0,<2.4.999',
'mimeparse>=0.1.3',
'paramiko>=1.12',
'pycrypto>=2.6',
'Pygments>=1.6',
'python-dateutil==1.5',
'python-memcached',
'pytz',
'recaptcha-client',
'Whoosh>=2.6',
],
dependency_links=[
'http://downloads.reviewboard.org/mirror/',
'http://downloads.reviewboard.org/releases/Djblets/0.9/',
'http://downloads.reviewboard.org/releases/django-evolution/0.7/',
],
include_package_data=True,
zip_safe=False,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development",
"Topic :: Software Development :: Quality Assurance",
])
|
|
'''
Copyright (C) 2014 Parrot SA
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Parrot nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
from ARFuncs import *
from Common_BuildConfigureLibrary import *
from Common_RunAntScript import *
from Android_CreateFiles import *
from Common_HandlePrebuiltDep import *
import shutil
def Android_BuildLibrary(target, lib, clean=False, debug=False, nodeps=False, inhouse=False, requestedArchs=None):
args = dict(locals())
StartDumpArgs(**args)
ANDROID_SDK_VERSION = os.environ.get('AR_ANDROID_API_VERSION')
# Check that ANDROID_SDK_PATH and ANDROID_NDK_PATH environment variables are set
if not os.environ.get('ANDROID_SDK_PATH') or not os.environ.get('ANDROID_NDK_PATH'):
        ARLog('ANDROID_SDK_PATH and ANDROID_NDK_PATH environment variables must be set to build a library for %(target)s' % locals())
return EndDumpArgs(res=False, **args)
# Sanity check : is library valid for this target
if not lib.isAvailableForTarget(target):
ARLog('lib%(lib)s does not need to be built for %(target)s' % locals())
return EndDumpArgs(res=True, **args)
KnownArchs = [{ 'arch' : 'arm', 'eabi' : 'armeabi', 'host' : 'arm-linux-androideabi' },
{ 'arch' : 'arm', 'eabi' : 'armeabi-v7a', 'host' : 'arm-linux-androideabi' },
{ 'arch' : 'mips', 'eabi' : 'mips', 'host' : 'mipsel-linux-android' },
{ 'arch' : 'x86', 'eabi' : 'x86', 'host' : 'i686-linux-android' },
]
KnownEabis = [ arch['eabi'] for arch in KnownArchs ]
ValidArchs = []
if requestedArchs:
ValidArchs = [ arch for arch in KnownArchs if arch['eabi'] in requestedArchs ]
for ra in requestedArchs:
if ra not in KnownEabis:
ARLog('Error : requested arch %(ra)s is not available for target Android' % locals())
                ARLog('  Available archs : %(KnownEabis)s' % locals())
return EndDumpArgs(res=False, **args)
if not ValidArchs:
ValidArchs = KnownArchs
# First thing : build deps
if not nodeps:
for pb in lib.pbdeps:
abis = [arch['eabi'] for arch in ValidArchs]
if not Common_HandlePrebuiltDep(target, pb, outputSuffixes=abis):
ARLog('Error while handling prebuilt library %(pb)s' % locals())
return EndDumpArgs(res=False, **args)
for dep in lib.deps:
            ARLog('Building lib%(dep)s (dependency of lib%(lib)s)' % locals())
            if target.hasAlreadyBuilt(dep):
                ARLog('Dependency lib%(dep)s already built for %(target)s' % locals())
            elif not dep.isAvailableForTarget(target):
                ARLog('Dependency lib%(dep)s does not need to be built for %(target)s' % locals())
            elif Android_BuildLibrary(target, dep, clean, debug, nodeps, inhouse, requestedArchs):
                ARLog('Dependency lib%(dep)s built' % locals())
            else:
                ARLog('Error while building dependency lib%(dep)s' % locals())
return EndDumpArgs(res=False, **args)
else:
ARLog('Skipping deps building for %(lib)s' % locals())
target.addTriedLibrary(lib)
res = True
libLower = lib.name.lower()
suffix = '_dbg' if debug else ''
libPrefix = 'lib' if not libLower.startswith('lib') else ''
hasNative = False
    # 1> Autotools part (Optional)
if Common_IsConfigureLibrary(lib):
hasNative = True
for archInfos in ValidArchs:
# Read archInfos
arch = archInfos['arch']
eabi = archInfos['eabi']
host = archInfos['host']
# Check that the compiler is in the path
compilerTestName = '%(host)s-gcc' % locals()
if not ARExistsInPath(compilerTestName):
ARLog('%(compilerTestName)s is not in your path' % locals())
ARLog('You need to install it as a standalone toolchain to use this build script')
ARLog('(See NDK Doc)')
return EndDumpArgs(res=False, **args)
# Add extra configure flags
ExtraConfFlags = ['--host=%(host)s' % locals(),
'--disable-static',
'--enable-shared']
LdFlagsArr=['-llog -lz']
if not lib.ext:
LdFlagsString = 'LDFLAGS=" ' + ARListAsBashArg(LdFlagsArr) + '"'
else:
if lib.name == 'curl':
                    # Statically link openssl to curl
EabiLibDir = os.getcwd() + '/Targets/%(target)s/Install/%(eabi)s/lib' % locals()
LdFlagsString = 'LIBS=" ' + ARListAsBashArg(LdFlagsArr) + ' %(EabiLibDir)s/libssl.a %(EabiLibDir)s/libcrypto.a"' % locals()
else:
LdFlagsString = 'LIBS=" ' + ARListAsBashArg(LdFlagsArr) + '"'
ExtraConfFlags.append(LdFlagsString)
if not lib.ext:
ExtraConfFlags.append('--disable-so-version')
if eabi == 'armeabi':
ExtraConfFlags.append('CFLAGS=" -march=armv5te"')
elif eabi == 'armeabi-v7a':
ExtraConfFlags.append('CFLAGS=" -march=armv7-a"')
# Call configure/make/make install
stripVersionNumber = lib.ext and not clean
forcedMalloc = ARSetEnvIfEmpty('ac_cv_func_malloc_0_nonnull', 'yes')
forcedRealloc = ARSetEnvIfEmpty('ac_cv_func_realloc_0_nonnull', 'yes')
retStatus = Common_BuildConfigureLibrary(target, lib, extraArgs=ExtraConfFlags, clean=clean, debug=debug, confdirSuffix=eabi, installSubDir=eabi, stripVersionNumber=stripVersionNumber, inhouse=inhouse)
if forcedMalloc:
ARUnsetEnv('ac_cv_func_malloc_0_nonnull')
if forcedRealloc:
ARUnsetEnv('ac_cv_func_realloc_0_nonnull')
if not retStatus:
return EndDumpArgs(res=False, **args)
    # 2> Java part (Pure Java or Java + JNI), mandatory
# Declare path
JniPath = lib.path + '/JNI'
AndroidPath = lib.path + '/Android'
JavaBuildDir = ARPathFromHere('Targets/%(target)s/Build/%(libPrefix)s%(lib)s_Java' % locals())
JavaBuildDirDbg = ARPathFromHere('Targets/%(target)s/Build/%(libPrefix)s%(lib)s_Java_dbg' % locals())
OutputJarDir = ARPathFromHere('Targets/%(target)s/Install/jars/release/' % locals())
OutputJar = '%(OutputJarDir)s/%(libPrefix)s%(lib)s.jar' % locals()
OutputJarDirDbg = ARPathFromHere('Targets/%(target)s/Install/jars/debug/' % locals())
OutputJarDbg = '%(OutputJarDirDbg)s/%(libPrefix)s%(lib)s_dbg.jar' % locals()
AndroidSoLib = '%(libPrefix)s%(libLower)s_android.' % locals() + target.soext
AndroidSoLibDbg = '%(libPrefix)s%(libLower)s_android_dbg.' % locals() + target.soext
# Select build path depending on debug flag
ActualJavaBuildDir = JavaBuildDir if not debug else JavaBuildDirDbg
ActualOutputJarDir = OutputJarDir if not debug else OutputJarDirDbg
ActualOutputJar = OutputJar if not debug else OutputJarDbg
ActualAndroidSoLib = AndroidSoLib if not debug else AndroidSoLibDbg
# Check for full java Android projects
if os.path.exists(AndroidPath):
BuildXmlFile = '%(AndroidPath)s/build.xml' % locals()
if not os.path.exists(BuildXmlFile):
ARLog('Unable to build %(libPrefix)s%(lib)s -> Missing build.xml script' % locals())
return EndDumpArgs(res=False, **args)
ClassPath=os.environ.get('ANDROID_SDK_PATH') + '/platforms/android-%(ANDROID_SDK_VERSION)s/android.jar' % locals()
for dep in lib.deps:
ClassPath += ':%(ActualOutputJarDir)s/lib%(dep)s%(suffix)s.jar' % locals()
for pbdep in lib.pbdeps:
ClassPath += ':%(OutputJarDir)s/%(pbdep)s.jar' % locals()
if not os.path.exists(ActualJavaBuildDir):
os.makedirs(ActualJavaBuildDir)
if clean:
if not ARExecute('ant -f %(BuildXmlFile)s -Ddist.dir=%(OutputJarDir)s -Dbuild.dir=%(JavaBuildDir)s -Dproject.classpath=%(ClassPath)s clean' % locals()):
return EndDumpArgs(res=False, **args)
if not ARExecute('ant -f %(BuildXmlFile)s -Ddist.dir=%(OutputJarDirDbg)s -Dbuild.dir=%(JavaBuildDirDbg)s -Dproject.classpath=%(ClassPath)s clean' % locals()):
return EndDumpArgs(res=False, **args)
elif debug:
if not ARExecute('ant -f %(BuildXmlFile)s -Ddist.dir=%(ActualOutputJarDir)s -Dbuild.dir=%(ActualJavaBuildDir)s -Dproject.classpath=%(ClassPath)s debug' % locals()):
return EndDumpArgs(res=False, **args)
else:
if not ARExecute('ant -f %(BuildXmlFile)s -Ddist.dir=%(ActualOutputJarDir)s -Dbuild.dir=%(ActualJavaBuildDir)s -Dproject.classpath=%(ClassPath)s release' % locals()):
return EndDumpArgs(res=False, **args)
# Else, search for JNI subprojects
elif os.path.exists(JniPath):
mustRunAnt = False
# Declare dirs
JniJavaDir = '%(JniPath)s/java' % locals()
JniCDir = '%(JniPath)s/c' % locals()
BuildSrcDir = '%(ActualJavaBuildDir)s/src' % locals()
BuildJniDir = '%(ActualJavaBuildDir)s/jni' % locals()
if not clean:
# Copy files from JNI Dirs to Build Dir
if not os.path.exists(ActualJavaBuildDir):
os.makedirs(ActualJavaBuildDir)
ARCopyAndReplace(JniJavaDir, BuildSrcDir, deletePrevious=True)
ARCopyAndReplace(JniCDir, BuildJniDir, deletePrevious=True, ignoreRegexpsForDeletion=[r'.*mk'])
# Create Android.mk / Application.mk / AndroidManifest.xml
Android_CreateApplicationMk(ActualJavaBuildDir, [arch['eabi'] for arch in ValidArchs])
Android_CreateAndroidManifest(ActualJavaBuildDir, lib)
Android_CreateAndroidMk(target, ActualJavaBuildDir, ARPathFromHere('Targets/%(target)s/Install' % locals()), lib, debug, hasNative, inhouse=inhouse)
# Call ndk-build
buildDir = Chdir(ActualJavaBuildDir)
ndk_debug = ''
if debug:
ndk_debug = 'NDK_DEBUG=1'
res = ARExecute(os.environ.get('ANDROID_NDK_PATH') + '/ndk-build -j ' + ndk_debug)
buildDir.exit()
if not res:
ARLog('Error while running ndk-build')
return EndDumpArgs(res=False, **args)
# Call java build (+ make jar)
classpath = ' -cp ' + os.environ.get('ANDROID_SDK_PATH') + '/platforms/android-%(ANDROID_SDK_VERSION)s/android.jar' % locals()
if lib.deps or lib.pbdeps:
classpath += ':"%(ActualOutputJarDir)s/*"' % locals()
JavaFilesDir = '%(BuildSrcDir)s/com/parrot/arsdk/%(libLower)s/' % locals()
JavaFiles = ARExecuteGetStdout(['find', JavaFilesDir, '-name', '*.java']).replace('\n', ' ')
if not ARExecute('javac -source 1.6 -target 1.6 -sourcepath %(BuildSrcDir)s %(JavaFiles)s %(classpath)s' % locals()):
ARLog('Error while building java sources')
return EndDumpArgs(res=False, **args)
if not os.path.exists(ActualOutputJarDir):
os.makedirs(ActualOutputJarDir)
            # Move the built libraries into a ./lib directory (instead of ./libs)
for archInfos in ValidArchs:
eabi = archInfos['eabi']
JarLibDir = '%(ActualJavaBuildDir)s/lib/%(eabi)s' % locals()
if not os.path.exists(JarLibDir):
os.makedirs(JarLibDir)
for baseDir, directories, files in os.walk('%(ActualJavaBuildDir)s/libs/%(eabi)s' % locals()):
for _file in files:
if _file == '%(libPrefix)s%(libLower)s%(suffix)s.' % locals() + target.soext or _file == ActualAndroidSoLib:
shutil.copy2(os.path.join(baseDir, _file), os.path.join(JarLibDir, _file))
# Create JAR File
if not ARExecute('jar cf %(ActualOutputJar)s -C %(ActualJavaBuildDir)s ./lib -C %(BuildSrcDir)s .' % locals()):
ARLog('Error while creating jar file')
return EndDumpArgs(res=False, **args)
# Copy output so libraries into target dir
for archInfos in ValidArchs:
eabi = archInfos['eabi']
shutil.copy2('%(ActualJavaBuildDir)s/libs/%(eabi)s/%(ActualAndroidSoLib)s' % locals(),
ARPathFromHere('Targets/%(target)s/Install/%(eabi)s/lib/%(ActualAndroidSoLib)s' % locals()))
else:
ARDeleteIfExists(OutputJarDbg, OutputJar, JavaBuildDir, JavaBuildDirDbg)
for archInfos in ValidArchs:
eabi = archInfos['eabi']
LibRelease = ARPathFromHere('Targets/%(target)s/Install/%(eabi)s/lib/%(AndroidSoLib)s' % locals())
LibDebug = ARPathFromHere('Targets/%(target)s/Install/%(eabi)s/lib/%(AndroidSoLibDbg)s' % locals())
ARDeleteIfExists(LibRelease, LibDebug)
# For autotools only library, just make a jar containing the .so file
elif Common_IsConfigureLibrary(lib):
if not clean:
if not os.path.exists(ActualOutputJarDir):
os.makedirs(ActualOutputJarDir)
LibsDir = '%(ActualJavaBuildDir)s/lib' % locals()
if lib.customBuild is None:
ARDeleteIfExists (LibsDir)
if not os.path.exists(LibsDir):
os.makedirs(LibsDir)
for archInfos in ValidArchs:
eabi = archInfos['eabi']
eabiDir = '%(LibsDir)s/%(eabi)s' % locals()
if not os.path.exists(eabiDir):
os.makedirs(eabiDir)
for soname in lib.soLibs:
shutil.copy2(ARPathFromHere('Targets/%(target)s/Install/%(eabi)s/lib/%(soname)s' % locals()), '%(eabiDir)s/%(soname)s' % locals())
if not ARExecute('jar cf %(ActualOutputJar)s -C %(ActualJavaBuildDir)s ./lib' % locals()):
ARLog('Error while creating jar file')
return EndDumpArgs(res=False, **args)
else:
ARDeleteIfExists(OutputJarDbg, OutputJar)
    # Mark the library as built if everything went well
if res:
target.addBuiltLibrary(lib)
return EndDumpArgs(res, **args)
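# Editor's note: a minimal, hedged sketch of the packaging step performed above
# (copy each EABI's .so next to the compiled classes under ./lib, then bundle
# everything into a jar). The function name and arguments below are illustrative
# only and are not part of this build script.
def _package_jar_sketch(build_dir, src_dir, output_jar, eabis, so_name):
    import os
    import shutil
    import subprocess
    for eabi in eabis:
        lib_dir = os.path.join(build_dir, 'lib', eabi)
        if not os.path.exists(lib_dir):
            os.makedirs(lib_dir)
        # Mirror of the per-EABI copy from ./libs to ./lib done above
        shutil.copy2(os.path.join(build_dir, 'libs', eabi, so_name),
                     os.path.join(lib_dir, so_name))
    # Equivalent of the "jar cf <jar> -C <build> ./lib -C <src> ." call above
    subprocess.check_call(['jar', 'cf', output_jar,
                           '-C', build_dir, './lib',
                           '-C', src_dir, '.'])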
|
|
import warnings
from operator import itemgetter
from itertools import product
import numpy as np
import colorsys
import param
from ..core import util
from ..core.data import (ArrayInterface, NdElementInterface, DictInterface)
from ..core import (Dimension, NdMapping, Element2D,
Overlay, Element, Dataset, NdElement)
from ..core.boundingregion import BoundingRegion, BoundingBox
from ..core.sheetcoords import SheetCoordinateSystem, Slice
from ..core.util import pd
from .chart import Curve
from .tabular import Table
from .util import compute_edges, toarray
try:
from ..core.data import PandasInterface
except ImportError:
PandasInterface = None
class Raster(Element2D):
"""
Raster is a basic 2D element type for presenting either numpy or
dask arrays as two dimensional raster images.
    Arrays with a shape of (N,M) are valid inputs for Raster whereas
subclasses of Raster (e.g. RGB) may also accept 3D arrays
containing channel information.
Raster does not support slicing like the Image or RGB subclasses
and the extents are in matrix coordinates if not explicitly
specified.
"""
group = param.String(default='Raster', constant=True)
kdims = param.List(default=[Dimension('x'), Dimension('y')],
bounds=(2, 2), constant=True, doc="""
The label of the x- and y-dimension of the Raster in form
of a string or dimension object.""")
vdims = param.List(default=[Dimension('z')], bounds=(1, 1), doc="""
The dimension description of the data held in the data array.""")
def __init__(self, data, extents=None, **params):
if extents is None:
(d1, d2) = data.shape[:2]
extents = (0, 0, d2, d1)
super(Raster, self).__init__(data, extents=extents, **params)
@property
def _zdata(self):
return self.data
def __getitem__(self, slices):
if slices in self.dimensions(): return self.dimension_values(slices)
slices = util.process_ellipses(self,slices)
if not isinstance(slices, tuple):
slices = (slices, slice(None))
elif len(slices) > (2 + self.depth):
raise KeyError("Can only slice %d dimensions" % 2 + self.depth)
elif len(slices) == 3 and slices[-1] not in [self.vdims[0].name, slice(None)]:
raise KeyError("%r is the only selectable value dimension" % self.vdims[0].name)
slc_types = [isinstance(sl, slice) for sl in slices[:2]]
data = self.data.__getitem__(slices[:2][::-1])
if all(slc_types):
return self.clone(data, extents=None)
elif not any(slc_types):
return toarray(data, index_value=True)
else:
return self.clone(np.expand_dims(data, axis=slc_types.index(True)),
extents=None)
def _coord2matrix(self, coord):
return int(round(coord[1])), int(round(coord[0]))
@classmethod
def collapse_data(cls, data_list, function, kdims=None, **kwargs):
if isinstance(function, np.ufunc):
return function.reduce(data_list)
else:
return function(np.dstack(data_list), axis=-1, **kwargs)
def sample(self, samples=[], **sample_values):
"""
Sample the Raster along one or both of its dimensions,
returning a reduced dimensionality type, which is either
        an ItemTable, Curve or Scatter. If two dimension samples
and a new_xaxis is provided the sample will be the value
of the sampled unit indexed by the value in the new_xaxis
tuple.
"""
if isinstance(samples, tuple):
X, Y = samples
samples = zip(X, Y)
params = dict(self.get_param_values(onlychanged=True),
vdims=self.vdims)
params.pop('extents', None)
params.pop('bounds', None)
if len(sample_values) == self.ndims or len(samples):
if not len(samples):
samples = zip(*[c if isinstance(c, list) else [c] for _, c in
sorted([(self.get_dimension_index(k), v) for k, v in
sample_values.items()])])
table_data = [c+(self._zdata[self._coord2matrix(c)],)
for c in samples]
params['kdims'] = self.kdims
return Table(table_data, **params)
else:
dimension, sample_coord = list(sample_values.items())[0]
if isinstance(sample_coord, slice):
raise ValueError(
                    'Raster sampling requires coordinates, not slices; '
                    'use regular slicing syntax.')
# Indices inverted for indexing
sample_ind = self.get_dimension_index(dimension)
if sample_ind is None:
raise Exception("Dimension %s not found during sampling" % dimension)
other_dimension = [d for i, d in enumerate(self.kdims) if
i != sample_ind]
# Generate sample slice
sample = [slice(None) for i in range(self.ndims)]
coord_fn = (lambda v: (v, 0)) if not sample_ind else (lambda v: (0, v))
sample[sample_ind] = self._coord2matrix(coord_fn(sample_coord))[abs(sample_ind-1)]
# Sample data
x_vals = self.dimension_values(other_dimension[0].name, False)
ydata = self._zdata[sample[::-1]]
if hasattr(self, 'bounds') and sample_ind == 0: ydata = ydata[::-1]
data = list(zip(x_vals, ydata))
params['kdims'] = other_dimension
return Curve(data, **params)
def reduce(self, dimensions=None, function=None, **reduce_map):
"""
Reduces the Raster using functions provided via the
kwargs, where the keyword is the dimension to be reduced.
Optionally a label_prefix can be provided to prepend to
the result Element label.
"""
function, dims = self._reduce_map(dimensions, function, reduce_map)
if len(dims) == self.ndims:
if isinstance(function, np.ufunc):
return function.reduce(self.data, axis=None)
else:
return function(self.data)
else:
dimension = dims[0]
other_dimension = [d for d in self.kdims if d.name != dimension]
oidx = self.get_dimension_index(other_dimension[0])
x_vals = self.dimension_values(other_dimension[0].name, False)
reduced = function(self._zdata, axis=oidx)
if oidx and hasattr(self, 'bounds'):
reduced = reduced[::-1]
data = zip(x_vals, reduced)
params = dict(dict(self.get_param_values(onlychanged=True)),
kdims=other_dimension, vdims=self.vdims)
params.pop('bounds', None)
params.pop('extents', None)
return Table(data, **params)
def dimension_values(self, dim, expanded=True, flat=True):
"""
The set of samples available along a particular dimension.
"""
dim_idx = self.get_dimension_index(dim)
if not expanded and dim_idx == 0:
return np.array(range(self.data.shape[1]))
elif not expanded and dim_idx == 1:
return np.array(range(self.data.shape[0]))
elif dim_idx in [0, 1]:
values = np.mgrid[0:self.data.shape[1], 0:self.data.shape[0]][dim_idx]
return values.flatten() if flat else values
elif dim_idx == 2:
return toarray(self.data.T).flatten()
else:
return super(Raster, self).dimension_values(dim)
@property
def depth(self):
return 1 if len(self.data.shape) == 2 else self.data.shape[2]
@property
def mode(self):
"""
Mode specifying the color space for visualizing the array data
and is a function of the depth. For a depth of one, a colormap
is used as determined by the style. If the depth is 3 or 4,
the mode is 'rgb' or 'rgba' respectively.
"""
if self.depth == 1: return 'cmap'
elif self.depth == 3: return 'rgb'
elif self.depth == 4: return 'rgba'
else:
raise Exception("Mode cannot be determined from the depth")
class QuadMesh(Raster):
"""
QuadMesh is a Raster type to hold x- and y- bin values
with associated values. The x- and y-values of the QuadMesh
may be supplied either as the edges of each bin allowing
uneven sampling or as the bin centers, which will be converted
to evenly sampled edges.
    As a secondary but less supported mode, QuadMesh can contain
a mesh of quadrilateral coordinates that is not laid out in
a grid. The data should then be supplied as three separate
2D arrays for the x-/y-coordinates and grid values.
"""
group = param.String(default="QuadMesh", constant=True)
kdims = param.List(default=[Dimension('x'), Dimension('y')])
vdims = param.List(default=[Dimension('z')], bounds=(1,1))
def __init__(self, data, **params):
data = self._process_data(data)
Element2D.__init__(self, data, **params)
self.data = self._validate_data(self.data)
self._grid = self.data[0].ndim == 1
@property
def depth(self): return 1
def _process_data(self, data):
data = tuple(np.array(el) for el in data)
x, y, zarray = data
ys, xs = zarray.shape
if x.ndim == 1 and len(x) == xs:
x = compute_edges(x)
if y.ndim == 1 and len(y) == ys:
y = compute_edges(y)
return (x, y, zarray)
@property
def _zdata(self):
return self.data[2]
def _validate_data(self, data):
x, y, z = data
if not z.ndim == 2:
raise ValueError("Z-values must be 2D array")
ys, xs = z.shape
shape_errors = []
if x.ndim == 1 and xs+1 != len(x):
shape_errors.append('x')
        if y.ndim == 1 and ys+1 != len(y):
shape_errors.append('y')
if shape_errors:
raise ValueError("%s-edges must match shape of z-array." %
'/'.join(shape_errors))
return data
def __getitem__(self, slices):
if slices in self.dimensions(): return self.dimension_values(slices)
slices = util.process_ellipses(self,slices)
if not self._grid:
raise KeyError("Indexing of non-grid based QuadMesh"
"currently not supported")
if len(slices) > (2 + self.depth):
raise KeyError("Can only slice %d dimensions" % (2 + self.depth))
elif len(slices) == 3 and slices[-1] not in [self.vdims[0].name, slice(None)]:
raise KeyError("%r is the only selectable value dimension" % self.vdims[0].name)
slices = slices[:2]
if not isinstance(slices, tuple): slices = (slices, slice(None))
slc_types = [isinstance(sl, slice) for sl in slices]
if not any(slc_types):
indices = []
for idx, data in zip(slices, self.data[:self.ndims]):
indices.append(np.digitize([idx], data)-1)
return self.data[2][tuple(indices[::-1])]
else:
sliced_data, indices = [], []
for slc, data in zip(slices, self.data[:self.ndims]):
if isinstance(slc, slice):
low, high = slc.start, slc.stop
lidx = ([None] if low is None else
max((np.digitize([low], data)-1, 0)))[0]
hidx = ([None] if high is None else
np.digitize([high], data))[0]
sliced_data.append(data[lidx:hidx])
indices.append(slice(lidx, (hidx if hidx is None else hidx-1)))
else:
index = (np.digitize([slc], data)-1)[0]
sliced_data.append(data[index:index+2])
indices.append(index)
z = np.atleast_2d(self.data[2][tuple(indices[::-1])])
if not all(slc_types) and not slc_types[0]:
z = z.T
return self.clone(tuple(sliced_data+[z]))
@classmethod
def collapse_data(cls, data_list, function, kdims=None, **kwargs):
"""
Allows collapsing the data of a number of QuadMesh
Elements with a function.
"""
if not all(data[0].ndim == 1 for data in data_list):
raise Exception("Collapsing of non-grid based QuadMesh"
"currently not supported")
        xs, ys, zs = zip(*data_list)
if isinstance(function, np.ufunc):
z = function.reduce(zs)
else:
z = function(np.dstack(zs), axis=-1, **kwargs)
return xs[0], ys[0], z
def _coord2matrix(self, coord):
return tuple((np.digitize([coord[i]], self.data[i])-1)[0]
for i in [1, 0])
def range(self, dimension):
idx = self.get_dimension_index(dimension)
if idx in [0, 1]:
data = self.data[idx]
return np.min(data), np.max(data)
elif idx == 2:
data = self.data[idx]
return np.nanmin(data), np.nanmax(data)
        return super(QuadMesh, self).range(dimension)
def dimension_values(self, dimension, expanded=True, flat=True):
idx = self.get_dimension_index(dimension)
data = self.data[idx]
if idx in [0, 1]:
if not self._grid:
return data.flatten()
odim = self.data[2].shape[idx] if expanded else 1
vals = np.tile(np.convolve(data, np.ones((2,))/2, mode='valid'), odim)
if idx:
return np.sort(vals)
else:
return vals
elif idx == 2:
return data.flatten() if flat else data
else:
return super(QuadMesh, self).dimension_values(idx)
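# Editor's note: a hedged construction sketch for QuadMesh; bin centres are
# passed for x and y and are converted to edges by _process_data above. Not
# part of the library itself.
def _quadmesh_usage_sketch():
    import numpy as np
    xs = np.linspace(0.5, 3.5, 4)          # 4 bin centres -> 5 edges
    ys = np.linspace(0.5, 2.5, 3)          # 3 bin centres -> 4 edges
    zs = np.arange(12).reshape(3, 4)       # shape (len(ys), len(xs))
    qmesh = QuadMesh((xs, ys, zs))
    return qmesh.range('z')                # (min, max) of the value array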
class HeatMap(Dataset, Element2D):
"""
HeatMap is an atomic Element used to visualize two dimensional
parameter spaces. It supports sparse or non-linear spaces, dynamically
upsampling them to a dense representation, which can be visualized.
A HeatMap can be initialized with any dict or NdMapping type with
two-dimensional keys. Once instantiated the dense representation is
available via the .data property.
"""
group = param.String(default='HeatMap', constant=True)
kdims = param.List(default=[Dimension('x'), Dimension('y')])
vdims = param.List(default=[Dimension('z')])
def __init__(self, data, extents=None, **params):
super(HeatMap, self).__init__(data, **params)
data, self.raster = self._compute_raster()
self.data = data.data
self.interface = data.interface
self.depth = 1
if extents is None:
(d1, d2) = self.raster.shape[:2]
self.extents = (0, 0, d2, d1)
else:
self.extents = extents
def _compute_raster(self):
d1keys = self.dimension_values(0, False)
d2keys = self.dimension_values(1, False)
coords = [(d1, d2, np.NaN) for d1 in d1keys for d2 in d2keys]
dtype = 'dataframe' if pd else 'dictionary'
dense_data = Dataset(coords, kdims=self.kdims, vdims=self.vdims, datatype=[dtype])
concat_data = self.interface.concatenate([dense_data, Dataset(self)], datatype=dtype)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'Mean of empty slice')
data = concat_data.aggregate(self.kdims, np.nanmean)
array = data.dimension_values(2).reshape(len(d1keys), len(d2keys))
return data, np.flipud(array.T)
def __setstate__(self, state):
if '_data' in state:
data = state['_data']
if isinstance(data, NdMapping):
items = [tuple(k)+((v,) if np.isscalar(v) else tuple(v))
for k, v in data.items()]
kdims = state['kdims'] if 'kdims' in state else self.kdims
vdims = state['vdims'] if 'vdims' in state else self.vdims
data = Dataset(items, kdims=kdims, vdims=vdims).data
elif isinstance(data, Dataset):
data = data.data
kdims = data.kdims
vdims = data.vdims
state['data'] = data
state['kdims'] = kdims
state['vdims'] = vdims
self.__dict__ = state
if isinstance(self.data, NdElement):
self.interface = NdElementInterface
elif isinstance(self.data, np.ndarray):
self.interface = ArrayInterface
elif util.is_dataframe(self.data):
self.interface = PandasInterface
elif isinstance(self.data, dict):
self.interface = DictInterface
self.depth = 1
data, self.raster = self._compute_raster()
self.interface = data.interface
self.data = data.data
if 'extents' not in state:
(d1, d2) = self.raster.shape[:2]
self.extents = (0, 0, d2, d1)
super(HeatMap, self).__setstate__(state)
def dense_keys(self):
d1keys = self.dimension_values(0, False)
d2keys = self.dimension_values(1, False)
return list(zip(*[(d1, d2) for d1 in d1keys for d2 in d2keys]))
def dframe(self, dense=False):
if dense:
keys1, keys2 = self.dense_keys()
dense_map = self.clone({(k1, k2): self._data.get((k1, k2), np.NaN)
for k1, k2 in product(keys1, keys2)})
return dense_map.dframe()
return super(HeatMap, self).dframe()
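# Editor's note: a hedged usage sketch for HeatMap; a sparse list of
# (x, y, z) tuples is densified by _compute_raster above, with NaN filling
# the missing combinations. Not part of the library itself.
def _heatmap_usage_sketch():
    hmap = HeatMap([('a', 'x', 1), ('a', 'y', 2), ('b', 'x', 3)])
    return hmap.raster                     # dense 2D array; the ('b', 'y') cell is NaN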
class Image(SheetCoordinateSystem, Raster):
"""
Image is the atomic unit as which 2D data is stored, along with
its bounds object. The input data may be a numpy.matrix object or
a two-dimensional numpy array.
Allows slicing operations of the data in sheet coordinates or direct
access to the data, via the .data attribute.
"""
bounds = param.ClassSelector(class_=BoundingRegion, default=BoundingBox(), doc="""
The bounding region in sheet coordinates containing the data.""")
group = param.String(default='Image', constant=True)
vdims = param.List(default=[Dimension('z')],
bounds=(1, 1), doc="""
The dimension description of the data held in the matrix.""")
def __init__(self, data, bounds=None, extents=None, xdensity=None, ydensity=None, **params):
bounds = bounds if bounds is not None else BoundingBox()
if np.isscalar(bounds):
bounds = BoundingBox(radius=bounds)
elif isinstance(bounds, (tuple, list, np.ndarray)):
l, b, r, t = bounds
bounds = BoundingBox(points=((l, b), (r, t)))
if data is None: data = np.array([[0]])
l, b, r, t = bounds.lbrt()
extents = extents if extents else (None, None, None, None)
Element2D.__init__(self, data, extents=extents, bounds=bounds,
**params)
(dim1, dim2) = self.data.shape[1], self.data.shape[0]
xdensity = xdensity if xdensity else dim1/float(r-l)
ydensity = ydensity if ydensity else dim2/float(t-b)
SheetCoordinateSystem.__init__(self, bounds, xdensity, ydensity)
if len(self.data.shape) == 3:
if self.data.shape[2] != len(self.vdims):
raise ValueError("Input array has shape %r but %d value dimensions defined"
% (self.data.shape, len(self.vdims)))
def _convert_element(self, data):
if isinstance(data, (Raster, HeatMap)):
return data.data
else:
return super(Image, self)._convert_element(data)
def closest(self, coords=[], **kwargs):
"""
        Given a single coordinate or multiple coordinates (as a
        tuple or list of tuples), or keyword arguments matching a
        dimension, finds the closest actual x/y coordinates on the
        underlying grid.
"""
if kwargs and coords:
raise ValueError("Specify coordinate using as either a list "
"keyword arguments not both")
if kwargs:
coords = []
getter = []
for k, v in kwargs.items():
idx = self.get_dimension_index(k)
if np.isscalar(v):
coords.append((0, v) if idx else (v, 0))
else:
if isinstance(coords, tuple):
coords = [(0, c) if idx else (c, 0) for c in v]
if len(coords) not in [0, len(v)]:
raise ValueError("Length of samples must match")
elif len(coords):
coords = [(t[abs(idx-1)], c) if idx else (c, t[abs(idx-1)])
for c, t in zip(v, coords)]
getter.append(idx)
else:
getter = [0, 1]
getter = itemgetter(*sorted(getter))
coords = list(coords)
if len(coords) == 1:
coords = coords[0]
if isinstance(coords, tuple):
return getter(self.closest_cell_center(*coords))
else:
return [getter(self.closest_cell_center(*el)) for el in coords]
def __getitem__(self, coords):
"""
Slice the underlying numpy array in sheet coordinates.
"""
if coords in self.dimensions(): return self.dimension_values(coords)
coords = util.process_ellipses(self,coords)
        if coords == () or coords == slice(None, None):
return self
if not isinstance(coords, tuple):
coords = (coords, slice(None))
if len(coords) > (2 + self.depth):
raise KeyError("Can only slice %d dimensions" % 2 + self.depth)
elif len(coords) == 3 and coords[-1] not in [self.vdims[0].name, slice(None)]:
raise KeyError("%r is the only selectable value dimension" % self.vdims[0].name)
coords = coords[:2]
if not any([isinstance(el, slice) for el in coords]):
return self.data[self.sheet2matrixidx(*coords)]
if all([isinstance(c, slice) for c in coords]):
l, b, r, t = self.bounds.lbrt()
xcoords, ycoords = coords
xstart = l if xcoords.start is None else max(l, xcoords.start)
xend = r if xcoords.stop is None else min(r, xcoords.stop)
ystart = b if ycoords.start is None else max(b, ycoords.start)
yend = t if ycoords.stop is None else min(t, ycoords.stop)
bounds = BoundingBox(points=((xstart, ystart), (xend, yend)))
else:
raise KeyError('Indexing requires x- and y-slice ranges.')
return self.clone(Slice(bounds, self).submatrix(self.data),
bounds=bounds)
def range(self, dim, data_range=True):
dim_idx = dim if isinstance(dim, int) else self.get_dimension_index(dim)
dim = self.get_dimension(dim_idx)
if dim.range != (None, None):
return dim.range
elif dim_idx in [0, 1]:
l, b, r, t = self.bounds.lbrt()
if dim_idx:
drange = (b, t)
else:
drange = (l, r)
elif dim_idx < len(self.vdims) + 2:
dim_idx -= 2
data = np.atleast_3d(self.data)[:, :, dim_idx]
drange = (np.nanmin(data), np.nanmax(data))
if data_range:
soft_range = [sr for sr in dim.soft_range if sr is not None]
if soft_range:
return util.max_range([drange, soft_range])
else:
return drange
else:
return dim.soft_range
def _coord2matrix(self, coord):
return self.sheet2matrixidx(*coord)
def dimension_values(self, dim, expanded=True, flat=True):
"""
The set of samples available along a particular dimension.
"""
dim_idx = self.get_dimension_index(dim)
if dim_idx in [0, 1]:
l, b, r, t = self.bounds.lbrt()
dim2, dim1 = self.data.shape[:2]
d1_half_unit = (r - l)/dim1/2.
d2_half_unit = (t - b)/dim2/2.
d1lin = np.linspace(l+d1_half_unit, r-d1_half_unit, dim1)
d2lin = np.linspace(b+d2_half_unit, t-d2_half_unit, dim2)
if expanded:
values = np.meshgrid(d2lin, d1lin)[abs(dim_idx-1)]
return values.flatten() if flat else values
else:
return d2lin if dim_idx else d1lin
elif dim_idx == 2:
# Raster arrays are stored with different orientation
# than expanded column format, reorient before expanding
data = np.flipud(self.data).T
return data.flatten() if flat else data
else:
            return super(Image, self).dimension_values(dim)
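# Editor's note: a hedged usage sketch for Image; slicing uses the continuous
# sheet coordinates defined by the bounds rather than array indices. Not part
# of the library itself.
def _image_usage_sketch():
    import numpy as np
    img = Image(np.random.rand(10, 10), bounds=(0, 0, 1, 1))
    sub = img[0.25:0.75, 0.25:0.75]        # sub-region selected in sheet coordinates
    return sub.bounds.lbrt()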
class RGB(Image):
"""
    An RGB element is an Image containing channel data for the
    red, green, blue and (optionally) alpha channels. The values
    of each channel must be in the range 0.0 to 1.0.
    The input array may have a shape of NxMx3 or NxMx4. When an
    alpha channel is supplied, the defined alpha_dimension parameter
    is appended to the list of value dimensions.
"""
group = param.String(default='RGB', constant=True)
alpha_dimension = param.ClassSelector(default=Dimension('A',range=(0,1)),
class_=Dimension, instantiate=False, doc="""
        The alpha dimension definition to add to the value dimensions
        if an alpha channel is supplied.""")
vdims = param.List(
default=[Dimension('R', range=(0,1)), Dimension('G',range=(0,1)),
Dimension('B', range=(0,1))], bounds=(3, 4), doc="""
The dimension description of the data held in the matrix.
If an alpha channel is supplied, the defined alpha_dimension
is automatically appended to this list.""")
@property
def rgb(self):
"""
Returns the corresponding RGB element.
Other than the updating parameter definitions, this is the
        only change needed to implement an arbitrary colorspace as a
subclass of RGB.
"""
return self
@classmethod
def load_image(cls, filename, height=1, array=False, bounds=None, bare=False, **kwargs):
"""
        Returns a raster element or raw numpy array from a PNG image
file, using matplotlib.
The specified height determines the bounds of the raster
object in sheet coordinates: by default the height is 1 unit
with the width scaled appropriately by the image aspect ratio.
Note that as PNG images are encoded as RGBA, the red component
maps to the first channel, the green component maps to the
        second channel, etc. For RGB elements, this mapping is
trivial but may be important for subclasses e.g. for HSV
elements.
        Setting bare=True will apply options disabling axis labels,
displaying just the bare image. Any additional keyword
arguments will be passed to the Image object.
"""
try:
from matplotlib import pyplot as plt
        except ImportError:
raise ImportError("RGB.load_image requires matplotlib.")
data = plt.imread(filename)
if array: return data
(h, w, _) = data.shape
if bounds is None:
f = float(height) / h
xoffset, yoffset = w*f/2, h*f/2
bounds=(-xoffset, -yoffset, xoffset, yoffset)
rgb = cls(data, bounds=bounds, **kwargs)
if bare: rgb = rgb(plot=dict(xaxis=None, yaxis=None))
return rgb
def dimension_values(self, dim, expanded=True, flat=True):
"""
The set of samples available along a particular dimension.
"""
dim_idx = self.get_dimension_index(dim)
if self.ndims <= dim_idx < len(self.dimensions()):
data = np.flipud(self.data[:,:,dim_idx-self.ndims]).T
return data.flatten() if flat else data
return super(RGB, self).dimension_values(dim, expanded, flat)
def __init__(self, data, **params):
sliced = None
if isinstance(data, Overlay):
images = data.values()
if not all(isinstance(im, Image) for im in images):
raise ValueError("Input overlay must only contain Image elements")
shapes = [im.data.shape for im in images]
if not all(shape==shapes[0] for shape in shapes):
raise ValueError("Images in the input overlays must contain data of the consistent shape")
ranges = [im.vdims[0].range for im in images]
if any(None in r for r in ranges):
raise ValueError("Ranges must be defined on all the value dimensions of all the Images")
arrays = [(im.data - r[0]) / (r[1] - r[0]) for r,im in zip(ranges, images)]
data = np.dstack(arrays)
if not isinstance(data, Element):
if len(data.shape) != 3:
raise ValueError("Three dimensional matrices or arrays required")
elif data.shape[2] == 4:
sliced = data[:,:,:-1]
if len(params.get('vdims',[])) == 4:
alpha_dim = params['vdims'].pop(3)
params['alpha_dimension'] = alpha_dim
super(RGB, self).__init__(data if sliced is None else sliced, **params)
if sliced is not None:
self.vdims.append(self.alpha_dimension)
self.data = data
def __getitem__(self, coords):
"""
Slice the underlying numpy array in sheet coordinates.
"""
if coords in self.dimensions(): return self.dimension_values(coords)
coords = util.process_ellipses(self, coords)
if not isinstance(coords, slice) and len(coords) > self.ndims:
values = coords[self.ndims:]
channels = [el for el in values
if isinstance(el, (str, util.unicode, Dimension))]
if len(channels) == 1:
sliced = super(RGB, self).__getitem__(coords[:self.ndims])
if channels[0] not in self.vdims:
raise KeyError("%r is not an available value dimension"
% channels[0])
vidx = self.get_dimension_index(channels[0])
val_index = vidx - self.ndims
data = sliced.data[:,:, val_index]
return Image(data, **dict(util.get_param_values(self),
vdims=[self.vdims[val_index]]))
elif len(channels) > 1:
raise KeyError("Channels can only be selected once in __getitem__")
elif all(v==slice(None) for v in values):
coords = coords[:self.ndims]
else:
raise KeyError("Only empty value slices currently supported in RGB")
return super(RGB, self).__getitem__(coords)
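# Editor's note: a hedged sketch of RGB channel selection via __getitem__ above;
# indexing with a value dimension name returns a single-channel Image. Not part
# of the library itself.
def _rgb_usage_sketch():
    import numpy as np
    rgb = RGB(np.random.rand(8, 8, 3))
    red = rgb[:, :, 'R']                   # Image holding only the R channel
    return red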
class HSV(RGB):
"""
Example of a commonly used color space subclassed from RGB used
    for working in an HSV (hue, saturation and value) color space.
"""
group = param.String(default='HSV', constant=True)
alpha_dimension = param.ClassSelector(default=Dimension('A',range=(0,1)),
class_=Dimension, instantiate=False, doc="""
        The alpha dimension definition to add to the value dimensions
        if an alpha channel is supplied.""")
vdims = param.List(
default=[Dimension('H', range=(0,1), cyclic=True),
Dimension('S',range=(0,1)),
Dimension('V', range=(0,1))], bounds=(3, 4), doc="""
The dimension description of the data held in the array.
If an alpha channel is supplied, the defined alpha_dimension
is automatically appended to this list.""")
hsv_to_rgb = np.vectorize(colorsys.hsv_to_rgb)
@property
def rgb(self):
"""
Conversion from HSV to RGB.
"""
hsv = self.hsv_to_rgb(self.data[:,:,0],
self.data[:,:,1],
self.data[:,:,2])
if len(self.vdims) == 4:
hsv += (self.data[:,:,3],)
return RGB(np.dstack(hsv), bounds=self.bounds,
group=self.group,
label=self.label)
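# Editor's note: a hedged sketch of the HSV -> RGB conversion exposed by the
# rgb property above; channels are converted element-wise via colorsys. Not
# part of the library itself.
def _hsv_usage_sketch():
    import numpy as np
    hsv = HSV(np.dstack([np.random.rand(4, 4) for _ in range(3)]))
    return hsv.rgb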
|
|
from collections import OrderedDict
import numpy as np
from .base import labeller_func, validate_input, connectivity_from_array
@labeller_func(group_label="car_streetscene_view_0_8")
def car_streetscene_20_to_car_streetscene_view_0_8(pcloud):
r"""
Apply the 8-point semantic labels of "view 0" from the MIT Street Scene
Car dataset (originally a 20-point markup).
The semantic labels applied are as follows:
- front
- bonnet
- windshield
References
----------
.. [1] http://www.cs.cmu.edu/~vboddeti/alignment.html
"""
from menpo.shape import LabelledPointUndirectedGraph
n_expected_points = 20
validate_input(pcloud, n_expected_points)
front_indices = np.array([0, 1, 3, 2])
bonnet_indices = np.array([2, 3, 5, 4])
windshield_indices = np.array([4, 5, 7, 6])
front_connectivity = connectivity_from_array(front_indices, close_loop=True)
bonnet_connectivity = connectivity_from_array(bonnet_indices, close_loop=True)
windshield_connectivity = connectivity_from_array(
windshield_indices, close_loop=True
)
all_connectivity = np.vstack(
[front_connectivity, bonnet_connectivity, windshield_connectivity]
)
mapping = OrderedDict()
mapping["front"] = front_indices
mapping["bonnet"] = bonnet_indices
mapping["windshield"] = windshield_indices
ind = np.arange(8)
new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
pcloud.points[ind], all_connectivity, mapping
)
return new_pcloud, mapping
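# Editor's note: a hedged application sketch; assumes the decorated labeller can
# be called directly with a 20-point menpo PointCloud (the exact return value is
# determined by the labeller_func decorator defined in .base). Not part of the
# labelling module itself.
def _car_view_0_8_sketch():
    import numpy as np
    from menpo.shape import PointCloud
    pcloud = PointCloud(np.random.rand(20, 2))
    return car_streetscene_20_to_car_streetscene_view_0_8(pcloud)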
@labeller_func(group_label="car_streetscene_view_1_14")
def car_streetscene_20_to_car_streetscene_view_1_14(pcloud):
"""
Apply the 14-point semantic labels of "view 1" from the MIT Street Scene
Car dataset (originally a 20-point markup).
The semantic labels applied are as follows:
- front
- bonnet
- windshield
- left_side
References
----------
.. [1] http://www.cs.cmu.edu/~vboddeti/alignment.html
"""
from menpo.shape import LabelledPointUndirectedGraph
n_expected_points = 20
validate_input(pcloud, n_expected_points)
front_indices = np.array([0, 1, 3, 2])
bonnet_indices = np.array([2, 3, 5, 4])
windshield_indices = np.array([4, 5, 7, 6])
left_side_indices = np.array([0, 2, 4, 6, 8, 9, 10, 11, 13, 12])
front_connectivity = connectivity_from_array(front_indices, close_loop=True)
bonnet_connectivity = connectivity_from_array(bonnet_indices, close_loop=True)
windshield_connectivity = connectivity_from_array(
windshield_indices, close_loop=True
)
left_side_connectivity = connectivity_from_array(left_side_indices, close_loop=True)
all_connectivity = np.vstack(
[
front_connectivity,
bonnet_connectivity,
windshield_connectivity,
left_side_connectivity,
]
)
mapping = OrderedDict()
mapping["front"] = front_indices
mapping["bonnet"] = bonnet_indices
mapping["windshield"] = windshield_indices
mapping["left_side"] = left_side_indices
ind = np.hstack((np.arange(9), np.array([10, 12, 14, 16, 18])))
new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
pcloud.points[ind], all_connectivity, mapping
)
return new_pcloud, mapping
@labeller_func(group_label="car_streetscene_view_2_10")
def car_streetscene_20_to_car_streetscene_view_2_10(pcloud):
r"""
Apply the 10-point semantic labels of "view 2" from the MIT Street Scene
Car dataset (originally a 20-point markup).
The semantic labels applied are as follows:
- left_side
References
----------
.. [1] http://www.cs.cmu.edu/~vboddeti/alignment.html
"""
from menpo.shape import LabelledPointUndirectedGraph
n_expected_points = 20
validate_input(pcloud, n_expected_points)
left_side_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 9, 8])
left_side_connectivity = connectivity_from_array(left_side_indices, close_loop=True)
all_connectivity = left_side_connectivity
mapping = OrderedDict()
mapping["left_side"] = left_side_indices
ind = np.array([0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
pcloud.points[ind], all_connectivity, mapping
)
return new_pcloud, mapping
@labeller_func(group_label="car_streetscene_view_3_14")
def car_streetscene_20_to_car_streetscene_view_3_14(pcloud):
r"""
Apply the 14-point semantic labels of "view 3" from the MIT Street Scene
Car dataset (originally a 20-point markup).
The semantic labels applied are as follows:
- left_side
- rear windshield
- trunk
- rear
References
----------
.. [1] http://www.cs.cmu.edu/~vboddeti/alignment.html
"""
from menpo.shape import LabelledPointUndirectedGraph
n_expected_points = 20
validate_input(pcloud, n_expected_points)
left_side_indices = np.array([0, 1, 2, 3, 4, 6, 8, 10, 13, 12])
rear_windshield_indices = np.array([4, 5, 7, 6])
trunk_indices = np.array([6, 7, 9, 8])
rear_indices = np.array([8, 9, 11, 10])
left_side_connectivity = connectivity_from_array(left_side_indices, close_loop=True)
rear_windshield_connectivity = connectivity_from_array(
rear_windshield_indices, close_loop=True
)
trunk_connectivity = connectivity_from_array(trunk_indices, close_loop=True)
rear_connectivity = connectivity_from_array(rear_indices, close_loop=True)
all_connectivity = np.vstack(
[
left_side_connectivity,
rear_windshield_connectivity,
trunk_connectivity,
rear_connectivity,
]
)
mapping = OrderedDict()
mapping["left_side"] = left_side_indices
mapping["rear_windshield"] = rear_windshield_indices
mapping["trunk"] = trunk_indices
mapping["rear"] = rear_indices
ind = np.array([0, 2, 4, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18])
new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
pcloud.points[ind], all_connectivity, mapping
)
return new_pcloud, mapping
@labeller_func(group_label="car_streetscene_view_4_14")
def car_streetscene_20_to_car_streetscene_view_4_14(pcloud):
r"""
Apply the 14-point semantic labels of "view 4" from the MIT Street Scene
Car dataset (originally a 20-point markup).
The semantic labels applied are as follows:
- front
- bonnet
- windshield
- right_side
References
----------
.. [1] http://www.cs.cmu.edu/~vboddeti/alignment.html
"""
from menpo.shape import LabelledPointUndirectedGraph
n_expected_points = 20
validate_input(pcloud, n_expected_points)
front_indices = np.array([0, 1, 3, 2])
bonnet_indices = np.array([2, 3, 5, 4])
windshield_indices = np.array([4, 5, 7, 6])
right_side_indices = np.array([8, 9, 10, 11, 13, 12, 1, 3, 5, 7])
front_connectivity = connectivity_from_array(front_indices, close_loop=True)
bonnet_connectivity = connectivity_from_array(bonnet_indices, close_loop=True)
windshield_connectivity = connectivity_from_array(
windshield_indices, close_loop=True
)
right_side_connectivity = connectivity_from_array(
right_side_indices, close_loop=True
)
all_connectivity = np.vstack(
[
front_connectivity,
bonnet_connectivity,
windshield_connectivity,
right_side_connectivity,
]
)
mapping = OrderedDict()
mapping["front"] = front_indices
mapping["bonnet"] = bonnet_indices
mapping["windshield"] = windshield_indices
mapping["right_side"] = right_side_indices
ind = np.hstack([np.arange(8), np.array([9, 11, 13, 15, 17, 19])])
new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
pcloud.points[ind], all_connectivity, mapping
)
return new_pcloud, mapping
@labeller_func(group_label="car_streetscene_view_5_10")
def car_streetscene_20_to_car_streetscene_view_5_10(pcloud):
r"""
Apply the 10-point semantic labels of "view 5" from the MIT Street Scene
Car dataset (originally a 20-point markup).
The semantic labels applied are as follows:
- right_side
References
----------
.. [1] http://www.cs.cmu.edu/~vboddeti/alignment.html
"""
from menpo.shape import LabelledPointUndirectedGraph
n_expected_points = 20
validate_input(pcloud, n_expected_points)
right_side_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 9, 8])
right_side_connectivity = connectivity_from_array(
right_side_indices, close_loop=True
)
all_connectivity = right_side_connectivity
mapping = OrderedDict()
mapping["right_side"] = right_side_indices
ind = np.array([1, 3, 5, 7, 9, 11, 13, 15, 17, 19])
new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
pcloud.points[ind], all_connectivity, mapping
)
return new_pcloud, mapping
@labeller_func(group_label="car_streetscene_view_6_14")
def car_streetscene_20_to_car_streetscene_view_6_14(pcloud):
r"""
Apply the 14-point semantic labels of "view 6" from the MIT Street Scene
Car dataset (originally a 20-point markup).
The semantic labels applied are as follows:
- right_side
- rear_windshield
- trunk
- rear
References
----------
.. [1] http://www.cs.cmu.edu/~vboddeti/alignment.html
"""
from menpo.shape import LabelledPointUndirectedGraph
n_expected_points = 20
validate_input(pcloud, n_expected_points)
right_side_indices = np.array([0, 1, 2, 3, 5, 7, 9, 11, 13, 12])
rear_windshield_indices = np.array([4, 5, 7, 6])
trunk_indices = np.array([6, 7, 9, 8])
rear_indices = np.array([8, 9, 11, 10])
right_side_connectivity = connectivity_from_array(
right_side_indices, close_loop=True
)
rear_windshield_connectivity = connectivity_from_array(
rear_windshield_indices, close_loop=True
)
trunk_connectivity = connectivity_from_array(trunk_indices, close_loop=True)
rear_connectivity = connectivity_from_array(rear_indices, close_loop=True)
all_connectivity = np.vstack(
[
right_side_connectivity,
rear_windshield_connectivity,
trunk_connectivity,
rear_connectivity,
]
)
mapping = OrderedDict()
mapping["right_side"] = right_side_indices
mapping["rear_windshield"] = rear_windshield_indices
mapping["trunk"] = trunk_indices
mapping["rear"] = rear_indices
ind = np.array([1, 3, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 19])
new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
pcloud.points[ind], all_connectivity, mapping
)
return new_pcloud, mapping
@labeller_func(group_label="car_streetscene_view_7_8")
def car_streetscene_20_to_car_streetscene_view_7_8(pcloud):
r"""
Apply the 8-point semantic labels of "view 7" from the MIT Street Scene
Car dataset (originally a 20-point markup).
The semantic labels applied are as follows:
- rear_windshield
- trunk
- rear
References
----------
.. [1] http://www.cs.cmu.edu/~vboddeti/alignment.html
"""
from menpo.shape import LabelledPointUndirectedGraph
n_expected_points = 20
validate_input(pcloud, n_expected_points)
rear_windshield_indices = np.array([0, 1, 3, 2])
trunk_indices = np.array([2, 3, 5, 4])
rear_indices = np.array([4, 5, 7, 6])
rear_windshield_connectivity = connectivity_from_array(
rear_windshield_indices, close_loop=True
)
trunk_connectivity = connectivity_from_array(trunk_indices, close_loop=True)
rear_connectivity = connectivity_from_array(rear_indices, close_loop=True)
all_connectivity = np.vstack(
[rear_windshield_connectivity, trunk_connectivity, rear_connectivity]
)
mapping = OrderedDict()
mapping["rear_windshield"] = rear_windshield_indices
mapping["trunk"] = trunk_indices
mapping["rear"] = rear_indices
ind = np.arange(8, 16)
new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
pcloud.points[ind], all_connectivity, mapping
)
return new_pcloud, mapping
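# Editor's note: a hedged re-implementation of what connectivity_from_array(...,
# close_loop=True) appears to produce for the index arrays above: consecutive
# index pairs plus a closing edge. The real helper lives in .base and may differ.
def _closed_loop_connectivity_sketch(indices):
    import numpy as np
    edges = list(zip(indices[:-1], indices[1:])) + [(indices[-1], indices[0])]
    return np.asarray(edges)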
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceEndpointPolicyDefinitionsOperations(object):
"""ServiceEndpointPolicyDefinitionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
service_endpoint_policy_definition_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
service_endpoint_policy_definition_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified ServiceEndpoint policy definitions.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the Service Endpoint Policy.
:type service_endpoint_policy_name: str
:param service_endpoint_policy_definition_name: The name of the service endpoint policy
definition.
:type service_endpoint_policy_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
service_endpoint_policy_definition_name=service_endpoint_policy_definition_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
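    # Editor's note (hedged usage sketch): these operations are normally reached
    # through a generated management client rather than instantiated directly.
    # The attribute name `service_endpoint_policy_definitions` below follows the
    # usual azure-mgmt-network convention but is an assumption, not confirmed here:
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.network import NetworkManagementClient
    #   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   client.service_endpoint_policy_definitions.begin_delete(
    #       "my-rg", "my-policy", "my-definition").result()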
def get(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
service_endpoint_policy_definition_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ServiceEndpointPolicyDefinition"
"""Get the specified service endpoint policy definitions from service endpoint policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
        :param service_endpoint_policy_definition_name: The name of the service endpoint policy
         definition.
:type service_endpoint_policy_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceEndpointPolicyDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.ServiceEndpointPolicyDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
service_endpoint_policy_definition_name, # type: str
service_endpoint_policy_definitions, # type: "_models.ServiceEndpointPolicyDefinition"
**kwargs # type: Any
):
# type: (...) -> "_models.ServiceEndpointPolicyDefinition"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(service_endpoint_policy_definitions, 'ServiceEndpointPolicyDefinition')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
service_endpoint_policy_definition_name, # type: str
service_endpoint_policy_definitions, # type: "_models.ServiceEndpointPolicyDefinition"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ServiceEndpointPolicyDefinition"]
"""Creates or updates a service endpoint policy definition in the specified service endpoint
policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
        :param service_endpoint_policy_definition_name: The name of the service endpoint policy
         definition.
:type service_endpoint_policy_definition_name: str
:param service_endpoint_policy_definitions: Parameters supplied to the create or update service
endpoint policy operation.
:type service_endpoint_policy_definitions: ~azure.mgmt.network.v2020_08_01.models.ServiceEndpointPolicyDefinition
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ServiceEndpointPolicyDefinition or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_08_01.models.ServiceEndpointPolicyDefinition]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinition"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
service_endpoint_policy_definition_name=service_endpoint_policy_definition_name,
service_endpoint_policy_definitions=service_endpoint_policy_definitions,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
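    # Hedged usage sketch (added, not part of the generated client): assuming an
    # azure-identity credential and the standard `service_endpoint_policy_definitions`
    # operation group on NetworkManagementClient, the long-running call might look
    # roughly like:
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.network import NetworkManagementClient
    #
    #   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   poller = client.service_endpoint_policy_definitions.begin_create_or_update(
    #       "my-rg", "my-policy", "my-definition",
    #       {"service": "Microsoft.Storage", "service_resources": ["/subscriptions/<subscription-id>"]},
    #   )
    #   definition = poller.result()   # blocks until the LRO completes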
def list_by_resource_group(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ServiceEndpointPolicyDefinitionListResult"]
"""Gets all service endpoint policy definitions in a service end point policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyDefinitionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_08_01.models.ServiceEndpointPolicyDefinitionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinitionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyDefinitionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions'} # type: ignore
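    # Hedged usage sketch (added; assumes the same hypothetical `client` as in the
    # earlier sketch): the pager returned by list_by_resource_group is iterated
    # lazily, and each item is a deserialized ServiceEndpointPolicyDefinition.
    #
    #   for definition in client.service_endpoint_policy_definitions.list_by_resource_group(
    #           "my-rg", "my-policy"):
    #       print(definition.name, definition.service)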
|
|
#!/usr/bin/env python3
import collections
from math import *
class BigInt:
def __init__(self):
self.number = [0]
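        # Digits are stored least-significant first, e.g. 123 is held as [3, 2, 1];
        # toString() reverses the list to produce the human-readable "123".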
def skim(self):
carrier = 0
for i in range(len(self.number)):
self.number[i] += carrier
head = self.number[i] % 10
carrier = (self.number[i] - head) / 10
self.number[i] = int(head)
while carrier != 0:
head = carrier % 10
carrier = (carrier - head) / 10
self.number.append(int(head))
def add(self, factor):
self.number[0] += factor
self.skim();
def mul(self, factor):
carry = 0
for i in range(len(self.number)):
self.number[i] *= factor
self.number[i] += carry
carry = 0
if self.number[i] > 9:
head = int(self.number[i] % 10)
carry = int((self.number[i] - head) / 10)
self.number[i] = head
while carry != 0:
head = carry % 10
carry = (carry - head) / 10
self.number.append(int(head))
def pow(self, factor):
if factor < 0:
raise NotImplementedError("Negative powers not supported")
if type(factor) == type(0.1) and not factor.is_integer():
raise NotImplementedError("Non-integer powers not supported")
if factor == 0:
            self.number = [1]
return
oldSelf = self.clone()
for _ in range(factor - 1):
self.bigMul(oldSelf)
def smartPow(self, factor):
# Inspired by: https://en.wikipedia.org/wiki/Exponentiation_by_squaring
if factor < 0:
raise NotImplementedError("Negative powers not supported")
if type(factor) == type(0.1) and not factor.is_integer():
raise NotImplementedError("Non-integer powers not supported")
if factor == 0:
            self.number = [1]
return
if factor == 1:
return
if (factor % 2) == 0:
# Even
self.bigMul(self)
self.smartPow(factor / 2)
else:
# Odd
oldSelf = self.clone()
self.bigMul(self)
self.smartPow((factor - 1) / 2)
self.bigMul(oldSelf)
def smartPowIt(self, factor):
# Inspired by: https://en.wikipedia.org/wiki/Exponentiation_by_squaring
if factor < 0:
raise NotImplementedError("Negative powers not supported")
if type(factor) == type(0.1) and not factor.is_integer():
raise NotImplementedError("Non-integer powers not supported")
if factor == 0:
            self.number = [1]
return
if factor == 1:
return
y = BigInt()
y.add(1)
while factor > 1:
if (factor % 2) == 0:
# Even
self.bigMul(self)
factor /= 2
else:
# Odd
y.bigMul(self)
self.bigMul(self)
factor = (factor - 1) / 2
self.bigMul(y)
def skimOne(self, i):
if self.number[i] > 9:
old = self.number[i]
self.number[i] = int(old % 10)
head = int((old - (old % 10)) / 10)
if i + 1 < len(self.number):
self.number[i + 1] += head
else:
self.number.append(head)
def bigAdd(self, bigInt):
# TODO: Self add does not work!
if len(self.number) < len(bigInt.number):
self.number += [0] * (len(bigInt.number) - len(self.number))
for (i, v) in enumerate(bigInt.number):
self.number[i] += bigInt.number[i]
self.skimOne(i)
# TODO: Bottleneck for smartpow is here!
# self.skim()
def bigMul(self, bigFactor):
# We can take the internal list because we construct a new list
# (in total)
# So even if we multiply with self this should still work out
total = BigInt()
# For each factor...
for (i, v) in enumerate(bigFactor.number):
# If v is zero, skip it, because then the order should be skipped
if v == 0:
continue
# Make a copy of the original
digitSelf = self.clone()
# Shift it the amount of places of the current digit
digitSelf.shift(i)
# If v is more than zero, multiply
if v > 1:
digitSelf.mul(v)
total.bigAdd(digitSelf)
# Set the end result
self.number = total.number
def getNumberArray(self):
return list(self.number)
def toString(self):
result = ""
for i in self.number:
result += str(i)
return result[::-1]
def clone(self):
newSelf = BigInt()
newSelf.number = self.getNumberArray()
return newSelf
def shift(self, n):
if n == 0:
return
if n < 0:
raise NotImplementedError("Negative shifts are not yet implemented")
oldLen = len(self.number)
self.number += [0] * n
for i in range(len(self.number) - 1, n - 1, -1):
self.number[i] = self.number[i - n]
self.number[i - n] = 0
    def take(self, n):
        if n < 0:
            raise ValueError("Negative takes are not supported")
        if n == 0:
            self.number = [0]
            return
        self.number = self.number[:n]
def generatePrimeTable(lim):
numbers = [True] * lim
numbers[0] = False
numbers[1] = False
currNum = 4
while currNum < lim:
numbers[currNum] = False
currNum += 2
prime = 3
while prime < lim:
if numbers[prime]:
currNum = prime
currNum += prime
while currNum < lim:
numbers[currNum] = False
currNum += prime
prime += 2
return numbers
class NumberJuggler:
def __init__(self, lim):
print("Generating prime lookup table")
self.primeTable = generatePrimeTable(lim)
print("Generating prime list")
self.primeList = [i for i, b in enumerate(self.primeTable) if b]
print("Finished initializing number juggler")
def getFactorization(self, num):
factorisation = collections.defaultdict(int)
countdown = num
for prime in self.primeList:
if countdown == 1: break
while countdown % prime == 0:
countdown = countdown // prime
factorisation[prime] += 1
return factorisation
def getFactors(self, num):
factorisation = self.getFactorization(num)
result = []
for k, v in factorisation.items():
result.extend([k] * v)
return result
def getPrimeFactors(self, num):
return list(self.getFactorization(num).keys())
def getDivisors(self, num):
if num == 1: return [1]
factorization = self.getFactorization(num)
factors = list(factorization.keys())
factorCounts = [0] * len(factors)
factorCounts[0] = 1
run = True
divisors = [1]
while run:
divisor = 1;
for j in range(0, len(factors)):
if factorCounts[j] != 0:
divisor *= factors[j]**factorCounts[j]
if divisor != num:
divisors.append(divisor)
factorCounts[0] += 1
for j in range(0, len(factorCounts)):
if factorCounts[j] == factorization[factors[j]] + 1:
if j == len(factorCounts) - 1:
run = False
break
else:
factorCounts[j] = 0;
factorCounts[j + 1] += 1
return divisors
def relativePrimes(self, n):
nFactors = set(self.getPrimeFactors(n))
ps = []
for i in range(1, n):
# theSet = None
# if i in relCache:
# theSet = relCache[i]
# else:
if True:
theSet = set(self.getPrimeFactors(i))
# relCache[i] = theSet
if len(theSet & nFactors) == 0:
ps += [i]
return ps
def phi(self, n):
return len(self.relativePrimes(n))
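# Hedged usage sketch (added; reuses only the NumberJuggler API above, limit chosen arbitrarily):
#   nj = NumberJuggler(1000)
#   nj.getFactorization(12)   # -> defaultdict with {2: 2, 3: 1}
#   nj.getFactors(12)         # -> [2, 2, 3]
#   nj.getDivisors(12)        # -> [1, 2, 4, 3, 6] (every divisor except 12 itself)
#   nj.phi(10)                # -> 4 (1, 3, 7 and 9 are coprime to 10)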
def mergeSort(array):
if len(array) <= 1:
return array[:]
else:
mid = len(array) // 2
left = mergeSort(array[:mid])
right = mergeSort(array[mid:])
result = []
while len(left) > 0 and len(right) > 0:
if left[0] < right[0]:
result.append(left.pop(0))
else:
result.append(right.pop(0))
if len(left) > 0:
result.extend(left)
elif len(right) > 0:
result.extend(right)
return result
def removeDupsOrdered(array):
prev = array[0]
result = [prev]
for e in array[1:]:
if e != prev:
prev = e
result.append(e)
return result
def simplifyFraction(nj, numerator, denominator):
if denominator == 0:
return (0, 0)
if numerator == 0:
return (0, 0)
numFactors = nj.getFactors(numerator)
denFactors = nj.getFactors(denominator)
i = 0
while i < len(denFactors):
currFactor = denFactors[i]
if currFactor in denFactors and currFactor in numFactors:
denFactors.remove(currFactor)
numFactors.remove(currFactor)
else:
i += 1
newNumerator = 1
for f in numFactors:
newNumerator *= f
newDenominator = 1
for f in denFactors:
newDenominator *= f
return (newNumerator, newDenominator)
def isPandigital(num):
numStr = str(num)
seen = [False] * len(numStr)
total = 0
for c in numStr:
cInt = int(c)
if cInt < 1 or cInt > len(numStr):
total = -1
break
if not seen[cInt - 1]:
total += 1
seen[cInt - 1] = True
else:
total = -1
break
return total == len(numStr)
def generatePermutations(elements):
allPerms = []
if len(elements) == 1:
return [elements]
for i in range(0, len(elements)):
lessElements = list(elements)
del lessElements[i]
partialPerms = generatePermutations(lessElements)
for perm in partialPerms:
allPerms.append([elements[i]] + perm)
return allPerms
def isPermutationOf(a, b):
a = str(a)
b = str(b)
return sorted(a) == sorted(b)
# Taken from: https://www.reddit.com/r/learnpython/comments/2uhczk/all_possible_slices_of_a_list/
def sublists(s):
length = len(s)
for size in range(1, length + 1):
for start in range(0, (length - size) + 1):
yield s[start:start+size]
# From exercise 64
# http://www.maths.surrey.ac.uk/hosted-sites/R.Knott/Fibonacci/cfINTRO.html#section6
# See the HTML, part 6 for an explanation of the algorithm
# Step will do one iteration
# Given an a, b, c, s.t.:
#
# sqrt(a) + b
# x = -----------
# c
#
# step will return (m, (a, f, e)), s.t.:
#
# sqrt(a) + f
# x' = -----------
# e
#
# where:
#
# m = floor(sqrt(x))
# d = b - m * c
# f = -d
# e = a - d^2
# -------
# c
#
def step(a, b, c):
x = (sqrt(a) + b) / c
m = floor(x)
d = b - m * c
f = -d
e = (a - d ** 2) / c
return (m, (a, f, e))
def continuedFractionsOfSquareRootOf(a):
a = a
b = 0
c = 1
ms = []
(_, (firstA, firstB, _)) = step(a, b, c)
# print("----")
while True:
res = step(a, b, c)
# print(res)
(m_, (a_, b_, c_)) = res
a = a_
b = b_
c = c_
ms += [m_]
if a == firstA and b == firstB and c == 1: break
ms += [int(b + ms[0])]
# print(ms)
return ms
def collapseFractionExpansion(ms):
numerator = 1
denominator = ms[-1]
ms.pop()
while len(ms) > 0:
m = ms[-1]
ms.pop()
numerator += m * denominator
if len(ms) > 0: numerator, denominator = denominator, numerator
return (numerator, denominator)
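# Worked example (added, based on the two helpers above):
#   continuedFractionsOfSquareRootOf(23) -> [4, 1, 3, 1, 8], i.e. the integer part 4
#   followed by the repeating block 1, 3, 1, 8 of the continued fraction of sqrt(23).
#   collapseFractionExpansion([1, 2, 2]) -> (7, 5), since 1 + 1/(2 + 1/2) = 7/5
#   (note that collapseFractionExpansion consumes the list passed to it).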
def isPrime(num):
if num <= 1:
return False
elif num == 2:
return True
else:
        for n in range(2, int(ceil(sqrt(num)) + 1)):
if num % n == 0:
return False
return True
def permute(xs):
k = None
for i in range(len(xs) - 1):
if xs[i] < xs[i + 1]: k = i
    if k is None: return False
l = None
for i in range(k, len(xs)):
if xs[k] < xs[i]: l = i
xs[k], xs[l] = xs[l], xs[k]
xs[k+1:] = xs[k+1:][::-1]
return True
def permutations(xs):
indices = list(range(len(xs)))
yield [xs[i] for i in indices]
keepLooping = permute(indices)
while keepLooping:
keepLooping = permute(indices)
if keepLooping: yield [xs[i] for i in indices]
def isValidGon(ngon):
for i in range(3, len(ngon), 3):
if ngon[0] > ngon[i]:
return False
return True
if __name__ == "__main__":
print("Unit testing!")
print("Tests for BigInt")
bi = BigInt()
bi.add(123)
assert(bi.toString() == "123")
bi.shift(3)
assert(bi.toString() == "123000")
bi = BigInt()
bi.add(50)
bi.mul(5)
# print(bi.toString())
assert(bi.toString() == "250")
ba = BigInt()
ba.add(200)
bb = BigInt()
bb.add(12345)
bb.bigAdd(ba)
assert(bb.toString() == str(12345 + 200))
ba = BigInt()
ba.add(12345)
bb = BigInt()
bb.add(67890)
bb.bigMul(ba)
assert(bb.toString() == str(12345 * 67890))
ba = BigInt()
ba.add(3)
bb = BigInt()
bb.add(3)
ba.bigMul(bb)
ba.bigMul(bb)
assert(ba.toString() == "27")
bi = BigInt()
bi.add(3)
bi.pow(3)
assert(bi.toString() == "27")
bi = BigInt()
bi.add(80)
bi.pow(80)
assert(bi.toString() == str(80 ** 80))
bi = BigInt()
bi.add(3)
bi.smartPow(3)
assert(bi.toString() == "27")
bi = BigInt()
bi.add(80)
bi.smartPow(80)
assert(bi.toString() == str(80 ** 80))
bi = BigInt()
bi.add(3)
bi.smartPowIt(3)
assert(bi.toString() == "27")
bi = BigInt()
bi.add(80)
bi.smartPowIt(80)
assert(bi.toString() == str(80 ** 80))
assert(isPermutationOf(1487, 4817))
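# Hedged extra checks (added; they only reuse the BigInt API defined above):
# verify smartPowIt on a small case and that clone() is independent of the original.
if __name__ == "__main__":
    extra = BigInt()
    extra.add(7)
    extra.smartPowIt(5)
    assert(extra.toString() == str(7 ** 5))
    extraClone = extra.clone()
    extraClone.mul(2)
    # mutating the clone must not affect the original, since clone() copies the digit list
    assert(extra.toString() == str(7 ** 5))
    assert(extraClone.toString() == str(2 * 7 ** 5))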
|
|
import re
from datetime import datetime, timedelta
from string import capwords
from dateutil.parser import parse as parsedate
from loguru import logger
from flexget.utils import qualities
from flexget.utils.parsers.generic import ParseWarning, default_ignore_prefixes, name_to_re
from flexget.utils.parsers.parser import TitleParser
from flexget.utils.tools import ReList
logger = logger.bind(name='seriesparser')
ID_TYPES = ['ep', 'date', 'sequence', 'id'] # may also be 'special'
class SeriesParser(TitleParser):
"""
Parse series.
:name: series name
:data: data to parse
:expect_ep: expect series to be in season, ep format (ep_regexps)
:expect_id: expect series to be in id format (id_regexps)
"""
separators = '[/ -]'
roman_numeral_re = 'X{0,3}(?:IX|XI{0,4}|VI{0,4}|IV|V|I{1,4})'
english_numbers = [
'one',
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
'ten',
]
# Make sure none of these are found embedded within a word or other numbers
ep_regexps = ReList(
[
TitleParser.re_not_in_word(regexp)
for regexp in [
r'(?:series|season|s)\s?(\d{1,4})(?:\s(?:.*\s)?)?(?:episode|ep|e|part|pt)\s?(\d{1,3}|%s)(?:\s?e?(\d{1,2}))?'
% roman_numeral_re,
r'(?:series|season)\s?(\d{1,4})\s(\d{1,3})\s?of\s?(?:\d{1,3})',
r'(\d{1,2})\s?x\s?(\d+)(?:\s(\d{1,2}))?',
r'(\d{1,3})\s?of\s?(?:\d{1,3})',
r'(?:episode|e|ep|part|pt)\s?(\d{1,3}|%s)' % roman_numeral_re,
r'part\s(%s)' % '|'.join(map(str, english_numbers)),
]
]
)
season_pack_regexps = ReList(
[
# S01 or Season 1 but not Season 1 Episode|Part 2
r'(?:season\s?|s)(\d{1,})(?:\s|$)(?!(?:(?:.*?\s)?(?:episode|e|ep|part|pt)\s?(?:\d{1,3}|%s)|(?:\d{1,3})\s?of\s?(?:\d{1,3})))'
% roman_numeral_re,
r'(\d{1,3})\s?x\s?all', # 1xAll
]
)
unwanted_regexps = ReList(
[
r'(\d{1,3})\s?x\s?(0+)[^1-9]', # 5x0
r'S(\d{1,3})D(\d{1,3})', # S3D1
r'(?:s|series|\b)\s?\d\s?(?:&\s?\d)?[\s-]*(?:complete|full)',
r'disc\s\d',
]
)
# Make sure none of these are found embedded within a word or other numbers
date_regexps = ReList(
[
TitleParser.re_not_in_word(regexp)
for regexp in [
r'(\d{2,4})%s(\d{1,2})%s(\d{1,2})' % (separators, separators),
r'(\d{1,2})%s(\d{1,2})%s(\d{2,4})' % (separators, separators),
r'(\d{4})x(\d{1,2})%s(\d{1,2})' % separators,
r'(\d{1,2})(?:st|nd|rd|th)?%s([a-z]{3,10})%s(\d{4})' % (separators, separators),
]
]
)
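    # Hedged examples (added): after the punctuation clean-up done in parse(), titles
    # containing e.g. '2012-08-25', '25 08 2012' or '25th August 2012' are picked up
    # by the patterns above.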
sequence_regexps = ReList(
[
TitleParser.re_not_in_word(regexp)
for regexp in [
r'(\d{1,3})(?:v(?P<version>\d))?',
r'(?:pt|part)\s?(\d+|%s)' % roman_numeral_re,
]
]
)
unwanted_sequence_regexps = ReList([r'seasons?\s?\d{1,2}'])
id_regexps = ReList([])
clean_regexps = ReList([r'\[.*?\]', r'\(.*?\)'])
# ignore prefix regexps must be passive groups with 0 or 1 occurrences eg. (?:prefix)?
ignore_prefixes = default_ignore_prefixes
def __init__(
self,
name=None,
alternate_names=None,
identified_by='auto',
name_regexps=None,
ep_regexps=None,
date_regexps=None,
sequence_regexps=None,
id_regexps=None,
strict_name=False,
allow_groups=None,
allow_seasonless=True,
date_dayfirst=None,
date_yearfirst=None,
special_ids=None,
prefer_specials=False,
assume_special=False,
):
"""
Init SeriesParser.
:param string name: Name of the series parser is going to try to parse. If not supplied series name will be
guessed from data.
:param list alternate_names: Other names for this series that should be allowed.
:param string identified_by: What kind of episode numbering scheme is expected,
valid values are ep, date, sequence, id and auto (default).
:param list name_regexps: Regexps for name matching or None (default),
by default regexp is generated from name.
:param list ep_regexps: Regexps detecting episode,season format.
Given list is prioritized over built-in regexps.
:param list date_regexps: Regexps detecting date format.
Given list is prioritized over built-in regexps.
:param list sequence_regexps: Regexps detecting sequence format.
Given list is prioritized over built-in regexps.
:param list id_regexps: Custom regexps detecting id format.
Given list is prioritized over built in regexps.
        :param boolean strict_name: If True, the name must be immediately followed by the episode identifier.
        :param list allow_groups: Optionally specify list of release group names that are allowed.
            This will also populate attribute `group`.
        :param date_dayfirst: Prefer day first notation of dates when there are multiple possible interpretations.
        :param date_yearfirst: Prefer year first notation of dates when there are multiple possible interpretations.
        :param special_ids: Identifiers which will cause entry to be flagged as a special.
        :param boolean prefer_specials: If True, label entry which matches both a series identifier and a special
            identifier as a special.
        :param boolean assume_special: If True, an entry that matches no other identifier is treated as a special.
"""
self.episodes = 1
self.name = name
self.alternate_names = alternate_names or []
self.data = ''
self.identified_by = identified_by
# Stores the type of identifier found, 'ep', 'date', 'sequence' or 'special'
self.id_type = None
self.name_regexps = ReList(name_regexps or [])
self.re_from_name = False
# If custom identifier regexps were provided, prepend them to the appropriate type of built in regexps
for mode in ID_TYPES:
listname = mode + '_regexps'
if locals()[listname]:
setattr(
self, listname, ReList(locals()[listname] + getattr(SeriesParser, listname))
)
self.specials = self.specials + [i.lower() for i in (special_ids or [])]
self.prefer_specials = prefer_specials
self.assume_special = assume_special
self.strict_name = strict_name
self.allow_groups = allow_groups or []
self.allow_seasonless = allow_seasonless
self.date_dayfirst = date_dayfirst
self.date_yearfirst = date_yearfirst
self.field = None
self._reset()
def _reset(self):
# parse produces these
self.season = None
self.episode = None
self.episodes = 1
self.id = None
self.id_type = None
self.id_groups = None
self.quality = None
self.proper_count = 0
self.special = False
# TODO: group is only produced with allow_groups
self.group = None
self.season_pack = None
# false if item does not match series
self.valid = False
def remove_dirt(self, data):
"""Replaces some characters with spaces"""
return re.sub(r'[_.,\[\]\(\): ]+', ' ', data).strip().lower()
def guess_name(self):
"""This will attempt to guess a series name based on the provided data."""
# We need to replace certain characters with spaces to make sure episode parsing works right
# We don't remove anything, as the match positions should line up with the original title
clean_title = re.sub(r'[_.,\[\]\(\):]', ' ', self.data)
if self.parse_unwanted(clean_title):
return
match = self.parse_date(clean_title)
if match:
self.identified_by = 'date'
else:
match = self.parse_season_packs(clean_title)
if not match:
match = self.parse_episode(clean_title)
self.identified_by = 'ep'
if not match:
return
if match['match'].start() > 1:
# We start using the original title here, so we can properly ignore unwanted prefixes.
# Look for unwanted prefixes to find out where the series title starts
start = 0
prefix = re.match('|'.join(self.ignore_prefixes), self.data)
if prefix:
start = prefix.end()
# If an episode id is found, assume everything before it is series name
name = self.data[start : match['match'].start()]
# Remove possible episode title from series name (anything after a ' - ')
name = name.split(' - ')[0]
# Replace some special characters with spaces
name = re.sub(r'[\._\(\) ]+', ' ', name).strip(' -')
# Normalize capitalization to title case
name = capwords(name)
self.name = name
return name
def parse(self, data=None, field=None, quality=None):
# Clear the output variables before parsing
self._reset()
self.field = field
if quality:
self.quality = quality
if data:
self.data = data
if not self.data:
raise ParseWarning(self, 'No data supplied to parse.')
if not self.name:
logger.trace('No name for series `{}` supplied, guessing name.', self.data)
if not self.guess_name():
logger.trace('Could not determine a series name')
return
logger.trace('Series name for {} guessed to be {}', self.data, self.name)
# check if data appears to be unwanted (abort)
if self.parse_unwanted(self.remove_dirt(self.data)):
raise ParseWarning(
self, '`{data}` appears to be an episode pack'.format(data=self.data)
)
name = self.remove_dirt(self.name)
logger.trace('name: {} data: {}', name, self.data)
# name end position
name_start = 0
name_end = 0
# regexp name matching
if not self.name_regexps:
# if we don't have name_regexps, generate one from the name
self.name_regexps = ReList(
name_to_re(name, self.ignore_prefixes, self)
for name in [self.name] + self.alternate_names
)
# With auto regex generation, the first regex group captures the name
self.re_from_name = True
# try all specified regexps on this data
for name_re in self.name_regexps:
match = re.search(name_re, self.data)
if match:
match_start, match_end = match.span(1 if self.re_from_name else 0)
# Always pick the longest matching regex
if match_end > name_end:
name_start, name_end = match_start, match_end
logger.trace('NAME SUCCESS: {} matched to {}', name_re.pattern, self.data)
if not name_end:
# leave this invalid
logger.trace(
'FAIL: name regexps {} do not match {}',
[regexp.pattern for regexp in self.name_regexps],
self.data,
)
return
# remove series name from raw data, move any prefix to end of string
data_stripped = self.data[name_end:] + ' ' + self.data[:name_start]
data_stripped = data_stripped.lower()
logger.trace('data stripped: {}', data_stripped)
# allow group(s)
if self.allow_groups:
for group in self.allow_groups:
group = group.lower()
for fmt in ['[%s]', '-%s', '(%s)']:
if fmt % group in data_stripped:
logger.trace('{} is from group {}', self.data, group)
self.group = group
data_stripped = data_stripped.replace(fmt % group, '')
break
if self.group:
break
else:
logger.trace('{} is not from groups {}', self.data, self.allow_groups)
return # leave invalid
# Find quality and clean from data
logger.trace('parsing quality ->')
quality = qualities.Quality(data_stripped)
if quality:
# Remove quality string from data
logger.trace('quality detected, using remaining data `{}`', quality.clean_text)
data_stripped = quality.clean_text
# Don't override passed in quality
if not self.quality:
self.quality = quality
# Remove unwanted words from data for ep / id parsing
data_stripped = self.remove_words(data_stripped, self.remove, not_in_word=True)
data_parts = re.split(r'[\W_]+', data_stripped)
for part in data_parts[:]:
if part in self.propers:
self.proper_count += 1
data_parts.remove(part)
elif part == 'fastsub':
# Subtract 5 to leave room for fastsub propers before the normal release
self.proper_count -= 5
data_parts.remove(part)
elif part in self.specials:
self.special = True
data_parts.remove(part)
data_stripped = ' '.join(data_parts).strip()
logger.trace("data for date/ep/id parsing '{}'", data_stripped)
# Try date mode before ep mode
if self.identified_by in ['date', 'auto']:
date_match = self.parse_date(data_stripped)
if date_match:
if self.strict_name:
if date_match['match'].start() > 1:
return
self.id = date_match['date']
self.id_groups = date_match['match'].groups()
self.id_type = 'date'
self.valid = True
if not (self.special and self.prefer_specials):
return
else:
logger.trace('-> no luck with date_regexps')
if self.identified_by in ['ep', 'auto'] and not self.valid:
ep_match = self.parse_episode(data_stripped)
if ep_match:
# strict_name
if self.strict_name:
if ep_match['match'].start() > 1:
return
if ep_match['end_episode'] and ep_match['end_episode'] > ep_match['episode'] + 2:
# This is a pack of too many episodes, ignore it.
logger.trace(
'Series pack contains too many episodes ({}). Rejecting',
ep_match['end_episode'] - ep_match['episode'],
)
return
self.season = ep_match['season']
self.episode = ep_match['episode']
if ep_match['end_episode']:
self.episodes = (ep_match['end_episode'] - ep_match['episode']) + 1
self.id = (self.season, self.episode)
self.id_type = 'ep'
self.valid = True
if not (self.special and self.prefer_specials):
return
else:
season_pack_match = self.parse_season_packs(data_stripped)
# If a title looks like a special, give it precedence over season pack
if season_pack_match and not self.special:
if self.strict_name and season_pack_match['match'].start() > 1:
return
self.season = season_pack_match['season']
self.season_pack = True
self.id = (season_pack_match['season'], 0)
self.id_type = 'ep'
self.valid = True
else:
logger.trace('-> no luck with ep_regexps')
if self.identified_by == 'ep' and not self.season_pack:
# we should be getting season, ep !
# try to look up idiotic numbering scheme 101,102,103,201,202
# ressu: Added matching for 0101, 0102... It will fail on
# season 11 though
logger.trace('ep identifier expected. Attempting SEE format parsing.')
match = re.search(
self.re_not_in_word(r'(\d?\d)(\d\d)'),
data_stripped,
re.IGNORECASE | re.UNICODE,
)
if match:
# strict_name
if self.strict_name:
if match.start() > 1:
return
self.season = int(match.group(1))
self.episode = int(match.group(2))
self.id = (self.season, self.episode)
logger.trace(self)
self.id_type = 'ep'
self.valid = True
return
else:
logger.trace('-> no luck with SEE')
# Check id regexps
if self.identified_by in ['id', 'auto'] and not self.valid:
for id_re in self.id_regexps:
match = re.search(id_re, data_stripped)
if match:
# strict_name
if self.strict_name:
if match.start() > 1:
return
found_id = '-'.join(g for g in match.groups() if g)
if not found_id:
# If match groups were all blank, don't accept this match
continue
self.id = found_id
self.id_type = 'id'
self.valid = True
logger.trace("found id '{}' with regexp '{}'", self.id, id_re.pattern)
if not (self.special and self.prefer_specials):
return
else:
break
else:
logger.trace('-> no luck with id_regexps')
# Other modes are done, check for unwanted sequence ids
if self.parse_unwanted_sequence(data_stripped):
return
# Check sequences last as they contain the broadest matches
if self.identified_by in ['sequence', 'auto'] and not self.valid:
for sequence_re in self.sequence_regexps:
match = re.search(sequence_re, data_stripped)
if match:
# strict_name
if self.strict_name:
if match.start() > 1:
return
# First matching group is the sequence number
try:
self.id = int(match.group(1))
except ValueError:
self.id = self.roman_to_int(match.group(1))
self.season = 0
self.episode = self.id
# If anime style version was found, overwrite the proper count with it
if 'version' in match.groupdict():
if match.group('version'):
self.proper_count = int(match.group('version')) - 1
self.id_type = 'sequence'
self.valid = True
logger.trace("found id '{}' with regexp '{}'", self.id, sequence_re.pattern)
if not (self.special and self.prefer_specials):
return
else:
break
else:
logger.trace('-> no luck with sequence_regexps')
# No id found, check if this is a special
if self.special or self.assume_special:
# Attempt to set id as the title of the special
self.id = data_stripped or 'special'
self.id_type = 'special'
self.valid = True
logger.trace("found special, setting id to '{}'", self.id)
return
if self.valid:
return
msg = 'Title `%s` looks like series `%s` but cannot find ' % (self.data, self.name)
if self.identified_by == 'auto':
msg += 'any series numbering.'
else:
msg += 'a(n) `%s` style identifier.' % self.identified_by
raise ParseWarning(self, msg)
def parse_unwanted(self, data):
"""Parses data for an unwanted hits. Return True if the data contains unwanted hits."""
for unwanted_re in self.unwanted_regexps:
match = re.search(unwanted_re, data)
if match:
logger.trace('unwanted regexp {} matched {}', unwanted_re.pattern, match.groups())
return True
def parse_unwanted_sequence(self, data):
"""Parses data for an unwanted id hits. Return True if the data contains unwanted hits."""
for seq_unwanted_re in self.unwanted_sequence_regexps:
match = re.search(seq_unwanted_re, data)
if match:
logger.trace('unwanted id regexp {} matched {}', seq_unwanted_re, match.groups())
return True
def parse_date(self, data):
"""
Parses :data: for a date identifier.
If found, returns the date and regexp match object
If no date is found returns False
"""
for date_re in self.date_regexps:
match = re.search(date_re, data)
if match:
# Check if this is a valid date
possdates = []
try:
# By default dayfirst and yearfirst will be tried as both True and False
# if either have been defined manually, restrict that option
dayfirst_opts = [True, False]
if self.date_dayfirst is not None:
dayfirst_opts = [self.date_dayfirst]
yearfirst_opts = [True, False]
if self.date_yearfirst is not None:
yearfirst_opts = [self.date_yearfirst]
kwargs_list = (
{'dayfirst': d, 'yearfirst': y}
for d in dayfirst_opts
for y in yearfirst_opts
)
for kwargs in kwargs_list:
possdate = parsedate(' '.join(match.groups()), **kwargs)
# Don't accept dates farther than a day in the future
if possdate > datetime.now() + timedelta(days=1):
continue
# Don't accept dates that are too old
if possdate < datetime(1970, 1, 1):
continue
if possdate not in possdates:
possdates.append(possdate)
except ValueError:
logger.trace('{} is not a valid date, skipping', match.group(0))
continue
if not possdates:
logger.trace('All possible dates for {} were in the future', match.group(0))
continue
possdates.sort()
# Pick the most recent date if there are ambiguities
bestdate = possdates[-1]
return {'date': bestdate, 'match': match}
return False
def parse_episode(self, data):
"""
Parses :data: for an episode identifier.
If found, returns a dict with keys for season, episode, end_episode and the regexp match object
If no episode id is found returns False
"""
# search for season and episode number
for ep_re in self.ep_regexps:
match = re.search(ep_re, data)
if match:
logger.trace(
'found episode number with regexp {} ({})', ep_re.pattern, match.groups()
)
matches = match.groups()
if len(matches) >= 2:
season = matches[0]
episode = matches[1]
elif self.allow_seasonless:
# assume season 1 if the season was not specified
season = 1
episode = matches[0]
else:
# Return False if we are not allowing seasonless matches and one is found
return False
# Convert season and episode to integers
try:
season = int(season)
if not episode.isdigit():
try:
idx = self.english_numbers.index(str(episode).lower())
episode = 1 + idx
except ValueError:
episode = self.roman_to_int(episode)
else:
episode = int(episode)
except ValueError:
logger.critical(
'Invalid episode number match {} returned with regexp `{}` for {}',
match.groups(),
ep_re.pattern,
self.data,
)
raise
end_episode = None
if len(matches) == 3 and matches[2]:
end_episode = int(matches[2])
if end_episode <= episode or end_episode > episode + 12:
# end episode cannot be before start episode
# Assume large ranges are not episode packs, ticket #1271 TODO: is this the best way?
end_episode = None
# Successfully found an identifier, return the results
return {
'season': season,
'episode': episode,
'end_episode': end_episode,
'match': match,
}
return False
def parse_season_packs(self, data):
"""Parses data for season packs. Return True if the data contains a hit"""
for season_pack_re in self.season_pack_regexps:
match = re.search(season_pack_re, data)
if match:
logger.trace(
'season pack regexp {} match {}', season_pack_re.pattern, match.groups()
)
matches = match.groups()
if len(matches) == 1:
# Single season full pack, no parts etc
season = int(matches[0])
return {'season': season, 'match': match}
elif len(matches) == 2:
# TODO support other formats of season packs: 1xall, s01-PART1, etc.
pass
def roman_to_int(self, roman):
"""Converts roman numerals up to 39 to integers"""
roman_map = [('X', 10), ('IX', 9), ('V', 5), ('IV', 4), ('I', 1)]
roman = roman.upper()
# Return False if this is not a roman numeral we can translate
for char in roman:
if char not in 'XVI':
raise ValueError('`%s` is not a valid roman numeral' % roman)
# Add up the parts of the numeral
i = result = 0
for numeral, integer in roman_map:
while roman[i : i + len(numeral)] == numeral:
result += integer
i += len(numeral)
return result
def __str__(self):
        # For some reason it's impossible to print self.field here; if someone
        # figures out why, please tell me!
valid = 'INVALID'
if self.valid:
valid = 'OK'
return '<SeriesParser(data=%s,name=%s,id=%s,season=%s,season_pack=%s,episode=%s,quality=%s,proper=%s,' 'status=%s)>' % (
self.data,
self.name,
str(self.id),
self.season,
self.season_pack,
self.episode,
self.quality,
self.proper_count,
valid,
)
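# Hedged usage sketch (added; attribute names taken from the parse() implementation above):
#
#   parser = SeriesParser(name='Some Show')
#   parser.parse('Some.Show.S02E03.720p.HDTV-GRP')
#   parser.valid                  # True once both the name and an identifier matched
#   parser.id_type, parser.id     # expected to be 'ep' and (2, 3) for this title
#   parser.quality                # a qualities.Quality parsed from the remaining text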
|
|
from __future__ import print_function, division, absolute_import
import errno
import imp
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import warnings
import numpy as np
from numba import unittest_support as unittest
from numba import utils, vectorize, jit
from numba.config import NumbaWarning
from .support import TestCase
def dummy(x):
return x
def add(x, y):
return x + y
def addsub(x, y, z):
return x - y + z
def addsub_defaults(x, y=2, z=3):
return x - y + z
def star_defaults(x, y=2, *z):
return x, y, z
class TestDispatcher(TestCase):
def compile_func(self, pyfunc):
def check(*args, **kwargs):
expected = pyfunc(*args, **kwargs)
result = f(*args, **kwargs)
self.assertPreciseEqual(result, expected)
f = jit(nopython=True)(pyfunc)
return f, check
def test_numba_interface(self):
"""
Check that vectorize can accept a decorated object.
"""
vectorize('f8(f8)')(jit(dummy))
def test_no_argument(self):
@jit
def foo():
return 1
# Just make sure this doesn't crash
foo()
def test_coerce_input_types(self):
# Issue #486: do not allow unsafe conversions if we can still
# compile other specializations.
c_add = jit(nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
self.assertPreciseEqual(c_add(12.3, 45.6), add(12.3, 45.6))
self.assertPreciseEqual(c_add(12.3, 45.6j), add(12.3, 45.6j))
self.assertPreciseEqual(c_add(12300000000, 456), add(12300000000, 456))
# Now force compilation of only a single specialization
c_add = jit('(i4, i4)', nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
# Implicit (unsafe) conversion of float to int
self.assertPreciseEqual(c_add(12.3, 45.6), add(12, 45))
with self.assertRaises(TypeError):
# Implicit conversion of complex to int disallowed
c_add(12.3, 45.6j)
def test_ambiguous_new_version(self):
"""Test compiling new version in an ambiguous case
"""
@jit
def foo(a, b):
return a + b
INT = 1
FLT = 1.5
self.assertAlmostEqual(foo(INT, FLT), INT + FLT)
self.assertEqual(len(foo.overloads), 1)
self.assertAlmostEqual(foo(FLT, INT), FLT + INT)
self.assertEqual(len(foo.overloads), 2)
self.assertAlmostEqual(foo(FLT, FLT), FLT + FLT)
self.assertEqual(len(foo.overloads), 3)
# The following call is ambiguous because (int, int) can resolve
# to (float, int) or (int, float) with equal weight.
self.assertAlmostEqual(foo(1, 1), INT + INT)
self.assertEqual(len(foo.overloads), 4, "didn't compile a new "
"version")
def test_lock(self):
"""
Test that (lazy) compiling from several threads at once doesn't
produce errors (see issue #908).
"""
errors = []
@jit
def foo(x):
return x + 1
def wrapper():
try:
self.assertEqual(foo(1), 2)
except BaseException as e:
errors.append(e)
threads = [threading.Thread(target=wrapper) for i in range(16)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertFalse(errors)
def test_named_args(self):
"""
Test passing named arguments to a dispatcher.
"""
f, check = self.compile_func(addsub)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# All calls above fall under the same specialization
self.assertEqual(len(f.overloads), 1)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected 3, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6)
self.assertIn("missing argument 'z'", str(cm.exception))
def test_default_args(self):
"""
Test omitting arguments with a default value.
"""
f, check = self.compile_func(addsub_defaults)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# Now omitting some values
check(3, z=10)
check(3, 4)
check(x=3, y=4)
check(3)
check(x=3)
# All calls above fall under the same specialization
self.assertEqual(len(f.overloads), 1)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected at least 1, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(y=6, z=7)
self.assertIn("missing argument 'x'", str(cm.exception))
def test_star_args(self):
"""
Test a compiled function with starargs in the signature.
"""
f, check = self.compile_func(star_defaults)
check(4)
check(4, 5)
check(4, 5, 6)
check(4, 5, 6, 7)
check(4, 5, 6, 7, 8)
check(x=4)
check(x=4, y=5)
check(4, y=5)
with self.assertRaises(TypeError) as cm:
f(4, 5, y=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, 5, z=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, x=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
def test_explicit_signatures(self):
f = jit("(int64,int64)")(add)
# Approximate match (unsafe conversion)
self.assertPreciseEqual(f(1.5, 2.5), 3)
self.assertEqual(len(f.overloads), 1, f.overloads)
f = jit(["(int64,int64)", "(float64,float64)"])(add)
# Exact signature matches
self.assertPreciseEqual(f(1, 2), 3)
self.assertPreciseEqual(f(1.5, 2.5), 4.0)
# Approximate match (int32 -> float64 is a safe conversion)
self.assertPreciseEqual(f(np.int32(1), 2.5), 3.5)
# No conversion
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertIn("No matching definition", str(cm.exception))
self.assertEqual(len(f.overloads), 2, f.overloads)
# A more interesting one...
f = jit(["(float32,float32)", "(float64,float64)"])(add)
self.assertPreciseEqual(f(np.float32(1), np.float32(2**-25)), 1.0)
self.assertPreciseEqual(f(1, 2**-25), 1.0000000298023224)
# Fail to resolve ambiguity between the two best overloads
f = jit(["(float32,float64)",
"(float64,float32)",
"(int64,int64)"])(add)
with self.assertRaises(TypeError) as cm:
f(1.0, 2.0)
# The two best matches are output in the error message, as well
# as the actual argument types.
self.assertRegexpMatches(
str(cm.exception),
r"Ambiguous overloading for <function add [^>]*> \(float64, float64\):\n"
r"\(float32, float64\) -> float64\n"
r"\(float64, float32\) -> float64"
)
# The integer signature is not part of the best matches
self.assertNotIn("int64", str(cm.exception))
def test_signature_mismatch(self):
tmpl = "Signature mismatch: %d argument types given, but function takes 2 arguments"
with self.assertRaises(TypeError) as cm:
jit("()")(add)
self.assertIn(tmpl % 0, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,)")(add)
self.assertIn(tmpl % 1, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,intc,intc)")(add)
self.assertIn(tmpl % 3, str(cm.exception))
# With forceobj=True, an empty tuple is accepted
jit("()", forceobj=True)(add)
with self.assertRaises(TypeError) as cm:
jit("(intc,)", forceobj=True)(add)
self.assertIn(tmpl % 1, str(cm.exception))
def test_matching_error_message(self):
f = jit("(intc,intc)")(add)
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertEqual(str(cm.exception),
"No matching definition for argument type(s) "
"complex128, complex128")
class TestDispatcherMethods(TestCase):
def test_recompile(self):
closure = 1
@jit
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2.5)
self.assertEqual(len(foo.signatures), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
# Everything was recompiled
self.assertEqual(len(foo.signatures), 2)
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3.5)
def test_recompile_signatures(self):
# Same as above, but with an explicit signature on @jit.
closure = 1
@jit("int32(int32)")
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3)
def test_inspect_llvm(self):
# Create a jited function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all llvm in a dict
llvms = foo.inspect_llvm()
self.assertEqual(len(llvms), 3)
# make sure the function name shows up in the llvm
for llvm_bc in llvms.values():
# Look for the function name
self.assertIn("foo", llvm_bc)
# Look for the argument names
self.assertIn("explicit_arg1", llvm_bc)
self.assertIn("explicit_arg2", llvm_bc)
def test_inspect_asm(self):
# Create a jited function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all llvm in a dict
asms = foo.inspect_asm()
self.assertEqual(len(asms), 3)
# make sure the function name shows up in the llvm
for asm in asms.values():
# Look for the function name
self.assertTrue("foo" in asm)
def test_inspect_types(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method
foo.inspect_types(utils.StringIO())
def test_issue_with_array_layout_conflict(self):
"""
        This tests an issue with the dispatcher when an array that is both
        C and F contiguous is supplied as the first signature.
        The dispatcher checks for F contiguity first but the compiler checks
        for C contiguity first. This can result in C contiguous code being
        registered as the F contiguous specialization.
"""
def pyfunc(A, i, j):
return A[i, j]
cfunc = jit(pyfunc)
ary_c_and_f = np.array([[1.]])
ary_c = np.array([[0., 1.], [2., 3.]], order='C')
ary_f = np.array([[0., 1.], [2., 3.]], order='F')
exp_c = pyfunc(ary_c, 1, 0)
exp_f = pyfunc(ary_f, 1, 0)
self.assertEqual(1., cfunc(ary_c_and_f, 0, 0))
got_c = cfunc(ary_c, 1, 0)
got_f = cfunc(ary_f, 1, 0)
self.assertEqual(exp_c, got_c)
self.assertEqual(exp_f, got_f)
class TestCache(TestCase):
here = os.path.dirname(__file__)
# The source file that will be copied
usecases_file = os.path.join(here, "cache_usecases.py")
# Make sure this doesn't conflict with another module
modname = "caching_test_fodder"
def setUp(self):
self.tempdir = tempfile.mkdtemp()
sys.path.insert(0, self.tempdir)
self.modfile = os.path.join(self.tempdir, self.modname + ".py")
self.cache_dir = os.path.join(self.tempdir, "__pycache__")
shutil.copy(self.usecases_file, self.modfile)
self.maxDiff = None
def tearDown(self):
sys.modules.pop(self.modname, None)
sys.path.remove(self.tempdir)
shutil.rmtree(self.tempdir)
def import_module(self):
# Import a fresh version of the test module
old = sys.modules.pop(self.modname, None)
if old is not None:
# Make sure cached bytecode is removed
if sys.version_info >= (3,):
cached = [old.__cached__]
else:
if old.__file__.endswith(('.pyc', '.pyo')):
cached = [old.__file__]
else:
cached = [old.__file__ + 'c', old.__file__ + 'o']
for fn in cached:
try:
os.unlink(fn)
except OSError as e:
if e.errno != errno.ENOENT:
raise
mod = __import__(self.modname)
self.assertEqual(mod.__file__.rstrip('co'), self.modfile)
return mod
def cache_contents(self):
try:
return [fn for fn in os.listdir(self.cache_dir)
if not fn.endswith(('.pyc', ".pyo"))]
except OSError as e:
if e.errno != errno.ENOENT:
raise
return []
def get_cache_mtimes(self):
return dict((fn, os.path.getmtime(os.path.join(self.cache_dir, fn)))
for fn in sorted(self.cache_contents()))
def check_cache(self, n):
c = self.cache_contents()
self.assertEqual(len(c), n, c)
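    # Note (added, hedged): with on-disk caching numba typically writes one index file
    # per cached function plus one data file per compiled signature into __pycache__,
    # which is what the "n index, m data" comments below are counting.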
def dummy_test(self):
pass
def run_in_separate_process(self):
# Cached functions can be run from a distinct process
code = """if 1:
import sys
sys.path.insert(0, %(tempdir)r)
mod = __import__(%(modname)r)
assert mod.add_usecase(2, 3) == 6
assert mod.add_objmode_usecase(2, 3) == 6
assert mod.outer(3, 2) == 2
packed_rec = mod.record_return(mod.packed_arr, 1)
assert tuple(packed_rec) == (2, 43.5), packed_rec
aligned_rec = mod.record_return(mod.aligned_arr, 1)
assert tuple(aligned_rec) == (2, 43.5), aligned_rec
""" % dict(tempdir=self.tempdir, modname=self.modname,
test_class=self.__class__.__name__)
popen = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError("process failed with code %s: stderr follows\n%s\n"
% (popen.returncode, err.decode()))
def check_module(self, mod):
self.check_cache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_cache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_cache(3) # 1 index, 2 data
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_cache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_cache(6) # 2 index, 4 data
def test_caching(self):
self.check_cache(0)
mod = self.import_module()
self.check_cache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_cache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_cache(3) # 1 index, 2 data
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_cache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_cache(6) # 2 index, 4 data
f = mod.record_return
rec = f(mod.aligned_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
rec = f(mod.packed_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
self.check_cache(9) # 3 index, 6 data
# Check the code runs ok from another process
self.run_in_separate_process()
def test_inner_then_outer(self):
# Caching inner then outer function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.inner(3, 2), 6)
self.check_cache(2) # 1 index, 1 data
f = mod.outer
self.assertPreciseEqual(f(3, 2), 2)
self.check_cache(4) # 2 index, 2 data
self.assertPreciseEqual(f(3.5, 2), 2.5)
self.check_cache(6) # 2 index, 4 data
def test_outer_then_inner(self):
# Caching outer then inner function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.outer(3, 2), 2)
self.check_cache(4) # 2 index, 2 data
f = mod.inner
self.assertPreciseEqual(f(3, 2), 6)
self.assertPreciseEqual(f(3.5, 2), 6.5)
self.check_cache(5) # 2 index, 3 data
def test_no_caching(self):
mod = self.import_module()
f = mod.add_nocache_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_cache(0)
def test_looplifted(self):
# Loop-lifted functions can't be cached and raise a warning
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.looplifted
self.assertPreciseEqual(f(4), 6)
self.check_cache(0)
self.assertEqual(len(w), 1)
self.assertEqual(str(w[0].message),
'Cannot cache compiled function "looplifted" '
'as it uses lifted loops')
def test_ctypes(self):
# Functions using a ctypes pointer can't be cached and raise
# a warning.
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.use_c_sin
self.assertPreciseEqual(f(0.0), 0.0)
self.check_cache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "use_c_sin"',
str(w[0].message))
def test_closure(self):
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.closure1
self.assertPreciseEqual(f(3), 6)
f = mod.closure2
self.assertPreciseEqual(f(3), 8)
self.check_cache(0)
self.assertEqual(len(w), 2)
for item in w:
self.assertIn('Cannot cache compiled function "closure"',
str(item.message))
def test_cache_reuse(self):
mod = self.import_module()
mod.add_usecase(2, 3)
mod.add_objmode_usecase(2, 3)
mod.outer(2, 3)
mod.record_return(mod.packed_arr, 0)
mod.record_return(mod.aligned_arr, 1)
mtimes = self.get_cache_mtimes()
mod2 = self.import_module()
self.assertIsNot(mod, mod2)
mod.add_usecase(2, 3)
mod.add_objmode_usecase(2, 3)
# The files haven't changed
self.assertEqual(self.get_cache_mtimes(), mtimes)
self.run_in_separate_process()
self.assertEqual(self.get_cache_mtimes(), mtimes)
def test_cache_invalidate(self):
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
# This should change the functions' results
with open(self.modfile, "a") as f:
f.write("\nZ = 10\n")
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_recompile(self):
# Explicit call to recompile() should overwrite the cache
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
mod = self.import_module()
f = mod.add_usecase
mod.Z = 10
self.assertPreciseEqual(f(2, 3), 6)
f.recompile()
self.assertPreciseEqual(f(2, 3), 15)
# Freshly recompiled version is re-used from other imports
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_same_names(self):
        # Functions with the same name should still be disambiguated
mod = self.import_module()
f = mod.renamed_function1
self.assertPreciseEqual(f(2), 4)
f = mod.renamed_function2
self.assertPreciseEqual(f(2), 8)
if __name__ == '__main__':
unittest.main()
|
|
#----------------------------------------------------------------------
# Copyright (c) 2010-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
'''
URN creation and verification utilities.
'''
import re
from ...sfa.util.xrn import Xrn # for URN_PREFIX
class URN(object):
"""
A class that creates and extracts values from URNs
URN Convention:
urn:publicid:IDN+<authority>+<type>+<name>
Authority, type, and name are public ids transcribed into URN format
By convention a CH's name should be "ch" and an AM's should be "am"
The authority of the CH should be the prefix for all of your AM and user authorities
For instance: CH authority = "gcf//gpo//bbn", AM authority = "gcf//gpo/bbn//am1", user authority = "gcf//gpo//bbn"
EXAMPLES:
ch_urn = URN("gcf//gpo//bbn", "authority", "sa").urn_string() for a clearinghouse URN
am1_urn = URN("gcf//gpo//bbn//site1", "authority", "am").urn_string() for an AM at this authority
Looks like urn:publicid:IDN+gcf:gpo:bbn:site1+authority+am
am2_urn = URN("gcf//gpo//bbn//site2", "authority", "am").urn_string() for a second AM at this authority
Looks like urn:publicid:IDN+gcf:gpo:bbn:site2+authority+am
user_urn = URN("gcf//gpo//bbn", "user", "jane").urn_string() for a user made by the clearinghouse
Looks like urn:publicid:IDN+gcf:gpo:bbn+user+jane
slice_urn = URN("gcf//gpo//bbn", "slice", "my-great-experiment").urn_string()
Looks like urn:publicid:IDN+gcf:gpo:bbn+slice+my-great-experiment
resource_at_am1_urn = URN("gcf//gpo//bbn/site1", "node", "LinuxBox23").urn_string() for Linux Machine 23 managed by AM1 (at site 1)
Looks like urn:publicid:IDN+gcf:gpo:bbn:site1+node+LinuxBox23
"""
def __init__(self, authority=None, type=None, name=None, urn=None):
if not urn is None:
if not is_valid_urn(urn):
raise ValueError("Invalid URN %s" % urn)
spl = urn.split('+')
if len(spl) < 4:
raise ValueError("Invalid URN %s" % urn)
self.authority = urn_to_string_format(spl[1])
self.type = urn_to_string_format(spl[2])
self.name = urn_to_string_format('+'.join(spl[3:]))
self.urn = urn
else:
if not authority or not type or not name:
raise ValueError("Must provide either all of authority, type, and name, or a urn must be provided")
for i in [authority, type, name]:
if i.strip() == '':
raise ValueError("Parameter to create_urn was empty string")
self.authority = authority
self.type = type
self.name = name
# FIXME: check these are valid more?
if not is_valid_urn_string(authority):
authority = string_to_urn_format(authority)
if not is_valid_urn_string(type):
type = string_to_urn_format(type)
if not is_valid_urn_string(name):
name = string_to_urn_format(name)
self.urn = '%s+%s+%s+%s' % (Xrn.URN_PREFIX, authority, type, name)
if not is_valid_urn(self.urn):
raise ValueError("Failed to create valid URN from args %s, %s, %s" % (self.authority, self.type, self.name))
def __str__(self):
return self.urn_string()
def urn_string(self):
return self.urn
def getAuthority(self):
'''Get the authority in un-escaped publicid format'''
return self.authority
def getType(self):
'''Get the URN type in un-escaped publicid format'''
return self.type
def getName(self):
'''Get the name in un-escaped publicid format'''
return self.name
# Translate publicids to URN format.
# The order of these rules matters
# because we want to catch things like double colons before we
# translate single colons. This is only a subset of the rules.
# See the GENI Wiki: GAPI_Identifiers
# See http://www.faqs.org/rfcs/rfc3151.html
publicid_xforms = [('%', '%25'),
(';', '%3B'),
('+', '%2B'),
(' ', '+' ), # note you must first collapse WS
('#', '%23'),
('?', '%3F'),
("'", '%27'),
('::', ';' ),
(':', '%3A'),
('//', ':' ),
('/', '%2F')]
# FIXME: See sfa/util/xrn/Xrn.URN_PREFIX which is ...:IDN
publicid_urn_prefix = 'urn:publicid:'
def nameFromURN(instr):
'''Get the name from the given URN, or empty if not a valid URN'''
if not instr:
return ""
try:
urn = URN(urn=instr)
return urn.getName()
    except Exception as e:
# print 'exception parsing urn: %s' % e
return ""
# validate urn
# Note that this is not sufficient but it is necessary
def is_valid_urn_string(instr):
'''Could this string be part of a URN'''
if instr is None or not (isinstance(instr, str) or
isinstance(instr, unicode)):
return False
    # No whitespace, and none of '|', '?', '/', or '#'
    if isinstance(instr, unicode):
        instr = instr.encode('utf8')
    if re.search(r"[\s|\?\/\#]", instr) is None:
return True
return False
# Note that this is not sufficient but it is necessary
def is_valid_urn(inurn):
''' Check that this string is a valid URN'''
# FIXME: This could pull out the type and do the type specific
# checks that are currently below
# FIXME: This should check for non empty authority and name pieces
return is_valid_urn_string(inurn) and \
inurn.startswith(publicid_urn_prefix) and \
len(inurn.split('+')) > 3
def is_valid_urn_bytype(inurn, urntype, logger=None):
if not is_valid_urn(inurn):
return False
urnObj = URN(urn=inurn)
if not urntype:
urntype = ""
urntype = urntype.lower()
if not urnObj.getType().lower() == urntype:
if logger:
logger.warn("URN %s not of right type: %s, not %s", inurn, urnObj.getType().lower(), urntype)
return False
if len(urnObj.getAuthority()) == 0:
if logger:
logger.warn("URN %s has empty authority", inurn)
return False
name = urnObj.getName()
if urntype == 'slice':
# Slice names are <=19 characters, only alphanumeric plus hyphen (no hyphen in first character): '^[a-zA-Z0-9][-a-zA-Z0-9]{0,18}$'
if len(name) > 19:
if logger:
logger.warn("URN %s too long. Slice names are max 19 characters", inurn)
return False
if not re.match("^[a-zA-Z0-9][-a-zA-Z0-9]{0,18}$", name):
if logger:
logger.warn("Slice names may only be alphanumeric plus hyphen (no leading hyphen): %s", name)
return False
elif urntype == 'sliver':
        # May use only alphanumeric characters plus hyphen, underscore, or period
        # Note that EG uses a ':' as well.
        if not re.match(r"^[-a-zA-Z0-9_\.]+$", name):
if logger:
logger.warn("Sliver names may only be alphanumeric plus hyphen, underscore, or period: %s", name)
return False
elif urntype == 'user':
# Usernames should begin with a letter and be alphanumeric or underscores; no hyphen or '.': ('^[a-zA-Z][\w]{0,7}$').
# Usernames are limited to 8 characters.
if len(name) > 8:
if logger:
logger.warn("URN %s too long. User names are max 8 characters", inurn)
return False
if not re.match("^[a-zA-Z][\w]{0,7}$", name):
if logger:
logger.warn("User names may only be alphanumeric plus underscore, beginning with a letter: %s", name)
return False
elif len(name) == 0:
if logger:
logger.warn("Empty name in URN %s", inurn)
return False
return True
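# Illustrative sketch (not part of the original module): how the validators
# above compose. The slice URN below is hypothetical; any authority and slice
# name that satisfy the documented rules behave the same way.
def _example_validate_slice_urn():
    slice_urn = 'urn:publicid:IDN+gcf:gpo:bbn+slice+my-great-experiment'
    # Generic URN shape check
    assert is_valid_urn(slice_urn)
    # Type-specific check: slice names are <= 19 chars, alphanumeric or hyphen
    assert is_valid_urn_bytype(slice_urn, 'slice')
    # A name longer than 19 characters fails the slice-specific rule
    too_long = 'urn:publicid:IDN+gcf:gpo:bbn+slice+' + 'a' * 20
    assert not is_valid_urn_bytype(too_long, 'slice')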
def urn_to_publicid(urn):
'''Convert a URN like urn:publicid:... to a publicid'''
# Remove prefix
if urn is None or not is_valid_urn(urn):
# Erroneous urn for conversion
raise ValueError('Invalid urn: ' + urn)
publicid = urn[len(publicid_urn_prefix):]
# return the un-escaped string
return urn_to_string_format(publicid)
def publicid_to_urn(id):
'''Convert a publicid to a urn like urn:publicid:.....'''
# prefix with 'urn:publicid:' and escape chars
return publicid_urn_prefix + string_to_urn_format(id)
def string_to_urn_format(instr):
'''Make a string URN compatible, collapsing whitespace and escaping chars'''
if instr is None or instr.strip() == '':
raise ValueError("Empty string cant be in a URN")
# Collapse whitespace
instr = ' '.join(instr.strip().split())
for a, b in publicid_xforms:
instr = instr.replace(a, b)
return instr
def urn_to_string_format(urnstr):
'''Turn a part of a URN into publicid format, undoing transforms'''
if urnstr is None or urnstr.strip() == '':
return urnstr
publicid = urnstr
# Validate it is reasonable URN string?
for a, b in reversed(publicid_xforms):
publicid = publicid.replace(b, a)
return publicid
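# Illustrative sketch (not part of the original module): the publicid <-> URN
# transforms above are inverses, and URN() applies them when it builds a URN
# string from un-escaped pieces. The values used here are hypothetical.
def _example_publicid_roundtrip():
    publicid = 'gcf//gpo//bbn'
    as_urn = string_to_urn_format(publicid)        # 'gcf:gpo:bbn'
    assert urn_to_string_format(as_urn) == publicid
    user_urn = URN('gcf//gpo//bbn', 'user', 'jane').urn_string()
    assert user_urn == 'urn:publicid:IDN+gcf:gpo:bbn+user+jane'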
|
|
from hendrix import defaults
from optparse import make_option, OptionParser
import os
def cleanOptions(options):
"""
Takes an options dict and returns a tuple containing the daemonize boolean,
the reload boolean, and the parsed list of cleaned options as would be
expected to be passed to hx
"""
daemonize = options.pop('daemonize')
_reload = options.pop('reload')
dev = options.pop('dev')
opts = []
store_true = [
'--nocache', '--global_cache', '--traceback', '--quiet', '--loud'
]
store_false = []
for key, value in options.iteritems():
key = '--' + key
if (key in store_true and value) or (key in store_false and not value):
opts += [key, ]
elif value:
opts += [key, str(value)]
return daemonize, _reload, opts
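# Illustrative sketch (not part of the original module): what cleanOptions
# returns for a typical parsed-options dict. The keys mirror the option dests
# defined below; the values are hypothetical.
def _example_clean_options():
    parsed = {
        'daemonize': True,
        'reload': False,
        'dev': False,
        'loud': True,        # store_true flag -> emitted bare as '--loud'
        'http_port': 8000,   # valued option -> emitted as '--http_port', '8000'
        'key': None,         # falsy values are dropped entirely
    }
    daemonize, _reload, opts = cleanOptions(parsed)
    # daemonize is True, _reload is False, and opts contains
    # ['--loud'] plus ['--http_port', '8000'] (order follows dict iteration)
    return daemonize, _reload, opts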
HX_OPTION_LIST = (
make_option(
'-v', '--verbosity',
action='store',
dest='verbosity',
default='1',
type='choice',
choices=['0', '1', '2', '3'],
help=(
'Verbosity level; 0=minimal output, 1=normal output, 2=verbose '
'output, 3=very verbose output'
)
),
make_option(
'--settings',
dest='settings',
type=str,
default='',
help=(
'The Python path to a settings module, e.g. "myproj.settings.x".'
' If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment '
'variable will be used.'
)
),
make_option(
'--log',
dest='log',
type=str,
default=os.path.join(defaults.DEFAULT_LOG_PATH, 'hendrix.log'),
help=(
'file path to where the log files should live '
'[default: $PYTHON_PATH/lib/.../hendrix/hendrix.log]'
)
),
make_option(
'--pythonpath',
help=(
'A directory to add to the Python path, e.g. '
'"/home/djangoprojects/myproject".'
)
),
make_option(
'--reload',
action='store_true',
dest='reload',
default=False,
help=(
"Flag that watchdog should restart the server when changes to the "
"codebase occur. NOTE: Do NOT uset this flag with --daemonize "
"because it will not daemonize."
)
),
make_option(
'-l', '--loud',
action='store_true',
dest='loud',
default=False,
help="Use the custom verbose WSGI handler that prints in color"
),
make_option(
'-q', '--quiet',
action='store_true',
dest='quiet',
default=False,
help="Supress all output."
),
make_option(
'--http_port',
type=int,
dest='http_port',
default=defaults.HTTP_PORT,
help='Enter a port number for the server to serve content.'
),
make_option(
'--https_port',
type=int,
dest='https_port',
default=defaults.HTTPS_PORT,
help='Enter an ssl port number for the server to serve secure content.'
),
make_option(
'--cache_port',
type=int,
dest='cache_port',
default=defaults.CACHE_PORT,
        help='Enter a cache port number to serve cached content.'
),
make_option(
'-g', '--global_cache',
dest='global_cache',
action='store_true',
default=False,
help='Make it so that there is only one cache server'
),
make_option(
'-c', '--cache',
dest='cache',
action='store_true',
default=False,
help='Disable page cache'
),
make_option(
'-w', '--workers',
type=int,
dest='workers',
default=0,
help='Number of processes to run'
),
make_option(
'--key',
type=str,
dest='key',
default=None,
help='Absolute path to SSL private key'
),
make_option(
'--cert',
type=str,
dest='cert',
default=None,
help='Absolute path to SSL public certificate'
),
make_option(
'--fd',
type=str,
dest='fd',
default=None,
help='DO NOT SET THIS'
),
make_option(
'-d', '--daemonize',
dest='daemonize',
action='store_true',
default=False,
help='Run in the background'
),
make_option(
'--dev',
dest='dev',
action='store_true',
default=False,
help=(
            'Runs in development mode, meaning it uses the development wsgi '
            'handler subclass.'
)
),
make_option(
'--wsgi',
dest='wsgi',
type=str,
default=None,
help=(
'Overrides the use of django settings for use in testing. N.B. '
'This option is not for use with hx or hx.py'
)
)
)
HendrixOptionParser = OptionParser(
description=(
'hx is the interface to hendrix, use to start and stop your server'
),
usage='hx start|stop [options]',
option_list=HX_OPTION_LIST
)
def options(argv=[]):
"""
    A helper function that parses argv and returns the resulting options as a dictionary of key-value pairs (defaults included)
"""
parser = HendrixOptionParser
return vars(parser.parse_args(argv)[0])
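# Illustrative sketch (not part of the original module): feeding a typical hx
# command line through the parser defined above. The argument values are
# hypothetical.
def _example_parse_argv():
    parsed = options(['--http_port', '8080', '-d', '--settings', 'myproj.settings'])
    # parsed is a plain dict: parsed['http_port'] == 8080,
    # parsed['daemonize'] is True, parsed['settings'] == 'myproj.settings'
    return parsed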
|
|
__author__ = 'Ryan Morlok ([email protected])'
from datetime import datetime
try:
import iso8601
except ImportError:
import _local_iso8601 as iso8601
from webob import exc
from pytracts import messages, to_url, util
#
# Decorators used to make handlers more explicit. Enable things like declarative, strongly typed query string parameters.
#
class base_query_parameter(object):
"""
Base class for decorators that provide query string parameters.
"""
def __init__(self, name, fself_converter=None, converter=None, validator=None, required=False, default=None, message_missing=None, message_bad_value=None, argument_name=None):
"""
Creates a new decorator to specify a query parameter that should come into an endpoint call.
:param name:
The name of the parameter as it should appear in the query string.
:param fself_converter:
A callable that takes the fself object in addition to the value to be converted. Takes precedence over
the ```converter``` parameter.
:param converter:
A callable that can convert a string value to the desired value type (int, array, etc) for
the parameter value. Only called if the parameter is present.
:param validator:
A lambda expression to validate the value of the parameter. Should return true or false to indicate if
the value is valid. Called with the output of converter, if converter is specified.
:param required:
Flag indicating that this query parameter is required. Will raise an HTTPBadRequest exception if not
present. If not required, None will be passed to the underlying handler.
:param default:
The default value returned if the query parameter is not present.
:param message_missing:
The message to include if the parameter is missing.
:param message_bad_value:
The message to include if the parameter is a bad value.
"""
self.name = name
self.argument_name = argument_name or name
self.fself_converter = fself_converter
self.converter = converter
self.validator = validator
self.required = required
self.message_missing = message_missing
self.message_bad_value = message_bad_value
self.default = default
def raise_bad_request_value_missing(self):
        raise exc.HTTPBadRequest(self.message_missing or ("Required query parameter '%s' is missing." % self.name))
def raise_bad_request_bad_value(self):
raise exc.HTTPBadRequest(self.message_bad_value or ("Value for parameter '%s' is invalid." % self.name))
def __call__(self, f):
"""
Called once to wrap the function in question.
"""
def wrapper(fself, *arguments, **keywords):
"""
Called to invoke the actual function.
"""
param = fself.request.GET.get(self.name)
if param is None:
if self.required:
self.raise_bad_request_value_missing()
else:
keywords[self.argument_name] = self.default
else:
if self.fself_converter is not None:
try:
param = self.fself_converter(fself, param)
except Exception:
self.raise_bad_request_bad_value()
elif self.converter is not None:
try:
param = self.converter(param)
except Exception:
self.raise_bad_request_bad_value()
if self.validator is not None and not self.validator(param):
self.raise_bad_request_bad_value()
keywords[self.argument_name] = param
# Call the underlying function with parameter added
return f(fself, *arguments, **keywords)
return wrapper
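# Illustrative sketch (not part of the original module): how a query-parameter
# decorator wraps a webapp-style handler method. _FakeRequest and
# _ExampleHandler are hypothetical stand-ins; any object exposing
# ``self.request.GET`` (a dict-like of query parameters) works the same way.
class _FakeRequest(object):
    def __init__(self, params):
        self.GET = params
class _ExampleHandler(object):
    def __init__(self, params):
        self.request = _FakeRequest(params)
    @base_query_parameter('limit', converter=int, validator=lambda v: v > 0, default=10)
    def get(self, limit=None):
        return limit
# _ExampleHandler({'limit': '25'}).get() -> 25 (converted by int() and validated)
# _ExampleHandler({}).get()              -> 10 (not required, so the default is used)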
class string(base_query_parameter):
"""
String query parameter.
"""
pass
class iso8601_date(base_query_parameter):
"""
Date query parameter formatted according to ISO8601
"""
def __init__(self, name, validator=None, required=False, message_missing=None, message_bad_value=None, argument_name=None):
"""
Creates a new decorator to specify a query parameter that should come into an endpoint call.
:name:
The name of the parameter as it should appear in the query string.
:validator:
A lambda expression to validate the value of the parameter. Should return true or false to indicate if
the value is valid. Called with the output of converter, if converter is specified.
:required:
Flag indicating that this query parameter is required. Will raise an HTTPBadRequest exception if not
present. If not required, None will be passed to the underlying handler.
:message:
The message to include if the parameter is missing or does not pass validation.
"""
super(iso8601_date, self).__init__(name=name, fself_converter=iso8601_date._parse_date, validator=validator, required=required, message_missing=message_missing, message_bad_value=message_bad_value, argument_name=argument_name)
@classmethod
def _parse_date(cls, fself, date_string):
# Parse the raw date
dt = iso8601.parse_date(date_string, default_timezone=None)
if dt.tzinfo is None:
if hasattr(fself, 'user'):
if hasattr(fself.user, 'tzinfo') and fself.user.tzinfo is not None:
return dt.replace(tzinfo=fself.user.tzinfo)
return dt
class custom_date(base_query_parameter):
"""
Date query parameter formatted with a custom date format
"""
def __init__(self, name, format, validator=None, required=False, message_missing=None, message_bad_value=None, argument_name=None):
"""
Creates a new decorator to specify a query parameter that should come into an endpoint call.
:name:
The name of the parameter as it should appear in the query string.
:format:
            The date format string used to parse the date with strptime
:validator:
A lambda expression to validate the value of the parameter. Should return true or false to indicate if
the value is valid. Called with the output of converter, if converter is specified.
:required:
Flag indicating that this query parameter is required. Will raise an HTTPBadRequest exception if not
present. If not required, None will be passed to the underlying handler.
:message:
The message to include if the parameter is missing or does not pass validation.
"""
self.format = format
super(custom_date, self).__init__(name=name, fself_converter=self._parse_date, validator=validator, required=required, message_missing=message_missing, message_bad_value=message_bad_value, argument_name=argument_name)
def _parse_date(self, fself, date_string):
# Parse the raw date
dt = datetime.strptime(date_string, self.format)
if dt.tzinfo is None:
if hasattr(fself, 'user'):
if hasattr(fself.user, 'tzinfo') and fself.user.tzinfo is not None:
return dt.replace(tzinfo=fself.user.tzinfo)
return dt
class integer(base_query_parameter):
"""
Integer query parameter
"""
def __init__(self, name, default=None, validator=None, required=False, message_missing=None, message_bad_value=None, argument_name=None):
"""
Creates a new decorator to specify a query parameter that should come into an endpoint call.
:name:
The name of the parameter as it should appear in the query string.
:validator:
A lambda expression to validate the value of the parameter. Should return true or false to indicate if
the value is valid. Called with the output of converter, if converter is specified.
:required:
Flag indicating that this query parameter is required. Will raise an HTTPBadRequest exception if not
present. If not required, None will be passed to the underlying handler.
:message:
The message to include if the parameter is missing or does not pass validation.
"""
super(integer, self).__init__(name=name, default=default, converter=lambda x: int(x), validator=validator, required=required, message_missing=message_missing, message_bad_value=message_bad_value, argument_name=argument_name)
class boolean(base_query_parameter):
"""
Boolean query parameter
"""
def __init__(self, name, default=None, validator=None, required=False, message_missing=None, message_bad_value=None, argument_name=None):
"""
Creates a new decorator to specify a query parameter that should come into an endpoint call.
:name:
The name of the parameter as it should appear in the query string.
:validator:
A lambda expression to validate the value of the parameter. Should return true or false to indicate if
the value is valid. Called with the output of converter, if converter is specified.
:required:
Flag indicating that this query parameter is required. Will raise an HTTPBadRequest exception if not
present. If not required, None will be passed to the underlying handler.
:message:
The message to include if the parameter is missing or does not pass validation.
"""
def _string_to_boolean(value):
value = value.lower()
if value == "true":
return True
elif value == "false":
return False
else:
raise ValueError("Invalid boolean '%s'" % value)
super(boolean, self).__init__(name=name, default=default, converter=lambda x: _string_to_boolean(x), validator=validator, required=required, message_missing=message_missing, message_bad_value=message_bad_value, argument_name=argument_name)
class comma_list(base_query_parameter):
"""
List query parameter. E.g. foo=a,b,c.
"""
def __init__(self, name, default=None, converter=None, validator=None, required=False, message_missing=None, message_bad_value=None, argument_name=None):
"""
Creates a new decorator to specify a query parameter that should come into an endpoint call.
:name:
The name of the parameter as it should appear in the query string.
:converter:
A lambda expression that can convert a string value to the desired value type (int, array, etc) for
each value in the list. Only called if the parameter is present.
:validator:
A lambda expression to validate the value of the parameter. Should return true or false to indicate if
the value is valid. Called with the output of converter, if converter is specified.
:required:
Flag indicating that this query parameter is required. Will raise an HTTPBadRequest exception if not
present. If not required, None will be passed to the underlying handler.
:message:
The message to include if the parameter is missing or does not pass validation.
"""
super(comma_list, self).__init__(name=name, default=default, converter=lambda lst: [converter(x) for x in lst.split(',')] if converter is not None else lst.split(','), validator=validator, required=required, message_missing=message_missing, message_bad_value=message_bad_value, argument_name=argument_name)
class integer_list(comma_list):
"""
Integer list query parameter. E.g. foo=1,2,3
"""
def __init__(self, name, default=None, validator=None, required=False, message_missing=None, message_bad_value=None, argument_name=None):
"""
Creates a new decorator to specify a query parameter that should come into an endpoint call.
:name:
The name of the parameter as it should appear in the query string.
:validator:
A lambda expression to validate the value of the parameter. Should return true or false to indicate if
the value is valid. Called with the output of converter, if converter is specified.
:required:
Flag indicating that this query parameter is required. Will raise an HTTPBadRequest exception if not
present. If not required, None will be passed to the underlying handler.
:message:
The message to include if the parameter is missing or does not pass validation.
"""
super(integer_list, self).__init__(name=name, default=default, converter=lambda x: int(x), validator=validator, required=required, message_missing=message_missing, message_bad_value=message_bad_value, argument_name=argument_name)
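# Illustrative sketch (not part of the original module): the decorators above
# stack, each one pulling a single query parameter into a keyword argument.
# _FakeRequest is the hypothetical stand-in defined earlier; any object that
# exposes ``self.request.GET`` behaves the same way.
class _ExampleListHandler(object):
    def __init__(self, params):
        self.request = _FakeRequest(params)
    @boolean('verbose', default=False)
    @integer_list('ids', required=True)
    def get(self, verbose=None, ids=None):
        return verbose, ids
# _ExampleListHandler({'ids': '1,2,3'}).get()                  -> (False, [1, 2, 3])
# _ExampleListHandler({'ids': '1,2', 'verbose': 'true'}).get() -> (True, [1, 2])
# _ExampleListHandler({}).get() raises webob.exc.HTTPBadRequest ('ids' is required)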
def message(*args, **kwargs):
"""
Decorator that allows an endpoint to use pytracts messages for the query parameters.
"""
if len(kwargs) > 1:
raise IndexError("Cannot have more than one mapping for query parameter message")
if len(args) > 1:
raise IndexError("Cannot have more than one mapping for query parameter message")
if len(args) >= 1 and len(kwargs) >= 1:
raise IndexError("Cannot specify both a named parameter and a positional parameter")
if len(kwargs) == 1:
message_param_name = kwargs.keys()[0]
message_param_type = kwargs.values()[0]
elif len(args) == 1:
message_param_name = None
message_param_type = args[0]
else:
raise IndexError("Must specify query parameter message type")
if not isinstance(message_param_type, messages.Message.__metaclass__):
raise TypeError("Message must be of type pytracts.messages.Message")
def get_wrapper(message_param_name, message_param_type, f):
def wrapper(self, *arguments, **keywords):
try:
m = to_url.decode_message_from_url(message_type=message_param_type, url=self.request.url)
if message_param_name:
keywords[message_param_name] = m
else:
arguments += (m,)
except (ValueError, messages.Error) as error:
raise exc.HTTPBadRequest(detail=(error.message or "Could not decode query parameters"))
return f(self, *arguments, **keywords)
return wrapper
return util.curry(get_wrapper, message_param_name, message_param_type)
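# Illustrative sketch (not part of the original module): @message collects the
# whole query string into a single pytracts message instead of one decorator
# per parameter. The field-declaration syntax below is an assumption based on
# the protorpc-style API pytracts derives from; consult pytracts.messages for
# the exact form.
#
# class PageQuery(messages.Message):
#     q = messages.StringField(1)
#     page = messages.IntegerField(2)
#
# class SearchHandler(object):
#     @message(query=PageQuery)
#     def get(self, query=None):
#         # query.q and query.page are decoded from self.request.url;
#         # a malformed query string raises webob.exc.HTTPBadRequest.
#         pass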
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import os
import re
from collections import OrderedDict, defaultdict
from io import open
from threading import RLock, Thread
import click
import requests
import yaml
from pkg_resources import parse_version
from pkg_resources._vendor.packaging.version import Version
from .filters import CompositeFilter
from shellfoundry.models.shell_template import ShellTemplate
from shellfoundry.utilities import GEN_TWO, SEPARATOR
from shellfoundry.utilities.constants import (
SERVER_VERSION_KEY,
TEMPLATE_INFO_FILE,
TEMPLATES_YML,
)
class TemplateRetriever(object):
NAME_PLACEHOLDER = "name"
def get_templates(self, **kwargs):
"""Get templates.
:return: Dictionary of shellfoundry.ShellTemplate
"""
alternative_path = kwargs.get("alternative", None)
template_location = kwargs.get("template_location", None)
standards = kwargs.get("standards", {})
if alternative_path:
response = self._get_templates_from_path(alternative_path)
config = yaml.safe_load(response)
elif template_location:
config = self._get_local_templates(template_location=template_location)
else:
response = self._get_templates_from_github()
config = yaml.safe_load(response)
if not config or "templates" not in config:
return {}
templatesdic = defaultdict(list)
for template in config["templates"]:
if template["repository"]: # Online templates
standard_version = {}
else:
standard_version = template["standard_version"]
templatesdic[template["name"]].append(
ShellTemplate(
name=template["name"],
description=template["description"],
repository=template["repository"],
min_cs_ver=template["min_cs_ver"],
standard=self._get_standard_out_of_name(template["name"]),
standard_version=standard_version,
params=template["params"],
)
)
return self._filter_by_standards(templatesdic, standards)
@staticmethod
def _get_templates_from_github():
"""Get templates data from GitHub."""
session = requests.Session()
session.mount("https://", requests.adapters.HTTPAdapter(max_retries=5))
return session.get(TEMPLATES_YML).text
@staticmethod
def _get_templates_from_path(alternative_path):
"""Get templates data from local file."""
with open(alternative_path, mode="r", encoding="utf8") as stream:
response = stream.read()
return response
def _get_local_templates(self, template_location):
"""Get templates from local storage."""
if not template_location or not os.path.exists(template_location):
raise click.ClickException("Local template location empty or doesn't exist")
else:
templ_info = []
for root, directories, filenames in os.walk(template_location):
for filename in filenames:
if filename == TEMPLATE_INFO_FILE:
full_path = os.path.join(root, filename)
standard_version = self._get_standard_version_from_template(
root
)
with open(full_path, mode="r", encoding="utf8") as f:
templ_data = json.load(f)
templ_info.append(
{
"name": templ_data.get("template_name", "Undefined"),
"description": templ_data.get(
"template_descr", "Undefined"
),
"min_cs_ver": templ_data.get(
SERVER_VERSION_KEY, "Undefined"
),
"repository": "",
"standard_version": {
standard_version: {
"repo": root,
"min_cs_ver": templ_data.get(
SERVER_VERSION_KEY, "Undefined"
),
}
},
"params": {
"project_name": templ_data.get(
"project_name", None
),
"family_name": templ_data.get("family_name", None),
},
}
)
if templ_info:
templates = {
"templates": sorted(
templ_info,
key=lambda data: list(data["standard_version"].keys())[0],
)
}
else:
templates = None
return templates
@staticmethod
def _get_standard_version_from_template(template_location):
"""Get standard version from template shell-definition file."""
for root, directories, filenames in os.walk(template_location):
for filename in filenames:
if filename == "shell-definition.yaml":
with open(
os.path.join(root, "shell-definition.yaml"), encoding="utf8"
) as stream:
match = re.search(
r"cloudshell_standard:\s*cloudshell_(?P<name>\S+)_standard_(?P<version>\S+)\.\w+$", # noqa: E501
stream.read(),
re.MULTILINE,
)
if match:
return str(match.groupdict()["version"].replace("_", "."))
@staticmethod
def _get_standard_out_of_name(template_name, default=None):
type_index = 0
standard_index = 1
template = template_name.split(SEPARATOR)
if template[type_index] != GEN_TWO:
return default
return template[standard_index]
@staticmethod
def _filter_by_standards(templates, standards):
"""Filter templates by available on CloudShell Standards.
:type templates collections.defaultdict(list)
:type standards dict
:return:
"""
if not standards:
return OrderedDict(sorted(templates.items()))
global filtered_templates
filtered_templates = defaultdict(list)
threads = []
lock = RLock()
for template_name, templates_list in templates.items():
template_thread = Thread(
target=TemplateRetriever._filter_in_threads,
args=(template_name, templates_list, standards, lock),
)
threads.append(template_thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return OrderedDict(sorted(filtered_templates.items()))
@staticmethod
def _filter_in_threads(template_name, templates_list, standards, lock):
clear_template_name = TemplateRetriever._get_standard_out_of_name(template_name)
if clear_template_name is None:
for template in templates_list:
lock.acquire()
filtered_templates[template_name].append(template)
lock.release()
elif clear_template_name in list(standards.keys()):
for template in templates_list:
if (
not template.standard_version
or list(template.standard_version.keys())[0]
in standards[clear_template_name]
):
if template.repository:
template.min_cs_ver = (
TemplateRetriever._get_min_cs_version(
repository=template.repository,
standard_name=template.standard,
standards=standards,
)
or template.min_cs_ver
)
lock.acquire()
filtered_templates[template_name].append(template)
lock.release()
@staticmethod
def _get_min_cs_version(repository, standard_name, standards, branch=None):
"""Get minimal CloudShell Server Version available for provided template."""
if not branch:
branch = str(
min(list(map(parse_version, standards[standard_name])))
) # determine minimal standard version
repository = repository.replace("https://github.com", "https://raw.github.com")
url = "/".join([repository, str(branch), "cookiecutter.json"])
session = requests.Session()
session.mount("https://", requests.adapters.HTTPAdapter(max_retries=5))
        response = session.get(url)
        if response.status_code == requests.codes.ok:
            return response.json().get(SERVER_VERSION_KEY, None)
else:
return
def get_repo_branches(self, repository, github_login=None, github_password=None):
"""Get all available branches for provided repository."""
if repository.endswith("/"):
repository = repository[:-1]
request = "{}/branches".format(
repository.replace("https://github.com", "https://api.github.com/repos")
)
session = requests.Session()
if github_login and github_password:
session.auth = (github_login, github_password)
session.mount("https://", requests.adapters.HTTPAdapter(max_retries=5))
response = session.get(request)
response.raise_for_status()
branches = [item[self.NAME_PLACEHOLDER] for item in response.json()]
repo_branches = []
for item in branches:
if item == "master":
repo_branches.append(item)
elif isinstance(parse_version(item), Version): # only numeric version
repo_branches.append(parse_version(item))
repo_branches.reverse()
return repo_branches
def get_latest_template(
self, repo, version, github_login=None, github_password=None
):
"""Get latest template version based on CloudShell version."""
for branch in self.get_repo_branches(repo, github_login, github_password):
cs_version = self._get_min_cs_version(
repository=repo, standard_name=None, standards=None, branch=branch
)
if cs_version:
try:
if parse_version(version) >= parse_version(cs_version):
return str(branch)
except Exception:
pass
class FilteredTemplateRetriever(object):
def __init__(self, template_type, template_retriever=None):
self.template_retriever = template_retriever or TemplateRetriever()
self.filter = CompositeFilter(template_type).filter
def get_templates(self, **kwargs):
templates = self.template_retriever.get_templates(**kwargs)
return OrderedDict((k, v) for k, v in templates.items() if self.filter(k))
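# Illustrative sketch (not part of the original module): typical use of the
# retrievers above. The standards dict is hypothetical (standard name -> list
# of versions the CloudShell server supports); with no alternative path or
# template_location, templates.yml is fetched from GitHub.
def _example_list_gen2_templates():
    retriever = FilteredTemplateRetriever(GEN_TWO, TemplateRetriever())
    standards = {"networking": ["5.0.0", "5.0.1"]}
    # OrderedDict of template name -> [ShellTemplate, ...], filtered down to
    # templates whose standard/version the server supports
    return retriever.get_templates(standards=standards)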
|
|
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility back to Python 2.5 and (currently) has significant performance
advantages, even without using the optional C extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print(json.dumps("\"foo\bar"))
"\"foo\bar"
>>> print(json.dumps(u'\u1234'))
"\u1234"
>>> print(json.dumps('\\'))
"\\"
>>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True))
{"a": 0, "b": 0, "c": 0}
>>> from simplejson.compat import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> obj = [1,2,3,{'4': 5, '6': 7}]
>>> json.dumps(obj, separators=(',',':'), sort_keys=True)
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' '))
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from simplejson.compat import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError('Object of type %s is not JSON serializable' %
... obj.__class__.__name__)
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 3 (char 2)
Parsing multiple documents serialized as JSON lines (newline-delimited JSON)::
>>> import simplejson as json
>>> def loads_lines(docs):
... for doc in docs.splitlines():
... yield json.loads(doc)
...
>>> sum(doc["count"] for doc in loads_lines('{"count":1}\n{"count":2}\n{"count":3}\n'))
6
Serializing multiple objects to JSON lines (newline-delimited JSON)::
>>> import simplejson as json
>>> def dumps_lines(objs):
... for obj in objs:
... yield json.dumps(obj, separators=(',',':')) + '\n'
...
>>> ''.join(dumps_lines([{'count': 1}, {'count': 2}, {'count': 3}]))
'{"count":1}\n{"count":2}\n{"count":3}\n'
"""
from __future__ import absolute_import
__version__ = '3.16.1'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict', 'simple_first', 'RawJSON', 'c_extension'
]
__author__ = 'Bob Ippolito <[email protected]>'
from decimal import Decimal
from .errors import JSONDecodeError
from .raw_json import RawJSON
from .decoder import JSONDecoder
from .encoder import JSONEncoder, JSONEncoderForHTML
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
from . import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from ._speedups import make_encoder
return make_encoder
except ImportError:
return None
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=True,
namedtuple_as_object=True,
tuple_as_array=True,
iterable_as_array=False,
bigint_as_string=False,
item_sort_key=None,
for_json=False,
ignore_nan=False,
int_as_string_bitcount=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
bigint_as_string=False, sort_keys=False, item_sort_key=None,
for_json=False, ignore_nan=False, int_as_string_bitcount=None,
iterable_as_array=False, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If *skipkeys* is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If *ensure_ascii* is false, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If *check_circular* is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If *allow_nan* is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the original JSON specification, instead of using
the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). See
*ignore_nan* for ECMA-262 compliant behavior.
If *indent* is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, *separators* should be an
``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')``
if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most
compact JSON representation, you should specify ``(',', ':')`` to eliminate
whitespace.
*encoding* is the character encoding for str instances, default is UTF-8.
*default(obj)* is a function that should return a serializable version
of obj or raise ``TypeError``. The default simply raises ``TypeError``.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
If *iterable_as_array* is true (default: ``False``),
any object not in the above table that implements ``__iter__()``
will be encoded as a JSON array.
If *bigint_as_string* is true (default: ``False``), ints 2**53 and higher
or lower than -2**53 will be encoded as strings. This is to avoid the
rounding that happens in Javascript otherwise. Note that this is still a
lossy operation that will not round-trip correctly and should be used
sparingly.
If *int_as_string_bitcount* is a positive number (n), then int of size
greater than or equal to 2**n or lower than or equal to -2**n will be
encoded as strings.
If specified, *item_sort_key* is a callable used to sort the items in
each dictionary. This is useful if you want to sort items other than
in alphabetical order by key. This option takes precedence over
*sort_keys*.
If *sort_keys* is true (default: ``False``), the output of dictionaries
will be sorted by item.
If *for_json* is true (default: ``False``), objects with a ``for_json()``
method will use the return value of that method for encoding as JSON
instead of the object.
If *ignore_nan* is true (default: ``False``), then out of range
:class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized as
``null`` in compliance with the ECMA-262 specification. If true, this will
override *allow_nan*.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg. NOTE: You should use *default* or *for_json* instead
of subclassing whenever possible.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array and not iterable_as_array
and not bigint_as_string and not sort_keys
and not item_sort_key and not for_json
and not ignore_nan and int_as_string_bitcount is None
and not kw
):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
iterable_as_array=iterable_as_array,
bigint_as_string=bigint_as_string,
sort_keys=sort_keys,
item_sort_key=item_sort_key,
for_json=for_json,
ignore_nan=ignore_nan,
int_as_string_bitcount=int_as_string_bitcount,
**kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
bigint_as_string=False, sort_keys=False, item_sort_key=None,
for_json=False, ignore_nan=False, int_as_string_bitcount=None,
iterable_as_array=False, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
    If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, ``separators`` should be an
``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')``
if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most
compact JSON representation, you should specify ``(',', ':')`` to eliminate
whitespace.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
If *iterable_as_array* is true (default: ``False``),
any object not in the above table that implements ``__iter__()``
will be encoded as a JSON array.
If *bigint_as_string* is true (not the default), ints 2**53 and higher
or lower than -2**53 will be encoded as strings. This is to avoid the
rounding that happens in Javascript otherwise.
If *int_as_string_bitcount* is a positive number (n), then int of size
greater than or equal to 2**n or lower than or equal to -2**n will be
encoded as strings.
If specified, *item_sort_key* is a callable used to sort the items in
each dictionary. This is useful if you want to sort items other than
    in alphabetical order by key. This option takes precedence over
*sort_keys*.
If *sort_keys* is true (default: ``False``), the output of dictionaries
will be sorted by item.
If *for_json* is true (default: ``False``), objects with a ``for_json()``
method will use the return value of that method for encoding as JSON
instead of the object.
If *ignore_nan* is true (default: ``False``), then out of range
:class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized as
``null`` in compliance with the ECMA-262 specification. If true, this will
override *allow_nan*.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg. NOTE: You should use *default* instead of subclassing
whenever possible.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array and not iterable_as_array
and not bigint_as_string and not sort_keys
and not item_sort_key and not for_json
and not ignore_nan and int_as_string_bitcount is None
and not kw
):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
iterable_as_array=iterable_as_array,
bigint_as_string=bigint_as_string,
sort_keys=sort_keys,
item_sort_key=item_sort_key,
for_json=for_json,
ignore_nan=ignore_nan,
int_as_string_bitcount=int_as_string_bitcount,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
**kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg. NOTE: You should use *object_hook* or *object_pairs_hook* instead
of subclassing whenever possible.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg. NOTE: You should use *object_hook* or *object_pairs_hook* instead
of subclassing whenever possible.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None
and not use_decimal and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
if use_decimal:
if parse_float is not None:
raise TypeError("use_decimal=True implies parse_float=Decimal")
kw['parse_float'] = Decimal
return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
from . import decoder as dec
from . import encoder as enc
from . import scanner as scan
c_make_encoder = _import_c_make_encoder()
if enabled:
dec.scanstring = dec.c_scanstring or dec.py_scanstring
enc.c_make_encoder = c_make_encoder
enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
enc.py_encode_basestring_ascii)
scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
else:
dec.scanstring = dec.py_scanstring
enc.c_make_encoder = None
enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
scan.make_scanner = scan.py_make_scanner
dec.make_scanner = scan.make_scanner
global _default_decoder
_default_decoder = JSONDecoder(
encoding=None,
object_hook=None,
object_pairs_hook=None,
)
global _default_encoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def simple_first(kv):
"""Helper function to pass to item_sort_key to sort simple
elements to the top, then container elements.
"""
return (isinstance(kv[1], (list, dict, tuple)), kv[0])
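# Illustrative sketch (not part of the original module): simple_first as an
# item_sort_key orders scalar values before containers when encoding a dict.
# >>> import simplejson as json
# >>> json.dumps({'a': [1, 2], 'b': 3}, item_sort_key=simple_first)
# '{"b": 3, "a": [1, 2]}'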
|
|
import os
import json
import random
import string
import uuid
import nova_v2_base
def heat_request(self,
url_detail,
request_type='get',
request_name=None,
data=None,
locust_name=None):
url = self.get_endpoint('orchestration')
if url_detail:
url = os.path.join(url, url_detail)
headers = {'X-Auth-Project-Id': self.keystone_tenant,
'X-Auth-Token': self.auth_token,
'Content-Type': 'application/json',
'Accept': 'application/json'}
if data:
response = getattr(self.client, request_type)(url,
headers=headers,
data=json.dumps(data),
name=locust_name)
else:
response = getattr(self.client, request_type)(url,
headers=headers,
name=locust_name)
self.output(url)
self.output("Response status code: %s" % response.status_code)
self.output("Response content: %s" % response.content)
return response
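# Illustrative sketch (not part of the original module): every helper below
# funnels through heat_request. A GET needs only a URL fragment; a POST also
# passes a dict that heat_request JSON-encodes. The stack identifiers here are
# hypothetical; locust_name groups per-stack URLs under one statistics entry.
#     heat_request(self, 'stacks', 'get', locust_name='heat_list_stacks')
#     heat_request(self, 'stacks/my-stack/1234/actions', 'post',
#                  'heat_suspend_stack', data={"suspend": None},
#                  locust_name='stacks/[name]/[id]/[suspend_stack]')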
def get_stack_id(self):
""" Return a random stack from currently
available stacks
"""
response = heat_request(self, 'stacks', 'get')
stack_list = json.loads(response.content)['stacks']
stack_id = random.choice([i['id'] for i in stack_list])
return stack_id
def get_stack_name(self):
response = heat_request(self, 'stacks', 'get')
stack_list = json.loads(response.content)['stacks']
stack_name = random.choice([i['stack_name'] for i in stack_list])
return stack_name
def get_stack_name_and_id(self):
# :/
response = heat_request(self, 'stacks', 'get')
stack_list = json.loads(response.content)['stacks']
stack_choice = random.choice(stack_list)
stack_name = stack_choice['stack_name']
stack_id = stack_choice['id']
return stack_name, stack_id
def get_snapshot_id(self, stack_name=None, stack_id=None):
    """ Return a random snapshot from a currently
    available stack; a random stack is chosen unless
    stack_name and stack_id are both supplied
    """
    if not (stack_name and stack_id):
        stack_name, stack_id = get_stack_name_and_id(self)
url_path = 'stacks/%s/%s/snapshots' % (stack_name, stack_id)
response = heat_request(self, url_path, 'get', locust_name='stacks/[name]/[id]/snapshots')
snapshot_list = json.loads(response.content)['snapshots']
snapshot_id = random.choice([i['id'] for i in snapshot_list])
return snapshot_id, stack_name, stack_id
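# Illustrative sketch (not part of the original module): the helpers above pick
# random existing objects so the request functions below can be called with no
# arguments from a locust task, e.g.:
#     stack_name, stack_id = get_stack_name_and_id(self)
#     snapshot_id, stack_name, stack_id = get_snapshot_id(self)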
def list_stacks(self):
return heat_request(self,
'stacks',
'get',
'heat_list_stacks')
def find_stack(self, stack_name=None):
if not stack_name:
stack_name = get_stack_name(self)
return heat_request(self,
'stacks/%s' % stack_name,
'get',
'heat_find_stack',
locust_name='stacks/[name]')
def list_stack_detail(self, stack_name=None, stack_id=None):
if not stack_name:
stack_name, stack_id = get_stack_name_and_id(self)
return heat_request(self,
'stacks/%s/%s' % (stack_name, stack_id),
'get',
'heat_list_stack_detail',
locust_name='stacks/[name]/[id]')
def list_resource_types(self):
return heat_request(self,
'resource_types',
'get',
'heat_list_resource_types')
def list_snapshots(self, stack_name=None, stack_id=None):
if not stack_name:
stack_name, stack_id = get_stack_name_and_id(self)
return heat_request(self,
'stacks/%s/%s/snapshots' % (stack_name, stack_id),
'get',
'heat_list_snapshots',
locust_name='stacks/[name]/[id]/snapshots')
def list_snapshot_detail(self, snapshot_id=None):
if not snapshot_id:
snapshot_id, stack_name, stack_id = get_snapshot_id(self)
return heat_request(self,
'stacks/%s/%s/snapshots/%s' % (stack_name, stack_id, snapshot_id),
'get',
'heat_list_snapshot_detail',
locust_name='stacks/[name]/[id]/snapshots/[snap_id]')
def find_stack_resources(self, stack_name=None):
if not stack_name:
stack_name = get_stack_name(self)
return heat_request(self,
'stacks/%s/resources' % stack_name,
'get',
'heat_find_stack_resources',
locust_name='stacks/[name]/resources')
def list_stack_resources(self, stack_name=None, stack_id=None):
if not stack_name:
stack_name, stack_id = get_stack_name_and_id(self)
return heat_request(self,
'stacks/%s/%s/resources' % (stack_name, stack_id),
'get',
'heat_list_stack_resources',
locust_name='stacks/[name]/[id]/resources')
def find_stack_events(self, stack_name=None):
if not stack_name:
stack_name = get_stack_name(self)
return heat_request(self,
'stacks/%s/events' % stack_name,
'get',
'heat_find_stack_events',
locust_name='stacks/[name]/events')
def list_stack_events(self, stack_name=None, stack_id=None):
if not stack_name:
stack_name, stack_id = get_stack_name_and_id(self)
return heat_request(self,
'stacks/%s/%s/events' % (stack_name, stack_id),
'get',
'heat_list_stack_events',
locust_name='stacks/[name]/[id]/events')
def get_stack_template(self, stack_name=None, stack_id=None):
if not stack_name:
stack_name, stack_id = get_stack_name_and_id(self)
return heat_request(self,
'stacks/%s/%s/template' % (stack_name, stack_id),
'get',
'heat_get_stack_template',
locust_name='stacks/[name]/[id]/template')
def suspend_stack(self, stack_name=None, stack_id=None):
if not stack_name:
stack_name, stack_id = get_stack_name_and_id(self)
data = {"suspend":None}
return heat_request(self,
'stacks/%s/%s/actions' % (stack_name, stack_id),
'post',
'heat_suspend_stack',
data,
locust_name='stacks/[name]/[id]/[suspend_stack]')
def resume_stack(self, stack_name=None, stack_id=None):
if not stack_name:
stack_name, stack_id = get_stack_name_and_id(self)
data = {"resume":None}
return heat_request(self,
'stacks/%s/%s/actions' % (stack_name, stack_id),
'post',
'heat_resume_stack',
data,
locust_name='stacks/[name]/[id]/[resume_stack]')
def create_snapshot(self,
stack_name=None,
stack_id=None,
name=None,
force=False,
description=None):
# TODO: don't set name unless passed as param
if not name:
name = "stack-snapshot-%s" % uuid.uuid4()
data = { "name": name,
}
if not stack_name:
stack_name, stack_id = get_stack_name_and_id(self)
response = heat_request(self,
'stacks/%s/%s/snapshots' %(stack_name, stack_id),
'post',
'heat_create_snapshot',
data,
locust_name='stacks/[name]/[id]/snapshots')
return response
def delete_snapshot(self,
stack_name=None,
stack_id=None,
snapshot_id=None,
force=False,
description=None):
    if stack_name:
        # get_snapshot_id() returns a (snapshot_id, stack_name, stack_id) tuple.
        snapshot_id, stack_name, stack_id = get_snapshot_id(self, stack_id=stack_id)
if not snapshot_id:
snapshot_id, stack_name, stack_id = get_snapshot_id(self)
response = heat_request(self,
'stacks/%s/%s/snapshots/%s' %(stack_name, stack_id, snapshot_id),
'delete',
'heat_delete_snapshot',
locust_name='stacks/[name]/[id]/snapshots/[delete_snapshot]')
return response
def restore_snapshot(self,
stack_name=None,
stack_id=None,
snapshot_id=None,
force=False,
description=None):
    if stack_name:
        # get_snapshot_id() returns a (snapshot_id, stack_name, stack_id) tuple.
        snapshot_id, stack_name, stack_id = get_snapshot_id(self, stack_id=stack_id)
if not snapshot_id:
snapshot_id, stack_name, stack_id = get_snapshot_id(self)
response = heat_request(self,
'stacks/%s/%s/snapshots/%s/restore' %(stack_name, stack_id, snapshot_id),
'post',
'heat_restore_snapshot',
locust_name='stacks/[name]/[id]/snapshots/[restore_snapshot]')
return response
def abandon_stack(self,
stack_name=None,
stack_id=None):
if not stack_name:
stack_name, stack_id = get_stack_name_and_id(self)
response = heat_request(self,
'stacks/%s/%s/abandon' % (stack_name, stack_id),
'delete',
'heat_abandon_stack',
locust_name='stacks/[name]/[id]/abandon')
return response
def adopt_stack(self,
stack_name=None,
template=None,
template_url=None,
timeout_mins=None,
adopt_stack_data=None):
if not stack_name:
# generate one
stack_name = 'test-stack-%s-%s' %(self.id, self.stack_count)
self.stack_count += 1
# TODO: generate other params if needed
data = {'stack_name': stack_name,
'template_url': template_url,
'timeout_mins': timeout_mins,
'adopt_stack_data': adopt_stack_data}
if template:
data['template'] = template
response = heat_request(self,
'stacks',
'post',
'heat_adopt_stack',
data,
locust_name='stacks/[adopt_stack]')
return response
def create_stack(self,
stack_name=None,
template=None,
template_url=None,
timeout_mins=None,
disable_rollback=True,
params=None):
if not stack_name:
# generate one
stack_name = 'test-stack-%s-%s' %(self.id, self.stack_count)
self.stack_count += 1
# TODO: generate other params if needed
data = {'stack_name': stack_name,
'template_url': template_url,
'timeout_mins': timeout_mins,
'parameters' : params,
'disable_rollback': disable_rollback}
if template:
data['template'] = template
response = heat_request(self,
'stacks',
'post',
'heat_create_stack',
data,
locust_name='stacks/[create_stack]')
return response
def delete_stack(self,
stack_name=None,
stack_id=None):
if not stack_name:
stack_name, stack_id = get_stack_name_and_id(self)
    response = heat_request(self,
                            'stacks/%s/%s' % (stack_name, stack_id),
                            'delete',
                            'heat_delete_stack',
                            locust_name='stacks/[name]/[id]')
    return response
def update_stack(self,
stack_name=None,
stack_id=None,
template=None,
template_url=None,
timeout_mins=None,
disable_rollback=True,
params=None):
if not stack_name:
# get one
stack_name, stack_id = get_stack_name_and_id(self)
# TODO: generate other params if needed
data = {'stack_name': stack_name,
'template_url': template_url,
'timeout_mins': timeout_mins,
'parameters' : params,
'disable_rollback': disable_rollback}
if template:
data['template'] = template
response = heat_request(self,
'stacks/%s/%s' % (stack_name, stack_id),
'put',
'heat_update_stack',
data,
locust_name='stacks/[stack_name]/[stack_id]/[update_stack]')
return response
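# Illustrative usage sketch (not part of the original module). These helpers
# all take the locust task object explicitly as `self`; the names `client` and
# `my_template` below are hypothetical stand-ins for that object and for a
# Heat template dict.
#
#     create_stack(client, template=my_template)       # POST stacks
#     stack_name, stack_id = get_stack_name_and_id(client)
#     suspend_stack(client, stack_name, stack_id)       # POST stacks/[name]/[id]/actions
#     resume_stack(client, stack_name, stack_id)
#     delete_stack(client, stack_name, stack_id)        # DELETE stacks/[name]/[id]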
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""ResNets, implemented in Gluon."""
__all__ = ['ResNetV1', 'ResNetV2',
'BasicBlockV1', 'BasicBlockV2',
'BottleneckV1', 'BottleneckV2',
'resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1',
'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2',
'get_resnet']
import os
from ....context import cpu
from ...block import HybridBlock
from ... import nn
from .... import base
from ....util import is_np_array
# Helpers
def _conv3x3(channels, stride, in_channels):
return nn.Conv2D(channels, kernel_size=3, strides=stride, padding=1,
use_bias=False, in_channels=in_channels)
# Blocks
class BasicBlockV1(HybridBlock):
r"""BasicBlock V1 from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
This is used for ResNet V1 for 18, 34 layers.
Parameters
----------
channels : int
Number of output channels.
stride : int
Stride size.
downsample : bool, default False
Whether to downsample the input.
in_channels : int, default 0
Number of input channels. Default is 0, to infer from the graph.
"""
def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
super(BasicBlockV1, self).__init__(**kwargs)
self.body = nn.HybridSequential()
self.body.add(_conv3x3(channels, stride, in_channels))
self.body.add(nn.BatchNorm())
self.body.add(nn.Activation('relu'))
self.body.add(_conv3x3(channels, 1, channels))
self.body.add(nn.BatchNorm())
if downsample:
self.downsample = nn.HybridSequential()
self.downsample.add(nn.Conv2D(channels, kernel_size=1, strides=stride,
use_bias=False, in_channels=in_channels))
self.downsample.add(nn.BatchNorm())
else:
self.downsample = None
def hybrid_forward(self, F, x):
residual = x
x = self.body(x)
if self.downsample:
residual = self.downsample(residual)
act = F.npx.activation if is_np_array() else F.Activation
x = act(residual+x, act_type='relu')
return x
class BottleneckV1(HybridBlock):
r"""Bottleneck V1 from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
This is used for ResNet V1 for 50, 101, 152 layers.
Parameters
----------
channels : int
Number of output channels.
stride : int
Stride size.
downsample : bool, default False
Whether to downsample the input.
in_channels : int, default 0
Number of input channels. Default is 0, to infer from the graph.
"""
def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
super(BottleneckV1, self).__init__(**kwargs)
self.body = nn.HybridSequential()
self.body.add(nn.Conv2D(channels//4, kernel_size=1, strides=stride))
self.body.add(nn.BatchNorm())
self.body.add(nn.Activation('relu'))
self.body.add(_conv3x3(channels//4, 1, channels//4))
self.body.add(nn.BatchNorm())
self.body.add(nn.Activation('relu'))
self.body.add(nn.Conv2D(channels, kernel_size=1, strides=1))
self.body.add(nn.BatchNorm())
if downsample:
self.downsample = nn.HybridSequential()
self.downsample.add(nn.Conv2D(channels, kernel_size=1, strides=stride,
use_bias=False, in_channels=in_channels))
self.downsample.add(nn.BatchNorm())
else:
self.downsample = None
def hybrid_forward(self, F, x):
residual = x
x = self.body(x)
if self.downsample:
residual = self.downsample(residual)
act = F.npx.activation if is_np_array() else F.Activation
x = act(x + residual, act_type='relu')
return x
class BasicBlockV2(HybridBlock):
r"""BasicBlock V2 from
`"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
This is used for ResNet V2 for 18, 34 layers.
Parameters
----------
channels : int
Number of output channels.
stride : int
Stride size.
downsample : bool, default False
Whether to downsample the input.
in_channels : int, default 0
Number of input channels. Default is 0, to infer from the graph.
"""
def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
super(BasicBlockV2, self).__init__(**kwargs)
self.bn1 = nn.BatchNorm()
self.conv1 = _conv3x3(channels, stride, in_channels)
self.bn2 = nn.BatchNorm()
self.conv2 = _conv3x3(channels, 1, channels)
if downsample:
self.downsample = nn.Conv2D(channels, 1, stride, use_bias=False,
in_channels=in_channels)
else:
self.downsample = None
def hybrid_forward(self, F, x):
residual = x
x = self.bn1(x)
act = F.npx.activation if is_np_array() else F.Activation
x = act(x, act_type='relu')
if self.downsample:
residual = self.downsample(x)
x = self.conv1(x)
x = self.bn2(x)
x = act(x, act_type='relu')
x = self.conv2(x)
return x + residual
class BottleneckV2(HybridBlock):
r"""Bottleneck V2 from
`"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
This is used for ResNet V2 for 50, 101, 152 layers.
Parameters
----------
channels : int
Number of output channels.
stride : int
Stride size.
downsample : bool, default False
Whether to downsample the input.
in_channels : int, default 0
Number of input channels. Default is 0, to infer from the graph.
"""
def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
super(BottleneckV2, self).__init__(**kwargs)
self.bn1 = nn.BatchNorm()
self.conv1 = nn.Conv2D(channels//4, kernel_size=1, strides=1, use_bias=False)
self.bn2 = nn.BatchNorm()
self.conv2 = _conv3x3(channels//4, stride, channels//4)
self.bn3 = nn.BatchNorm()
self.conv3 = nn.Conv2D(channels, kernel_size=1, strides=1, use_bias=False)
if downsample:
self.downsample = nn.Conv2D(channels, 1, stride, use_bias=False,
in_channels=in_channels)
else:
self.downsample = None
def hybrid_forward(self, F, x):
residual = x
x = self.bn1(x)
act = F.npx.activation if is_np_array() else F.Activation
x = act(x, act_type='relu')
if self.downsample:
residual = self.downsample(x)
x = self.conv1(x)
x = self.bn2(x)
x = act(x, act_type='relu')
x = self.conv2(x)
x = self.bn3(x)
x = act(x, act_type='relu')
x = self.conv3(x)
return x + residual
# Nets
class ResNetV1(HybridBlock):
r"""ResNet V1 model from
`"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
block : gluon.HybridBlock
Class for the residual block. Options are BasicBlockV1, BottleneckV1.
layers : list of int
Numbers of layers in each block
channels : list of int
Numbers of channels in each block. Length should be one larger than layers list.
classes : int, default 1000
Number of classification classes.
thumbnail : bool, default False
Enable thumbnail.
"""
def __init__(self, block, layers, channels, classes=1000, thumbnail=False, **kwargs):
super(ResNetV1, self).__init__(**kwargs)
assert len(layers) == len(channels) - 1
self.features = nn.HybridSequential()
if thumbnail:
self.features.add(_conv3x3(channels[0], 1, 0))
else:
self.features.add(nn.Conv2D(channels[0], 7, 2, 3, use_bias=False))
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.MaxPool2D(3, 2, 1))
for i, num_layer in enumerate(layers):
stride = 1 if i == 0 else 2
self.features.add(self._make_layer(block, num_layer, channels[i+1],
stride, in_channels=channels[i]))
self.features.add(nn.GlobalAvgPool2D())
self.output = nn.Dense(classes, in_units=channels[-1])
def _make_layer(self, block, layers, channels, stride, in_channels=0):
layer = nn.HybridSequential()
layer.add(block(channels, stride, channels != in_channels, in_channels=in_channels))
for _ in range(layers-1):
layer.add(block(channels, 1, False, in_channels=channels))
return layer
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
class ResNetV2(HybridBlock):
r"""ResNet V2 model from
`"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
block : gluon.HybridBlock
        Class for the residual block. Options are BasicBlockV2, BottleneckV2.
layers : list of int
Numbers of layers in each block
channels : list of int
Numbers of channels in each block. Length should be one larger than layers list.
classes : int, default 1000
Number of classification classes.
thumbnail : bool, default False
Enable thumbnail.
"""
def __init__(self, block, layers, channels, classes=1000, thumbnail=False, **kwargs):
super(ResNetV2, self).__init__(**kwargs)
assert len(layers) == len(channels) - 1
self.features = nn.HybridSequential()
self.features.add(nn.BatchNorm(scale=False, center=False))
if thumbnail:
self.features.add(_conv3x3(channels[0], 1, 0))
else:
self.features.add(nn.Conv2D(channels[0], 7, 2, 3, use_bias=False))
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.MaxPool2D(3, 2, 1))
in_channels = channels[0]
for i, num_layer in enumerate(layers):
stride = 1 if i == 0 else 2
self.features.add(self._make_layer(block, num_layer, channels[i+1],
stride, in_channels=in_channels))
in_channels = channels[i+1]
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.GlobalAvgPool2D())
self.features.add(nn.Flatten())
self.output = nn.Dense(classes, in_units=in_channels)
def _make_layer(self, block, layers, channels, stride, in_channels=0):
layer = nn.HybridSequential()
layer.add(block(channels, stride, channels != in_channels, in_channels=in_channels))
for _ in range(layers-1):
layer.add(block(channels, 1, False, in_channels=channels))
return layer
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
# Specification
resnet_spec = {18: ('basic_block', [2, 2, 2, 2], [64, 64, 128, 256, 512]),
34: ('basic_block', [3, 4, 6, 3], [64, 64, 128, 256, 512]),
50: ('bottle_neck', [3, 4, 6, 3], [64, 256, 512, 1024, 2048]),
101: ('bottle_neck', [3, 4, 23, 3], [64, 256, 512, 1024, 2048]),
152: ('bottle_neck', [3, 8, 36, 3], [64, 256, 512, 1024, 2048])}
resnet_net_versions = [ResNetV1, ResNetV2]
resnet_block_versions = [{'basic_block': BasicBlockV1, 'bottle_neck': BottleneckV1},
{'basic_block': BasicBlockV2, 'bottle_neck': BottleneckV2}]
# Constructor
def get_resnet(version, num_layers, pretrained=False, ctx=cpu(),
root=os.path.join(base.data_dir(), 'models'), **kwargs):
r"""ResNet V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
ResNet V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
version : int
Version of ResNet. Options are 1, 2.
num_layers : int
Numbers of layers. Options are 18, 34, 50, 101, 152.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
"""
assert num_layers in resnet_spec, \
"Invalid number of layers: %d. Options are %s"%(
num_layers, str(resnet_spec.keys()))
block_type, layers, channels = resnet_spec[num_layers]
assert version >= 1 and version <= 2, \
"Invalid resnet version: %d. Options are 1 and 2."%version
resnet_class = resnet_net_versions[version-1]
block_class = resnet_block_versions[version-1][block_type]
net = resnet_class(block_class, layers, channels, **kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_parameters(get_model_file('resnet%d_v%d'%(num_layers, version),
root=root), ctx=ctx)
return net
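# Illustrative usage sketch (not part of the upstream module): building a
# randomly initialized ResNet-50 V2 classifier with the generic constructor
# above. The `mx` import is assumed from a standard MXNet installation.
#
#     import mxnet as mx
#     net = get_resnet(2, 50, classes=10)          # ResNetV2 with BottleneckV2 blocks
#     net.initialize(ctx=mx.cpu())
#     out = net(mx.nd.zeros((1, 3, 224, 224)))     # output shape: (1, 10)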
def resnet18_v1(**kwargs):
r"""ResNet-18 V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(1, 18, **kwargs)
def resnet34_v1(**kwargs):
r"""ResNet-34 V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(1, 34, **kwargs)
def resnet50_v1(**kwargs):
r"""ResNet-50 V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(1, 50, **kwargs)
def resnet101_v1(**kwargs):
r"""ResNet-101 V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(1, 101, **kwargs)
def resnet152_v1(**kwargs):
r"""ResNet-152 V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(1, 152, **kwargs)
def resnet18_v2(**kwargs):
r"""ResNet-18 V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(2, 18, **kwargs)
def resnet34_v2(**kwargs):
r"""ResNet-34 V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(2, 34, **kwargs)
def resnet50_v2(**kwargs):
r"""ResNet-50 V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(2, 50, **kwargs)
def resnet101_v2(**kwargs):
r"""ResNet-101 V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(2, 101, **kwargs)
def resnet152_v2(**kwargs):
r"""ResNet-152 V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(2, 152, **kwargs)
|
|
# encoding: utf-8
# pylint: disable=missing-docstring
import logging
from six import itervalues
from flask_marshmallow import Schema, base_fields
from marshmallow import validate, validates_schema, ValidationError
log = logging.getLogger(__name__) # pylint: disable=invalid-name
class Parameters(Schema):
def __init__(self, **kwargs):
super(Parameters, self).__init__(strict=True, **kwargs)
def __contains__(self, field):
return field in self.fields
def make_instance(self, data):
# pylint: disable=unused-argument
"""
This is a no-op function which shadows ``ModelSchema.make_instance``
method (when inherited classes inherit from ``ModelSchema``). Thus, we
avoid a new instance creation because it is undesirable behaviour for
parameters (they can be used not only for saving new instances).
"""
return
class PostFormParameters(Parameters):
def __init__(self, *args, **kwargs):
super(PostFormParameters, self).__init__(*args, **kwargs)
for field in itervalues(self.fields):
if field.dump_only:
continue
if not field.metadata.get('location'):
field.metadata['location'] = 'form'
class PatchJSONParameters(Parameters):
"""
Base parameters class for handling PATCH arguments according to RFC 6902.
"""
# All operations described in RFC 6902
OP_ADD = 'add'
OP_REMOVE = 'remove'
OP_REPLACE = 'replace'
OP_MOVE = 'move'
OP_COPY = 'copy'
OP_TEST = 'test'
# However, we use only those which make sense in RESTful API
OPERATION_CHOICES = (
OP_TEST,
OP_ADD,
OP_REMOVE,
OP_REPLACE,
)
op = base_fields.String(required=True) # pylint: disable=invalid-name
PATH_CHOICES = None
path = base_fields.String(required=True)
NO_VALUE_OPERATIONS = (OP_REMOVE,)
value = base_fields.Raw(required=False)
def __init__(self, *args, **kwargs):
super(PatchJSONParameters, self).__init__(*args, many=True, **kwargs)
if not self.PATH_CHOICES:
raise ValueError("%s.PATH_CHOICES has to be set" % self.__class__.__name__)
# Make a copy of `validators` as otherwise we will modify the behaviour
# of all `marshmallow.Schema`-based classes
self.fields['op'].validators = \
self.fields['op'].validators + [validate.OneOf(self.OPERATION_CHOICES)]
self.fields['path'].validators = \
self.fields['path'].validators + [validate.OneOf(self.PATH_CHOICES)]
@validates_schema
def validate_patch_structure(self, data):
"""
Common validation of PATCH structure
        Check that 'value' is present in all operations that expect it.
        Check that 'path' is present; it is expected to start with '/'.
        The leading '/' is stripped (into ``field_name``) to simplify usage
        in resources.
"""
if data['op'] not in self.NO_VALUE_OPERATIONS and 'value' not in data:
raise ValidationError('value is required')
if 'path' not in data:
raise ValidationError('Path is required and must always begin with /')
else:
data['field_name'] = data['path'][1:]
@classmethod
def perform_patch(cls, operations, obj, state=None):
"""
Performs all necessary operations by calling class methods with
corresponding names.
"""
if state is None:
state = {}
for operation in operations:
if not cls._process_patch_operation(operation, obj=obj, state=state):
                log.info(
                    "%s patching has stopped because of unknown operation %s",
                    obj.__name__, operation
                )
raise ValidationError("Failed to update %s details." % obj.__name__)
return True
@classmethod
def _process_patch_operation(cls, operation, obj, state):
"""
Args:
operation (dict): one patch operation in RFC 6902 format.
obj (object): an instance which is needed to be patched.
state (dict): inter-operations state storage
Returns:
processing_status (bool): True if operation was handled, otherwise False.
"""
        field_operation = operation['op']
        if field_operation == cls.OP_REPLACE:
            return cls.replace(obj, operation['field_name'], operation['value'], state=state)
        elif field_operation == cls.OP_TEST:
            return cls.test(obj, operation['field_name'], operation['value'], state=state)
        elif field_operation == cls.OP_ADD:
            return cls.add(obj, operation['field_name'], operation['value'], state=state)
        elif field_operation == cls.OP_MOVE:
            return cls.move(obj, operation['field_name'], operation['value'], state=state)
        elif field_operation == cls.OP_COPY:
            return cls.copy(obj, operation['field_name'], operation['value'], state=state)
        elif field_operation == cls.OP_REMOVE:
            return cls.remove(obj, operation['field_name'], state=state)
return False
@classmethod
def replace(cls, obj, field, value, state):
"""
This is method for replace operation. It is separated to provide a
possibility to easily override it in your Parameters.
Args:
obj (object): an instance to change.
field (str): field name
value (str): new value
state (dict): inter-operations state storage
Returns:
processing_status (bool): True
"""
if not hasattr(obj, field):
raise ValidationError("Field '%s' does not exist, so it cannot be patched" % field)
setattr(obj, field, value)
return True
@classmethod
def test(cls, obj, field, value, state):
"""
This is method for test operation. It is separated to provide a
possibility to easily override it in your Parameters.
Args:
obj (object): an instance to change.
field (str): field name
value (str): new value
state (dict): inter-operations state storage
Returns:
processing_status (bool): True
"""
        return getattr(obj, field) == value
@classmethod
def add(cls, obj, field, value, state):
raise NotImplementedError()
@classmethod
def remove(cls, obj, field, state):
raise NotImplementedError()
@classmethod
def move(cls, obj, field, value, state):
raise NotImplementedError()
@classmethod
def copy(cls, obj, field, value, state):
raise NotImplementedError()
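# Illustrative sketch (hypothetical names, not part of the original module):
# a concrete parameters class only needs to declare PATH_CHOICES; the base
# class above wires in the `op`/`path` validators and dispatches operations
# in perform_patch().
#
#     class PatchUserParameters(PatchJSONParameters):
#         PATH_CHOICES = ('/first_name', '/last_name')
#
#     # RFC 6902-style payload accepted by the schema (note the leading '/').
#     # During validation each operation also gains a 'field_name' key with
#     # that leading '/' stripped, which is what perform_patch() relies on.
#     operations = [
#         {"op": "test", "path": "/first_name", "value": "Alice"},
#         {"op": "replace", "path": "/last_name", "value": "Smith"},
#     ]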
|
|
#-----------------------------------------------------------------
# _ast_gen.py
#
# Generates the AST Node classes from a specification given in
# a .yaml file
#
# The design of this module was inspired by astgen.py from the
# Python 2.5 code-base.
#
# Copyright (C) 2008-2012, Eli Bendersky
# License: BSD
#-----------------------------------------------------------------
import pprint
from string import Template
class ASTCodeGenerator(object):
def __init__(self, cfg_filename='_c_ast.cfg'):
""" Initialize the code generator from a configuration
file.
"""
self.cfg_filename = cfg_filename
self.node_cfg = [NodeCfg(name, contents)
for (name, contents) in self.parse_cfgfile(cfg_filename)]
def generate(self, file=None):
""" Generates the code into file, an open file buffer.
"""
src = Template(_PROLOGUE_COMMENT).substitute(
cfg_filename=self.cfg_filename)
src += _PROLOGUE_CODE
for node_cfg in self.node_cfg:
src += node_cfg.generate_source() + '\n\n'
file.write(src)
def parse_cfgfile(self, filename):
""" Parse the configuration file and yield pairs of
(name, contents) for each node.
"""
with open(filename, "r") as f:
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
colon_i = line.find(':')
lbracket_i = line.find('[')
rbracket_i = line.find(']')
if colon_i < 1 or lbracket_i <= colon_i or rbracket_i <= lbracket_i:
raise RuntimeError("Invalid line in %s:\n%s\n" % (filename, line))
name = line[:colon_i]
val = line[lbracket_i + 1:rbracket_i]
vallist = [v.strip() for v in val.split(',')] if val else []
yield name, vallist
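# Illustrative configuration line in the format parsed above (shown here only
# as an example of the 'Name: [contents]' syntax):
#
#     BinaryOp: [op, left*, right*]
#
# parse_cfgfile() yields ('BinaryOp', ['op', 'left*', 'right*']); NodeCfg below
# then records 'op' as an attribute and 'left'/'right' as child nodes, while a
# trailing '**' would mark a sequence of children instead.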
class NodeCfg(object):
""" Node configuration.
name: node name
contents: a list of contents - attributes and child nodes
See comment at the top of the configuration file for details.
"""
def __init__(self, name, contents):
self.name = name
self.all_entries = []
self.attr = []
self.child = []
self.seq_child = []
for entry in contents:
clean_entry = entry.rstrip('*')
self.all_entries.append(clean_entry)
if entry.endswith('**'):
self.seq_child.append(clean_entry)
elif entry.endswith('*'):
self.child.append(clean_entry)
else:
self.attr.append(entry)
def generate_source(self):
src = self._gen_init()
src += '\n' + self._gen_children()
src += '\n' + self._gen_attr_names()
return src
def _gen_init(self):
src = "class %s(Node):\n" % self.name
if self.all_entries:
args = ', '.join(self.all_entries)
arglist = '(self, %s, coord=None)' % args
else:
arglist = '(self, coord=None)'
src += " def __init__%s:\n" % arglist
for name in self.all_entries + ['coord']:
src += " self.%s = %s\n" % (name, name)
return src
def _gen_children(self):
src = ' def children(self):\n'
if self.all_entries:
src += ' nodelist = []\n'
for child in self.child:
src += (
' if self.%(child)s is not None:' +
' nodelist.append(("%(child)s", self.%(child)s))\n') % (
dict(child=child))
for seq_child in self.seq_child:
src += (
' for i, child in enumerate(self.%(child)s or []):\n'
' nodelist.append(("%(child)s[%%d]" %% i, child))\n') % (
dict(child=seq_child))
src += ' return tuple(nodelist)\n'
else:
src += ' return ()\n'
return src
def _gen_attr_names(self):
src = " attr_names = (" + ''.join("%r," % nm for nm in self.attr) + ')'
return src
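# For the illustrative 'BinaryOp: [op, left*, right*]' entry above,
# generate_source() emits roughly:
#
#     class BinaryOp(Node):
#         def __init__(self, op, left, right, coord=None):
#             self.op = op
#             self.left = left
#             self.right = right
#             self.coord = coord
#
#         def children(self):
#             nodelist = []
#             if self.left is not None: nodelist.append(("left", self.left))
#             if self.right is not None: nodelist.append(("right", self.right))
#             return tuple(nodelist)
#
#         attr_names = ('op',)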
_PROLOGUE_COMMENT = \
r'''#-----------------------------------------------------------------
# ** ATTENTION **
# This code was automatically generated from the file:
# $cfg_filename
#
# Do not modify it directly. Modify the configuration file and
# run the generator again.
# ** ** *** ** **
#
# pycparser: c_ast.py
#
# AST Node classes.
#
# Copyright (C) 2008-2012, Eli Bendersky
# License: BSD
#-----------------------------------------------------------------
'''
_PROLOGUE_CODE = r'''
import sys
class Node(object):
""" Abstract base class for AST nodes.
"""
def children(self):
""" A sequence of all children that are Nodes
"""
pass
def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None):
""" Pretty print the Node and all its attributes and
children (recursively) to a buffer.
buf:
Open IO buffer into which the Node is printed.
offset:
Initial offset (amount of leading spaces)
attrnames:
True if you want to see the attribute names in
name=value pairs. False to only see the values.
nodenames:
True if you want to see the actual node names
within their parents.
showcoord:
                True if you want the coordinates of each Node to be
                displayed.
"""
lead = ' ' * offset
if nodenames and _my_node_name is not None:
buf.write(lead + self.__class__.__name__+ ' <' + _my_node_name + '>: ')
else:
buf.write(lead + self.__class__.__name__+ ': ')
if self.attr_names:
if attrnames:
nvlist = [(n, getattr(self,n)) for n in self.attr_names]
attrstr = ', '.join('%s=%s' % nv for nv in nvlist)
else:
vlist = [getattr(self, n) for n in self.attr_names]
attrstr = ', '.join('%s' % v for v in vlist)
buf.write(attrstr)
if showcoord:
buf.write(' (at %s)' % self.coord)
buf.write('\n')
for (child_name, child) in self.children():
child.show(
buf,
offset=offset + 2,
attrnames=attrnames,
nodenames=nodenames,
showcoord=showcoord,
_my_node_name=child_name)
class NodeVisitor(object):
""" A base NodeVisitor class for visiting c_ast nodes.
Subclass it and define your own visit_XXX methods, where
XXX is the class name you want to visit with these
methods.
For example:
class ConstantVisitor(NodeVisitor):
def __init__(self):
self.values = []
def visit_Constant(self, node):
self.values.append(node.value)
Creates a list of values of all the constant nodes
encountered below the given node. To use it:
cv = ConstantVisitor()
cv.visit(node)
Notes:
* generic_visit() will be called for AST nodes for which
no visit_XXX method was defined.
* The children of nodes for which a visit_XXX was
defined will not be visited - if you need this, call
generic_visit() on the node.
You can use:
NodeVisitor.generic_visit(self, node)
* Modeled after Python's own AST visiting facilities
(the ast module of Python 3.0)
"""
def visit(self, node):
""" Visit a node.
"""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
""" Called if no explicit visitor function exists for a
node. Implements preorder visiting of the node.
"""
for c_name, c in node.children():
self.visit(c)
'''
if __name__ == "__main__":
import sys
ast_gen = ASTCodeGenerator('_c_ast.cfg')
ast_gen.generate(open('c_ast.py', 'w'))
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class CollectiveAllReduceStrategy implementing DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distribute.python import cross_tower_ops as cross_tower_ops_lib
from tensorflow.contrib.distribute.python import cross_tower_utils
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import values
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.platform import tf_logging as logging
# TODO(yuefengz): support in-graph replication.
class CollectiveAllReduceStrategy(mirrored_strategy.MirroredStrategy):
"""Distribution strategy that uses collective ops for all-reduce.
It is similar to the MirroredStrategy but it uses collective ops for
reduction.
When `cluster_spec` is given by the `configure` method, it turns into the
  multi-worker version that works on multiple workers with between-graph
replication.
Note: `configure` will be called by higher-level APIs if running in
distributed environment.
"""
def __init__(self, num_gpus_per_worker=0):
"""Initializes the object.
Args:
num_gpus_per_worker: number of local GPUs or GPUs per worker, the default
is 0 meaning CPU only.
"""
self._num_gpus_per_worker = num_gpus_per_worker
self._initialize_local_worker(num_gpus_per_worker)
def _initialize_local_worker(self, num_gpus_per_worker):
"""Initializes the object for local training."""
self._is_chief = True
self._num_workers = 1
if num_gpus_per_worker:
local_devices = [
"/device:GPU:%d" % i for i in range(num_gpus_per_worker)
]
else:
local_devices = ["/device:CPU:0"]
self._collective_keys = cross_tower_utils.CollectiveKeys()
super(CollectiveAllReduceStrategy, self).__init__(
devices=local_devices,
cross_tower_ops=cross_tower_ops_lib.CollectiveAllReduce(
num_workers=1,
num_gpus_per_worker=num_gpus_per_worker,
collective_keys=self._collective_keys))
self._cluster_spec = None
self._task_type = None
self._task_id = None
logging.info("CollectiveAllReduceStrategy with local_devices = %r",
local_devices)
def _initialize_multi_worker(self, num_gpus_per_worker, cluster_spec,
task_type, task_id):
"""Initializes the object for multi-worker training."""
if task_type is None or task_id is None:
raise ValueError("When `cluster_spec` is given, you must also specify "
"`task_type` and `task_id`")
if task_type not in ["chief", "worker"]:
raise ValueError(
"Unrecognized task_type: %r, valid task types are: \"chief\", "
"\"worker\"." % task_type)
cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
self._num_workers = len(cluster_spec.as_dict().get("worker", [])) + len(
cluster_spec.as_dict().get("chief", []))
if not self._num_workers:
raise ValueError("No `worker` or `chief` tasks can be found in "
"`cluster_spec`.")
self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,
task_id)
worker_device = "/job:%s/task:%d" % (task_type, task_id)
if num_gpus_per_worker:
local_devices = [
"%s/device:GPU:%d" % (worker_device, i)
for i in range(num_gpus_per_worker)
]
else:
local_devices = [worker_device]
self._collective_keys = cross_tower_utils.CollectiveKeys()
super(CollectiveAllReduceStrategy, self).__init__(
devices=local_devices,
cross_tower_ops=cross_tower_ops_lib.CollectiveAllReduce(
num_workers=self._num_workers,
num_gpus_per_worker=num_gpus_per_worker,
collective_keys=self._collective_keys))
# Add a default device so that ops without specified devices will not end up
# on other workers.
self._default_device = "/job:%s/task:%d" % (task_type, task_id)
self._cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
self._task_type = task_type
self._task_id = task_id
logging.info(
"Multi-worker CollectiveAllReduceStrategy with "
"cluster_spec = %r, task_type = %r, task_id = %r, "
"num_workers = %r, local_devices = %r", cluster_spec.as_dict(),
task_type, task_id, self._num_workers, local_devices)
def _create_variable(self, next_creator, *args, **kwargs):
colocate_with = kwargs.pop("colocate_with", None)
devices = self._get_devices_from(colocate_with)
group_size = len(devices) * self._num_workers
group_key = self._collective_keys.get_group_key(self._devices)
def _real_mirrored_creator(devices, *args, **kwargs):
"""Creates one MirroredVariable on the current worker."""
index = {}
collective_instance_key = self._collective_keys.get_instance_key(
key_id=kwargs["name"])
if "initial_value" not in kwargs:
raise ValueError("Initial value must be specified.")
initial_value = kwargs["initial_value"]
if callable(initial_value):
initial_value_fn = initial_value
else:
initial_value_fn = lambda: initial_value
for i, d in enumerate(devices):
with ops.device(d):
if i > 0:
# Give replicas meaningful distinct names:
var0name = index[devices[0]].name.split(":")[0]
# We append a / to variable names created on towers with id > 0 to
# ensure that we ignore the name scope and instead use the given
# name as the absolute name of the variable.
kwargs["name"] = "%s/replica_%d/" % (var0name, i)
# The initial value fn makes sure variables all initialized to
# same values. The first device of the chief worker will send their
# variable values to other devices and other workers.
def _overridden_initial_value_fn(device=d, index=i): # pylint: disable=g-missing-docstring
with ops.device(device):
initial_value = initial_value_fn()
assert not callable(initial_value)
initial_value = ops.convert_to_tensor(initial_value)
if self._is_chief and index == 0:
bcast_send = collective_ops.broadcast_send(
initial_value, initial_value.shape, initial_value.dtype,
group_size, group_key, collective_instance_key)
with ops.control_dependencies([bcast_send]):
return array_ops.identity(initial_value)
else:
return collective_ops.broadcast_recv(
initial_value.shape, initial_value.dtype, group_size,
group_key, collective_instance_key)
kwargs["initial_value"] = _overridden_initial_value_fn
with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
v = next_creator(*args, **kwargs)
assert not isinstance(v, values.DistributedVariable)
index[d] = v
return index
# pylint: disable=protected-access
return mirrored_strategy._create_mirrored_variable(
devices, _real_mirrored_creator, *args, **kwargs)
def distribute_dataset(self, dataset_fn):
"""Distributes the dataset to each local GPU."""
# TODO(yuefengz): shard the dataset.
return values.PerDeviceDataset(
self._call_dataset_fn(dataset_fn), self._devices, True)
def configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
"""Configures the object.
Args:
session_config: a @{tf.ConfigProto}
cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
cluster configurations.
task_type: the current task type, such as "worker".
task_id: the current task id.
Raises:
ValueError: if `task_type` is not in the `cluster_spec`.
"""
if not self._cluster_spec and cluster_spec:
      # Initialize multi-worker only once; if a `cluster_spec` was already
      # passed in earlier, do nothing here.
# TODO(yuefengz): check `cluster_spec` is the same if this object has
# already been initialized with a `cluster_spec`.
self._initialize_multi_worker(self._num_gpus_per_worker, cluster_spec,
task_type, task_id)
if not session_config or not self._cluster_spec:
return
session_config.isolate_session_state = True
assert self._task_type
assert self._task_id is not None
# Collective group leader is needed for collective ops to coordinate
# workers.
if "chief" in self._cluster_spec.jobs:
session_config.experimental.collective_group_leader = (
"/job:chief/replica:0/task:0")
else:
if "worker" not in self._cluster_spec.jobs:
raise ValueError(
"You must have `chief` or `worker` jobs in the `cluster_spec`.")
session_config.experimental.collective_group_leader = (
"/job:worker/replica:0/task:0")
# The device filters prevent communication between workers.
del session_config.device_filters[:]
session_config.device_filters.append(
"/job:%s/task:%d" % (self._task_type, self._task_id))
# The scoped_allocator_optimization is to optimize graphs for collective
# ops.
rewrite_options = session_config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
del rewrite_options.scoped_allocator_opts.enable_op[:]
rewrite_options.scoped_allocator_opts.enable_op.append("CollectiveReduce")
@property
def between_graph(self):
return True
@property
def should_init(self):
return True
@property
def should_checkpoint(self):
return self._is_chief
@property
def should_save_summary(self):
return self._is_chief
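# Illustrative sketch (not part of the original module): local single-worker
# use of the strategy defined above. In a distributed setting, higher-level
# APIs are expected to call configure() with cluster_spec/task_type/task_id as
# described in the class docstring.
#
#     strategy = CollectiveAllReduceStrategy(num_gpus_per_worker=2)
#     with strategy.scope():
#         # build the mirrored model and optimizer here
#         pass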
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15+dev (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440-branch-based"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "conda-testenv-"
cfg.versionfile_source = "conda_testenv/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes
both the project name and a version string.
"""
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = [r.strip() for r in refnames.strip("()").split(",")]
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(set(refs) - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None, "branch": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags",
"branch": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM). Note, for git v1.7
# and below, it is necessary to run "git update-index --refresh" first.
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# abbrev-ref available with git >= 1.7
branch_name = run_command(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root).strip()
if branch_name == 'HEAD':
branches = run_command(GITS, ["branch", "--contains"],
cwd=root).split('\n')
branches = [branch[2:] for branch in branches if branch[4:5] != '(']
if 'master' in branches:
branch_name = 'master'
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
branch_name = branch_name.replace(' ', '.').replace('(', '').replace(')', '')
pieces['branch'] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out or 1) # total number of commits
return pieces
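# Worked example of the parsing above (illustrative values, tag_prefix "v"):
# a describe output of "v1.2.0-3-gabc1234-dirty" yields
# pieces = {"closest-tag": "1.2.0", "distance": 3, "short": "abc1234",
#           "dirty": True, ...} plus the full revision id and branch name.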
# Default matches v1.2.x, maint/1.2.x, 1.2.x, 1.x etc.
default_maint_branch_regexp = r".*([0-9]+\.)+x$"
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
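# Worked example (illustrative values): pieces with closest-tag "1.2.0",
# distance 3, short "abc1234" and dirty True render as
# "1.2.0+3.gabc1234.dirty"; with no tag at all the same pieces render as
# "0+untagged.3.gabc1234.dirty".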
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def add_one_to_version(version_string, number_index_to_increment=-1):
"""
Add one to a version string at the given numeric indices.
>>> add_one_to_version('v1.2.3')
'v1.2.4'
"""
# Break up the tag by number groups (preserving multi-digit
# numbers as multidigit)
parts = re.split("([0-9]+)", version_string)
digit_parts = [(i, part) for i, part in enumerate(parts)
if part.isdigit()]
# Deal with negative indexing.
increment_at_index = ((number_index_to_increment + len(digit_parts))
% len(digit_parts))
for n_seen, (i, part) in enumerate(digit_parts):
if n_seen == increment_at_index:
parts[i] = str(int(part) + 1)
elif n_seen > increment_at_index:
parts[i] = '0'
return ''.join(parts)
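# With an explicit index the bump moves left and later numbers reset, e.g.
# add_one_to_version('v1.2.3', 0) == 'v2.0.0' and
# add_one_to_version('v1.2.3', 1) == 'v1.3.0'.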
def render_pep440_branch_based(pieces):
# [TAG+1 of minor number][.devDISTANCE][+gHEX]. The git short is
# included for dirty.
# exceptions:
# 1: no tags. 0.0.0.devDISTANCE[+gHEX]
master = pieces.get('branch') == 'master'
maint = re.match(default_maint_branch_regexp,
pieces.get('branch') or '')
# If we are on a tag, just pep440-pre it.
if pieces["closest-tag"] and not (pieces["distance"] or
pieces["dirty"]):
rendered = pieces["closest-tag"]
else:
# Put a default closest-tag in.
if not pieces["closest-tag"]:
pieces["closest-tag"] = '0.0.0'
if pieces["distance"] or pieces["dirty"]:
if maint:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post%d" % pieces["distance"]
else:
rendered = add_one_to_version(pieces["closest-tag"])
if pieces["distance"]:
rendered += ".dev%d" % pieces["distance"]
# Put the branch name in if it isn't master nor a
# maintenance branch.
plus = '+'
if not (master or maint):
rendered += "%s%s" % (plus,
pieces.get('branch') or
'unknown_branch')
plus = '_'
if pieces["dirty"]:
rendered += "%sg%s" % (plus, pieces["short"])
else:
rendered = pieces["closest-tag"]
return rendered
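# Worked example (illustrative values): on branch 'feature-x' with closest-tag
# '1.2.0', distance 4 and a clean tree this renders as '1.2.1.dev4+feature-x'
# (minor version bumped, branch name appended); the same pieces on 'master'
# render as '1.2.1.dev4', and on a maintenance branch such as '1.2.x' as
# '1.2.0.post4'.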
STYLES = {'default': render_pep440,
'pep440': render_pep440,
'pep440-pre': render_pep440_pre,
'pep440-post': render_pep440_post,
'pep440-old': render_pep440_old,
'git-describe': render_git_describe,
'git-describe-long': render_git_describe_long,
'pep440-branch-based': render_pep440_branch_based,
}
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style:
style = 'default'
renderer = STYLES.get(style)
if not renderer:
raise ValueError("unknown style '%s'" % style)
rendered = renderer(pieces)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.pdf import get_pdf
from frappe.email.smtp import get_outgoing_email_account
from frappe.utils import get_url, scrub_urls, strip, expand_relative_urls, cint, split_emails
import email.utils
from markdown2 import markdown
def get_email(recipients, sender='', msg='', subject='[No Subject]',
text_content = None, footer=None, print_html=None, formatted=None, attachments=None,
content=None, reply_to=None, cc=()):
"""send an html email as multipart with attachments and all"""
content = content or msg
emailobj = EMail(sender, recipients, subject, reply_to=reply_to, cc=cc)
if not content.strip().startswith("<"):
content = markdown(content)
emailobj.set_html(content, text_content, footer=footer, print_html=print_html, formatted=formatted)
if isinstance(attachments, dict):
attachments = [attachments]
for attach in (attachments or []):
emailobj.add_attachment(**attach)
return emailobj
class EMail:
"""
Wrapper around the standard email module. An EMail object represents an email to be sent to the client.
It provides a clean way to add binary `FileData` attachments and
builds all messages as multipart/alternative for cleaner reading in text-only clients.
"""
def __init__(self, sender='', recipients=(), subject='', alternative=0, reply_to=None, cc=()):
from email.mime.multipart import MIMEMultipart
from email import Charset
Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8')
if isinstance(recipients, basestring):
recipients = recipients.replace(';', ',').replace('\n', '')
recipients = split_emails(recipients)
# remove null
recipients = filter(None, (strip(r) for r in recipients))
self.sender = sender
self.reply_to = reply_to or sender
self.recipients = recipients
self.subject = subject
self.msg_root = MIMEMultipart('mixed')
self.msg_multipart = MIMEMultipart('alternative')
self.msg_root.attach(self.msg_multipart)
self.cc = cc or []
self.html_set = False
def set_html(self, message, text_content = None, footer=None, print_html=None, formatted=None):
"""Attach message in the html portion of multipart/alternative"""
if not formatted:
formatted = get_formatted_html(self.subject, message, footer, print_html)
# this is the first html part of a multi-part message,
# convert it to text as well
if not self.html_set:
if text_content:
self.set_text(expand_relative_urls(text_content))
else:
self.set_html_as_text(expand_relative_urls(formatted))
self.set_part_html(formatted)
self.html_set = True
def set_text(self, message):
"""
Attach message in the text portion of multipart/alternative
"""
from email.mime.text import MIMEText
part = MIMEText(message, 'plain', 'utf-8')
self.msg_multipart.attach(part)
def set_part_html(self, message):
from email.mime.text import MIMEText
part = MIMEText(message, 'html', 'utf-8')
self.msg_multipart.attach(part)
def set_html_as_text(self, html):
"""return html2text"""
import HTMLParser
from html2text import html2text
try:
self.set_text(html2text(html))
except HTMLParser.HTMLParseError:
pass
def set_message(self, message, mime_type='text/html', as_attachment=0, filename='attachment.html'):
"""Append the message with MIME content to the root node (as attachment)"""
from email.mime.text import MIMEText
maintype, subtype = mime_type.split('/')
part = MIMEText(message, _subtype = subtype)
if as_attachment:
part.add_header('Content-Disposition', 'attachment', filename=filename)
self.msg_root.attach(part)
def attach_file(self, n):
"""attach a file from the `FileData` table"""
from frappe.utils.file_manager import get_file
res = get_file(n)
if not res:
return
self.add_attachment(res[0], res[1])
def add_attachment(self, fname, fcontent, content_type=None):
"""add attachment"""
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
import mimetypes
if not content_type:
content_type, encoding = mimetypes.guess_type(fname)
if content_type is None:
# No guess could be made, or the file is encoded (compressed), so
# use a generic bag-of-bits type.
content_type = 'application/octet-stream'
maintype, subtype = content_type.split('/', 1)
if maintype == 'text':
# Note: we should handle calculating the charset
if isinstance(fcontent, unicode):
fcontent = fcontent.encode("utf-8")
part = MIMEText(fcontent, _subtype=subtype, _charset="utf-8")
elif maintype == 'image':
part = MIMEImage(fcontent, _subtype=subtype)
elif maintype == 'audio':
part = MIMEAudio(fcontent, _subtype=subtype)
else:
part = MIMEBase(maintype, subtype)
part.set_payload(fcontent)
# Encode the payload using Base64
from email import encoders
encoders.encode_base64(part)
# Set the filename parameter
if fname:
part.add_header(b'Content-Disposition',
("attachment; filename=\"%s\"" % fname).encode('utf-8'))
self.msg_root.attach(part)
def add_pdf_attachment(self, name, html, options=None):
self.add_attachment(name, get_pdf(html, options), 'application/octet-stream')
def get_default_sender(self):
email_account = get_outgoing_email_account()
return email.utils.formataddr((email_account.name, email_account.get("sender") or email_account.get("email_id")))
def validate(self):
"""validate the email ids"""
from frappe.utils import validate_email_add
if not self.sender:
self.sender = self.get_default_sender()
validate_email_add(strip(self.sender), True)
self.reply_to = validate_email_add(strip(self.reply_to) or self.sender, True)
self.recipients = [strip(r) for r in self.recipients]
self.cc = [strip(r) for r in self.cc]
for e in self.recipients + (self.cc or []):
validate_email_add(e, True)
def set_message_id(self, message_id):
self.msg_root["Message-Id"] = "<{0}@{1}>".format(message_id, frappe.local.site)
def make(self):
"""build into msg_root"""
headers = {
"Subject": strip(self.subject).encode("utf-8"),
"From": self.sender.encode("utf-8"),
"To": ', '.join(self.recipients).encode("utf-8"),
"Date": email.utils.formatdate(),
"Reply-To": self.reply_to.encode("utf-8") if self.reply_to else None,
"CC": ', '.join(self.cc).encode("utf-8") if self.cc else None,
b'X-Frappe-Site': get_url().encode('utf-8'),
}
# reset headers as values may be changed.
for key, val in headers.iteritems():
if self.msg_root.has_key(key):
del self.msg_root[key]
self.msg_root[key] = val
# call hook to enable apps to modify msg_root before sending
for hook in frappe.get_hooks("make_email_body_message"):
frappe.get_attr(hook)(self)
def as_string(self):
"""validate, build message and convert to string"""
self.validate()
self.make()
return self.msg_root.as_string()
def get_formatted_html(subject, message, footer=None, print_html=None):
# imported here to avoid cyclic import
message = scrub_urls(message)
email_account = get_outgoing_email_account(False)
rendered_email = frappe.get_template("templates/emails/standard.html").render({
"content": message,
"signature": get_signature(email_account),
"footer": get_footer(email_account, footer),
"title": subject,
"print_html": print_html,
"subject": subject
})
return rendered_email
def get_signature(email_account):
if email_account and email_account.add_signature and email_account.signature:
return "<br><br>" + email_account.signature
else:
return ""
def get_footer(email_account, footer=None):
"""append a footer (signature)"""
footer = footer or ""
if email_account and email_account.footer:
footer += '<div style="margin: 15px auto;">{0}</div>'.format(email_account.footer)
footer += "<!--unsubscribe link here-->"
company_address = frappe.db.get_default("email_footer_address")
if company_address:
footer += '<div style="margin: 15px auto; text-align: center; color: #8d99a6">{0}</div>'\
.format(company_address.replace("\n", "<br>"))
if not cint(frappe.db.get_default("disable_standard_email_footer")):
for default_mail_footer in frappe.get_hooks("default_mail_footer"):
footer += '<div style="margin: 15px auto;">{0}</div>'.format(default_mail_footer)
return footer
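# Added usage sketch (illustrative only, not part of the original module); the
# names are the public helpers defined above, the addresses are made up:
#
#   emailobj = get_email(["[email protected]"], sender="[email protected]",
#                        subject="Hello", content="**markdown** body")
#   raw = emailobj.as_string()  # validates recipients, builds the MIME tree
#
# as_string() calls validate() and make() before serializing msg_root.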
|
|
# Copyright 2010-2016 by Haibao Tang et al. All rights reserved.
#
# This code is part of the goatools distribution and governed by its
# license. Please see the LICENSE file included with goatools.
"""Read and store Gene Ontology's obo file."""
# -*- coding: UTF-8 -*-
from __future__ import print_function
from collections import defaultdict
import sys
import os
import re
GraphEngines = ("pygraphviz", "pydot")
__copyright__ = "Copyright (C) 2010-2017, H Tang et al., All rights reserved."
__author__ = "various"
class OBOReader(object):
"""Read goatools.org's obo file. Load into this iterable class.
Download obo from: http://geneontology.org/ontology/go-basic.obo
>>> reader = OBOReader()
>>> for rec in reader:
...     print(rec)
"""
def __init__(self, obo_file="go-basic.obo", optional_attrs=None):
"""Read obo file. Load dictionary."""
self._init_optional_attrs(optional_attrs)
self.format_version = None # e.g., "1.2" of "format-version:" line
self.data_version = None # e.g., "releases/2016-07-07" from "data-version:" line
self.typedefs = {}
# True if obo file exists or if a link to an obo file exists.
if os.path.isfile(obo_file):
self.obo_file = obo_file
# GOTerm attributes that are necessary for any operations:
else:
raise Exception("COULD NOT READ({OBO})\n"
"download obo file first\n "
"[http://geneontology.org/ontology/"
"go-basic.obo]".format(OBO=obo_file))
def __iter__(self):
"""Return one GO Term record at a time from an obo file."""
# Written by DV Klopfenstein
# Wait to open file until needed. Automatically close file when done.
with open(self.obo_file) as fstream:
rec_curr = None # Stores current GO Term
typedef_curr = None # Stores current typedef
for lnum, line in enumerate(fstream):
# obo lines start with any of: [Term], [Typedef], /^\S+:/, or /^\s*/
if self.data_version is None:
self._init_obo_version(line)
if line[0:6].lower() == "[term]":
rec_curr = self._init_goterm_ref(rec_curr, "Term", lnum)
elif line[0:9].lower() == "[typedef]":
typedef_curr = self._init_typedef(rec_curr, "Typedef", lnum)
elif rec_curr is not None or typedef_curr is not None:
line = line.rstrip() # chomp
if ":" in line:
if rec_curr is not None:
self._add_to_ref(rec_curr, line, lnum)
else:
self._add_to_typedef(typedef_curr, line, lnum)
elif line == "":
if rec_curr is not None:
yield rec_curr
rec_curr = None
elif typedef_curr is not None:
# Save typedef.
self.typedefs[typedef_curr.id] = typedef_curr
typedef_curr = None
else:
self._die("UNEXPECTED LINE CONTENT: {L}".format(L=line), lnum)
# Return last record, if necessary
if rec_curr is not None:
yield rec_curr
def _init_obo_version(self, line):
"""Save obo version and release."""
if line[0:14] == "format-version":
self.format_version = line[16:-1]
if line[0:12] == "data-version":
self.data_version = line[14:-1]
def _init_goterm_ref(self, rec_curr, name, lnum):
"""Initialize new reference and perform checks."""
if rec_curr is None:
return GOTerm()
msg = "PREVIOUS {REC} WAS NOT TERMINATED AS EXPECTED".format(REC=name)
self._die(msg, lnum)
def _init_typedef(self, typedef_curr, name, lnum):
"""Initialize new typedef and perform checks."""
if typedef_curr is None:
return TypeDef()
msg = "PREVIOUS {REC} WAS NOT TERMINATED AS EXPECTED".format(REC=name)
self._die(msg, lnum)
def _add_to_ref(self, rec_curr, line, lnum):
"""Add new fields to the current reference."""
# Written by DV Klopfenstein
# Examples of record lines containing ':' include:
# id: GO:0000002
# name: mitochondrial genome maintenance
# namespace: biological_process
# def: "The maintenance of ...
# is_a: GO:0007005 ! mitochondrion organization
mtch = re.match(r'^(\S+):\s*(\S.*)$', line)
if mtch:
field_name = mtch.group(1)
field_value = mtch.group(2)
if field_name == "id":
self._chk_none(rec_curr.id, lnum)
rec_curr.id = field_value
elif field_name == "alt_id":
rec_curr.alt_ids.add(field_value)
elif field_name == "name":
self._chk_none(rec_curr.name, lnum)
rec_curr.name = field_value
elif field_name == "namespace":
self._chk_none(rec_curr.namespace, lnum)
rec_curr.namespace = field_value
elif field_name == "is_a":
rec_curr._parents.append(field_value.split()[0])
elif field_name == "is_obsolete" and field_value == "true":
rec_curr.is_obsolete = True
elif field_name in self.optional_attrs:
self.update_rec(rec_curr, field_name, field_value)
else:
self._die("UNEXPECTED FIELD CONTENT: {L}\n".format(L=line), lnum)
def update_rec(self, rec, name, value):
"""Update current GOTerm with optional record."""
# 'def' is a reserved word in python, do not use it as a Class attr.
if name == "def":
name = "defn"
# If we have a relationship, then we will split this into a further
# dictionary.
if hasattr(rec, name):
if name not in self.attrs_scalar:
if name not in self.attrs_nested:
getattr(rec, name).add(value)
else:
self._add_nested(rec, name, value)
else:
raise Exception("ATTR({NAME}) ALREADY SET({VAL})".format(
NAME=name, VAL=getattr(rec, name)))
else: # Initialize new GOTerm attr
if name in self.attrs_scalar:
setattr(rec, name, value)
elif name not in self.attrs_nested:
setattr(rec, name, set([value]))
else:
name = '_{:s}'.format(name)
setattr(rec, name, defaultdict(list))
self._add_nested(rec, name, value)
def _add_to_typedef(self, typedef_curr, line, lnum):
"""Add new fields to the current typedef."""
mtch = re.match(r'^(\S+):\s*(\S.*)$', line)
if mtch:
field_name = mtch.group(1)
field_value = mtch.group(2).split('!')[0].rstrip()
if field_name == "id":
self._chk_none(typedef_curr.id, lnum)
typedef_curr.id = field_value
elif field_name == "name":
self._chk_none(typedef_curr.name, lnum)
typedef_curr.name = field_value
elif field_name == "transitive_over":
typedef_curr.transitive_over.append(field_value)
elif field_name == "inverse_of":
self._chk_none(typedef_curr.inverse_of, lnum)
typedef_curr.inverse_of = field_value
# Note: there are other tags that aren't imported here.
else:
self._die("UNEXPECTED FIELD CONTENT: {L}\n".format(L=line), lnum)
@staticmethod
def _add_nested(rec, name, value):
"""Adds a term's nested attributes."""
# Remove comments and split term into typedef / target term.
(typedef, target_term) = value.split('!')[0].rstrip().split(' ')
# Save the nested term.
getattr(rec, name)[typedef].append(target_term)
def _init_optional_attrs(self, optional_attrs):
"""Prepare to store data from user-desired optional fields.
Not loading these optional fields by default saves space and time.
These fields can still be loaded on request if the user desires, including:
comment consider def is_class_level is_metadata_tag is_transitive
relationship replaced_by subset synonym transitive_over xref
"""
# Written by DV Klopfenstein
# Required attributes are always loaded. All others are optionally loaded.
self.attrs_req = ['id', 'alt_id', 'name', 'namespace', 'is_a', 'is_obsolete']
self.attrs_scalar = ['comment', 'defn',
'is_class_level', 'is_metadata_tag',
'is_transitive', 'transitive_over']
self.attrs_nested = frozenset(['relationship'])
# Allow user to specify either: 'def' or 'defn'
# 'def' is an obo field name, but 'defn' is legal Python attribute name
fnc = lambda aopt: aopt if aopt != "defn" else "def"
if optional_attrs is None:
optional_attrs = []
elif isinstance(optional_attrs, str):
optional_attrs = [fnc(optional_attrs)] if optional_attrs not in self.attrs_req else []
elif isinstance(optional_attrs, list) or isinstance(optional_attrs, set):
optional_attrs = set([fnc(f) for f in optional_attrs if f not in self.attrs_req])
else:
raise Exception("optional_attrs arg MUST BE A str, list, or set.")
self.optional_attrs = optional_attrs
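# Added note (illustrative): passing optional_attrs="def" (or "defn") stores
# the parsed definition under rec.defn, because "def" is a reserved word in
# Python. A list such as ["synonym", "relationship"] loads synonyms as a set
# and relationships as a nested dict keyed by typedef (e.g. "part_of").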
def _die(self, msg, lnum):
"""Raise an Exception if file read is unexpected."""
raise Exception("**FATAL {FILE}({LNUM}): {MSG}\n".format(
FILE=self.obo_file, LNUM=lnum, MSG=msg))
def _chk_none(self, init_val, lnum):
"""Expect these lines to be uninitialized."""
if init_val is None or init_val == "":
return
self._die("FIELD IS ALREADY INITIALIZED", lnum)
class GOTerm(object):
"""
GO term, actually contain a lot more properties than interfaced here
"""
def __init__(self):
self.id = "" # GO:NNNNNNN
self.name = "" # description
self.namespace = "" # BP, CC, MF
self._parents = [] # is_a basestring of parents
self.parents = [] # parent records
self.children = [] # children records
self.level = None # shortest distance from root node
self.depth = None # longest distance from root node
self.is_obsolete = False # is_obsolete
self.alt_ids = set() # alternative identifiers
def __str__(self):
ret = ['{GO}\t'.format(GO=self.id)]
if self.level is not None:
ret.append('level-{L:>02}\t'.format(L=self.level))
if self.depth is not None:
ret.append('depth-{D:>02}\t'.format(D=self.depth))
ret.append('{NAME} [{NS}]'.format(NAME=self.name, NS=self.namespace))
if self.is_obsolete:
ret.append('obsolete')
return ''.join(ret)
def __repr__(self):
"""Print GO id and all attributes in GOTerm class."""
ret = ["GOTerm('{ID}'):".format(ID=self.id)]
for key, val in self.__dict__.items():
if isinstance(val, int) or isinstance(val, str):
ret.append("{K}:{V}".format(K=key, V=val))
elif val is not None:
ret.append("{K}: {V} items".format(K=key, V=len(val)))
if len(val) < 10:
if not isinstance(val, dict):
for elem in val:
ret.append(" {ELEM}".format(ELEM=elem))
else:
for (typedef, terms) in val.items():
ret.append(" {TYPEDEF}: {NTERMS} items"
.format(TYPEDEF=typedef,
NTERMS=len(terms)))
for term in terms:
ret.append(" {TERM}".format(TERM=term))
else:
ret.append("{K}: None".format(K=key))
return "\n ".join(ret)
def has_parent(self, term):
"""Return True if this GO object has a parent GO ID."""
for parent in self.parents:
if parent.id == term or parent.has_parent(term):
return True
return False
def has_child(self, term):
"""Return True if this GO object has a child GO ID."""
for child in self.children:
if child.id == term or child.has_child(term):
return True
return False
def get_all_parents(self):
"""Return all parent GO IDs."""
all_parents = set()
for parent in self.parents:
all_parents.add(parent.id)
all_parents |= parent.get_all_parents()
return all_parents
def get_all_children(self):
"""Return all children GO IDs."""
all_children = set()
for child in self.children:
all_children.add(child.id)
all_children |= child.get_all_children()
return all_children
def get_all_parent_edges(self):
"""Return tuples for all parent GO IDs, containing current GO ID and parent GO ID."""
all_parent_edges = set()
for parent in self.parents:
all_parent_edges.add((self.id, parent.id))
all_parent_edges |= parent.get_all_parent_edges()
return all_parent_edges
def get_all_child_edges(self):
"""Return tuples for all child GO IDs, containing current GO ID and child GO ID."""
all_child_edges = set()
for child in self.children:
all_child_edges.add((child.id, self.id))
all_child_edges |= child.get_all_child_edges()
return all_child_edges
def write_hier_rec(self, gos_printed, out=sys.stdout,
len_dash=1, max_depth=None, num_child=None, short_prt=False,
include_only=None, go_marks=None,
depth=1, depth_dashes="-"):
"""Write hierarchy for a GO Term record."""
# Added by DV Klopfenstein
goid = self.id
# Shortens hierarchy report by only printing the hierarchy
# for the sub-set of user-specified GO terms which are connected.
if include_only is not None and goid not in include_only:
return
nrp = short_prt and goid in gos_printed
if go_marks is not None:
out.write('{} '.format('>' if goid in go_marks else ' '))
if len_dash is not None:
# Default character indicating hierarchy level is '-'.
# '=' is used to indicate a hierarchical path printed in detail previously.
letter = '-' if not nrp or not self.children else '='
depth_dashes = ''.join([letter]*depth)
out.write('{DASHES:{N}} '.format(DASHES=depth_dashes, N=len_dash))
if num_child is not None:
out.write('{N:>5} '.format(N=len(self.get_all_children())))
out.write('{GO}\tL-{L:>02}\tD-{D:>02}\t{desc}\n'.format(
GO=self.id, L=self.level, D=self.depth, desc=self.name))
# Track GOs previously printed only if needed
if short_prt:
gos_printed.add(goid)
# Do not print the hierarchy below this term if it has already been printed
if nrp:
return
depth += 1
if max_depth is not None and depth > max_depth:
return
for child in self.children:
child.write_hier_rec(gos_printed, out, len_dash, max_depth, num_child, short_prt,
include_only, go_marks,
depth, depth_dashes)
class TypeDef(object):
"""
TypeDef term. These contain more tags than included here, but these
are the most important.
"""
def __init__(self):
self.id = "" # GO:NNNNNNN
self.name = "" # description
self.transitive_over = [] # List of other typedefs
self.inverse_of = "" # Name of inverse typedef.
def __str__(self):
ret = []
ret.append("Typedef - {} ({}):".format(self.id, self.name))
ret.append(" Inverse of: {}".format(self.inverse_of
if self.inverse_of else "None"))
if self.transitive_over:
ret.append(" Transitive over:")
for txo in self.transitive_over:
ret.append(" - {}".format(txo))
return "\n".join(ret)
class GODag(dict):
"""Holds the GO DAG as a dict."""
def __init__(self, obo_file="go-basic.obo", optional_attrs=None, load_obsolete=False):
self.version = self.load_obo_file(obo_file, optional_attrs, load_obsolete)
def load_obo_file(self, obo_file, optional_attrs, load_obsolete):
"""Read obo file. Store results."""
sys.stdout.write("load obo file {OBO}\n".format(OBO=obo_file))
reader = OBOReader(obo_file, optional_attrs)
for rec in reader:
# Save record if:
# 1) Argument load_obsolete is True OR
# 2) Argument load_obsolete is False and the GO term is "live" (not obsolete)
if load_obsolete or not rec.is_obsolete:
self[rec.id] = rec
for alt in rec.alt_ids:
self[alt] = rec
num_items = len(self)
data_version = reader.data_version
if data_version is not None:
data_version = data_version.replace("releases/", "")
version = "{OBO}: fmt({FMT}) rel({REL}) {N:,} GO Terms".format(
OBO=obo_file, FMT=reader.format_version,
REL=data_version, N=num_items)
# Save the typedefs and parsed optional_attrs
self.typedefs = reader.typedefs
self.optional_attrs = reader.optional_attrs
self.populate_terms()
sys.stdout.write("{VER}\n".format(VER=version))
return version
def populate_terms(self):
"""Add level and depth to GO objects."""
def _init_level(rec):
if rec.level is None:
if not rec.parents:
rec.level = 0
else:
rec.level = min(_init_level(parent) for parent in rec.parents) + 1
return rec.level
def _init_depth(rec):
if rec.depth is None:
if not rec.parents:
rec.depth = 0
else:
rec.depth = max(_init_depth(parent) for parent in rec.parents) + 1
return rec.depth
# Make parents and relationships references to the actual GO terms.
for rec in self.values():
rec.parents = [self[x] for x in rec._parents]
if hasattr(rec, '_relationship'):
rec.relationship = defaultdict(set)
for (typedef, terms) in rec._relationship.items():
rec.relationship[typedef].update(set([self[x] for x in terms]))
delattr(rec, '_relationship')
# populate children, levels and add inverted relationships
for rec in self.values():
for parent in rec.parents:
if rec not in parent.children:
parent.children.append(rec)
# Add invert relationships
if hasattr(rec, 'relationship'):
for (typedef, terms) in rec.relationship.items():
invert_typedef = self.typedefs[typedef].inverse_of
if invert_typedef:
# Add inverted relationship
for term in terms:
if not hasattr(term, 'relationship'):
term.relationship = defaultdict(set)
term.relationship[invert_typedef].add(rec)
if rec.level is None:
_init_level(rec)
if rec.depth is None:
_init_depth(rec)
def write_dag(self, out=sys.stdout):
"""Write info for all GO Terms in obo file, sorted numerically."""
for rec in sorted(self.values()):
print(rec, file=out)
def write_hier_all(self, out=sys.stdout,
len_dash=1, max_depth=None, num_child=None, short_prt=False):
"""Write hierarchy for all GO Terms in obo file."""
# Print: [biological_process, molecular_function, and cellular_component]
for go_id in ['GO:0008150', 'GO:0003674', 'GO:0005575']:
self.write_hier(go_id, out, len_dash, max_depth, num_child, short_prt, None)
def write_hier(self, go_id, out=sys.stdout,
len_dash=1, max_depth=None, num_child=None, short_prt=False,
include_only=None, go_marks=None):
"""Write hierarchy for a GO Term."""
gos_printed = set()
self[go_id].write_hier_rec(gos_printed, out, len_dash, max_depth, num_child,
short_prt, include_only, go_marks)
@staticmethod
def id2int(go_id):
"""Given a GO ID, return the int value."""
return int(go_id.replace("GO:", "", 1))
def query_term(self, term, verbose=False):
"""Given a GO ID, return GO object."""
if term not in self:
sys.stderr.write("Term %s not found!\n" % term)
return
rec = self[term]
if verbose:
print(rec)
sys.stderr.write("all parents: {}\n".format(
repr(rec.get_all_parents())))
sys.stderr.write("all children: {}\n".format(
repr(rec.get_all_children())))
return rec
def paths_to_top(self, term):
""" Returns all possible paths to the root node
Each path includes the term given. The order of the path is
top -> bottom, i.e. it starts with the root and ends with the
given term (inclusively).
Parameters:
-----------
- term:
the id of the GO term where the paths begin (e.g. the
accession 'GO:0003682')
Returns:
--------
- a list of lists of GO Terms
"""
# error handling consistent with original authors
if term not in self:
sys.stderr.write("Term %s not found!\n" % term)
return
def _paths_to_top_recursive(rec):
if rec.level == 0:
return [[rec]]
paths = []
for parent in rec.parents:
top_paths = _paths_to_top_recursive(parent)
for top_path in top_paths:
top_path.append(rec)
paths.append(top_path)
return paths
go_term = self[term]
return _paths_to_top_recursive(go_term)
def _label_wrap(self, label):
wrapped_label = r"%s\n%s" % (label,
self[label].name.replace(",", r"\n"))
return wrapped_label
def make_graph_pydot(self, recs, nodecolor,
edgecolor, dpi,
draw_parents=True, draw_children=True):
"""draw AMIGO style network, lineage containing one query record."""
import pydot
grph = pydot.Dot(graph_type='digraph', dpi="{}".format(dpi)) # Directed Graph
edgeset = set()
usr_ids = [rec.id for rec in recs]
for rec in recs:
if draw_parents:
edgeset.update(rec.get_all_parent_edges())
if draw_children:
edgeset.update(rec.get_all_child_edges())
rec_id_set = set([rec_id for endpts in edgeset for rec_id in endpts])
nodes = {str(ID):pydot.Node(
self._label_wrap(ID).replace("GO:", ""), # Node name
shape="box",
style="rounded, filled",
# Highlight query terms in plum:
fillcolor="beige" if ID not in usr_ids else "plum",
color=nodecolor)
for ID in rec_id_set}
# add nodes explicitly via add_node
for rec_id, node in nodes.items():
grph.add_node(node)
for src, target in edgeset:
# default layout in graphviz is top->bottom, so we invert
# the direction and plot using dir="back"
grph.add_edge(pydot.Edge(nodes[target], nodes[src],
shape="normal",
color=edgecolor,
label="is_a",
dir="back"))
return grph
def make_graph_pygraphviz(self, recs, nodecolor,
edgecolor, dpi,
draw_parents=True, draw_children=True):
"""Draw AMIGO style network, lineage containing one query record."""
import pygraphviz as pgv
grph = pgv.AGraph(name="GO tree")
edgeset = set()
for rec in recs:
if draw_parents:
edgeset.update(rec.get_all_parent_edges())
if draw_children:
edgeset.update(rec.get_all_child_edges())
edgeset = [(self._label_wrap(a), self._label_wrap(b))
for (a, b) in edgeset]
# add nodes explicitly via add_node
# adding nodes implicitly via add_edge misses nodes
# without at least one edge
for rec in recs:
grph.add_node(self._label_wrap(rec.id))
for src, target in edgeset:
# default layout in graphviz is top->bottom, so we invert
# the direction and plot using dir="back"
grph.add_edge(target, src)
grph.graph_attr.update(dpi="%d" % dpi)
grph.node_attr.update(shape="box", style="rounded,filled",
fillcolor="beige", color=nodecolor)
grph.edge_attr.update(shape="normal", color=edgecolor,
dir="back", label="is_a")
# highlight the query terms
for rec in recs:
try:
node = grph.get_node(self._label_wrap(rec.id))
node.attr.update(fillcolor="plum")
except:
continue
return grph
def draw_lineage(self, recs, nodecolor="mediumseagreen",
edgecolor="lightslateblue", dpi=96,
lineage_img="GO_lineage.png", engine="pygraphviz",
gml=False, draw_parents=True, draw_children=True):
"""Draw GO DAG subplot."""
assert engine in GraphEngines
grph = None
if engine == "pygraphviz":
grph = self.make_graph_pygraphviz(recs, nodecolor, edgecolor, dpi,
draw_parents=draw_parents,
draw_children=draw_children)
else:
grph = self.make_graph_pydot(recs, nodecolor, edgecolor, dpi,
draw_parents=draw_parents, draw_children=draw_children)
if gml:
import networkx as nx # use networkx to do the conversion
gmlbase = lineage_img.rsplit(".", 1)[0]
NG = nx.from_agraph(grph) if engine == "pygraphviz" else nx.from_pydot(grph)
del NG.graph['node']
del NG.graph['edge']
gmlfile = gmlbase + ".gml"
nx.write_gml(NG, gmlfile)
sys.stderr.write("GML graph written to {0}\n".format(gmlfile))
sys.stderr.write(("lineage info for terms %s written to %s\n" %
([rec.id for rec in recs], lineage_img)))
if engine == "pygraphviz":
grph.draw(lineage_img, prog="dot")
else:
grph.write_png(lineage_img)
def update_association(self, association):
"""Add the GO parents of a gene's associated GO IDs to the gene's association."""
bad_goids = set()
# Loop through all sets of GO IDs for all genes
for goids in association.values():
parents = set()
# Iterate thru each GO ID in the current gene's association
for goid in goids:
try:
parents.update(self[goid].get_all_parents())
except:
bad_goids.add(goid.strip())
# Add the GO parents of all GO IDs in the current gene's association
goids.update(parents)
if bad_goids:
sys.stderr.write("goids not found: %s\n" % (bad_goids,))
# Copyright (C) 2010-2017, H Tang et al., All rights reserved.
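# Added usage sketch (illustrative only, not part of the original goatools
# module); assumes a local "go-basic.obo" file has already been downloaded:
#
#   godag = GODag("go-basic.obo", optional_attrs=["relationship"])
#   rec = godag.query_term("GO:0003682", verbose=True)
#   godag.write_hier("GO:0003682", max_depth=2)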
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import custom_gradient
from tensorflow.python.eager import tape
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import training
class BackpropTest(test.TestCase):
def testAggregateGradients(self):
def fn(x):
ind1 = constant_op.constant(np.array([0, 1]))
ind2 = constant_op.constant(np.array([2, 3]))
ind3 = constant_op.constant(np.array([1, 3]))
# A mixture of IndexedSlices and dense tensor to aggregate.
g1 = embedding_ops.embedding_lookup(x, ind1)
g2 = embedding_ops.embedding_lookup(x, ind2)
g3 = embedding_ops.embedding_lookup(x, ind3)
g4 = math_ops.reduce_sum(x * constant_op.constant(2.0))
return g1 * g2 * g3 * g4
var_np = np.random.rand(4, 2).astype(np.float32)
var = constant_op.constant(var_np)
grad = backprop.gradients_function(fn, [0])(var)[0]
grad = ops.convert_to_tensor(grad).numpy()
with context.graph_mode(), self.test_session():
tf_var = array_ops.constant(var_np, dtypes.float32)
tf_ind1 = array_ops.constant([0, 1])
tf_ind2 = array_ops.constant([2, 3])
tf_ind3 = array_ops.constant([1, 3])
tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
tf_g4 = math_ops.reduce_sum(tf_var * 2.0, reduction_indices=(0, 1))
tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
tf_grad = gradients.gradients(tf_y, [tf_var])[0]
tf_dense_grad = math_ops.unsorted_segment_sum(
tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])
self.assertAllClose(grad, tf_dense_grad.eval())
def testImplicitGradWithResourceVariable(self):
x = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(1.0), name='x')
def fn():
tape.watch_variable(x)
b = constant_op.constant(2.0)
c = math_ops.add(x.value(), b)
return math_ops.add(c, constant_op.constant(3.0))
grads_and_vars = backprop.implicit_grad(fn)()
self.assertAllEqual(grads_and_vars[0][0], 1.0)
self.assertAllEqual(id(grads_and_vars[0][1]), id(x))
def testDy(self):
def f(x):
return x
grad_fn = backprop.gradients_function(f)
self.assertAllEqual(2., grad_fn(1., dy=2.)[0])
def testErrors(self):
@custom_gradient.custom_gradient
def f(x):
def grad(_):
raise RuntimeError('x')
return x, grad
# TODO(apassos) raise the right error here
with self.assertRaises(RuntimeError):
backprop.gradients_function(f)(constant_op.constant(1.0))
def testImplicitGradOverEmbeddingLookup(self):
batch_size = 8
embedding_size = 512
vocab_size = 1000
lrn_rate = 0.1
random_init = random_ops.random_uniform([vocab_size, embedding_size])
x = array_ops.ones((batch_size), dtypes.int64)
embedding = resource_variable_ops.ResourceVariable(
initial_value=random_init, dtype=dtypes.float32, name='embedding')
def f():
tape.watch_variable(embedding)
embedded_x = embedding_ops.embedding_lookup(embedding, x)
return constant_op.constant(1.0, dtypes.float32) - embedded_x
grad = backprop.implicit_grad(f)()[0][0]
opt = training.GradientDescentOptimizer(lrn_rate)
with context.graph_mode(), self.test_session():
tf_x = array_ops.ones((batch_size), dtypes.int64)
# TODO(ashankar,apassos): Change to ResourceVariable.
tf_embedding = variables.Variable(
random_init.numpy(), name='tf_embedding')
tf_embedded_x = embedding_ops.embedding_lookup(tf_embedding, tf_x)
tf_y = 1.0 - tf_embedded_x
tf_grad = gradients.gradients(tf_y, [tf_embedding])[0]
tf_opt = training.GradientDescentOptimizer(0.1)
tf_embedding.initializer.run()
self.assertAllClose(tf_grad.indices.eval(), grad.indices)
self.assertAllClose(tf_grad.values.eval(), grad.values)
tf_opt.apply_gradients([(tf_grad, tf_embedding)]).run()
expected = tf_embedding.eval()
opt.apply_gradients([(grad, embedding)])
self.assertAllClose(expected, embedding.read_value())
def testGradientNone(self):
def loss(x, l):
return math_ops.reduce_mean(
nn_ops.softmax_cross_entropy_with_logits(logits=x, labels=l),
constant_op.constant([0]))
logits = constant_op.constant([[0.0, 0.0]])
labels = constant_op.constant([[1.0, 0.0]])
# softmax_cross_entropy_with_logits returns two outputs and in this case the
# gradient wrt the second is None.
g, = backprop.gradients_function(loss, [0])(logits, labels)
self.assertAllEqual(g.numpy(), [[-0.5, 0.5]])
def testSecondGrad(self):
def first(x):
l = constant_op.constant([[0.0]])
x = nn_ops.softmax_cross_entropy_with_logits(labels=l, logits=x)
x = math_ops.reduce_sum(x, constant_op.constant([0]))
return x
def second(x):
grad = backprop.gradients_function(first, [0])(x)[0]
return math_ops.reduce_sum(grad, constant_op.constant([0]))
f = constant_op.constant([[0.1]])
grad = backprop.gradients_function(second, [0])(f)[0]
self.assertAllEqual([[0.0]], grad)
def testMakeVJP(self):
def f(x):
return x * x
wrapped_fn = backprop.make_vjp(f)
result, vjp = wrapped_fn(constant_op.constant(3.0))
self.assertAllEqual(result, 9.0)
self.assertAllEqual(vjp(2.0)[0], 12.0)
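# Added note: make_vjp(f) returns (f(x), vjp); for f(x) = x * x at x = 3.0 the
# forward value is 9.0 and vjp(dy) computes dy * df/dx = dy * 2 * x, so
# vjp(2.0)[0] == 2.0 * 6.0 == 12.0, matching the assertions above.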
def testGradGrad(self):
def sq(x):
return x * x
def grad(x):
value = backprop.gradients_function(sq, [0])(x)[0]
return value
gradgrad = backprop.gradients_function(grad, [0])
self.assertAllEqual(gradgrad(constant_op.constant(3.0))[0], 2.0)
def testGradGradExp(self):
def grad(x):
value = backprop.gradients_function(math_ops.exp, [0])(x)[0]
return value
gradgrad = backprop.gradients_function(grad, [0])
self.assertAllEqual(gradgrad(constant_op.constant(0.0))[0], 1.0)
def testGPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
def fn(x):
with context.device('/gpu:0'):
b = constant_op.constant(2.0)
c = math_ops.add(x.gpu(), b)
# TODO(apassos): remove cpu below by making TensorVSpace aware
# of devices.
return math_ops.add(c, constant_op.constant(3.0)).cpu()
grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]
self.assertAllEqual(grad, 1.0)
def testGPUImplicitGrad(self):
if not context.context().num_gpus():
self.skipTest('No GPU found')
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(
constant_op.constant(1.0), name='v')
def f():
with context.device('gpu:0'):
tape.watch_variable(v)
return v.read_value()
self.assertEqual(
backprop.implicit_grad(f)()[0][0].cpu().numpy(), 1.0)
def testCPU(self):
def fn(x):
b = constant_op.constant(2.0)
c = math_ops.add(x, b)
return math_ops.add(c, constant_op.constant(3.0))
grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]
self.assertAllEqual(grad, 1.0)
def testTensorCopyGPU2CPU2GPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
def f(a, b):
return a.cpu() + b.cpu()
with context.device('/gpu:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
grad = backprop.gradients_function(f, [0])(a, b)[0]
self.assertAllEqual(grad, 1.0)
def testEmptyParams(self):
def fn(a, b):
return a * b
x = constant_op.constant(1.0)
y = constant_op.constant(2.0)
dx, dy = backprop.gradients_function(fn)(x, y)
self.assertAllEqual(dx, y.numpy())
self.assertAllEqual(dy, x.numpy())
def testUnconnectedNone(self):
v = resource_variable_ops.ResourceVariable(
1.0, name='testUnconnectedNone')
def f():
v.read_value()
return constant_op.constant(1.0)
self.assertEqual(backprop.implicit_grad(f)()[0][0], None)
def testGradientTape(self):
with backprop.GradientTape() as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
with backprop.GradientTape() as gg:
gg.watch(y)
z = 2 * y
inner_grad = gg.gradient(z, [y])[0]
self.assertEqual(inner_grad.numpy(), 2.0)
y += inner_grad
grad = g.gradient(y, [x])[0]
self.assertEqual(grad.numpy(), 6.0)
def testGradientTapeGradientCalledMultipleTimes(self):
with backprop.GradientTape() as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
z = y * y
g.gradient(z, [x])
with self.assertRaisesRegexp(
RuntimeError, 'GradientTape.gradient can only be called once'):
g.gradient(y, [x])
def testGradientTapeVariable(self):
v = resource_variable_ops.ResourceVariable(1.0, name='v')
with backprop.GradientTape() as g:
y = v * v
grad = g.gradient(y, [v])[0]
self.assertAllEqual(grad, 2.0)
def testEmptyParamsForValueAndGradFunction(self):
def fn(a, b):
return a * b
val_and_grads_fn = backprop.val_and_grad_function(fn)
x = 2.0
y = 3.0
val, (dx, dy) = val_and_grads_fn(x, y)
self.assertAllClose(val, x * y)
self.assertAllEqual(dx, y)
self.assertAllEqual(dy, x)
def testNonEmptyParamsForValueAndGradFunction(self):
def fn(a, b):
return a * b
val_and_grad_fn = backprop.val_and_grad_function(fn, params=[1])
x = 2.0
y = 3.0
val, grads = val_and_grad_fn(x, y)
self.assertAllClose(val, x * y)
self.assertEqual(1, len(grads))
self.assertAllEqual(grads[0], x)
def testTensorCopyCPU2GPU2CPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# forward: a (cpu->gpu) -> add (gpu) -> c (gpu->cpu) -> add (cpu) -> e (cpu)
# back: e (cpu) -> add (cpu) -> c (cpu->gpu) -> add (gpu) -> grad (gpu->cpu)
def f(a, b):
with context.device('/gpu:0'):
c = math_ops.add(a.gpu(0), b.gpu(0))
return math_ops.add(c.cpu(), constant_op.constant(3.0))
with context.device('/cpu:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
grad = backprop.gradients_function(f, [0])(a, b)[0]
self.assertAllEqual(grad, 1.0)
def testGetAttrType(self):
typ = backprop.op_attr_type('Add', 'T')
self.assertEqual(typ, pywrap_tensorflow.TF_ATTR_TYPE)
def testGetAttrList(self):
typ = backprop.op_attr_type('MaxPool', 'ksize')
self.assertEqual(typ, [pywrap_tensorflow.TF_ATTR_INT])
def testMakeAttrType(self):
self.assertEqual(dtypes.float32,
backprop.make_attr(pywrap_tensorflow.TF_ATTR_TYPE, 1))
def testMakeAttrTypeList(self):
self.assertEqual([dtypes.float32],
backprop.make_attr([pywrap_tensorflow.TF_ATTR_TYPE], [1]))
def testMulType(self):
def mul(x):
return math_ops._mul_dispatch(x, x) # pylint: disable=protected-access
self.assertAllEqual(
backprop.gradients_function(mul)(3.0)[0].numpy(),
6.0)
def testMakeAttrShape(self):
for s in ([], None, [1, 2, 3], [None, None], [1, None, 3]):
expected = tensor_shape.TensorShape(s).as_proto()
actual = backprop.make_attr(pywrap_tensorflow.TF_ATTR_SHAPE, s)
self.assertEqual(
expected,
actual,
msg=('For shape %r, expected %r != %r actual' % (s, expected,
actual)))
def testMakeAttrShapeList(self):
shape_list = [[], None, [1, 2, 3], [None, None], [1, None, 3]]
self.assertEqual(
[tensor_shape.TensorShape(s).as_proto() for s in shape_list],
backprop.make_attr([pywrap_tensorflow.TF_ATTR_SHAPE], shape_list))
def testArgsGradientFunction(self):
def f(*args):
return args[0] * args[0]
grad = backprop.gradients_function(f)
self.assertAllEqual(grad(1.0)[0], 2.0)
def testPartial(self):
def f(x, y):
return x * y
part = functools.partial(f, constant_op.constant(2.0))
self.assertAllEqual(
backprop.gradients_function(part)(constant_op.constant(1.0))[0],
2.0)
def testReturnSameThing(self):
def f(x):
return x, 2 * x
self.assertAllEqual(backprop.gradients_function(f)(1.0)[0], 3.0)
def testExceptionSafety(self):
def f(unused_x):
raise ValueError()
try:
backprop.gradients_function(f)(1.0)
except ValueError:
pass
def real_f(x):
return x * x
self.assertAllEqual(backprop.gradients_function(real_f)(1.0)[0], 2.0)
def testMultiValueConvertToTensor(self):
x = resource_variable_ops.ResourceVariable(
initial_value=array_ops.constant([1.0]), name='x')
def fn():
tape.watch_variable(x)
a = math_ops.add(x.value(), 1.0)
# Make sure convert_to_tensor works correctly with list of TensorNodes.
b = array_ops.stack([a, a], axis=0)
return math_ops.reduce_mean(b)
grad = backprop.implicit_grad(fn)()[0][0]
self.assertAllEqual([1.0], grad)
def testOutput(self):
def multiout(x):
return x + 2, x * x
x = constant_op.constant([0.0, 1.0, 2.0])
grad = backprop.gradients_function(multiout)(x)[0]
self.assertAllEqual([1.0, 3.0, 5.0], grad)
def testMultiValuePreservesIfNotDiffedAgainst(self):
def tfe_conv2d(timage, tkernel, conv2dstrides):
return nn_ops.conv2d(timage, tkernel, conv2dstrides, 'SAME')
i = constant_op.constant([[[[1.0]]]])
k = constant_op.constant([[[[2.0]]]])
s = [1, 1, 1, 1]
grad = backprop.gradients_function(tfe_conv2d, params=(0,))(i, k, s)[0]
self.assertAllEqual([[[[2.0]]]], grad)
def testSameObjectForMultipleArguments(self):
def f(x, y):
return math_ops.multiply(x, y)
g = backprop.gradients_function(f)
def np_g(x, y):
dx, dy = g(x, y)
return [dx.numpy(), dy.numpy()]
x = constant_op.constant(1.)
self.assertAllEqual([1., 1.], np_g(x, x))
x = 1.
self.assertAllEqual([1., 1.], np_g(x, x))
x = constant_op.constant([[1.]])
self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))
x = [[1.]]
self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))
v = resource_variable_ops.ResourceVariable(
initial_value=1., name='testSameObjectForMultipleArguments.Variable')
self.assertAllEqual([1., 1.], np_g(v, v))
def testImplicitGradientsCustomGradientAndCachedVariableValue(self):
@custom_gradient.custom_gradient
def my_square(x):
result = math_ops.square(x)
def grad(dr):
return 2 * dr * x + 1
return result, grad
x = resource_variable_ops.ResourceVariable(
initial_value=3, name='X.' + self.id())
def f():
return my_square(x)
g = backprop.implicit_grad(f)
grads_and_vars = g()
self.assertEqual(1, len(grads_and_vars))
grad, var = grads_and_vars[0]
self.assertAllEqual(7, grad)
self.assertAllEqual(x, var)
def testCustomGradient(self):
@custom_gradient.custom_gradient
def my_mul(x, y):
result = x*y
def grad(dr):
return [dr*y, dr*x]
return result, grad
lr = 0.25
x = resource_variable_ops.ResourceVariable(2., name='x')
def loss(x):
return my_mul(2., x.read_value())
loss_grads_fn = backprop.implicit_val_and_grad(loss)
losses = []
for _ in range(5):
loss, grads_and_vars = loss_grads_fn(x)
losses.append(loss.numpy())
for (grad, var) in grads_and_vars:
var.assign_sub(lr*grad)
self.assertAllEqual(losses, [4.0, 3., 2., 1., 0.])
def testCustomGradientIdentity(self):
@custom_gradient.custom_gradient
def my_identity(x):
def grad(dresult):
return [2 * dresult]
return x, grad
self.assertAllEqual(backprop.gradients_function(my_identity)(1.0)[0], 2.0)
def testDifferentiatingFunctionThatReturnsNone(self):
def fn(x, y):
result = x*y # pylint: disable=unused-variable
x = constant_op.constant(1)
y = constant_op.constant(2)
loss_grads_fn = backprop.implicit_val_and_grad(fn)
with self.assertRaisesRegexp(
ValueError, 'Cannot differentiate a function that returns None; '
'did you forget to return a value from fn?'):
loss_grads_fn(x, y)
val_and_grads_fn = backprop.val_and_grad_function(fn)
with self.assertRaisesRegexp(
ValueError, 'Cannot differentiate a function that returns None; '
'did you forget to return a value from fn?'):
val_and_grads_fn(x, y)
if __name__ == '__main__':
test.main()
|
|
import sys
from rpython.rtyper.test.test_llinterp import interpret, get_interpreter
from rpython.rtyper.lltypesystem import lltype
from rpython.rlib.objectmodel import UnboxedValue
from rpython.translator.translator import graphof
from rpython.flowspace.model import summary
from rpython.translator.backendopt.all import backend_optimizations
from rpython.conftest import option
class A(object):
__slots__ = ()
def meth(self, x):
raise NotImplementedError
class B(A):
attrvalue = 66
def __init__(self, normalint):
self.normalint = normalint
def meth(self, x):
return self.normalint + x + 2
class C(A, UnboxedValue):
__slots__ = 'smallint'
def meth(self, x):
return self.smallint + x + 3
class D(B):
attrvalue = 68
# ____________________________________________________________
def test_instantiate():
def fn1(n):
return C(n)
res = interpret(fn1, [42], taggedpointers=True)
value = lltype.cast_ptr_to_int(res)
assert value == 42 * 2 + 1 # for now
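# Added note: with taggedpointers=True an UnboxedValue instance such as C(n) is
# not a real heap object; it is encoded directly in the pointer as the odd
# integer 2 * n + 1, which is why cast_ptr_to_int(C(42)) equals 42 * 2 + 1.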
def test_attribute():
def fn1(n):
return C(n).smallint
res = interpret(fn1, [42], taggedpointers=True)
assert res == 42
def test_get_untagged_value():
def fn1(n):
return C(n).get_untagged_value()
res = interpret(fn1, [42], taggedpointers=True)
assert res == 42
def test_overflowerror():
def makeint(n):
try:
return C(n)
except OverflowError: # 'n' out of range
return B(n)
def fn2(n):
x = makeint(n)
if isinstance(x, B):
return 'B', x.normalint
elif isinstance(x, C):
return 'C', x.smallint
else:
return 'A', 0
res = interpret(fn2, [-117], taggedpointers=True)
assert res.item0 == 'C'
assert res.item1 == -117
res = interpret(fn2, [sys.maxint], taggedpointers=True)
assert res.item0 == 'B'
assert res.item1 == sys.maxint
def test_prebuilt():
c = C(111)
b = B(939393)
def makeint(n):
if n < 0:
x = c
elif n > 0:
x = C(n)
else:
x = b
return x
def fn(n):
x = makeint(n)
if isinstance(x, B):
return 'B', x.normalint
elif isinstance(x, C):
return 'C', x.smallint
else:
return 'A', 0
res = interpret(fn, [12], taggedpointers=True)
assert res.item0 == 'C'
assert res.item1 == 12
res = interpret(fn, [-1], taggedpointers=True)
assert res.item0 == 'C'
assert res.item1 == 111
res = interpret(fn, [0], taggedpointers=True)
assert res.item0 == 'B'
assert res.item1 == 939393
def test_C_or_None():
def g(x):
if x is None:
return sys.maxint
else:
return x.smallint
def fn(n):
if n < 0:
x = None
else:
x = C(n)
return g(x)
res = interpret(fn, [-1], taggedpointers=True)
assert res == sys.maxint
res = interpret(fn, [56], taggedpointers=True)
assert res == 56
def test_type():
def fn(n):
if n < 0:
x = B(n)
else:
x = C(n)
return type(x) is B, type(x) is C
res = interpret(fn, [-212], taggedpointers=True)
assert res.item0 and not res.item1
res = interpret(fn, [9874], taggedpointers=True)
assert res.item1 and not res.item0
def test_type_of_None():
# use extra function to prevent flow graph cleverness
def g(n):
if n < 0:
x = B(n)
elif n == 0:
x = None
else:
x = C(n)
return x
def fn(n):
x = g(n)
return type(x) is B, type(x) is C
res = interpret(fn, [-212], taggedpointers=True)
assert res.item0 and not res.item1
res = interpret(fn, [9874], taggedpointers=True)
assert res.item1 and not res.item0
res = interpret(fn, [0], taggedpointers=True)
assert not res.item1 and not res.item0
def test_str():
def fn(n):
if n > 0:
x = B(n)
else:
x = C(n)
return str(x)
res = interpret(fn, [-832], taggedpointers=True)
assert ''.join(res.chars) == '<unboxed -832>'
res = interpret(fn, [1], taggedpointers=True)
assert ''.join(res.chars).startswith('<B object')
def test_format():
def fn(n):
if n > 0:
x = B(n)
else:
x = C(n)
return '%r' % (x,)
res = interpret(fn, [-832], taggedpointers=True)
assert ''.join(res.chars) == '<unboxed -832>'
res = interpret(fn, [1], taggedpointers=True)
assert ''.join(res.chars).startswith('<B object')
def test_method():
def fn(n):
if n > 0:
x = B(n)
else:
x = C(n)
return x.meth(100)
res = interpret(fn, [1000], taggedpointers=True)
assert res == 1102
res = interpret(fn, [-1000], taggedpointers=True)
assert res == -897
def test_optimize_method():
def fn(n):
if n > 0:
x = B(n)
else:
x = C(n)
return x.meth(100)
interp, graph = get_interpreter(fn, [-1000], taggedpointers=True)
t = interp.typer.annotator.translator
t.config.translation.backendopt.constfold = True
backend_optimizations(t)
if option.view:
t.view()
LLFrame = interp.frame_class
class MyFrame(LLFrame):
def op_indirect_call(self, f, *args):
raise AssertionError("this call should be optimized away")
interp.frame_class = MyFrame
res = interp.eval_graph(graph, [-1000])
assert res == -897
def test_untagged_subclasses():
def g(x):
return x.attrvalue # should not produce a call to ll_unboxed_getclass
def fn(n):
y = C(12)
if n > 0:
x = B(5)
else:
x = D(5)
return g(x)
interp, graph = get_interpreter(fn, [-1000], taggedpointers=True)
t = interp.typer.annotator.translator
ggraph = graphof(t, g)
assert summary(ggraph) == {'cast_pointer': 2, 'getfield': 2}
res = interp.eval_graph(graph, [-1000])
assert res == 68
res = interp.eval_graph(graph, [3])
assert res == 66
def test_disable_tagging():
def fn(n):
if n < 0:
x = B(n)
else:
x = C(n)
return type(x) is B, type(x) is C
res = interpret(fn, [-212], taggedpointers=False)
assert res.item0 and not res.item1
res = interpret(fn, [9874], taggedpointers=False)
assert res.item1 and not res.item0
|
|
from __future__ import unicode_literals
import boto3
from botocore.exceptions import ClientError
from moto import mock_sns
import sure # noqa
from moto.core import ACCOUNT_ID
import pytest
@mock_sns
def test_create_platform_application():
conn = boto3.client("sns", region_name="us-east-1")
response = conn.create_platform_application(
Name="my-application",
Platform="APNS",
Attributes={
"PlatformCredential": "platform_credential",
"PlatformPrincipal": "platform_principal",
},
)
application_arn = response["PlatformApplicationArn"]
application_arn.should.equal(
"arn:aws:sns:us-east-1:{}:app/APNS/my-application".format(ACCOUNT_ID)
)
@mock_sns
def test_get_platform_application_attributes():
conn = boto3.client("sns", region_name="us-east-1")
platform_application = conn.create_platform_application(
Name="my-application",
Platform="APNS",
Attributes={
"PlatformCredential": "platform_credential",
"PlatformPrincipal": "platform_principal",
},
)
arn = platform_application["PlatformApplicationArn"]
attributes = conn.get_platform_application_attributes(PlatformApplicationArn=arn)[
"Attributes"
]
attributes.should.equal(
{
"PlatformCredential": "platform_credential",
"PlatformPrincipal": "platform_principal",
}
)
@mock_sns
def test_get_missing_platform_application_attributes():
conn = boto3.client("sns", region_name="us-east-1")
conn.get_platform_application_attributes.when.called_with(
PlatformApplicationArn="a-fake-arn"
).should.throw(ClientError)
@mock_sns
def test_set_platform_application_attributes():
conn = boto3.client("sns", region_name="us-east-1")
platform_application = conn.create_platform_application(
Name="my-application",
Platform="APNS",
Attributes={
"PlatformCredential": "platform_credential",
"PlatformPrincipal": "platform_principal",
},
)
arn = platform_application["PlatformApplicationArn"]
conn.set_platform_application_attributes(
PlatformApplicationArn=arn, Attributes={"PlatformPrincipal": "other"}
)
attributes = conn.get_platform_application_attributes(PlatformApplicationArn=arn)[
"Attributes"
]
attributes.should.equal(
{"PlatformCredential": "platform_credential", "PlatformPrincipal": "other"}
)
@mock_sns
def test_list_platform_applications():
conn = boto3.client("sns", region_name="us-east-1")
conn.create_platform_application(
Name="application1", Platform="APNS", Attributes={}
)
conn.create_platform_application(
Name="application2", Platform="APNS", Attributes={}
)
applications_response = conn.list_platform_applications()
applications = applications_response["PlatformApplications"]
applications.should.have.length_of(2)
@mock_sns
def test_delete_platform_application():
conn = boto3.client("sns", region_name="us-east-1")
conn.create_platform_application(
Name="application1", Platform="APNS", Attributes={}
)
conn.create_platform_application(
Name="application2", Platform="APNS", Attributes={}
)
applications_response = conn.list_platform_applications()
applications = applications_response["PlatformApplications"]
applications.should.have.length_of(2)
application_arn = applications[0]["PlatformApplicationArn"]
conn.delete_platform_application(PlatformApplicationArn=application_arn)
applications_response = conn.list_platform_applications()
applications = applications_response["PlatformApplications"]
applications.should.have.length_of(1)
@mock_sns
def test_create_platform_endpoint():
conn = boto3.client("sns", region_name="us-east-1")
platform_application = conn.create_platform_application(
Name="my-application", Platform="APNS", Attributes={}
)
application_arn = platform_application["PlatformApplicationArn"]
endpoint = conn.create_platform_endpoint(
PlatformApplicationArn=application_arn,
Token="some_unique_id",
CustomUserData="some user data",
Attributes={"Enabled": "false"},
)
endpoint_arn = endpoint["EndpointArn"]
endpoint_arn.should.contain(
"arn:aws:sns:us-east-1:{}:endpoint/APNS/my-application/".format(ACCOUNT_ID)
)
@mock_sns
def test_create_duplicate_platform_endpoint():
conn = boto3.client("sns", region_name="us-east-1")
platform_application = conn.create_platform_application(
Name="my-application", Platform="APNS", Attributes={}
)
application_arn = platform_application["PlatformApplicationArn"]
endpoint = conn.create_platform_endpoint(
PlatformApplicationArn=application_arn,
Token="some_unique_id",
CustomUserData="some user data",
Attributes={"Enabled": "false"},
)
endpoint = conn.create_platform_endpoint.when.called_with(
PlatformApplicationArn=application_arn,
Token="some_unique_id",
CustomUserData="some user data",
Attributes={"Enabled": "false"},
).should.throw(ClientError)
@mock_sns
def test_get_list_endpoints_by_platform_application():
conn = boto3.client("sns", region_name="us-east-1")
platform_application = conn.create_platform_application(
Name="my-application", Platform="APNS", Attributes={}
)
application_arn = platform_application["PlatformApplicationArn"]
endpoint = conn.create_platform_endpoint(
PlatformApplicationArn=application_arn,
Token="some_unique_id",
CustomUserData="some user data",
Attributes={"CustomUserData": "some data"},
)
endpoint_arn = endpoint["EndpointArn"]
endpoint_list = conn.list_endpoints_by_platform_application(
PlatformApplicationArn=application_arn
)["Endpoints"]
endpoint_list.should.have.length_of(1)
endpoint_list[0]["Attributes"]["CustomUserData"].should.equal("some data")
endpoint_list[0]["EndpointArn"].should.equal(endpoint_arn)
@mock_sns
def test_get_endpoint_attributes():
conn = boto3.client("sns", region_name="us-east-1")
platform_application = conn.create_platform_application(
Name="my-application", Platform="APNS", Attributes={}
)
application_arn = platform_application["PlatformApplicationArn"]
endpoint = conn.create_platform_endpoint(
PlatformApplicationArn=application_arn,
Token="some_unique_id",
CustomUserData="some user data",
Attributes={"Enabled": "false", "CustomUserData": "some data"},
)
endpoint_arn = endpoint["EndpointArn"]
attributes = conn.get_endpoint_attributes(EndpointArn=endpoint_arn)["Attributes"]
attributes.should.equal(
{"Token": "some_unique_id", "Enabled": "false", "CustomUserData": "some data"}
)
@mock_sns
def test_get_non_existent_endpoint_attributes():
conn = boto3.client("sns", region_name="us-east-1")
endpoint_arn = "arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/c1f76c42-192a-4e75-b04f-a9268ce2abf3"
with pytest.raises(conn.exceptions.NotFoundException) as excinfo:
conn.get_endpoint_attributes(EndpointArn=endpoint_arn)
error = excinfo.value.response["Error"]
error["Type"].should.equal("Sender")
error["Code"].should.equal("NotFound")
error["Message"].should.equal("Endpoint does not exist")
@mock_sns
def test_get_missing_endpoint_attributes():
conn = boto3.client("sns", region_name="us-east-1")
conn.get_endpoint_attributes.when.called_with(
EndpointArn="a-fake-arn"
).should.throw(ClientError)
@mock_sns
def test_set_endpoint_attributes():
conn = boto3.client("sns", region_name="us-east-1")
platform_application = conn.create_platform_application(
Name="my-application", Platform="APNS", Attributes={}
)
application_arn = platform_application["PlatformApplicationArn"]
endpoint = conn.create_platform_endpoint(
PlatformApplicationArn=application_arn,
Token="some_unique_id",
CustomUserData="some user data",
Attributes={"Enabled": "false", "CustomUserData": "some data"},
)
endpoint_arn = endpoint["EndpointArn"]
conn.set_endpoint_attributes(
EndpointArn=endpoint_arn, Attributes={"CustomUserData": "other data"}
)
attributes = conn.get_endpoint_attributes(EndpointArn=endpoint_arn)["Attributes"]
attributes.should.equal(
{"Token": "some_unique_id", "Enabled": "false", "CustomUserData": "other data"}
)
@mock_sns
def test_publish_to_platform_endpoint():
conn = boto3.client("sns", region_name="us-east-1")
platform_application = conn.create_platform_application(
Name="my-application", Platform="APNS", Attributes={}
)
application_arn = platform_application["PlatformApplicationArn"]
endpoint = conn.create_platform_endpoint(
PlatformApplicationArn=application_arn,
Token="some_unique_id",
CustomUserData="some user data",
Attributes={"Enabled": "true"},
)
endpoint_arn = endpoint["EndpointArn"]
conn.publish(
Message="some message", MessageStructure="json", TargetArn=endpoint_arn
)
@mock_sns
def test_publish_to_disabled_platform_endpoint():
conn = boto3.client("sns", region_name="us-east-1")
platform_application = conn.create_platform_application(
Name="my-application", Platform="APNS", Attributes={}
)
application_arn = platform_application["PlatformApplicationArn"]
endpoint = conn.create_platform_endpoint(
PlatformApplicationArn=application_arn,
Token="some_unique_id",
CustomUserData="some user data",
Attributes={"Enabled": "false"},
)
endpoint_arn = endpoint["EndpointArn"]
conn.publish.when.called_with(
Message="some message", MessageStructure="json", TargetArn=endpoint_arn
).should.throw(ClientError)
@mock_sns
def test_set_sms_attributes():
conn = boto3.client("sns", region_name="us-east-1")
conn.set_sms_attributes(
attributes={"DefaultSMSType": "Transactional", "test": "test"}
)
response = conn.get_sms_attributes()
response.should.contain("attributes")
response["attributes"].should.contain("DefaultSMSType")
response["attributes"].should.contain("test")
response["attributes"]["DefaultSMSType"].should.equal("Transactional")
response["attributes"]["test"].should.equal("test")
@mock_sns
def test_get_sms_attributes_filtered():
conn = boto3.client("sns", region_name="us-east-1")
conn.set_sms_attributes(
attributes={"DefaultSMSType": "Transactional", "test": "test"}
)
response = conn.get_sms_attributes(attributes=["DefaultSMSType"])
response.should.contain("attributes")
response["attributes"].should.contain("DefaultSMSType")
response["attributes"].should_not.contain("test")
response["attributes"]["DefaultSMSType"].should.equal("Transactional")
|
|
from ipypublish.filters_pandoc.utils import apply_filter
from ipypublish.filters_pandoc import (
prepare_raw,
format_cite_elements,
format_raw_spans,
)
def test_mkdown_hlink_to_rst():
out_string = apply_filter("[a link](https://pandoc.org/filters.html)", [], "rst")
assert out_string == "`a link <https://pandoc.org/filters.html>`__"
def test_latex_to_rst():
"""
"""
in_string = [
r"\cref{label1} \Cref{label2} \cite{a-cite-key_2019}",
"",
"\\cite{label1,label2}",
"",
r"\ref{label3} \todo{something todo}",
"",
r"\todo{something else todo}",
]
out_string = apply_filter(
in_string,
[prepare_raw.main, format_cite_elements.main, format_raw_spans.main],
"rst",
)
assert out_string == "\n".join(
[
":ref:`label1` :ref:`label2` :cite:`a-cite-key_2019`",
"",
":cite:`label1,label2`",
"",
":ref:`label3`",
"",
".. todo:: something todo",
"",
"",
"",
".. todo:: something else todo",
"",
"",
]
)
def test_latex_to_rst_with_numref():
""""""
in_string = [
"---",
"ipub:",
" pandoc:",
" use_numref: true",
"---",
"",
r"\cref{label1} \Cref{label2} \cite{a-cite-key_2019}",
"",
r"\ref{label3} \todo[inline]{something todo}",
"",
r"\todo{something else todo}",
]
out_string = apply_filter(
in_string,
[prepare_raw.main, format_cite_elements.main, format_raw_spans.main],
"rst",
)
assert out_string.strip() == "\n".join(
[
":numref:`label1` :numref:`label2` :cite:`a-cite-key_2019`",
"",
":ref:`label3`",
"",
".. todo:: something todo" "",
"",
"",
"",
".. todo:: something else todo",
]
)
def test_html_to_latex_label():
in_string = ["[some text](#alabel)"]
out_string = apply_filter(
in_string, [prepare_raw.main, format_cite_elements.main], "latex"
)
assert out_string == "\n".join([r"\cref{alabel}"])
def test_cite_in_table_caption():
in_string = ["a b", "- -", "1 2", "", "Table: Caption \\cite{a}"]
out_string = apply_filter(
in_string, [prepare_raw.main, format_cite_elements.main], "markdown"
)
assert out_string == "\n".join(
[" a b", " --- ---", " 1 2", "", " : Caption [@a]"]
)
def test_html_to_latex_cite():
in_string = [
'surrounding <cite data-cite="cite_key">text</cite> text' "",
'<cite data-cite="cite_key2"></cite>',
]
out_string = apply_filter(
in_string, [prepare_raw.main, format_cite_elements.main], "latex"
)
assert out_string == "\n".join(
[r"surrounding \cite{cite_key} text \cite{cite_key2}"]
)
def test_html_to_rst_cite():
in_string = [
'surrounding <cite data-cite="cite_key">text</cite> text',
"",
'<cite data-cite="cite_key2"></cite>',
]
out_string = apply_filter(
in_string, [prepare_raw.main, format_cite_elements.main], "rst"
)
assert out_string == "\n".join(
["surrounding :cite:`cite_key` text", "", ":cite:`cite_key2`"]
)
def test_citations_latex():
    in_string = [
        "@label1",
        "",
        "[@label1;@label2]",
        "",
        "[an internal link](#label2)" "",
        "[an external link](http://something.org)",
        "",
        # image paragraph; rendered as a figure in the expected output below
        "",
        "",
    ]
out_string = apply_filter(
in_string, [prepare_raw.main, format_cite_elements.main], "latex"
)
assert out_string.strip() == "\n".join(
[
"\\cite{label1}",
"",
"\\cite{label1,label2}",
"",
"\\cref{label2} \\href{http://something.org}{an external link}",
"",
"\\begin{figure}",
"\\centering",
"\\includegraphics{path/to/image.png}",
"\\caption{a citation \\cite{label}}",
"\\end{figure}",
]
)
def test_citations_rst():
    in_string = [
        "@label1",
        "",
        "[an internal link](#label2)" "",
        "[an external link](http://something.org)",
        "",
        # image paragraph; rendered as a figure directive in the expected output below
        "",
        "",
    ]
out_string = apply_filter(
in_string, [prepare_raw.main, format_cite_elements.main], "rst"
)
assert out_string.strip() == "\n".join(
[
":cite:`label1`",
"",
":ref:`label2` `an external link <http://something.org>`__",
"",
".. figure:: path/to/image.png",
" :alt: a citation :cite:`label`",
"",
" a citation :cite:`label`",
]
)
def test_rst_cite_to_rst():
in_string = ["a :ref:`label` b"]
out_string = apply_filter(
in_string, [prepare_raw.main, format_cite_elements.main], "rst"
)
assert out_string.strip() == "\n".join(["a :ref:`label` b"])
def test_rst_cite_to_latex():
in_string = ["a :ref:`label` b"]
out_string = apply_filter(
in_string, [prepare_raw.main, format_cite_elements.main], "latex"
)
assert out_string.strip() == "\n".join([r"a \ref{label} b"])
def test_rst_known_role_to_rst():
in_string = ["a :py:func:`label` b"]
out_string = apply_filter(
in_string, [prepare_raw.main, format_raw_spans.main], "rst"
)
assert out_string.strip() == "\n".join(["a :py:func:`label` b"])
def test_rst_directive_to_rst():
in_string = [".. versionchanged:: v0.8.3", "", " abc", "", " xyz"]
out_string = apply_filter(
in_string, [prepare_raw.main, format_raw_spans.main], "rst"
)
assert out_string.strip() == "\n".join(
[".. versionchanged:: v0.8.3", "", " abc", "", " xyz"]
)
def test_rst_directive_to_latex():
in_string = [".. versionchanged:: v0.8.3", "", " abc", "", " xyz"]
out_string = apply_filter(
in_string, [prepare_raw.main, format_raw_spans.main], "latex"
)
assert out_string.strip() == "\n".join(
[
"\\begin{mdframed}[frametitle={versionchanged},frametitlerule=true]",
"\\mdfsubtitle{v0.8.3}",
"",
"abc",
"",
"xyz",
"",
"\\end{mdframed}",
]
)
def test_rst_directive_with_options_to_rst():
in_string = [
".. dir::",
" :maxdepth: 2",
" :numbered:",
"",
" abc",
" xyz",
"",
" new paragraph",
"",
]
out_string = apply_filter(
in_string, [prepare_raw.main, format_raw_spans.main], "rst"
)
assert out_string == "\n".join(
[
".. dir::",
" :maxdepth: 2",
" :numbered:",
"",
" abc",
" xyz",
"",
" new paragraph",
"",
"",
]
)
def test_rst_label_to_rst():
in_string = [".. _alabel:"]
out_string = apply_filter(
in_string, [prepare_raw.main, format_raw_spans.main], "rst"
)
assert out_string.strip() == "\n".join([".. _alabel:"])
|
|
import gevent
import gevent.monkey
import gevent.socket
gevent.monkey.patch_all()
import subprocess
import fcntl
import os
import errno
import sys
import urllib
import distutils.spawn
import gevent.queue
import gevent.event
import ujson
import flask
import flask.ext.login
import chef
import logging
app = flask.Flask('chefdash')
app.config.update(
DEBUG = True,
SECRET_KEY = 'dev',
LOG_FILE = None,
LOG_FORMAT = '%(asctime)s %(name)s\t%(levelname)s\t%(message)s',
LOG_LEVEL = logging.INFO,
ENABLE_BOOTSTRAP = True,
)
BOOTSTRAP_ENV = '__chefdash_bootstrap__'
if distutils.spawn.find_executable('knife'):
bootstrap_enabled = True
else:
bootstrap_enabled = False
login_manager = flask.ext.login.LoginManager(app)
api = chef.autoconfigure()
def handler(environ, start_response):
handled = False
path = environ['PATH_INFO']
if path.startswith('/feed/'):
ws = environ.get('wsgi.websocket')
if ws:
handle_websocket(ws, path[6:])
handled = True
if not handled:
return app(environ, start_response)
websockets = {}
def handle_websocket(ws, env):
if not env:
env = BOOTSTRAP_ENV
s = websockets.get(env)
if s is None:
s = websockets[env] = []
s.append(ws)
while True:
buf = ws.receive()
if buf is None:
break
if ws in s:
s.remove(ws)
@app.route('/feed/<env>')
@flask.ext.login.login_required
def feed(env = None):
flask.abort(400)
greenlets = {}
def processes(env = None, node = None, only_executing = True):
env_greenlets = greenlets.get(env)
if env_greenlets is None:
return []
elif node is None:
result = []
for greenlet in env_greenlets.itervalues():
if not only_executing or not greenlet.ready():
result.append(greenlet)
return result
else:
greenlet = env_greenlets.get(node)
if greenlet is None or (only_executing and greenlet.ready()):
return []
else:
return [greenlet,]
def broadcast(env, packet):
sockets = websockets.get(env)
if sockets is not None:
packet = ujson.encode(packet)
for ws in list(sockets):
if ws.socket is not None:
try:
ws.send(packet)
except gevent.socket.error:
if ws in sockets:
sockets.remove(ws)
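# Note added for clarity (not in the original source): packets broadcast to the
# websocket clients are plain dicts carrying any of three keys -- 'host' (node name
# or IP address), 'status' ('ready', 'error', or an in-progress status such as
# 'converging'/'bootstrapping'), and 'data' (a chunk of command output).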
@app.route('/converge/<env>', methods = ['POST'])
@app.route('/converge/<env>/<node>', methods = ['POST'])
@flask.ext.login.login_required
def converge(env, node = None):
if env == BOOTSTRAP_ENV:
flask.abort(400)
if len(processes(env, node, only_executing = True)) > 0:
return ujson.encode({ 'status': 'converging' })
if node is not None:
nodes = { node: chef.Node(node, api = api), }
else:
nodes = { row.object.name: row.object for row in chef.Search('node', 'chef_environment:' + env, api = api) }
get_command = lambda n: ['ssh', '-o', 'StrictHostKeyChecking=no', n['ipaddress'], 'sudo', 'chef-client']
return _run(
nodes,
get_command,
env = env,
progress_status = 'converging',
)
@app.route('/bootstrap')
@flask.ext.login.login_required
def bootstrap_list():
if not bootstrap_enabled or not app.config.get('ENABLE_BOOTSTRAP'):
flask.abort(400)
nodes = greenlets.get(BOOTSTRAP_ENV, {}).keys()
status, output, executing = get_env_status(BOOTSTRAP_ENV, nodes, progress_status = 'bootstrapping')
return flask.render_template(
'bootstrap.html',
status = status,
output = output,
nodes = nodes,
)
@app.route('/bootstrap/<ip>', methods = ['POST'])
@flask.ext.login.login_required
def bootstrap(ip):
if not bootstrap_enabled or not app.config.get('ENABLE_BOOTSTRAP'):
flask.abort(400)
if len(processes(BOOTSTRAP_ENV, ip, only_executing = True)) > 0:
return ujson.encode({ 'status': 'bootstrapping' })
if len(chef.Search('node', 'ipaddress:%s OR fqdn:%s OR hostname:%s' % (ip, ip, ip), api = api)) > 0:
broadcast(BOOTSTRAP_ENV, { 'host': ip, 'status': 'ready', 'data': 'A node already exists at this address.\n' })
return ujson.encode({ 'status': 'ready' })
get_command = lambda ip: ['knife', 'bootstrap', '--sudo', ip]
return _run(
{ ip: ip, },
get_command,
env = BOOTSTRAP_ENV,
progress_status = 'bootstrapping',
)
def _run(nodes, get_command, env, progress_status):
# nodes: dictionary of node names mapped to node objects
# Node objects can be anything. They're just passed to the get_command function
# get_command: function that takes a node object and returns a command to execute via Popen
# env: name of the environment
# progress_status: the status to broadcast to the websockets when the command is executing
env_greenlets = greenlets.get(env)
if env_greenlets is None:
greenlets[env] = env_greenlets = { }
for node in nodes:
try:
del env_greenlets[node]
except KeyError:
pass
for hostname in nodes:
node_object = nodes[hostname]
p = subprocess.Popen(get_command(node_object), shell = False, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
p.chunks = [] # Chunks of stdout data
fcntl.fcntl(p.stdout, fcntl.F_SETFL, os.O_NONBLOCK) # make the file nonblocking
def read(host, process):
broadcast(env, { 'host': host, 'status': progress_status })
while True:
chunk = None
try:
chunk = process.stdout.read(4096)
if not chunk:
break
except IOError, e:
chunk = None
if e[0] != errno.EAGAIN:
raise
sys.exc_clear()
if chunk:
process.chunks.append(chunk)
broadcast(env, { 'host': host, 'data': chunk, })
gevent.socket.wait_read(process.stdout.fileno())
process.stdout.close()
process.wait()
errors = process.stderr.read()
process.chunks.append(errors)
broadcast(env, { 'host': host, 'status': 'ready' if process.returncode == 0 else 'error', 'data': errors })
if len(processes(env, only_executing = True)) <= 1:
broadcast(env, { 'status': 'ready' })
return process.returncode
greenlet = gevent.spawn(read, host = hostname, process = p)
greenlet.process = p
env_greenlets[hostname] = greenlet
broadcast(env, { 'status': progress_status })
return ujson.encode({ 'status': progress_status if len(nodes) > 0 else 'ready' })
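# Illustrative sketch added for documentation; not part of the original module and
# never called. It shows how _run() is wired together by callers such as converge()
# above. The host name and environment name below are made up.
def _example_run_usage():
    node_name = 'node1.example.com'  # hypothetical node
    return _run(
        { node_name: chef.Node(node_name, api = api) },
        lambda n: ['ssh', '-o', 'StrictHostKeyChecking=no', n['ipaddress'], 'sudo', 'chef-client'],
        env = 'production',  # hypothetical environment name
        progress_status = 'converging',
    )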
@app.route('/')
@flask.ext.login.login_required
def index():
envs = chef.Environment.list(api = api)
return flask.render_template(
'index.html',
envs = envs.itervalues(),
bootstrap_enabled = bootstrap_enabled and app.config.get('ENABLE_BOOTSTRAP'),
)
def get_env_status(env, nodes, progress_status):
status = {}
output = {}
executing = False
env_greenlets = greenlets.get(env)
if env_greenlets is None:
env_greenlets = greenlets[env] = { }
for node in nodes:
greenlet = env_greenlets.get(node)
if greenlet is None:
status[node] = 'ready'
output[node] = ''
else:
s = progress_status
if greenlet.ready():
s = 'ready' if greenlet.value == 0 else 'error'
else:
executing = True
status[node] = s
output[node] = ''.join(greenlet.process.chunks)
return status, output, executing
@app.route('/env/<env>')
@flask.ext.login.login_required
def env(env):
if env == BOOTSTRAP_ENV:
flask.abort(400)
if len(chef.Search('environment', 'name:' + env, api = api)) == 0:
flask.abort(404)
nodes = list(chef.Search('node', 'chef_environment:%s' % env, api = api))
nodes.sort(key = lambda n: n.object.name)
status, output, converging = get_env_status(env, (n.object.name for n in nodes), progress_status = 'converging')
return flask.render_template(
'env.html',
env = env,
converging = converging,
status = status,
output = output,
nodes = nodes,
)
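# Note added for clarity (not in the original source): decorating the class itself
# registers User as the flask-login user_loader callback, so a user is reloaded from
# the session simply by constructing User(user_id).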
@login_manager.user_loader
class User(object):
def __init__(self, id):
self.id = id
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
#def get_auth_token(self):
#return flask.ext.login.make_secure_token(self.id)
login_manager.login_view = 'login'
@app.template_filter('urlquote')
def urlquote(url):
return urllib.quote(url, '')
@app.route('/login', methods = ['GET', 'POST'])
def login():
request = flask.request
if flask.ext.login.current_user.is_authenticated():
return flask.redirect(request.args.get('next') or flask.url_for('index'))
username = request.form.get('username')
remember = request.form.get('remember') == 'on'
if username is not None:
password = request.form.get('password')
auth_result = ujson.decode(api.request('POST', '/authenticate_user', data = ujson.encode({ 'name': username, 'password': password })))
if auth_result.get('name') == username and auth_result.get('verified'):
flask.ext.login.login_user(User(username), remember = remember)
return flask.redirect(request.args.get('next') or flask.url_for('index'))
else:
return flask.render_template('login.html',
username = username,
error = True,
remember = remember,
next = request.args.get('next'),
)
return flask.render_template('login.html',
username = None,
error = False,
remember = remember,
next = request.args.get('next'),
)
@app.route('/logout')
def logout():
flask.ext.login.logout_user()
return flask.redirect(flask.url_for('login'))
@app.route('/favicon.ico')
def favicon():
return flask.send_from_directory(
os.path.join(app.root_path, 'static'),
'favicon.ico',
mimetype = 'image/vnd.microsoft.icon',
)
|
|
import datetime
import json
import os
import pathlib
import re
import warnings
from collections import OrderedDict
from urllib.parse import (
parse_qs,
quote,
unquote,
urlencode,
urlparse,
urlunparse,
)
from urllib.request import pathname2url, url2pathname
import requests
# Third-Party
import yaml
from appdirs import user_cache_dir, user_data_dir
def get_bool_from_env(var_name: str):
return os.getenv(var_name, '').lower() == 'true'
APP_NAME = "Quilt"
APP_AUTHOR = "QuiltData"
BASE_DIR = user_data_dir(APP_NAME, APP_AUTHOR)
BASE_PATH = pathlib.Path(BASE_DIR)
CACHE_PATH = pathlib.Path(user_cache_dir(APP_NAME, APP_AUTHOR)) / "v0"
TEMPFILE_DIR_PATH = BASE_PATH / "tempfiles"
CONFIG_PATH = BASE_PATH / 'config.yml'
OPEN_DATA_URL = "https://open.quiltdata.com"
PACKAGE_NAME_FORMAT = r"([\w-]+/[\w-]+)(?:/(.+))?$"
DISABLE_TQDM = get_bool_from_env('QUILT_MINIMIZE_STDOUT')
PACKAGE_UPDATE_POLICY = {'incoming', 'existing'}
IS_CACHE_ENABLED = not get_bool_from_env('QUILT_DISABLE_CACHE')
# CONFIG_TEMPLATE
# Must contain every permitted config key, as well as their default values (which can be 'null'/None).
# Comments are retained and added to local config, unless overridden by autoconfig via `api.config(<url>)`
CONFIG_TEMPLATE = """
# Quilt3 configuration file
# navigator_url: <url string, default: null>
#
# Used for autoconfiguration
# navigator_url: https://example.com
navigator_url:
# default_local_registry: <url string, default: local appdirs>
# default target registry for operations like install and build
default_local_registry: "{}"
# default_remote_registry: <url string, default: null>
# default target for operations like push and browse
default_remote_registry:
# default_install_location: <url string, default: null>
# default filesystem target for the install operation
default_install_location:
# Identity service URL
registryUrl:
# Disable anonymous usage metrics
telemetry_disabled: false
# S3 Proxy
s3Proxy:
# API Gateway endpoint (e.g., for search)
apiGatewayEndpoint:
# Binary API Gateway endpoint (e.g., for preview)
binaryApiGatewayEndpoint:
default_registry_version: 1
""".format(BASE_PATH.as_uri() + '/packages')
def get_pos_int_from_env(var_name):
val = os.getenv(var_name)
if val:
try:
val = int(val)
except ValueError:
val = None
        # Only validate when the variable is actually set; an unset variable
        # means "not configured" and falls through to an implicit None.
        if val is None or val <= 0:
            raise ValueError(f'{var_name} must be a positive integer')
        return val
class QuiltException(Exception):
def __init__(self, message, **kwargs):
# We use NewError("Prefix: " + str(error)) a lot.
# To be consistent across Python 2.7 and 3.x:
# 1) This `super` call must exist, or 2.7 will have no text for str(error)
# 2) This `super` call must have only one argument (the message) or str(error) will be a repr of args
super().__init__(message)
self.message = message
for k, v in kwargs.items():
setattr(self, k, v)
class RemovedInQuilt4Warning(FutureWarning):
pass
class URLParseError(ValueError):
pass
class PhysicalKey:
__slots__ = ('bucket', 'path', 'version_id')
def __init__(self, bucket, path, version_id):
"""
For internal use only; call from_path or from_url instead.
"""
assert bucket is None or isinstance(bucket, str)
assert isinstance(path, str)
assert version_id is None or isinstance(version_id, str)
if bucket is None:
assert path is not None, "Local keys must have a path"
assert version_id is None, "Local keys cannot have a version ID"
if os.name == 'nt':
assert '\\' not in path, "Paths must use / as a separator"
else:
assert not path.startswith('/'), "S3 paths must not start with '/'"
self.bucket = bucket
self.path = path
self.version_id = version_id
@classmethod
def from_url(cls, url):
parsed = urlparse(url)
if parsed.scheme == 's3':
if not parsed.netloc:
raise URLParseError("Missing bucket")
bucket = parsed.netloc
assert not parsed.path or parsed.path.startswith('/')
path = unquote(parsed.path)[1:]
# Parse the version ID the way the Java SDK does:
# https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-s3/src/main/java/com/amazonaws/services/s3/AmazonS3URI.java#L192
query = parse_qs(parsed.query)
version_id = query.pop('versionId', [None])[0]
if query:
raise URLParseError(f"Unexpected S3 query string: {parsed.query!r}")
return cls(bucket, path, version_id)
elif parsed.scheme == 'file':
if parsed.netloc not in ('', 'localhost'):
raise URLParseError("Unexpected hostname")
if not parsed.path:
raise URLParseError("Missing path")
if not parsed.path.startswith('/'):
raise URLParseError("Relative paths are not allowed")
if parsed.query:
raise URLParseError("Unexpected query")
path = url2pathname(parsed.path)
if parsed.path.endswith('/') and not path.endswith(os.path.sep):
# On Windows, url2pathname loses the trailing `/`.
path += os.path.sep
return cls.from_path(path)
else:
raise URLParseError(f"Unexpected scheme: {parsed.scheme!r}")
@classmethod
def from_path(cls, path):
path = os.fspath(path)
new_path = os.path.realpath(path)
# Use '/' as the path separator.
if os.path.sep != '/':
new_path = new_path.replace(os.path.sep, '/')
# Add back a trailing '/' if the original path has it.
if (path.endswith(os.path.sep) or
(os.path.altsep is not None and path.endswith(os.path.altsep))):
new_path += '/'
return cls(None, new_path, None)
def is_local(self):
return self.bucket is None
def join(self, rel_path):
if self.version_id is not None:
raise ValueError('Cannot append paths to URLs with a version ID')
if os.name == 'nt' and '\\' in rel_path:
raise ValueError("Paths must use / as a separator")
if self.path:
new_path = self.path.rstrip('/') + '/' + rel_path.lstrip('/')
else:
new_path = rel_path.lstrip('/')
return PhysicalKey(self.bucket, new_path, None)
def basename(self):
return self.path.rsplit('/', 1)[-1]
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.bucket == other.bucket and
self.path == other.path and
self.version_id == other.version_id
)
def __repr__(self):
return f'{self.__class__.__name__}({self.bucket!r}, {self.path!r}, {self.version_id!r})'
def __str__(self):
if self.bucket is None:
return urlunparse(('file', '', pathname2url(self.path.replace('/', os.path.sep)), None, None, None))
else:
if self.version_id is None:
params = {}
else:
params = {'versionId': self.version_id}
return urlunparse(('s3', self.bucket, quote(self.path), None, urlencode(params), None))
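# Illustrative sketch added for documentation; not part of the original module and
# never called. It shows the intended PhysicalKey entry points (from_url/from_path)
# rather than __init__. The bucket, key, and version ID below are made up.
def _physical_key_example():  # pragma: no cover
    pk = PhysicalKey.from_url('s3://example-bucket/some/key?versionId=abc123')
    assert pk.bucket == 'example-bucket'
    assert pk.path == 'some/key'
    assert pk.version_id == 'abc123'
    # Local keys come from from_path and have neither bucket nor version ID.
    local = PhysicalKey.from_path('.')
    assert local.is_local()
    # join() builds child keys; it refuses to append to a versioned key.
    return local.join('child/file.txt')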
def fix_url(url):
"""Convert non-URL paths to file:// URLs"""
# If it has a scheme, we assume it's a URL.
# On Windows, we ignore schemes that look like drive letters, e.g. C:/users/foo
if not url:
raise ValueError("Empty URL")
url = str(url)
parsed = urlparse(url)
if parsed.scheme and not os.path.splitdrive(url)[0]:
return url
# `expanduser()` expands any leading "~" or "~user" path components, as a user convenience
# `resolve()` _tries_ to make the URI absolute - but doesn't guarantee anything.
# In particular, on Windows, non-existent files won't be resolved.
# `absolute()` makes the URI absolute, though it can still contain '..'
fixed_url = pathlib.Path(url).expanduser().resolve().absolute().as_uri()
# pathlib likes to remove trailing slashes, so add it back if needed.
if url[-1:] in (os.sep, os.altsep) and not fixed_url.endswith('/'):
fixed_url += '/'
return fixed_url
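# Illustrative sketch added for documentation; not part of the original module and
# never called. Real URLs pass through fix_url() unchanged, while bare filesystem
# paths become file:// URIs. The paths below are made up.
def _fix_url_example():  # pragma: no cover
    assert fix_url('s3://example-bucket/key') == 's3://example-bucket/key'
    local_uri = fix_url('~/data/')  # expanded, resolved, and made absolute
    assert local_uri.startswith('file://')
    assert local_uri.endswith('/')  # the trailing slash is preserved
    return local_uri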
def extract_file_extension(file_path_or_url):
"""
Extract the file extension if it exists.
Args:
file_path_or_url: The path to the file. Type can can be anything that pathlib.Path understands.
Returns:
File extension without the period, i.e. ("txt" not ".txt"). None if the path does not have an extension.
"""
p = pathlib.Path(file_path_or_url)
if len(p.suffix) > 0:
return p.suffix[1:]
else:
return None
def read_yaml(yaml_stream):
try:
if isinstance(yaml_stream, pathlib.Path):
with yaml_stream.open(mode='r') as stream:
return yaml.safe_load(stream)
return yaml.safe_load(yaml_stream)
except yaml.YAMLError as error:
raise QuiltException(str(error), original_error=error)
def write_yaml(data, yaml_path, keep_backup=False):
"""Write `data` to `yaml_path`
:param data: Any yaml-serializable data
:param yaml_path: Destination. Can be a string or pathlib path.
:param keep_backup: If set, a timestamped backup will be kept in the same dir.
"""
path = pathlib.Path(yaml_path)
now = str(datetime.datetime.now())
# XXX unicode colon for Windows/NTFS -- looks prettier, but could be confusing. We could use '_' instead.
if os.name == 'nt':
now = now.replace(':', '\ua789')
backup_path = path.with_name(path.name + '.backup.' + now)
try:
if path.exists():
path.rename(backup_path)
if not path.parent.exists():
path.parent.mkdir(parents=True)
with path.open('w') as config_file:
yaml.dump(data, config_file)
except Exception: # intentionally wide catch -- reraised immediately.
if backup_path.exists():
if path.exists():
path.unlink()
backup_path.rename(path)
raise
if backup_path.exists() and not keep_backup:
backup_path.unlink()
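# Illustrative sketch added for documentation; not part of the original module and
# never called. It round-trips a small mapping through write_yaml()/read_yaml()
# using a throwaway temporary file.
def _yaml_round_trip_example():  # pragma: no cover
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        target = pathlib.Path(tmp) / 'example.yml'
        write_yaml({'navigator_url': 'https://example.com'}, target)
        assert read_yaml(target) == {'navigator_url': 'https://example.com'}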
def validate_url(url):
"""A URL must have scheme and host, at minimum."""
parsed_url = urlparse(url)
    # require scheme and host at minimum, like config_path('http://foo')
if not all((parsed_url.scheme, parsed_url.netloc)):
raise QuiltException("Invalid URL -- Requires at least scheme and host: {}".format(url))
try:
parsed_url.port
except ValueError:
raise QuiltException("Invalid URL -- Port must be a number: {}".format(url))
# Although displaying the config may seem not to warrant a class, it's pretty important
# for good UX. A lot of points were considered in making this -- retaining order,
# user's usage in an interpreted environment like Jupyter, and keeping the displayed
# information concise. Given the limitations of the other options, making a class with
# custom repr panned out to be the best (and shortest) option.
class QuiltConfig(OrderedDict):
def __init__(self, filepath, *args, **kwargs):
self.filepath = pathlib.Path(filepath)
super().__init__(*args, **kwargs)
def __setitem__(self, key, value):
# Per chat in #engineering 4-5-19, strip navigator_url of trailing slash.
# Ideally, we should do that kind of thing in one cohesive spot.
# This is a good spot.
if key == 'navigator_url' and value:
if not isinstance(value, str):
raise ValueError("Expected a string for config key {!r}, but got {!r}"
.format(key, value))
value = value.strip().rstrip('/')
# Similar activity, moved from api.config() to here.
if isinstance(key, str) and key.endswith('_url'):
if value:
validate_url(value)
super().__setitem__(key, value)
# TODO: Make an _html_repr_ for nicer Notebook display
def __repr__(self):
return "<{} at {!r} {}>".format(type(self).__name__, str(self.filepath), json.dumps(self, indent=4))
def parse_sub_package_name(name):
"""
Extract package name and optional sub-package path as tuple.
"""
m = re.match(PACKAGE_NAME_FORMAT, name)
if m:
return tuple(m.groups())
def validate_package_name(name):
""" Verify that a package name is two alphanumeric strings separated by a slash."""
parts = parse_sub_package_name(name)
if not parts or parts[1]:
raise QuiltException(f"Invalid package name: {name}.")
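# Illustrative sketch added for documentation; not part of the original module and
# never called. It shows which name formats the two helpers above accept; the
# package names are made up.
def _package_name_example():  # pragma: no cover
    assert parse_sub_package_name('user/pkg') == ('user/pkg', None)
    assert parse_sub_package_name('user/pkg/sub/path') == ('user/pkg', 'sub/path')
    validate_package_name('user/pkg')  # accepted: exactly two slash-separated parts
    try:
        validate_package_name('user/pkg/sub/path')
    except QuiltException:
        pass  # sub-package paths are rejected by validate_package_name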
def configure_from_url(catalog_url):
""" Read configuration settings from a Quilt catalog """
config_template = read_yaml(CONFIG_TEMPLATE)
# Clean up and validate catalog url
catalog_url = catalog_url.rstrip('/')
validate_url(catalog_url)
# Get the new config
config_url = catalog_url + '/config.json'
response = requests.get(config_url)
if not response.ok:
message = "An HTTP Error ({code}) occurred: {reason}"
raise QuiltException(
message.format(code=response.status_code, reason=response.reason),
response=response
)
# QuiltConfig may perform some validation and value scrubbing.
new_config = QuiltConfig('', response.json())
# 'navigator_url' needs to be renamed, the term is outdated.
if not new_config.get('navigator_url'):
new_config['navigator_url'] = catalog_url
# Use our template + their configured values, keeping our comments.
for key, value in new_config.items():
if key not in config_template:
continue
config_template[key] = value
write_yaml(config_template, CONFIG_PATH, keep_backup=True)
return config_template
def config_exists():
"""
Returns True if a config file (config.yml) is installed.
"""
return CONFIG_PATH.exists()
def user_is_configured_to_custom_stack():
"""Look at the users stack to see if they have configured to their own stack."""
configured_nav_url = get_from_config("navigator_url")
return configured_nav_url is not None
def configure_from_default():
"""
Try to configure to the default (public) Quilt stack.
If reading from the public stack fails, warn the user
and save an empty template.
"""
try:
local_config = configure_from_url(OPEN_DATA_URL)
except requests.exceptions.ConnectionError:
msg = f"Failed to connect to {OPEN_DATA_URL}."
        msg += " Some features will not work without a"
        msg += " valid configuration."
warnings.warn(msg)
config_template = read_yaml(CONFIG_TEMPLATE)
write_yaml(config_template, CONFIG_PATH, keep_backup=True)
local_config = config_template
return local_config
def load_config():
"""
Read the local config using defaults from CONFIG_TEMPLATE.
"""
local_config = read_yaml(CONFIG_TEMPLATE)
if CONFIG_PATH.exists():
local_config.update(read_yaml(CONFIG_PATH))
return local_config
def get_from_config(key):
return load_config().get(key)
def get_install_location():
loc = get_from_config('default_install_location')
if loc is None:
loc = get_from_config('default_local_registry').rstrip('/')
return loc
def set_config_value(key, value):
# Use local configuration (or defaults)
local_config = load_config()
local_config[key] = value
write_yaml(local_config, CONFIG_PATH)
def quiltignore_filter(paths, ignore, url_scheme):
"""Given a list of paths, filter out the paths which are captured by the
given ignore rules.
Args:
paths (list): a list or iterable of paths
ignore (path): a path to the file defining ignore rules, in Unix shell
style wildcard format
url_scheme (str): the URL scheme, only the "file" scheme is currently
supported
"""
ignore_rules = ignore.read_text('utf-8').split("\n")
ignore_rules = ['*/' + rule for rule in ignore_rules if rule]
if url_scheme == 'file':
from fnmatch import fnmatch
files, dirs = set(), set()
for path in paths:
if path.is_file():
files.add(path)
else:
dirs.add(path)
filtered_dirs = dirs.copy()
for ignore_rule in ignore_rules:
for pkg_dir in filtered_dirs.copy():
# copy git behavior --- git matches paths and directories equivalently.
# e.g. both foo and foo/ will match the ignore rule "foo"
# but only foo/ will match the ignore rule "foo/"
if fnmatch(pkg_dir.as_posix() + "/", ignore_rule) or fnmatch(pkg_dir.as_posix(), ignore_rule):
files = set(n for n in files if pkg_dir not in n.parents)
dirs = dirs - {pkg_dir}
files = set(n for n in files if not fnmatch(n, ignore_rule))
return files.union(dirs)
else:
raise NotImplementedError
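# Illustrative sketch added for documentation; not part of the original module and
# never called. It filters a directory listing with a .quiltignore-style rules file;
# all file names are made up.
def _quiltignore_example():  # pragma: no cover
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        root = pathlib.Path(tmp)
        (root / 'keep.txt').touch()
        (root / 'skip.log').touch()
        ignore = root / '.quiltignore'
        ignore.write_text('*.log\n')
        kept = quiltignore_filter(root.iterdir(), ignore, 'file')
        assert root / 'keep.txt' in kept
        assert root / 'skip.log' not in kept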
def validate_key(key):
"""
Verify that a file path or S3 path does not contain any '.' or '..' separators or files.
"""
if key is None or key == '':
raise QuiltException(
f"Invalid key {key!r}. A package entry key cannot be empty."
)
for part in key.split('/'):
if part in ('', '.', '..'):
raise QuiltException(
f"Invalid key {key!r}. "
f"A package entry key cannot contain a file or folder named '.' or '..' in its path."
)
def catalog_s3_url(catalog_url, s3_url):
"""
Generate a URL to the Quilt catalog page for an object in S3
"""
if s3_url is None:
return catalog_url
pk = PhysicalKey.from_url(s3_url)
if pk.is_local():
raise QuiltException("Not an S3 URL")
url = f"{catalog_url}/b/{quote(pk.bucket)}"
if pk.path:
url += f"/tree/{quote(pk.path)}"
# Ignore version_id if path is empty (e.g., s3://<bucket>)
if pk.version_id is not None:
params = {'version': pk.version_id}
url += f"?{urlencode(params)}"
return url
def catalog_package_url(catalog_url, bucket, package_name, package_timestamp="latest", tree=True):
"""
Generate a URL to the Quilt catalog page of a package. By default will go to the latest version of the package,
but the user can pass in the appropriate timestamp to go to a different version.
Disabling tree by passing `tree=False` will generate a package URL without tree path.
Note: There is currently no good way to generate the URL given a specific tophash
"""
assert bucket is not None, "The bucket parameter must not be None"
assert package_name is not None, "The package_name parameter must not be None"
validate_package_name(package_name)
package_url = f"{catalog_url}/b/{bucket}/packages/{package_name}"
if tree:
package_url = package_url + f"/tree/{package_timestamp}"
return package_url
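# Illustrative sketch added for documentation; not part of the original module and
# never called. It shows the catalog URLs produced by the two helpers above; the
# catalog host, bucket, and package name are made up.
def _catalog_url_example():  # pragma: no cover
    url = catalog_package_url("https://example.com", "my-bucket", "user/pkg")
    assert url == "https://example.com/b/my-bucket/packages/user/pkg/tree/latest"
    s3_page = catalog_s3_url("https://example.com", "s3://my-bucket/some/key")
    assert s3_page == "https://example.com/b/my-bucket/tree/some/key"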
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for concept in orm.DataConcept.objects.all():
            # Use the truthiness of strings to set the viewable flag. If the
            # formatter name is not null or empty, then viewable will be set
            # to true; otherwise, it will be false.
            concept.viewable = bool(concept.formatter_name)
            concept.save()
def backwards(self, orm):
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'avocado.datacategory': {
'Meta': {'ordering': "('-parent__id', 'order', 'name')", 'object_name': 'DataCategory'},
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'_order'", 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['avocado.DataCategory']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'avocado.dataconcept': {
'Meta': {'ordering': "('order',)", 'object_name': 'DataConcept'},
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['avocado.DataCategory']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fields': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'concepts'", 'symmetrical': 'False', 'through': "orm['avocado.DataConceptField']", 'to': "orm['avocado.DataField']"}),
'formatter_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'concepts+'", 'null': 'True', 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'_order'", 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'queryable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'concepts+'", 'blank': 'True', 'to': "orm['sites.Site']"}),
'sortable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'viewable': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'avocado.dataconceptfield': {
'Meta': {'ordering': "('order',)", 'object_name': 'DataConceptField'},
'concept': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'concept_fields'", 'to': "orm['avocado.DataConcept']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'field': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'concept_fields'", 'to': "orm['avocado.DataField']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_plural': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'_order'", 'blank': 'True'})
},
'avocado.datacontext': {
'Meta': {'object_name': 'DataContext'},
'accessed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 29, 0, 0)'}),
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'_count'"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'json': ('jsonfield.fields.JSONField', [], {'default': '{}', 'null': 'True', 'blank': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datacontext+'", 'null': 'True', 'to': "orm['auth.User']"})
},
'avocado.datafield': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_name', 'model_name', 'field_name'),)", 'object_name': 'DataField'},
'app_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['avocado.DataCategory']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data_version': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'enumerable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'fields+'", 'null': 'True', 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'_order'", 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'fields+'", 'blank': 'True', 'to': "orm['sites.Site']"}),
'translator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'unit_plural': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
},
'avocado.dataquery': {
'Meta': {'object_name': 'DataQuery'},
'accessed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 29, 0, 0)'}),
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'context_json': ('jsonfield.fields.JSONField', [], {'default': '{}', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'shared_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shareddataquery+'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'dataquery+'", 'null': 'True', 'to': "orm['auth.User']"}),
'view_json': ('jsonfield.fields.JSONField', [], {'default': '{}', 'null': 'True', 'blank': 'True'})
},
'avocado.dataview': {
'Meta': {'object_name': 'DataView'},
'accessed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 29, 0, 0)'}),
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'_count'"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'json': ('jsonfield.fields.JSONField', [], {'default': '{}', 'null': 'True', 'blank': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'dataview+'", 'null': 'True', 'to': "orm['auth.User']"})
},
'avocado.log': {
'Meta': {'object_name': 'Log'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['avocado']
symmetrical = True
|
|
# -*- coding: utf-8 -*-
"""
tests.test_jwt
~~~~~~~~~~~~~~
Flask-JWT tests
"""
import time
from datetime import datetime, timedelta
import jwt as _jwt
from flask import Flask, json, jsonify
import flask_jwt
def post_json(client, url, data):
data = json.dumps(data)
resp = client.post(url, headers={'Content-Type': 'application/json'}, data=data)
return resp, json.loads(resp.data)
def assert_error_response(r, code, msg, desc):
assert r.status_code == code
jdata = json.loads(r.data)
assert jdata['status_code'] == code
assert jdata['error'] == msg
assert jdata['description'] == desc
def test_initialize():
app = Flask(__name__)
app.config['SECRET_KEY'] = 'super-secret'
jwt = flask_jwt.JWT(app, lambda: None, lambda: None)
assert isinstance(jwt, flask_jwt.JWT)
assert len(app.url_map._rules) == 2
def test_adds_auth_endpoint():
app = Flask(__name__)
app.config['SECRET_KEY'] = 'super-secret'
app.config['JWT_AUTH_URL_RULE'] = '/auth'
app.config['JWT_AUTH_ENDPOINT'] = 'jwt_auth'
flask_jwt.JWT(app, lambda: None, lambda: None)
rules = [str(r) for r in app.url_map._rules]
assert '/auth' in rules
def test_auth_endpoint_with_valid_request(client, user):
resp, jdata = post_json(
client, '/auth', {'username': user.username, 'password': user.password})
assert resp.status_code == 200
assert 'access_token' in jdata
def test_custom_auth_endpoint_with_valid_request(app, client, user):
app.config['JWT_AUTH_USERNAME_KEY'] = 'email'
app.config['JWT_AUTH_PASSWORD_KEY'] = 'pass'
resp, jdata = post_json(
client,
'/auth',
{'email': user.username, 'pass': user.password}
)
assert resp.status_code == 200
assert 'access_token' in jdata
def test_auth_endpoint_with_invalid_request(client, user):
# Invalid request (no password)
resp, jdata = post_json(client, '/auth', {'username': user.username})
assert resp.status_code == 401
assert 'error' in jdata
assert jdata['error'] == 'Bad Request'
assert 'description' in jdata
assert jdata['description'] == 'Invalid credentials'
assert 'status_code' in jdata
assert jdata['status_code'] == 401
def test_auth_endpoint_with_invalid_credentials(client):
resp, jdata = post_json(
client, '/auth', {'username': 'bogus', 'password': 'bogus'})
assert resp.status_code == 401
assert 'error' in jdata
assert jdata['error'] == 'Bad Request'
assert 'description' in jdata
assert jdata['description'] == 'Invalid credentials'
assert 'status_code' in jdata
assert jdata['status_code'] == 401
def test_jwt_required_decorator_with_valid_token(app, client, user):
resp, jdata = post_json(
client, '/auth', {'username': user.username, 'password': user.password})
token = jdata['access_token']
resp = client.get('/protected', headers={'Authorization': 'JWT ' + token})
assert resp.status_code == 200
assert resp.data == b'success'
def test_jwt_required_decorator_with_valid_request_current_identity(app, client, user):
with client as c:
resp, jdata = post_json(
client, '/auth', {'username': user.username, 'password': user.password})
token = jdata['access_token']
c.get(
'/protected',
headers={'authorization': 'JWT ' + token})
assert flask_jwt.current_identity
def test_jwt_required_decorator_with_invalid_request_current_identity(app, client):
with client as c:
c.get('/protected', headers={'authorization': 'JWT bogus'})
assert flask_jwt.current_identity._get_current_object() is None
def test_jwt_required_decorator_with_invalid_authorization_headers(app, client):
# Missing authorization header
r = client.get('/protected')
assert_error_response(
r, 401, 'Authorization Required', 'Request does not contain an access token')
assert r.headers['WWW-Authenticate'] == 'JWT realm="Login Required"'
# Not a JWT auth header prefix
r = client.get('/protected', headers={'authorization': 'Bogus xxx'})
assert_error_response(
r, 401, 'Invalid JWT header', 'Unsupported authorization type')
# Missing token
r = client.get('/protected', headers={'authorization': 'JWT'})
assert_error_response(
r, 401, 'Invalid JWT header', 'Token missing')
# Token with spaces
r = client.get('/protected', headers={'authorization': 'JWT xxx xxx'})
assert_error_response(
r, 401, 'Invalid JWT header', 'Token contains spaces')
def test_jwt_required_decorator_with_invalid_jwt_tokens(client, user, app):
app.config['JWT_LEEWAY'] = timedelta(seconds=0)
app.config['JWT_EXPIRATION_DELTA'] = timedelta(milliseconds=200)
resp, jdata = post_json(
client, '/auth', {'username': user.username, 'password': user.password})
token = jdata['access_token']
# Undecipherable
r = client.get('/protected', headers={'authorization': 'JWT %sX' % token})
assert_error_response(r, 401, 'Invalid token', 'Signature verification failed')
# Expired
time.sleep(1.5)
r = client.get('/protected', headers={'authorization': 'JWT ' + token})
assert_error_response(r, 401, 'Invalid token', 'Signature has expired')
def test_jwt_required_decorator_with_missing_user(client, jwt, user):
resp, jdata = post_json(
client, '/auth', {'username': user.username, 'password': user.password})
token = jdata['access_token']
@jwt.identity_handler
def load_user(payload):
return None
r = client.get('/protected', headers={'authorization': 'JWT %s' % token})
assert_error_response(r, 401, 'Invalid JWT', 'User does not exist')
def test_custom_error_handler(client, jwt):
@jwt.jwt_error_handler
def error_handler(e):
return "custom"
r = client.get('/protected')
assert r.data == b'custom'
def test_custom_response_handler(client, jwt, user):
@jwt.auth_response_handler
def resp_handler(access_token, identity):
return jsonify({'mytoken': access_token.decode('utf-8')})
resp, jdata = post_json(
client, '/auth', {'username': user.username, 'password': user.password})
assert 'mytoken' in jdata
def test_custom_encode_handler(client, jwt, user, app):
secret = app.config['JWT_SECRET_KEY']
alg = 'HS256'
@jwt.jwt_encode_handler
def encode_data(identity):
return _jwt.encode({'hello': 'world'}, secret, algorithm=alg)
resp, jdata = post_json(
client, '/auth', {'username': user.username, 'password': user.password})
decoded = _jwt.decode(jdata['access_token'], secret, algorithms=[alg])
assert decoded == {'hello': 'world'}
def test_custom_decode_handler(client, user, jwt):
# The following function should receive the decode return value
@jwt.identity_handler
def load_user(payload):
assert payload == {'user_id': user.id}
@jwt.jwt_decode_handler
def decode_data(token):
return {'user_id': user.id}
with client as c:
resp, jdata = post_json(
client, '/auth', {'username': user.username, 'password': user.password})
token = jdata['access_token']
c.get('/protected', headers={'authorization': 'JWT ' + token})
def test_custom_payload_handler(client, jwt, user):
@jwt.identity_handler
def load_user(payload):
if payload['id'] == user.id:
return user
@jwt.jwt_payload_handler
def make_payload(u):
iat = datetime.utcnow()
exp = iat + timedelta(seconds=60)
nbf = iat + timedelta(seconds=0)
return {'iat': iat, 'exp': exp, 'nbf': nbf, 'id': u.id}
with client as c:
resp, jdata = post_json(
client, '/auth', {'username': user.username, 'password': user.password})
token = jdata['access_token']
c.get('/protected', headers={'authorization': 'JWT ' + token})
assert flask_jwt.current_identity == user
def test_custom_auth_header(app, client, user):
app.config['JWT_AUTH_HEADER_PREFIX'] = 'Bearer'
with client as c:
resp, jdata = post_json(
client, '/auth', {'username': user.username, 'password': user.password})
token = jdata['access_token']
# Custom Bearer auth header prefix
resp = c.get('/protected', headers={'authorization': 'Bearer ' + token})
assert resp.status_code == 200
assert resp.data == b'success'
# Not custom Bearer auth header prefix
resp = c.get('/protected', headers={'authorization': 'JWT ' + token})
assert_error_response(resp, 401, 'Invalid JWT header', 'Unsupported authorization type')
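# A minimal sketch (not part of the test suite) of the kind of application the
# fixtures above are assumed to provide: an authentication callback, an
# identity callback, and a protected endpoint. The ``users`` / ``users_by_id``
# mappings and the user objects are hypothetical stand-ins for the fixtures.
#
#     from flask import Flask
#     from flask_jwt import JWT, jwt_required
#
#     app = Flask(__name__)
#     app.config['SECRET_KEY'] = 'super-secret'
#
#     def authenticate(username, password):
#         user = users.get(username)
#         if user is not None and user.password == password:
#             return user
#
#     def identity(payload):
#         return users_by_id.get(payload['identity'])
#
#     jwt = JWT(app, authenticate, identity)
#
#     @app.route('/protected')
#     @jwt_required()
#     def protected():
#         return 'success'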
|
|
import datetime
from unittest import mock
from django.contrib.postgres.indexes import OpClass
from django.db import (
IntegrityError, NotSupportedError, connection, transaction,
)
from django.db.models import (
CheckConstraint, Deferrable, F, Func, IntegerField, Q, UniqueConstraint,
)
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Cast, Left, Lower
from django.test import modify_settings, skipUnlessDBFeature
from django.utils import timezone
from . import PostgreSQLTestCase
from .models import (
HotelReservation, IntegerArrayModel, RangesModel, Room, Scene,
)
try:
from psycopg2.extras import DateRange, NumericRange
from django.contrib.postgres.constraints import ExclusionConstraint
from django.contrib.postgres.fields import (
DateTimeRangeField, RangeBoundary, RangeOperators,
)
except ImportError:
pass
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'})
class SchemaTests(PostgreSQLTestCase):
get_opclass_query = '''
SELECT opcname, c.relname FROM pg_opclass AS oc
JOIN pg_index as i on oc.oid = ANY(i.indclass)
JOIN pg_class as c on c.oid = i.indexrelid
WHERE c.relname = %s
'''
def get_constraints(self, table):
"""Get the constraints on the table using a new cursor."""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_check_constraint_range_value(self):
constraint_name = 'ints_between'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = CheckConstraint(
check=Q(ints__contained_by=NumericRange(10, 30)),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(20, 50))
RangesModel.objects.create(ints=(10, 30))
def test_check_constraint_daterange_contains(self):
constraint_name = 'dates_contains'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = CheckConstraint(
check=Q(dates__contains=F('dates_inner')),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
date_1 = datetime.date(2016, 1, 1)
date_2 = datetime.date(2016, 1, 4)
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(
dates=(date_1, date_2),
dates_inner=(date_1, date_2.replace(day=5)),
)
RangesModel.objects.create(
dates=(date_1, date_2),
dates_inner=(date_1, date_2),
)
def test_check_constraint_datetimerange_contains(self):
constraint_name = 'timestamps_contains'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = CheckConstraint(
check=Q(timestamps__contains=F('timestamps_inner')),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
datetime_1 = datetime.datetime(2016, 1, 1)
datetime_2 = datetime.datetime(2016, 1, 2, 12)
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(
timestamps=(datetime_1, datetime_2),
timestamps_inner=(datetime_1, datetime_2.replace(hour=13)),
)
RangesModel.objects.create(
timestamps=(datetime_1, datetime_2),
timestamps_inner=(datetime_1, datetime_2),
)
def test_opclass(self):
constraint = UniqueConstraint(
name='test_opclass',
fields=['scene'],
opclasses=['varchar_pattern_ops'],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
self.assertIn(constraint.name, self.get_constraints(Scene._meta.db_table))
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertEqual(
cursor.fetchall(),
[('varchar_pattern_ops', constraint.name)],
)
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Scene, constraint)
self.assertNotIn(constraint.name, self.get_constraints(Scene._meta.db_table))
def test_opclass_multiple_columns(self):
constraint = UniqueConstraint(
name='test_opclass_multiple',
fields=['scene', 'setting'],
opclasses=['varchar_pattern_ops', 'text_pattern_ops'],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
expected_opclasses = (
('varchar_pattern_ops', constraint.name),
('text_pattern_ops', constraint.name),
)
self.assertCountEqual(cursor.fetchall(), expected_opclasses)
def test_opclass_partial(self):
constraint = UniqueConstraint(
name='test_opclass_partial',
fields=['scene'],
opclasses=['varchar_pattern_ops'],
condition=Q(setting__contains="Sir Bedemir's Castle"),
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertCountEqual(
cursor.fetchall(),
[('varchar_pattern_ops', constraint.name)],
)
@skipUnlessDBFeature('supports_covering_indexes')
def test_opclass_include(self):
constraint = UniqueConstraint(
name='test_opclass_include',
fields=['scene'],
opclasses=['varchar_pattern_ops'],
include=['setting'],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertCountEqual(
cursor.fetchall(),
[('varchar_pattern_ops', constraint.name)],
)
@skipUnlessDBFeature('supports_expression_indexes')
def test_opclass_func(self):
constraint = UniqueConstraint(
OpClass(Lower('scene'), name='text_pattern_ops'),
name='test_opclass_func',
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
constraints = self.get_constraints(Scene._meta.db_table)
self.assertIs(constraints[constraint.name]['unique'], True)
self.assertIn(constraint.name, constraints)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertEqual(
cursor.fetchall(),
[('text_pattern_ops', constraint.name)],
)
Scene.objects.create(scene='Scene 10', setting='The dark forest of Ewing')
with self.assertRaises(IntegrityError), transaction.atomic():
Scene.objects.create(scene='ScEnE 10', setting="Sir Bedemir's Castle")
Scene.objects.create(scene='Scene 5', setting="Sir Bedemir's Castle")
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Scene, constraint)
self.assertNotIn(constraint.name, self.get_constraints(Scene._meta.db_table))
Scene.objects.create(scene='ScEnE 10', setting="Sir Bedemir's Castle")
class ExclusionConstraintTests(PostgreSQLTestCase):
def get_constraints(self, table):
"""Get the constraints on the table using a new cursor."""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_invalid_condition(self):
msg = 'ExclusionConstraint.condition must be a Q instance.'
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type='GIST',
name='exclude_invalid_condition',
expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
condition=F('invalid'),
)
def test_invalid_index_type(self):
msg = 'Exclusion constraints only support GiST or SP-GiST indexes.'
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type='gin',
name='exclude_invalid_index_type',
expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
)
def test_invalid_expressions(self):
msg = 'The expressions must be a list of 2-tuples.'
for expressions in (['foo'], [('foo')], [('foo_1', 'foo_2', 'foo_3')]):
with self.subTest(expressions), self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type='GIST',
name='exclude_invalid_expressions',
expressions=expressions,
)
def test_empty_expressions(self):
msg = 'At least one expression is required to define an exclusion constraint.'
for empty_expressions in (None, []):
with self.subTest(empty_expressions), self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type='GIST',
name='exclude_empty_expressions',
expressions=empty_expressions,
)
def test_invalid_deferrable(self):
msg = 'ExclusionConstraint.deferrable must be a Deferrable instance.'
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name='exclude_invalid_deferrable',
expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
deferrable='invalid',
)
def test_deferrable_with_condition(self):
msg = 'ExclusionConstraint with conditions cannot be deferred.'
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name='exclude_invalid_condition',
expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
condition=Q(cancelled=False),
deferrable=Deferrable.DEFERRED,
)
def test_invalid_include_type(self):
msg = 'ExclusionConstraint.include must be a list or tuple.'
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name='exclude_invalid_include',
expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
include='invalid',
)
def test_invalid_include_index_type(self):
msg = 'Covering exclusion constraints only support GiST indexes.'
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name='exclude_invalid_index_type',
expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
include=['cancelled'],
index_type='spgist',
)
def test_invalid_opclasses_type(self):
msg = 'ExclusionConstraint.opclasses must be a list or tuple.'
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name='exclude_invalid_opclasses',
expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
opclasses='invalid',
)
def test_opclasses_and_expressions_same_length(self):
msg = (
'ExclusionConstraint.expressions and '
'ExclusionConstraint.opclasses must have the same number of '
'elements.'
)
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name='exclude_invalid_expressions_opclasses_length',
expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
opclasses=['foo', 'bar'],
)
def test_repr(self):
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
(F('datespan'), RangeOperators.OVERLAPS),
(F('room'), RangeOperators.EQUAL),
],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '&&'), (F(room), '=')] name='exclude_overlapping'>",
)
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[(F('datespan'), RangeOperators.ADJACENT_TO)],
condition=Q(cancelled=False),
index_type='SPGiST',
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='SPGiST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"condition=(AND: ('cancelled', False))>",
)
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[(F('datespan'), RangeOperators.ADJACENT_TO)],
deferrable=Deferrable.IMMEDIATE,
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"deferrable=Deferrable.IMMEDIATE>",
)
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[(F('datespan'), RangeOperators.ADJACENT_TO)],
include=['cancelled', 'room'],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"include=('cancelled', 'room')>",
)
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[(F('datespan'), RangeOperators.ADJACENT_TO)],
opclasses=['range_ops'],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"opclasses=['range_ops']>",
)
def test_eq(self):
constraint_1 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
(F('datespan'), RangeOperators.OVERLAPS),
(F('room'), RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
)
constraint_2 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
('datespan', RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL),
],
)
constraint_3 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[('datespan', RangeOperators.OVERLAPS)],
condition=Q(cancelled=False),
)
constraint_4 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
('datespan', RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL),
],
deferrable=Deferrable.DEFERRED,
)
constraint_5 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
('datespan', RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL),
],
deferrable=Deferrable.IMMEDIATE,
)
constraint_6 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
('datespan', RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL),
],
deferrable=Deferrable.IMMEDIATE,
include=['cancelled'],
)
constraint_7 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
('datespan', RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL),
],
include=['cancelled'],
)
constraint_8 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
('datespan', RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL),
],
include=['cancelled'],
opclasses=['range_ops', 'range_ops']
)
constraint_9 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
('datespan', RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL),
],
opclasses=['range_ops', 'range_ops']
)
self.assertEqual(constraint_1, constraint_1)
self.assertEqual(constraint_1, mock.ANY)
self.assertNotEqual(constraint_1, constraint_2)
self.assertNotEqual(constraint_1, constraint_3)
self.assertNotEqual(constraint_1, constraint_4)
self.assertNotEqual(constraint_2, constraint_3)
self.assertNotEqual(constraint_2, constraint_4)
self.assertNotEqual(constraint_2, constraint_7)
self.assertNotEqual(constraint_2, constraint_9)
self.assertNotEqual(constraint_4, constraint_5)
self.assertNotEqual(constraint_5, constraint_6)
self.assertNotEqual(constraint_7, constraint_8)
self.assertNotEqual(constraint_1, object())
def test_deconstruct(self):
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'name': 'exclude_overlapping',
'expressions': [('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
})
def test_deconstruct_index_type(self):
constraint = ExclusionConstraint(
name='exclude_overlapping',
index_type='SPGIST',
expressions=[('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'name': 'exclude_overlapping',
'index_type': 'SPGIST',
'expressions': [('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
})
def test_deconstruct_condition(self):
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
condition=Q(cancelled=False),
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'name': 'exclude_overlapping',
'expressions': [('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
'condition': Q(cancelled=False),
})
def test_deconstruct_deferrable(self):
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[('datespan', RangeOperators.OVERLAPS)],
deferrable=Deferrable.DEFERRED,
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'name': 'exclude_overlapping',
'expressions': [('datespan', RangeOperators.OVERLAPS)],
'deferrable': Deferrable.DEFERRED,
})
def test_deconstruct_include(self):
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[('datespan', RangeOperators.OVERLAPS)],
include=['cancelled', 'room'],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'name': 'exclude_overlapping',
'expressions': [('datespan', RangeOperators.OVERLAPS)],
'include': ('cancelled', 'room'),
})
def test_deconstruct_opclasses(self):
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[('datespan', RangeOperators.OVERLAPS)],
opclasses=['range_ops'],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'name': 'exclude_overlapping',
'expressions': [('datespan', RangeOperators.OVERLAPS)],
'opclasses': ['range_ops'],
})
def _test_range_overlaps(self, constraint):
# Create exclusion constraint.
self.assertNotIn(constraint.name, self.get_constraints(HotelReservation._meta.db_table))
with connection.schema_editor() as editor:
editor.add_constraint(HotelReservation, constraint)
self.assertIn(constraint.name, self.get_constraints(HotelReservation._meta.db_table))
# Add initial reservations.
room101 = Room.objects.create(number=101)
room102 = Room.objects.create(number=102)
datetimes = [
timezone.datetime(2018, 6, 20),
timezone.datetime(2018, 6, 24),
timezone.datetime(2018, 6, 26),
timezone.datetime(2018, 6, 28),
timezone.datetime(2018, 6, 29),
]
HotelReservation.objects.create(
datespan=DateRange(datetimes[0].date(), datetimes[1].date()),
start=datetimes[0],
end=datetimes[1],
room=room102,
)
HotelReservation.objects.create(
datespan=DateRange(datetimes[1].date(), datetimes[3].date()),
start=datetimes[1],
end=datetimes[3],
room=room102,
)
# Overlap dates.
with self.assertRaises(IntegrityError), transaction.atomic():
reservation = HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
)
reservation.save()
# Valid range.
HotelReservation.objects.bulk_create([
# Other room.
HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room101,
),
# Cancelled reservation.
HotelReservation(
datespan=(datetimes[1].date(), datetimes[1].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
cancelled=True,
),
# Other adjacent dates.
HotelReservation(
datespan=(datetimes[3].date(), datetimes[4].date()),
start=datetimes[3],
end=datetimes[4],
room=room102,
),
])
def test_range_overlaps_custom(self):
class TsTzRange(Func):
function = 'TSTZRANGE'
output_field = DateTimeRangeField()
constraint = ExclusionConstraint(
name='exclude_overlapping_reservations_custom',
expressions=[
(TsTzRange('start', 'end', RangeBoundary()), RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL)
],
condition=Q(cancelled=False),
opclasses=['range_ops', 'gist_int4_ops'],
)
self._test_range_overlaps(constraint)
def test_range_overlaps(self):
constraint = ExclusionConstraint(
name='exclude_overlapping_reservations',
expressions=[
(F('datespan'), RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL)
],
condition=Q(cancelled=False),
)
self._test_range_overlaps(constraint)
def test_range_adjacent(self):
constraint_name = 'ints_adjacent'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(RangesModel, constraint)
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_expressions_with_params(self):
constraint_name = 'scene_left_equal'
self.assertNotIn(constraint_name, self.get_constraints(Scene._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[(Left('scene', 4), RangeOperators.EQUAL)],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
self.assertIn(constraint_name, self.get_constraints(Scene._meta.db_table))
def test_expressions_with_key_transform(self):
constraint_name = 'exclude_overlapping_reservations_smoking'
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(F('datespan'), RangeOperators.OVERLAPS),
(KeyTextTransform('smoking', 'requirements'), RangeOperators.EQUAL),
],
)
with connection.schema_editor() as editor:
editor.add_constraint(HotelReservation, constraint)
self.assertIn(
constraint_name,
self.get_constraints(HotelReservation._meta.db_table),
)
def test_index_transform(self):
constraint_name = 'first_index_equal'
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('field__0', RangeOperators.EQUAL)],
)
with connection.schema_editor() as editor:
editor.add_constraint(IntegerArrayModel, constraint)
self.assertIn(
constraint_name,
self.get_constraints(IntegerArrayModel._meta.db_table),
)
def test_range_adjacent_initially_deferred(self):
constraint_name = 'ints_adjacent_deferred'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
adjacent_range = RangesModel.objects.create(ints=(10, 20))
# Constraint behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(constraint_name)
cursor.execute('SET CONSTRAINTS %s IMMEDIATE' % quoted_name)
# Remove adjacent range before the end of transaction.
adjacent_range.delete()
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
@skipUnlessDBFeature('supports_covering_gist_indexes')
def test_range_adjacent_include(self):
constraint_name = 'ints_adjacent_include'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
include=['decimals', 'ints'],
index_type='gist',
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
@skipUnlessDBFeature('supports_covering_gist_indexes')
def test_range_adjacent_include_condition(self):
constraint_name = 'ints_adjacent_include_condition'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
include=['decimals'],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature('supports_covering_gist_indexes')
def test_range_adjacent_include_deferrable(self):
constraint_name = 'ints_adjacent_include_deferrable'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
include=['decimals'],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_include_not_supported(self):
constraint_name = 'ints_adjacent_include_not_supported'
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
include=['id'],
)
msg = 'Covering exclusion constraints requires PostgreSQL 12+.'
with connection.schema_editor() as editor:
with mock.patch(
'django.db.backends.postgresql.features.DatabaseFeatures.supports_covering_gist_indexes',
False,
):
with self.assertRaisesMessage(NotSupportedError, msg):
editor.add_constraint(RangesModel, constraint)
def test_range_adjacent_opclasses(self):
constraint_name = 'ints_adjacent_opclasses'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
opclasses=['range_ops'],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(RangesModel, constraint)
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_adjacent_opclasses_condition(self):
constraint_name = 'ints_adjacent_opclasses_condition'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
opclasses=['range_ops'],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_adjacent_opclasses_deferrable(self):
constraint_name = 'ints_adjacent_opclasses_deferrable'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
opclasses=['range_ops'],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature('supports_covering_gist_indexes')
def test_range_adjacent_opclasses_include(self):
constraint_name = 'ints_adjacent_opclasses_include'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
opclasses=['range_ops'],
include=['decimals'],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_equal_cast(self):
constraint_name = 'exclusion_equal_room_cast'
self.assertNotIn(constraint_name, self.get_constraints(Room._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[(Cast('number', IntegerField()), RangeOperators.EQUAL)],
)
with connection.schema_editor() as editor:
editor.add_constraint(Room, constraint)
self.assertIn(constraint_name, self.get_constraints(Room._meta.db_table))
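# A minimal sketch (not part of the test suite) of how the same kind of
# constraint is usually declared on a model's Meta rather than added through a
# schema editor. The ``Reservation`` model below is hypothetical.
#
#     class Reservation(models.Model):
#         room = models.ForeignKey('Room', on_delete=models.CASCADE)
#         datespan = DateRangeField()
#         cancelled = models.BooleanField(default=False)
#
#         class Meta:
#             constraints = [
#                 ExclusionConstraint(
#                     name='exclude_overlapping_reservations',
#                     expressions=[
#                         ('datespan', RangeOperators.OVERLAPS),
#                         ('room', RangeOperators.EQUAL),
#                     ],
#                     condition=Q(cancelled=False),
#                 ),
#             ]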
|
|
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
from socialregistration.clients import Client
from django.conf import settings
import httplib2
import socket
import logging
import oauth2 as oauth
import urllib
import urlparse
logger = logging.getLogger(__name__)
TIMEOUT = getattr(settings, 'SOCIALREGISTRATION_SOCKET_TIMEOUT', 5)
class OAuthError(Exception):
"""
Exception thrown when we run into OAuth{1,2} errors. This error is
displayed to the end user in the default templates.
"""
pass
class OAuth(Client):
"""
Base class for OAuth1 services such as Twitter, LinkedIn and Tumblr.
"""
# The API key provided by the service
api_key = None
# The secret key provided by the service
secret_key = None
# The authorization / authentication URL we'll be asking the user for
# permissions at
auth_url = None
# The request token URL we'll be fetching the request token from
request_token_url = None
# The access token URL we'll be fetching the access token from
access_token_url = None
# Memoized request token
_request_token = None
# Memoized access token
_access_token = None
# Memoized dict of whole access token response
_access_token_dict = None
# Memoized user information
_user_info = None
def __init__(self, access_token=None, access_token_secret=None):
self.consumer = oauth.Consumer(self.api_key, self.secret_key)
if access_token and access_token_secret:
self._access_token = oauth.Token(access_token, access_token_secret)
def client(self, verifier=None):
"""
Return the correct client depending on which stage of the OAuth process
we're in.
"""
        # We're just starting out and have neither a request nor an access
        # token. Return the standard client
if not self._request_token and not self._access_token:
client = oauth.Client(self.consumer, timeout=TIMEOUT)
# We're one step in, we've got the request token and can add that to
# the client.
if self._request_token and not self._access_token:
if verifier is not None:
self._request_token.set_verifier(verifier)
client = oauth.Client(self.consumer, self._request_token, timeout=TIMEOUT)
# Two steps in, we've got an access token and can now properly sign
# our client requests with it.
if self._access_token:
client = oauth.Client(self.consumer, self._access_token, timeout=TIMEOUT)
# Ensure we use the proper list of CA certificates
ca_certs = getattr(settings, 'HTTPLIB2_CA_CERTS', None)
if ca_certs is not None:
client.ca_certs = ca_certs
return client
def _get_request_token(self):
"""
Fetch a request token from `self.request_token_url`.
"""
params = {
'oauth_callback': self.get_callback_url()
}
response, content = self.client().request(self.request_token_url,
"POST", body=urllib.urlencode(params))
content = smart_unicode(content)
if not response['status'] == '200':
raise OAuthError(_(
u"Invalid status code %s while obtaining request token from %s: %s") % (
response['status'], self.request_token_url, content))
token = dict(urlparse.parse_qsl(content))
return oauth.Token(token['oauth_token'], token['oauth_token_secret'])
def _get_access_token(self, verifier=None):
"""
Fetch an access token from `self.access_token_url`.
"""
response, content = self.client(verifier).request(
self.access_token_url, "POST")
content = smart_unicode(content)
if not response['status'] == '200':
raise OAuthError(_(
u"Invalid status code %s while obtaining access token from %s: %s") %
(response['status'], self.access_token_url, content))
token = dict(urlparse.parse_qsl(content))
return (oauth.Token(token['oauth_token'], token['oauth_token_secret']),
token)
def get_request_token(self):
"""
Return the request token for this API. If we've not fetched it yet,
go out, request and memoize it.
"""
if self._request_token is None:
self._request_token = self._get_request_token()
return self._request_token
def get_access_token(self, verifier=None):
"""
Return the access token for this API. If we've not fetched it yet,
go out, request and memoize it.
"""
if self._access_token is None:
self._access_token, self._access_token_dict = self._get_access_token(verifier)
return self._access_token
def get_redirect_url(self, **kwargs):
"""
Return the authorization/authentication URL signed with the request
token.
"""
params = {
'oauth_token': self.get_request_token().key,
}
return '%s?%s' % (self.auth_url, urllib.urlencode(params))
def complete(self, GET):
"""
        When redirected back to our application, try to complete the flow by
        requesting an access token, validating against the received `GET`
        parameters. If the access token request fails, an `OAuthError` is
        raised.
"""
token = self.get_access_token(verifier=GET.get('oauth_verifier', None))
return token
def request(self, url, method="GET", params=None, headers=None):
"""
Make signed requests against `url`.
"""
params = params or {}
headers = headers or {}
logger.debug("URL: %s", url)
logger.debug("Method: %s", method)
logger.debug("Headers: %s", headers)
logger.debug("Params: %s", params)
response, content = self.client().request(url, method, headers=headers,
body=urllib.urlencode(params))
content = smart_unicode(content)
logger.debug("Status: %s", response['status'])
logger.debug("Content: %s", content)
if response['status'] != '200':
raise OAuthError(_(
u"Invalid status code %s while requesting %s: %s") % (
response['status'], url, content))
return content
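# A minimal sketch (not part of the original module) of wiring up a
# hypothetical OAuth1 provider. The URLs and keys below are placeholders, and
# the ``Client`` base class is assumed to supply ``get_callback_url()``.
#
#     class ExampleOAuth(OAuth):
#         api_key = 'consumer-key'
#         secret_key = 'consumer-secret'
#         auth_url = 'https://provider.example/oauth/authorize'
#         request_token_url = 'https://provider.example/oauth/request_token'
#         access_token_url = 'https://provider.example/oauth/access_token'
#
# A view would redirect the user to ``get_redirect_url()`` and, on the
# callback, call ``complete(request.GET)`` to exchange the verifier for an
# access token.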
class OAuth2(Client):
"""
Base class for OAuth2 services such as Facebook, Github and Foursquare.
"""
# The client id provided by the service
client_id = None
# The secret id provided by the service
secret = None
# The URL where we'll be requesting permissions from the user
auth_url = None
# The URL where we'll be obtaining an access token once permissions were
# granted
access_token_url = None
    # The permissions we'll be asking for
scope = None
# The access token we obtained
_access_token = None
# The dict holding all infos we got from the access token endpoint
access_token_dict = None
# Memoized user info fetched once an access token was obtained
_user_info = None
def __init__(self, access_token=None):
self._access_token = access_token
def client(self):
ca_certs = getattr(settings, 'HTTPLIB2_CA_CERTS', None)
return httplib2.Http(ca_certs=ca_certs, timeout=TIMEOUT)
def get_redirect_url(self, state='', **kwargs):
"""
        Assemble the URL we'll redirect the user to when requesting
        permissions.
"""
params = {
'response_type': 'code',
'client_id': self.client_id,
'redirect_uri': self.get_callback_url(**kwargs),
'scope': self.scope or '',
'state': state,
}
return '%s?%s' % (self.auth_url, urllib.urlencode(params))
def parse_access_token(self, content):
"""
Parse the access token response. The default OAuth response should be
a query string - but some services return JSON instead.
"""
return dict(urlparse.parse_qsl(content))
def request_access_token(self, params):
"""
Request the access token from `self.access_token_url`. The default
behaviour is to use a `POST` request, but some services use `GET`
requests. Individual clients can override this method to use the
correct HTTP method.
"""
return self.request(self.access_token_url, method="POST", params=params,
is_signed=False)
def _get_access_token(self, code, **params):
"""
Fetch an access token with the provided `code`.
"""
params.update({
'code': code,
'client_id': self.client_id,
'client_secret': self.secret,
'redirect_uri': self.get_callback_url(),
})
logger.debug("Params: %s", params)
resp, content = self.request_access_token(params=params)
content = smart_unicode(content)
logger.debug("Status: %s", resp['status'])
logger.debug("Content: %s", content)
content = self.parse_access_token(content)
if 'error' in content:
raise OAuthError(_(
u"Received error while obtaining access token from %s: %s") % (
self.access_token_url, content['error']))
return content
def get_access_token(self, code=None, **params):
"""
Return the memoized access token or go out and fetch one.
"""
if self._access_token is None:
if code is None:
raise ValueError(_('Invalid code.'))
self.access_token_dict = self._get_access_token(code, **params)
try:
self._access_token = self.access_token_dict['access_token']
            except KeyError:
raise OAuthError("Credentials could not be validated, the provider returned no access token.")
return self._access_token
def complete(self, GET):
"""
Complete the OAuth2 flow by fetching an access token with the provided
code in the GET parameters.
"""
if 'error' in GET:
raise OAuthError(
_("Received error while obtaining access token from %s: %s") % (
self.access_token_url, GET['error']))
return self.get_access_token(code=GET.get('code'))
def get_signing_params(self):
"""
        Return the parameters for signing a request. Some APIs don't
        obey the standard `access_token` parameter - subclasses can override
        this method to return the parameters they use.
"""
return dict(access_token=self._access_token)
def request(self, url, method="GET", params=None, headers=None, is_signed=True):
"""
        Make a request against ``url``. By default the request is signed with
        an access token, but signing can be turned off by passing
        ``is_signed=False``.
"""
params = params or {}
headers = headers or {}
if is_signed:
params.update(self.get_signing_params())
if method.upper() == "GET":
url = '%s?%s' % (url, urllib.urlencode(params))
return self.client().request(url, method=method, headers=headers)
return self.client().request(url, method, body=urllib.urlencode(params), headers=headers)
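# A minimal sketch (not part of the original module) of a hypothetical OAuth2
# provider. The URLs, client id and secret are placeholders.
#
#     class ExampleOAuth2(OAuth2):
#         client_id = 'client-id'
#         secret = 'client-secret'
#         auth_url = 'https://provider.example/oauth2/authorize'
#         access_token_url = 'https://provider.example/oauth2/token'
#         scope = 'read'
#
# The flow mirrors OAuth1: redirect the user to ``get_redirect_url()``, then
# call ``complete(request.GET)`` on the callback to obtain the access token.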
|
|
# Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import numpy as np
import astropy.units as u
from astropy.utils import NumpyRNGContext
'''
Routines for fitting a line with errors in both variables.
'''
def leastsq_linear(x, y, x_err, y_err, verbose=False):
'''
Fit a line with errors in both variables using least squares fitting.
Specifically, this uses orthogonal distance regression (in scipy) since
there is no clear "independent" variable here and the covariance is
unknown (or at least difficult to estimate).
Parameters
----------
x : `~numpy.ndarray`
x data.
y : `~numpy.ndarray`
y data.
x_err : `~numpy.ndarray`
x errors.
y_err : `~numpy.ndarray`
y errors.
verbose : bool, optional
Plot the resulting fit.
Returns
-------
params : `~numpy.ndarray`
Fit parameters (slope, intercept)
errors : `~numpy.ndarray`
1-sigma errors from the covariance matrix (slope, intercept).
'''
import scipy.odr as odr
from scipy.stats import linregress
def fit_func(B, x):
return B[0] * x + B[1]
linear = odr.Model(fit_func)
mydata = odr.Data(x, y, wd=np.power(x_err, -2), we=np.power(y_err, -2))
# Set the initial guess for ODR from a normal linear regression
beta0 = linregress(x, y)[:2]
# beta sets the initial parameters
myodr = odr.ODR(mydata, linear, beta0=beta0)
output = myodr.run()
params = output.beta
errors = output.sd_beta
    # Found a source saying this is equivalent to the reduced chi-square. Not
    # sure if this is true... Bootstrapping is likely a better way to go.
# gof = output.res_var
if verbose:
output.pprint()
import matplotlib.pyplot as plt
fig = plt.figure()
ax = plt.subplot2grid((4, 1), (0, 0), colspan=1, rowspan=3)
ax_r = plt.subplot2grid((4, 1), (3, 0), colspan=1,
rowspan=1,
sharex=ax)
ax.errorbar(x, y, xerr=x_err, yerr=y_err, fmt='o', color='k')
ax.set_ylabel("log Spectral Length")
xvals = np.linspace(x.min(), x.max(), x.size * 10)
ax.plot(xvals, params[0] * xvals + params[1], 'r-')
ax.fill_between(xvals,
(params[0] - errors[0]) * xvals +
(params[1] - errors[1]),
(params[0] + errors[0]) * xvals +
(params[1] + errors[1]),
facecolor='red', interpolate=True, alpha=0.4)
        # Some very large error bars make it difficult to see the model
y_range = np.ptp(y)
x_range = np.ptp(x)
ax.set_ylim([y.min() - y_range / 4, y.max() + y_range / 4])
ax.set_xlim([x.min() - x_range / 4, x.max() + x_range / 4])
ax_r.errorbar(x, y - (params[0] * x + params[1]),
xerr=x_err, yerr=y_err, fmt='o', color='k')
ax_r.axhline(0., color='red', linestyle='-', alpha=0.7)
ax_r.set_ylabel("Residuals")
ax_r.set_xlabel("log Spatial Length")
plt.tight_layout()
fig.subplots_adjust(hspace=0.1)
plt.show()
return params, errors
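# A minimal usage sketch (not part of the original module): fit a synthetic
# line with errors on both axes. The slope and intercept below are arbitrary
# illustration values; the recovered parameters should agree with them within
# the reported 1-sigma errors.
def _example_leastsq_linear(seed=42):
    rng = np.random.RandomState(seed)
    x = np.linspace(1., 10., 50)
    x_err = np.full_like(x, 0.1)
    y_err = np.full_like(x, 0.2)
    # Perturb both axes by their stated uncertainties.
    y = 2.0 * x + 1.0 + rng.normal(scale=y_err)
    x_obs = x + rng.normal(scale=x_err)
    return leastsq_linear(x_obs, y, x_err, y_err)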
def bayes_linear(x, y, x_err, y_err, nWalkers=10, nBurn=100, nSample=1000,
conf_interval=[15.9, 84.1], verbose=False,
return_samples=False):
'''
Fit a line with errors in both variables using MCMC.
Original version of this function is Erik Rosolowsky's:
https://github.com/low-sky/py-low-sky/blob/master/BayesLinear.py
Parameters
----------
x : `~numpy.ndarray`
x data.
y : `~numpy.ndarray`
y data.
x_err : `~numpy.ndarray`
x errors.
y_err : `~numpy.ndarray`
y errors.
nWalkers : int, optional
Number of walkers in the sampler (>2 required). Defaults to 10.
nBurn : int, optional
Number of steps to burn chain in for. Default is 100.
nSample : int, optional
Number of steps to sample chain with. Default is 1000.
conf_interval : list, optional
Upper and lower percentiles to estimate the bounds on the parameters.
Defaults to the 1-sigma intervals (34.1% about the median).
verbose : bool, optional
Plot the resulting fit.
return_samples : bool, optional
Returns the entire chain of samples, when enabled.
Returns
-------
params : `~numpy.ndarray`
Fit parameters (slope, intercept)
errors : `~numpy.ndarray`
Confidence interval defined by values given in `conf_interval`
(slope, intercept).
samples : `~numpy.ndarray`
Samples from the chain. Returned only when `return_samples` is enabled.
'''
try:
import emcee
except ImportError:
raise ImportError("emcee must be installed to use Bayesian fitting.")
def _logprob(p, x, y, x_err, y_err):
theta, b = p[0], p[1]
if np.abs(theta - np.pi / 4) > np.pi / 4:
return -np.inf
Delta = (np.cos(theta) * y - np.sin(theta) * x - b * np.cos(theta))**2
Sigma = (np.sin(theta))**2 * x_err**2 + (np.cos(theta))**2 * y_err**2
lp = -0.5 * np.nansum(Delta / Sigma) - 0.5 * np.nansum(np.log(Sigma))
return lp
ndim = 2
p0 = np.zeros((nWalkers, ndim))
p0[:, 0] = np.pi / 4 + np.random.randn(nWalkers) * 0.1
p0[:, 1] = np.random.randn(nWalkers) * y.std() + y.mean()
sampler = emcee.EnsembleSampler(nWalkers, ndim, _logprob,
args=[x, y, x_err, y_err])
pos, prob, state = sampler.run_mcmc(p0, nBurn)
sampler.reset()
sampler.run_mcmc(pos, nSample)
slopes = np.tan(sampler.flatchain[:, 0])
intercepts = sampler.flatchain[:, 1]
slope = np.median(slopes)
intercept = np.median(intercepts)
params = np.array([slope, intercept])
# Use the percentiles given in conf_interval
error_intervals = np.empty((2, 2))
error_intervals[0] = np.percentile(slopes, conf_interval)
error_intervals[1] = np.percentile(intercepts, conf_interval)
if verbose:
# Make some trace plots, PDFs and a plot of the range of solutions
import matplotlib.pyplot as plt
from astropy.visualization import hist
fig = plt.figure(figsize=(9.9, 4.8))
ax = plt.subplot2grid((4, 4), (0, 0), colspan=1, rowspan=2)
ax.plot(slopes, 'k', linewidth=0.5)
ax.set_ylabel("Slope")
# ax.set_xlabel("Iteration")
ax.get_xaxis().set_ticklabels([])
ax2 = plt.subplot2grid((4, 4), (0, 1), colspan=1, rowspan=2)
ax2.plot(intercepts, 'k', linewidth=0.5)
ax2.set_ylabel("Intercept")
# ax2.set_xlabel("Iteration")
ax2.get_xaxis().set_ticklabels([])
ax3 = plt.subplot2grid((4, 4), (2, 0), colspan=1, rowspan=2)
hist(slopes, bins='knuth', color='k', alpha=0.6, ax=ax3)
ax3.axvline(slope, color='r', linestyle='-')
ax3.axvline(error_intervals[0][0], color='r', linestyle='--')
ax3.axvline(error_intervals[0][1], color='r', linestyle='--')
ax3.set_xlabel("Slope")
ax4 = plt.subplot2grid((4, 4), (2, 1), colspan=1, rowspan=2)
hist(intercepts, bins='knuth', color='k', alpha=0.6, ax=ax4)
ax4.axvline(intercept, color='r', linestyle='-')
ax4.axvline(error_intervals[1][0], color='r', linestyle='--')
ax4.axvline(error_intervals[1][1], color='r', linestyle='--')
ax4.set_xlabel("Intercept")
ax5 = plt.subplot2grid((4, 4), (0, 2), colspan=2, rowspan=3)
ax_r = plt.subplot2grid((4, 4), (3, 2), colspan=2,
rowspan=1,
sharex=ax5)
ax5.errorbar(x, y, xerr=x_err, yerr=y_err, fmt='o', color='k')
ax5.set_ylabel("log Spectral Length")
xvals = np.linspace(x.min(), x.max(), x.size * 10)
ax5.plot(xvals, slope * xvals + intercept, 'r-')
ax5.fill_between(xvals,
error_intervals[0, 0] * xvals + error_intervals[1, 0],
error_intervals[0, 1] * xvals + error_intervals[1, 1],
facecolor='red', interpolate=True, alpha=0.4)
# ax5.get_xaxis().set_ticklabels([])
        # Some very large error bars make it difficult to see the model
y_range = np.ptp(y)
x_range = np.ptp(x)
ax5.set_ylim([y.min() - y_range / 4, y.max() + y_range / 4])
ax5.set_xlim([x.min() - x_range / 4, x.max() + x_range / 4])
ax_r.errorbar(x, y - (slope * x + intercept),
xerr=x_err, yerr=y_err, fmt='o', color='k')
ax_r.axhline(0., color='red', linestyle='--', alpha=0.7)
ax_r.set_ylabel("Residuals")
ax_r.set_xlabel("log Spatial Length")
print("Slope: {0} ({1}, {2})".format(slope, error_intervals[0, 0],
error_intervals[0, 1]))
print("Intercept: {0} ({1}, {2})".format(intercept,
error_intervals[1, 0],
error_intervals[1, 1]))
plt.tight_layout()
fig.subplots_adjust(hspace=0.1)
plt.show()
if return_samples:
return params, error_intervals, np.vstack([slopes, intercepts])
return params, error_intervals
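# A minimal usage sketch (not part of the original module; requires emcee):
# the rows of ``error_intervals`` hold the percentile bounds for the slope
# and intercept, respectively. The line parameters are illustration values.
def _example_bayes_linear(seed=42):
    rng = np.random.RandomState(seed)
    x = np.linspace(1., 10., 50)
    x_err = np.full_like(x, 0.1)
    y_err = np.full_like(x, 0.2)
    y = 0.5 * x + 2.0 + rng.normal(scale=y_err)
    return bayes_linear(x, y, x_err, y_err, nWalkers=10, nBurn=100,
                        nSample=500)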
def check_fit_limits(xlow, xhigh):
'''
Check that the inputs are floats (or ints), or a 2-element array for
passing separate limits.
Parameters
----------
xlow : float or np.ndarray, optional
The lower lag fitting limit. An array with 2 elements can be passed to
give separate lower limits for the datasets.
xhigh : float or np.ndarray, optional
The upper lag fitting limit. See `xlow` above.
'''
    if xlow is None:
        xlow = np.array([xlow] * 2)
    elif isinstance(xlow, u.Quantity):
        if xlow.isscalar:
            xlow = u.Quantity([xlow] * 2)
    elif np.isscalar(xlow):
        # Plain floats/ints are duplicated so each dataset gets a limit.
        xlow = np.array([xlow] * 2)
    if not len(xlow) == 2:
        raise ValueError("xlow must be a 2-element array when giving "
                         "separate fitting limits for each dataset.")
    if xhigh is None:
        xhigh = np.array([xhigh] * 2)
    elif isinstance(xhigh, u.Quantity):
        if xhigh.isscalar:
            xhigh = u.Quantity([xhigh] * 2)
    elif np.isscalar(xhigh):
        xhigh = np.array([xhigh] * 2)
    if not len(xhigh) == 2:
        raise ValueError("xhigh must be a 2-element array when giving "
                         "separate fitting limits for each dataset.")
return xlow, xhigh
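# Illustrative sketch (not part of the original module): scalar limits are
# expanded so that each of the two datasets gets its own (low, high) pair.
#
#     xlow, xhigh = check_fit_limits(0.1 * u.pix, 10. * u.pix)
#     # xlow  -> <Quantity [0.1, 0.1] pix>
#     # xhigh -> <Quantity [10., 10.] pix>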
def clip_func(arr, low, high):
return np.logical_and(arr > low, arr <= high)
def residual_bootstrap(fit_model, nboot=1000, seed=38574895,
return_samps=False, debug=False,
**fit_kwargs):
'''
Bootstrap with residual resampling.
'''
y = fit_model.model.wendog
y_res = fit_model.wresid
resamps = []
if debug:
import matplotlib.pyplot as plt
with NumpyRNGContext(seed):
for _ in range(nboot):
y_resamp = y + y_res[np.random.choice(y_res.size - 1, y_res.size)]
resamp_mod = fit_model.model.__class__(y_resamp,
fit_model.model.exog)
resamp_fit = resamp_mod.fit(**fit_kwargs)
if debug:
plt.plot(fit_model.model.exog[:, 1], y, label='Data')
plt.plot(fit_model.model.exog[:, 1], y_resamp, label='Resamp')
plt.plot(resamp_fit.model.exog[:, 1], resamp_fit.model.endog,
label='Resamp Model')
plt.legend()
plt.draw()
print(resamp_fit.params)
input("?")
plt.clf()
resamps.append(resamp_fit.params)
resamps = np.array(resamps).squeeze()
if return_samps:
return resamps
return np.std(resamps, axis=0)
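# A minimal usage sketch (not part of the original module; requires
# statsmodels): bootstrap the parameter uncertainties of a simple OLS fit.
# The data and fit below are purely illustrative.
def _example_residual_bootstrap(seed=42):
    import statsmodels.api as sm
    rng = np.random.RandomState(seed)
    x = np.linspace(0., 5., 40)
    y = 1.5 * x + 0.3 + rng.normal(scale=0.2, size=x.size)
    fit = sm.OLS(y, sm.add_constant(x)).fit()
    # Returns the standard deviation of the bootstrapped (intercept, slope).
    return residual_bootstrap(fit, nboot=200)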
|
|
# Copyright 2013 vArmour Networks Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.common import config as agent_config
from neutron.agent.l3 import config as l3_config
from neutron.agent.l3 import ha
from neutron.agent.l3 import router_info
from neutron.agent.linux import interface
from neutron.common import config as base_config
from neutron.common import constants as l3_constants
from neutron.tests import base
from oslo_utils import uuidutils
from neutron_fwaas.services.firewall.agents.varmour import varmour_router
from neutron_fwaas.services.firewall.drivers.varmour import varmour_fwaas
_uuid = uuidutils.generate_uuid
HOSTNAME = 'myhost'
FAKE_DIRECTOR = '1.1.1.1'
class TestBasicRouterOperations(base.BaseTestCase):
def setUp(self):
super(TestBasicRouterOperations, self).setUp()
self.conf = agent_config.setup_conf()
self.conf.register_opts(base_config.core_opts)
self.conf.register_opts(l3_config.OPTS)
self.conf.register_opts(ha.OPTS)
agent_config.register_process_monitor_opts(self.conf)
agent_config.register_interface_driver_opts_helper(self.conf)
agent_config.register_use_namespaces_opts_helper(self.conf)
self.conf.register_opts(interface.OPTS)
self.conf.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
self.conf.state_path = ''
self.device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
self.device_exists = self.device_exists_p.start()
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager')
self.external_process = self.external_process_p.start()
self.makedirs_p = mock.patch('os.makedirs')
self.makedirs = self.makedirs_p.start()
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
driver_cls = self.dvr_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
driver_cls.return_value = self.mock_driver
self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
ip_cls = self.ip_cls_p.start()
self.mock_ip = mock.MagicMock()
ip_cls.return_value = self.mock_ip
mock.patch('neutron.agent.l3.agent.L3PluginApi').start()
self.looping_call_p = mock.patch(
'oslo_service.loopingcall.FixedIntervalLoopingCall')
self.looping_call_p.start()
self.fake_agent_mode = None
self.ri_kwargs = {'agent_conf': self.conf,
'interface_driver': self.mock_driver}
def _create_router(self):
router = varmour_router.vArmourL3NATAgent(HOSTNAME, self.conf)
router.rest.server = FAKE_DIRECTOR
router.rest.user = 'varmour'
router.rest.passwd = 'varmour'
return router
def _create_fwaas(self):
fwaas = varmour_fwaas.vArmourFwaasDriver()
fwaas.rest.server = FAKE_DIRECTOR
fwaas.rest.user = 'varmour'
fwaas.rest.passwd = 'varmour'
return fwaas
def _del_all_internal_ports(self, router):
router[l3_constants.INTERFACE_KEY] = []
def _del_internal_ports(self, router, port_idx):
del router[l3_constants.INTERFACE_KEY][port_idx]
def _add_internal_ports(self, router, port_count=1):
self._del_all_internal_ports(router)
for i in range(port_count):
port = {'id': _uuid(),
'network_id': _uuid(),
'admin_state_up': True,
'fixed_ips': [{'ip_address': '10.0.%s.4' % i,
'subnet_id': _uuid()}],
'mac_address': 'ca:fe:de:ad:be:ef',
'subnet': {'cidr': '10.0.%s.0/24' % i,
'gateway_ip': '10.0.%s.1' % i}}
router[l3_constants.INTERFACE_KEY].append(port)
def _del_all_floating_ips(self, router):
router[l3_constants.FLOATINGIP_KEY] = []
def _del_floating_ips(self, router, port_idx):
del router[l3_constants.FLOATINGIP_KEY][port_idx]
def _add_floating_ips(self, router, port_count=1):
self._del_all_floating_ips(router)
for i in range(port_count):
fip = {'id': _uuid(),
'port_id': router['gw_port']['id'],
'floating_ip_address': '172.24.4.%s' % (100 + i),
'fixed_ip_address': '10.0.0.%s' % (100 + i)}
router[l3_constants.FLOATINGIP_KEY].append(fip)
def _prepare_router_data(self, enable_snat=None):
router_id = _uuid()
ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '172.24.4.2',
'subnet_id': _uuid()}],
'subnet': {'cidr': '172.24.4.0/24',
'gateway_ip': '172.24.4.1'},
'ip_cidr': '172.24.4.226/28'}
int_ports = []
router = {
'id': router_id,
l3_constants.INTERFACE_KEY: int_ports,
'routes': [],
'gw_port': ex_gw_port}
if enable_snat is not None:
router['enable_snat'] = enable_snat
ri = router_info.RouterInfo(router_id=router['id'], router=router,
**self.ri_kwargs)
return ri
def _add_firewall_rules(self, fw, rule_count=1):
rules = []
for i in range(rule_count):
rule = {'id': _uuid(),
'enabled': True,
'action': 'deny' if (i % 2 == 0) else 'allow',
'ip_version': 4,
'protocol': 'tcp',
'source_ip_address': '10.0.0.%s/24' % (100 + i),
'destination_port': '%s' % (100 + i)}
rules.append(rule)
fw['firewall_rule_list'] = rules
def _prepare_firewall_data(self):
fw = {'id': _uuid(),
'admin_state_up': True,
'firewall_rule_list': []}
return fw
def test_firewall_without_rule(self):
fwaas = self._create_fwaas()
fwaas.create_firewall = mock.Mock()
fwaas.delete_firewall = mock.Mock()
ri = self._prepare_router_data(enable_snat=True)
self._add_internal_ports(ri.router, port_count=1)
self._add_floating_ips(ri.router, port_count=1)
rl = [ri]
fw = self._prepare_firewall_data()
fwaas.create_firewall(self.fake_agent_mode, rl, fw)
fwaas.create_firewall.assert_called_once_with(self.fake_agent_mode,
rl, fw)
fwaas.delete_firewall(self.fake_agent_mode, rl, fw)
def test_firewall_with_rules(self):
fwaas = self._create_fwaas()
fwaas.create_firewall = mock.Mock()
fwaas.delete_firewall = mock.Mock()
fw = self._prepare_firewall_data()
self._add_firewall_rules(fw, 2)
ri = self._prepare_router_data(enable_snat=True)
self._add_internal_ports(ri.router, port_count=1)
self._add_floating_ips(ri.router, port_count=1)
rl = [ri]
fwaas.create_firewall(self.fake_agent_mode, rl, fw)
fwaas.create_firewall.assert_called_once_with(self.fake_agent_mode,
rl, fw)
fwaas.delete_firewall(self.fake_agent_mode, rl, fw)
def test_firewall_add_remove_rules(self):
fwaas = self._create_fwaas()
fwaas.create_firewall = mock.Mock()
fwaas.delete_firewall = mock.Mock()
fw = self._prepare_firewall_data()
ri = self._prepare_router_data(enable_snat=True)
self._add_internal_ports(ri.router, port_count=1)
self._add_floating_ips(ri.router, port_count=1)
rl = [ri]
self._add_firewall_rules(fw, 2)
fwaas.create_firewall(self.fake_agent_mode, rl, fw)
        # Replace the rule list with a single rule and re-apply the firewall.
self._add_firewall_rules(fw, 1)
fwaas.create_firewall(self.fake_agent_mode, rl, fw)
fwaas.delete_firewall(self.fake_agent_mode, rl, fw)
|
|
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Automatically restart the server when a source file is modified.
Most applications should not access this module directly. Instead,
pass the keyword argument ``autoreload=True`` to the
`tornado.web.Application` constructor (or ``debug=True``, which
enables this setting and several others). This will enable autoreload
mode as well as checking for changes to templates and static
resources. Note that restarting is a destructive operation and any
requests in progress will be aborted when the process restarts. (If
you want to disable autoreload while using other debug-mode features,
pass both ``debug=True`` and ``autoreload=False``).
This module can also be used as a command-line wrapper around scripts
such as unit test runners. See the `main` method for details.
The command-line wrapper and Application debug modes can be used together.
This combination is encouraged as the wrapper catches syntax errors and
other import-time failures, while debug mode catches changes once
the server has started.
This module depends on `.IOLoop`, so it will not work in WSGI applications
and Google App Engine. It also will not work correctly when `.HTTPServer`'s
multi-process mode is used.
Reloading loses any Python interpreter command-line arguments (e.g. ``-u``)
because it re-executes Python using ``sys.executable`` and ``sys.argv``.
Additionally, modifying these variables will cause reloading to behave
incorrectly.
"""
from __future__ import absolute_import, division, print_function
import os
import sys
# sys.path handling
# -----------------
#
# If a module is run with "python -m", the current directory (i.e. "")
# is automatically prepended to sys.path, but not if it is run as
# "path/to/file.py". The processing for "-m" rewrites the former to
# the latter, so subsequent executions won't have the same path as the
# original.
#
# Conversely, when run as path/to/file.py, the directory containing
# file.py gets added to the path, which can cause confusion as imports
# may become relative in spite of the future import.
#
# We address the former problem by reconstructing the original command
# line (Python >= 3.4) or by setting the $PYTHONPATH environment
# variable (Python < 3.4) before re-execution so the new process will
# see the correct path. We attempt to address the latter problem when
# tornado.autoreload is run as __main__.
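# Rough illustration of the difference described above (assuming a package
# ``pkg`` containing ``mod.py``; the names are hypothetical):
#
#     python -m pkg.mod    ->  sys.path[0] == ''       (the current directory)
#     python pkg/mod.py    ->  sys.path[0] == 'pkg'    (the script's directory)
#
# A naive re-exec of the second form would therefore lose the path entry that
# the original "-m" invocation relied on.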
if __name__ == "__main__":
# This sys.path manipulation must come before our imports (as much
# as possible - if we introduced a tornado.sys or tornado.os
# module we'd be in trouble), or else our imports would become
# relative again despite the future import.
#
# There is a separate __main__ block at the end of the file to call main().
if sys.path[0] == os.path.dirname(__file__):
del sys.path[0]
import functools
import logging
import os
import pkgutil # type: ignore
import sys
import traceback
import types
import subprocess
import weakref
from tornado import ioloop
from tornado.log import gen_log
from tornado import process
from tornado.util import exec_in
try:
import signal
except ImportError:
signal = None
# os.execv is broken on Windows and can't properly parse command line
# arguments and executable name if they contain whitespaces. subprocess
# fixes that behavior.
_has_execv = sys.platform != 'win32'
_watched_files = set()
_reload_hooks = []
_reload_attempted = False
_io_loops = weakref.WeakKeyDictionary() # type: ignore
_autoreload_is_main = False
_original_argv = None
_original_spec = None
def start(check_time=500):
"""Begins watching source files for changes.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
io_loop = ioloop.IOLoop.current()
if io_loop in _io_loops:
return
_io_loops[io_loop] = True
if len(_io_loops) > 1:
gen_log.warning("tornado.autoreload started more than once in the same process")
modify_times = {}
callback = functools.partial(_reload_on_update, modify_times)
scheduler = ioloop.PeriodicCallback(callback, check_time)
scheduler.start()
def wait():
"""Wait for a watched file to change, then restart the process.
Intended to be used at the end of scripts like unit test runners,
to run the tests again after any source file changes (but see also
the command-line interface in `main`)
"""
io_loop = ioloop.IOLoop()
io_loop.add_callback(start)
io_loop.start()
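# Comment-only sketch (an assumption, not part of the original file): a test
# script that re-runs itself whenever a watched source file changes.
#
#     import unittest
#     import tornado.autoreload
#
#     if __name__ == "__main__":
#         try:
#             unittest.main()
#         finally:
#             tornado.autoreload.wait()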
def watch(filename):
"""Add a file to the watch list.
All imported modules are watched by default.
"""
_watched_files.add(filename)
def add_reload_hook(fn):
"""Add a function to be called before reloading the process.
Note that for open file and socket handles it is generally
preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or
``tornado.platform.auto.set_close_exec``) instead
of using a reload hook to close them.
"""
_reload_hooks.append(fn)
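# Comment-only sketch (hypothetical names): registering a hook that closes a
# database connection before the process re-executes. As the docstring notes,
# prefer FD_CLOEXEC for raw file descriptors and sockets.
#
#     conn = connect_to_database()        # hypothetical helper
#     tornado.autoreload.add_reload_hook(conn.close)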
def _reload_on_update(modify_times):
if _reload_attempted:
# We already tried to reload and it didn't work, so don't try again.
return
if process.task_id() is not None:
# We're in a child process created by fork_processes. If child
# processes restarted themselves, they'd all restart and then
# all call fork_processes again.
return
for module in list(sys.modules.values()):
# Some modules play games with sys.modules (e.g. email/__init__.py
# in the standard library), and occasionally this can cause strange
# failures in getattr. Just ignore anything that's not an ordinary
# module.
if not isinstance(module, types.ModuleType):
continue
path = getattr(module, "__file__", None)
if not path:
continue
if path.endswith(".pyc") or path.endswith(".pyo"):
path = path[:-1]
_check_file(modify_times, path)
for path in _watched_files:
_check_file(modify_times, path)
def _check_file(modify_times, path):
try:
modified = os.stat(path).st_mtime
except Exception:
return
if path not in modify_times:
modify_times[path] = modified
return
if modify_times[path] != modified:
gen_log.info("%s modified; restarting server", path)
_reload()
def _reload():
global _reload_attempted
_reload_attempted = True
for fn in _reload_hooks:
fn()
if hasattr(signal, "setitimer"):
# Clear the alarm signal set by
# ioloop.set_blocking_log_threshold so it doesn't fire
# after the exec.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
# sys.path fixes: see comments at top of file. If __main__.__spec__
# exists, we were invoked with -m and the effective path is about to
# change on re-exec. Reconstruct the original command line to
# ensure that the new process sees the same path we did. If
# __spec__ is not available (Python < 3.4), check instead if
# sys.path[0] is an empty string and add the current directory to
# $PYTHONPATH.
if _autoreload_is_main:
spec = _original_spec
argv = _original_argv
else:
spec = getattr(sys.modules['__main__'], '__spec__', None)
argv = sys.argv
if spec:
argv = ['-m', spec.name] + argv[1:]
else:
path_prefix = '.' + os.pathsep
if (sys.path[0] == '' and
not os.environ.get("PYTHONPATH", "").startswith(path_prefix)):
os.environ["PYTHONPATH"] = (path_prefix +
os.environ.get("PYTHONPATH", ""))
if not _has_execv:
subprocess.Popen([sys.executable] + argv)
os._exit(0)
else:
try:
os.execv(sys.executable, [sys.executable] + argv)
except OSError:
# Mac OS X versions prior to 10.6 do not support execv in
# a process that contains multiple threads. Instead of
# re-executing in the current process, start a new one
# and cause the current process to exit. This isn't
# ideal since the new process is detached from the parent
# terminal and thus cannot easily be killed with ctrl-C,
# but it's better than not being able to autoreload at
# all.
# Unfortunately the errno returned in this case does not
# appear to be consistent, so we can't easily check for
# this error specifically.
os.spawnv(os.P_NOWAIT, sys.executable, [sys.executable] + argv)
# At this point the IOLoop has been closed and finally
# blocks will experience errors if we allow the stack to
# unwind, so just exit uncleanly.
os._exit(0)
_USAGE = """\
Usage:
python -m tornado.autoreload -m module.to.run [args...]
python -m tornado.autoreload path/to/script.py [args...]
"""
def main():
"""Command-line wrapper to re-run a script whenever its source changes.
Scripts may be specified by filename or module name::
python -m tornado.autoreload -m tornado.test.runtests
python -m tornado.autoreload tornado/test/runtests.py
Running a script with this wrapper is similar to calling
`tornado.autoreload.wait` at the end of the script, but this wrapper
can catch import-time problems like syntax errors that would otherwise
prevent the script from reaching its call to `wait`.
"""
# Remember that we were launched with autoreload as main.
# The main module can be tricky; set the variables both in our globals
# (which may be __main__) and the real importable version.
import tornado.autoreload
global _autoreload_is_main
global _original_argv, _original_spec
tornado.autoreload._autoreload_is_main = _autoreload_is_main = True
original_argv = sys.argv
tornado.autoreload._original_argv = _original_argv = original_argv
original_spec = getattr(sys.modules['__main__'], '__spec__', None)
tornado.autoreload._original_spec = _original_spec = original_spec
sys.argv = sys.argv[:]
if len(sys.argv) >= 3 and sys.argv[1] == "-m":
mode = "module"
module = sys.argv[2]
del sys.argv[1:3]
elif len(sys.argv) >= 2:
mode = "script"
script = sys.argv[1]
sys.argv = sys.argv[1:]
else:
print(_USAGE, file=sys.stderr)
sys.exit(1)
try:
if mode == "module":
import runpy
runpy.run_module(module, run_name="__main__", alter_sys=True)
elif mode == "script":
with open(script) as f:
# Execute the script in our namespace instead of creating
# a new one so that something that tries to import __main__
# (e.g. the unittest module) will see names defined in the
# script instead of just those defined in this module.
global __file__
__file__ = script
# If __package__ is defined, imports may be incorrectly
# interpreted as relative to this module.
global __package__
del __package__
exec_in(f.read(), globals(), globals())
except SystemExit as e:
logging.basicConfig()
gen_log.info("Script exited with status %s", e.code)
except Exception as e:
logging.basicConfig()
gen_log.warning("Script exited with uncaught exception", exc_info=True)
# If an exception occurred at import time, the file with the error
# never made it into sys.modules and so we won't know to watch it.
# Just to make sure we've covered everything, walk the stack trace
# from the exception and watch every file.
for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]):
watch(filename)
if isinstance(e, SyntaxError):
# SyntaxErrors are special: their innermost stack frame is fake
# so extract_tb won't see it and we have to get the filename
# from the exception object.
watch(e.filename)
else:
logging.basicConfig()
gen_log.info("Script exited normally")
# restore sys.argv so subsequent executions will include autoreload
sys.argv = original_argv
if mode == 'module':
# runpy did a fake import of the module as __main__, but now it's
# no longer in sys.modules. Figure out where it is and watch it.
loader = pkgutil.get_loader(module)
if loader is not None:
watch(loader.get_filename())
wait()
if __name__ == "__main__":
# See also the other __main__ block at the top of the file, which modifies
# sys.path before our imports
main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.special_math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import opt_einsum
import six
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class LBetaTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_one_dimensional_arg(self):
# Should evaluate to 1 and 1/2.
x_one = [1, 1.]
x_one_half = [2, 1.]
with self.session(use_gpu=True):
self.assertAllClose(
1, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one))))
self.assertAllClose(
0.5, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half))))
self.assertEqual([], special_math_ops.lbeta(x_one).get_shape())
@test_util.run_deprecated_v1
def test_one_dimensional_arg_dynamic(self):
# Should evaluate to 1 and 1/2.
x_one = [1, 1.]
x_one_half = [2, 1.]
with self.session(use_gpu=True):
ph = array_ops.placeholder(dtypes.float32)
beta_ph = math_ops.exp(special_math_ops.lbeta(ph))
self.assertAllClose(1, beta_ph.eval(feed_dict={ph: x_one}))
self.assertAllClose(0.5,
beta_ph.eval(feed_dict={ph: x_one_half}))
@test_util.run_deprecated_v1
def test_four_dimensional_arg_with_partial_shape_dynamic(self):
x_ = np.ones((3, 2, 3, 4))
# Gamma(1) = 0! = 1
# Gamma(1 + 1 + 1 + 1) = Gamma(4) = 3! = 6
# ==> Beta([1, 1, 1, 1])
# = Gamma(1) * Gamma(1) * Gamma(1) * Gamma(1) / Gamma(1 + 1 + 1 + 1)
# = 1 / 6
expected_beta_x = 1 / 6 * np.ones((3, 2, 3))
with self.session(use_gpu=True):
x_ph = array_ops.placeholder(dtypes.float32, [3, 2, 3, None])
beta_ph = math_ops.exp(special_math_ops.lbeta(x_ph))
self.assertAllClose(expected_beta_x,
beta_ph.eval(feed_dict={x_ph: x_}))
@test_util.run_in_graph_and_eager_modes
def test_two_dimensional_arg(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.session(use_gpu=True):
self.assertAllClose(
[0.5, 0.5],
self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half))))
self.assertEqual((2,), special_math_ops.lbeta(x_one_half).get_shape())
@test_util.run_deprecated_v1
def test_two_dimensional_arg_dynamic(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.session(use_gpu=True):
ph = array_ops.placeholder(dtypes.float32)
beta_ph = math_ops.exp(special_math_ops.lbeta(ph))
self.assertAllClose([0.5, 0.5],
beta_ph.eval(feed_dict={ph: x_one_half}))
@test_util.run_in_graph_and_eager_modes
def test_two_dimensional_proper_shape(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.session(use_gpu=True):
self.assertAllClose(
[0.5, 0.5],
self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half))))
self.assertEqual(
(2,),
self.evaluate(array_ops.shape(special_math_ops.lbeta(x_one_half))))
self.assertEqual(
tensor_shape.TensorShape([2]),
special_math_ops.lbeta(x_one_half).get_shape())
@test_util.run_in_graph_and_eager_modes
def test_complicated_shape(self):
with self.session(use_gpu=True):
x = ops.convert_to_tensor(np.random.rand(3, 2, 2))
self.assertAllEqual(
(3, 2), self.evaluate(array_ops.shape(special_math_ops.lbeta(x))))
self.assertEqual(
tensor_shape.TensorShape([3, 2]),
special_math_ops.lbeta(x).get_shape())
@test_util.run_in_graph_and_eager_modes
def test_length_1_last_dimension_results_in_one(self):
# If there is only one coefficient, the formula still works, and we get one
# as the answer, always.
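    # (With a single coefficient a, lbeta reduces to log(Gamma(a) / Gamma(a)),
    # so exp(lbeta([a])) == 1 for any positive a.)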
x_a = [5.5]
x_b = [0.1]
with self.session(use_gpu=True):
self.assertAllClose(
1,
self.evaluate(math_ops.exp(special_math_ops.lbeta(x_a))),
rtol=3e-6)
self.assertAllClose(
1, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_b))))
self.assertEqual((), special_math_ops.lbeta(x_a).get_shape())
@test_util.run_in_graph_and_eager_modes
def test_empty_rank1_returns_negative_infinity(self):
with self.session(use_gpu=True):
x = constant_op.constant([], shape=[0])
lbeta_x = special_math_ops.lbeta(x)
expected_result = constant_op.constant(-np.inf, shape=())
self.assertAllEqual(self.evaluate(expected_result),
self.evaluate(lbeta_x))
self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
@test_util.run_in_graph_and_eager_modes
def test_empty_rank2_with_zero_last_dim_returns_negative_infinity(self):
with self.session(use_gpu=True):
event_size = 0
for batch_size in [0, 1, 2]:
x = constant_op.constant([], shape=[batch_size, event_size])
lbeta_x = special_math_ops.lbeta(x)
expected_result = constant_op.constant(-np.inf, shape=[batch_size])
self.assertAllEqual(self.evaluate(expected_result),
self.evaluate(lbeta_x))
self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
@test_util.run_in_graph_and_eager_modes
def test_empty_rank2_with_zero_batch_dim_returns_empty(self):
with self.session(use_gpu=True):
batch_size = 0
for event_size in [0, 1, 2]:
x = constant_op.constant([], shape=[batch_size, event_size])
lbeta_x = special_math_ops.lbeta(x)
expected_result = constant_op.constant([], shape=[batch_size])
self.assertAllEqual(self.evaluate(expected_result),
self.evaluate(lbeta_x))
self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
@test_util.run_all_in_graph_and_eager_modes
class DawsnTest(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_dawsn_boundary(self):
self.assertAllClose(0., special_math_ops.dawsn(0.))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.dawsn(np.nan))))
@parameterized.parameters(np.float32, np.float64)
def test_dawsn_odd(self, dtype):
x = np.random.uniform(-100., 100., size=int(1e4)).astype(dtype)
self.assertAllClose(
self.evaluate(special_math_ops.dawsn(x)),
self.evaluate(-special_math_ops.dawsn(-x)))
@parameterized.parameters(np.float32, np.float64)
def test_dawsn_small(self, dtype):
x = np.random.uniform(-1., 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.dawsn(x), self.evaluate(special_math_ops.dawsn(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_dawsn_larger(self, dtype):
x = np.random.uniform(1., 100., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.dawsn(x), self.evaluate(special_math_ops.dawsn(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
def test_dawsn_gradient(self):
inputs = [np.random.uniform(-50., 50., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.dawsn, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
@test_util.run_all_in_graph_and_eager_modes
class ExpintTest(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_expint_boundary(self):
self.assertAllClose(-np.inf, special_math_ops.expint(0.))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.expint(np.nan))))
# Check that the domain of definition is [0, inf)
self.assertTrue(
np.all(
np.isnan(
self.evaluate(
special_math_ops.expint(
np.random.uniform(-20., -1., size=int(1e3)))))))
@parameterized.parameters(np.float32, np.float64)
def test_expint_small(self, dtype):
x = np.random.uniform(0., 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.expi(x), self.evaluate(special_math_ops.expint(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_expint_larger(self, dtype):
x = np.random.uniform(1., 50., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.expi(x), self.evaluate(special_math_ops.expint(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
def test_expint_gradient(self):
inputs = [np.random.uniform(1., 10., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.expint, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 5e-3)
@test_util.run_all_in_graph_and_eager_modes
class FresnelCosTest(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_fresnel_cos_boundary(self):
self.assertAllClose(0., special_math_ops.fresnel_cos(0.))
self.assertTrue(
np.isnan(self.evaluate(special_math_ops.fresnel_cos(np.nan))))
@parameterized.parameters(np.float32, np.float64)
def test_fresnel_cos_odd(self, dtype):
x = np.random.uniform(-100., 100., size=int(1e4)).astype(dtype)
self.assertAllClose(
self.evaluate(special_math_ops.fresnel_cos(x)),
self.evaluate(-special_math_ops.fresnel_cos(-x)))
@parameterized.parameters(np.float32, np.float64)
def test_fresnel_cos_small(self, dtype):
x = np.random.uniform(0., 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
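      # scipy.special.fresnel returns the pair (S, C); index 1 below selects
      # the Fresnel cosine integral.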
self.assertAllClose(
special.fresnel(x)[1], self.evaluate(special_math_ops.fresnel_cos(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_fresnel_cos_larger(self, dtype):
x = np.random.uniform(1., 100., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.fresnel(x)[1],
self.evaluate(special_math_ops.fresnel_cos(x)),
rtol=1e-5)
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
def test_fresnel_cos_gradient(self):
inputs = [np.random.uniform(1., 50., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.fresnel_cos, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 5e-3)
@test_util.run_all_in_graph_and_eager_modes
class FresnelSinTest(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_fresnel_sin_boundary(self):
self.assertAllClose(0., special_math_ops.fresnel_sin(0.))
self.assertTrue(
np.isnan(self.evaluate(special_math_ops.fresnel_sin(np.nan))))
@parameterized.parameters(np.float32, np.float64)
def test_fresnel_sin_odd(self, dtype):
x = np.random.uniform(-100., 100., size=int(1e4)).astype(dtype)
self.assertAllClose(
self.evaluate(special_math_ops.fresnel_sin(x)),
self.evaluate(-special_math_ops.fresnel_sin(-x)))
@parameterized.parameters(np.float32, np.float64)
def test_fresnel_sin_small(self, dtype):
x = np.random.uniform(0., 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.fresnel(x)[0], self.evaluate(special_math_ops.fresnel_sin(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_fresnel_sin_larger(self, dtype):
x = np.random.uniform(1., 100., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.fresnel(x)[0],
self.evaluate(special_math_ops.fresnel_sin(x)),
rtol=1e-5)
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
def test_fresnel_sin_gradient(self):
inputs = [np.random.uniform(1., 50., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.fresnel_sin, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 5e-3)
@test_util.run_all_in_graph_and_eager_modes
class SpenceTest(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_spence_boundary(self):
self.assertAllClose(np.pi**2 / 6., special_math_ops.spence(0.))
self.assertAllClose(0., special_math_ops.spence(1.))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.spence(np.nan))))
# Check that the domain of definition is [0, inf)
self.assertTrue(
np.all(
np.isnan(
self.evaluate(
special_math_ops.spence(
np.random.uniform(-20., -1., size=int(1e3)))))))
@parameterized.parameters(np.float32, np.float64)
def test_spence_small(self, dtype):
x = np.random.uniform(0., 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.spence(x), self.evaluate(special_math_ops.spence(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_spence_larger(self, dtype):
x = np.random.uniform(1., 100., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.spence(x), self.evaluate(special_math_ops.spence(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
def test_spence_gradient(self):
inputs = [np.random.uniform(1., 50., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.spence, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
def test_spence_gradient_at_one(self):
analytical, _ = gradient_checker_v2.compute_gradient(
special_math_ops.spence, [1.])
self.assertAllClose([[[-1.]]], analytical)
@test_util.run_all_in_graph_and_eager_modes
class BesselTest(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_besseli_boundary(self):
self.assertAllClose(1., special_math_ops.bessel_i0(0.))
self.assertAllClose(1., special_math_ops.bessel_i0e(0.))
self.assertAllClose(0., special_math_ops.bessel_i1(0.))
self.assertAllClose(0., special_math_ops.bessel_i1e(0.))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_i0(np.nan))))
self.assertTrue(
np.isnan(self.evaluate(special_math_ops.bessel_i0e(np.nan))))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_i1(np.nan))))
self.assertTrue(
np.isnan(self.evaluate(special_math_ops.bessel_i1e(np.nan))))
@test_util.run_in_graph_and_eager_modes
def test_besselj_boundary(self):
self.assertAllClose(1., special_math_ops.bessel_j0(0.))
self.assertAllClose(0., special_math_ops.bessel_j1(0.))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_j0(np.nan))))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_j1(np.nan))))
@test_util.run_in_graph_and_eager_modes
def test_besselk_boundary(self):
self.assertTrue(np.isinf(self.evaluate(special_math_ops.bessel_k0(0.))))
self.assertTrue(np.isinf(self.evaluate(special_math_ops.bessel_k0e(0.))))
self.assertTrue(np.isinf(self.evaluate(special_math_ops.bessel_k1(0.))))
self.assertTrue(np.isinf(self.evaluate(special_math_ops.bessel_k1e(0.))))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_k0(np.nan))))
self.assertTrue(
np.isnan(self.evaluate(special_math_ops.bessel_k0e(np.nan))))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_k1(np.nan))))
self.assertTrue(
np.isnan(self.evaluate(special_math_ops.bessel_k1e(np.nan))))
@parameterized.parameters(np.float32, np.float64)
def test_i0j0_even(self, dtype):
x = np.random.uniform(-100., 100., size=int(1e4)).astype(dtype)
self.assertAllClose(
self.evaluate(special_math_ops.bessel_i0(x)),
self.evaluate(special_math_ops.bessel_i0(-x)))
self.assertAllClose(
self.evaluate(special_math_ops.bessel_i0e(x)),
self.evaluate(special_math_ops.bessel_i0e(-x)))
self.assertAllClose(
self.evaluate(special_math_ops.bessel_j0(x)),
self.evaluate(special_math_ops.bessel_j0(-x)))
@parameterized.parameters(np.float32, np.float64)
def test_i1j1_odd(self, dtype):
x = np.random.uniform(-100., 100., size=int(1e4)).astype(dtype)
self.assertAllClose(
self.evaluate(special_math_ops.bessel_i1(x)),
self.evaluate(-special_math_ops.bessel_i1(-x)))
self.assertAllClose(
self.evaluate(special_math_ops.bessel_i1e(x)),
self.evaluate(-special_math_ops.bessel_i1e(-x)))
self.assertAllClose(
self.evaluate(special_math_ops.bessel_j1(x)),
self.evaluate(-special_math_ops.bessel_j1(-x)))
@parameterized.parameters(np.float32, np.float64)
def test_besseli_small(self, dtype):
x = np.random.uniform(-1., 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.i0(x), self.evaluate(special_math_ops.bessel_i0(x)))
self.assertAllClose(
special.i1(x), self.evaluate(special_math_ops.bessel_i1(x)))
self.assertAllClose(
special.i0e(x), self.evaluate(special_math_ops.bessel_i0e(x)))
self.assertAllClose(
special.i1e(x), self.evaluate(special_math_ops.bessel_i1e(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_besselj_small(self, dtype):
x = np.random.uniform(-1., 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.j0(x), self.evaluate(special_math_ops.bessel_j0(x)))
self.assertAllClose(
special.j1(x), self.evaluate(special_math_ops.bessel_j1(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_besselk_small(self, dtype):
x = np.random.uniform(np.finfo(dtype).eps, 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.k0(x), self.evaluate(special_math_ops.bessel_k0(x)))
self.assertAllClose(
special.k0e(x), self.evaluate(special_math_ops.bessel_k0e(x)))
self.assertAllClose(
special.k1(x), self.evaluate(special_math_ops.bessel_k1(x)))
self.assertAllClose(
special.k1e(x), self.evaluate(special_math_ops.bessel_k1e(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_bessely_small(self, dtype):
x = np.random.uniform(np.finfo(dtype).eps, 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.y0(x), self.evaluate(special_math_ops.bessel_y0(x)))
self.assertAllClose(
special.y1(x), self.evaluate(special_math_ops.bessel_y1(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_besseli_larger(self, dtype):
x = np.random.uniform(1., 20., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.i0e(x), self.evaluate(special_math_ops.bessel_i0e(x)))
self.assertAllClose(
special.i1e(x), self.evaluate(special_math_ops.bessel_i1e(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_besselj_larger(self, dtype):
x = np.random.uniform(1., 30., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.j0(x), self.evaluate(special_math_ops.bessel_j0(x)))
self.assertAllClose(
special.j1(x), self.evaluate(special_math_ops.bessel_j1(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_besselk_larger(self, dtype):
x = np.random.uniform(1., 30., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.k0(x), self.evaluate(special_math_ops.bessel_k0(x)))
self.assertAllClose(
special.k0e(x), self.evaluate(special_math_ops.bessel_k0e(x)))
self.assertAllClose(
special.k1(x), self.evaluate(special_math_ops.bessel_k1(x)))
self.assertAllClose(
special.k1e(x), self.evaluate(special_math_ops.bessel_k1e(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_bessely_larger(self, dtype):
x = np.random.uniform(1., 30., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.y0(x), self.evaluate(special_math_ops.bessel_y0(x)))
self.assertAllClose(
special.y1(x), self.evaluate(special_math_ops.bessel_y1(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
def test_besseli_gradient(self):
inputs = [np.random.uniform(-10., 10., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_i0, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-3)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_i0e, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_i1, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-3)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_i1e, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
def test_besselj_gradient(self):
inputs = [np.random.uniform(-50., 50., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_j0, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_j1, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
def test_besselk_gradient(self):
inputs = [np.random.uniform(1., 50., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_k0, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_k0e, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_k1, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_k1e, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
def test_bessely_gradient(self):
inputs = [np.random.uniform(1., 50., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_y0, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_y1, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
@test_util.run_all_in_graph_and_eager_modes
@test_util.run_all_without_tensor_float_32(
'Tests einsum, which sometimes does a matmul with cuBLAS')
class EinsumTest(test.TestCase):
def _check(self, s, *input_shapes, **kwargs):
dtype = kwargs.pop('dtype', np.float32)
r = np.random.RandomState(0)
inputs = []
for shape in input_shapes:
arr = np.array(r.randn(*shape)).astype(dtype)
if dtype == np.complex64 or dtype == np.complex128:
arr += 1j * np.array(r.randn(*shape)).astype(dtype)
inputs.append(arr)
input_tensors = [constant_op.constant(x, shape=x.shape) for x in inputs]
a = np.einsum(s, *inputs)
b = self.evaluate(special_math_ops.einsum(s, *input_tensors))
self.assertAllClose(a, b, atol=1e-4, rtol=1e-4)
def test_invalid_keyword_arguments(self):
r = np.random.RandomState(0)
a = array_ops.placeholder_with_default(r.randn(2, 3), shape=(2, 3))
b = array_ops.placeholder_with_default(r.randn(3, 4), shape=(3, 4))
with self.assertRaises(TypeError):
_ = special_math_ops.einsum(
'ij,jk->ik', a, b, name='name', invalid1='value1', invalid2='value2')
def test_unary(self):
self._check('a', (3,))
self._check('aa', (3, 3))
self._check('ab->', (3, 3))
self._check('ab->ab', (3, 3))
self._check('abc->b', (3, 4, 5))
self._check('abc->ca', (3, 4, 5))
self._check('abc->cab', (3, 4, 5))
# Empty cases.
self._check('', ())
self._check('->', ())
# Repeated indices cases.
self._check('aa->', (3, 3))
self._check('aa->a', (3, 3))
self._check('aaa->', (3, 3, 3))
self._check('aaa->a', (3, 3, 3))
self._check('aab->a', (3, 3, 4))
self._check('aabcc->a', (3, 3, 5, 4, 4))
self._check('aabcc->ac', (3, 3, 5, 4, 4))
self._check('aabcd->ad', (3, 3, 5, 4, 4))
def test_unary_ellipsis(self):
self._check('...->', ())
self._check('...ijk->...ki', (3, 4, 5))
self._check('...ijk->...ki', (1, 3, 4, 5))
self._check('...ijk->...ki', (2, 2, 3, 4, 5))
self._check('...ij->...ji', (5, 2, 3)) # batch matrix transpose
self._check('...ij->...', (5, 2, 3)) # batch sum
self._check('...->...', ())
self._check('->...', ())
# Repeated indices.
self._check('i...ii->...i', (3, 2, 3, 3))
self._check('i...i->i...', (2, 2))
self._check('i...i->', (2, 2))
self._check('i...i->...', (2, 5, 1, 2))
self._check('i...i->i...', (2, 1, 2))
self._check('i...i->i...', (2, 3, 4, 5, 2))
def test_binary_simple(self):
# Binary cases in XLA mode must have either (a) each index appearing exactly
# once in both the inputs (batch or contraction index), or (b) appearing
# exactly once in an input and in the output (free index).
self._check(',->', (), ())
self._check('a,a->', (3,), (3,))
self._check('a,a->a', (3,), (3,))
self._check('ab,b->a', (3, 4), (4,))
self._check('ab,ab->', (3, 4), (3, 4))
self._check('ab,bc->ac', (3, 4), (4, 5))
self._check('nij,jk->nik', (5, 2, 3), (3, 4))
self._check('abc,bad->abcd', (1, 2, 3), (2, 1, 4))
# Based on https://github.com/google/jax/issues/37#issuecomment-448572187
self._check('sa,shb->shab', (2, 1), (2, 3, 4))
# Infer the output subscripts.
self._check('ab,b', (3, 4), (4,))
self._check('cab,b', (1, 3, 4), (4,))
def test_reduced_indices(self):
self._check('ba,b->', (3, 2), (3,))
self._check('ab,ab->', (3, 4), (3, 4))
def test_repeated_indices(self):
# Repeated indices.
self._check('ijj,k->ik', (2, 3, 3), (4,))
self._check('aba,a->b', (3, 4, 3), (3,))
# From https://github.com/dask/dask/pull/3412#discussion_r182413444
self._check('aab,bc->ac', (2, 2, 3), (3, 4))
self._check('aab,bcc->ac', (2, 2, 3), (3, 4, 4))
def test_binary_ellipsis(self):
# Batch matmul with ellipsis but without broadcasting.
self._check('...mk,...kn->...mn', (5, 1, 2, 3), (5, 1, 3, 4))
# Empty batch dimensions.
self._check('...mk,...kn->...mn', (2, 3), (3, 4))
# Tensor contraction with transpose.
self._check('...ija,aijb...->ba...ij', (1, 2, 2, 3, 1), (1, 2, 3, 4, 1, 2))
# Output subscripts may omit ellipsis when batch shape is empty.
self._check('...mk,...kn->mn', (2, 3), (3, 4))
self._check('...mk,kn->mn', (2, 3), (3, 4))
self._check('mk,...kn->mn', (2, 3), (3, 4))
self._check('...,...->...', (2, 3), (2, 3)) # hadamard product
self._check('...i,...j->...ij', (5, 2), (5, 3)) # outer product
def test_broadcasting(self):
# Batch matmul with broadcasting.
self._check('...ij,...jk->...ik', (1, 2, 3), (3, 5))
self._check('...ij,...jk->...ik', (2, 3), (1, 3, 5))
self._check('...ij,...jk->...ik', (5, 2, 3), (3, 5))
self._check('...ij,...jk->...ik', (2, 3), (5, 3, 5))
self._check('...ij,...jk->...ik', (3, 1, 2, 3), (1, 1, 7, 3, 5))
self._check('i...j,j...k->...ik', (2, 1, 3, 1, 3), (3, 1, 7, 5))
# Broadcasting with repeated indices.
self._check('ij,jk...k->i...', (3, 2), (2, 4, 1, 4))
self._check('ij,jk...k->...i', (3, 2), (2, 4, 5, 4))
self._check('ijj,jk...k->i...', (3, 2, 2), (2, 4, 1, 4))
self._check('i...jj,jk...k->i...', (3, 3, 1, 2, 2), (2, 4, 1, 5, 4))
# Following 2 from https://stackoverflow.com/a/19203475/1611416
self._check('...abc,...abcd->...d', (1, 1, 2, 3, 4), (5, 2, 3, 4, 6))
self._check('ab...,b->ab...', (2, 3, 1, 1, 5), (3,))
def test_dtypes(self):
dtypes = [np.float64, np.float32, np.complex64, np.complex128]
for dtype in dtypes:
self._check('ij,jk->ik', (2, 2), (2, 2), dtype=dtype)
self._check('ji,jk->ik', (2, 2), (2, 2), dtype=dtype)
self._check('ji,kj->ik', (2, 2), (2, 2), dtype=dtype)
self._check('ij,jk->ki', (2, 2), (2, 2), dtype=dtype)
self._check('ji,kj->ki', (2, 2), (2, 2), dtype=dtype)
def test_multiple_inputs(self):
self._check('ijk,ijl,ikl->i', (1, 2, 3), (1, 2, 4), (1, 3, 4))
self._check('i,ijk,j->k', (1,), (1, 2, 4), (2,))
self._check('ij,ij,jk,kl->il', (1, 2), (1, 2), (2, 3), (3, 4))
# Tests from dask.
self._check('a,b,c', (5,), (7,), (9,))
self._check('ab,ab,c->c', (5, 6), (5, 6), (2,))
@test_util.disable_xla('b/131919749')
def test_placeholder(self):
def check(equation, *input_and_placeholder_shapes):
r = np.random.RandomState(0)
inputs = []
input_placeholders = []
for actual_shape, placeholder_shape in input_and_placeholder_shapes:
input_np = np.array(r.randn(*actual_shape))
inputs.append(input_np)
input_placeholders.append(
array_ops.placeholder_with_default(input_np, placeholder_shape))
a = np.einsum(equation, *inputs)
b = self.evaluate(special_math_ops.einsum(equation, *input_placeholders))
self.assertAllClose(a, b, atol=1e-4, rtol=1e-4)
check('bijl,bjkm->bik', ((9, 2, 3, 5), (None, None, None, 5)),
((9, 3, 4, 7), (None, None, 4, None)))
check('...ij,...->...i', ((4, 3, 1, 2), (None, 3, None, 2)),
((4, 3), (None, 3)))
# Ellipsis with unknown rank.
check('bijl,bjkm->bik', ((9, 2, 3, 5), None), ((9, 3, 4, 7), None))
check('...ij,...jk->...ik', ((3, 1, 2, 3), None), ((1, 7, 3, 4), None))
def test_numpy_input(self):
# In addition to Tensors, we also support raw numpy arrays as inputs.
r = np.random.RandomState(0)
s = 'ijk,ijl,ikl->i'
x = r.randn(1, 2, 3)
y = r.randn(1, 2, 4)
z = r.randn(1, 3, 4)
a = np.einsum(s, x, y, z)
b = self.evaluate(special_math_ops.einsum(s, x, y, z))
self.assertAllClose(a, b, atol=1e-4, rtol=1e-4)
def test_long_cases(self):
cases = [
'efc,dbc,acf,fd->abe',
'ea,fb,gc,hd,abcd->efgh',
'abhe,hidj,jgba,hiab,gab->ed',
# Cases with whitespace.
'efc, dbc, acf, fd -> abe',
'abhe, hidj, jgba, hiab, gab',
# Repeated equations for cache hit on the opt_einsum call.
'ea,fb,abcd,gc,hd->efgh',
'ea,fb,abcd,gc,hd->efgh',
]
dimension_map = dict((c, ord(c) - ord('a') + 1) for c in 'abcdefghij')
for equation in cases:
inputs = equation.split('->')[0].replace(' ', '')
input_shapes = []
for input_str in inputs.split(','):
input_shapes.append(tuple([dimension_map[c] for c in input_str]))
self._check(equation, *input_shapes)
def test_opt_einsum_cached(self):
# Checks call_count to opt_einsum which are only reflected in eager mode.
if not context.executing_eagerly():
return
input_1 = ('ijk,ijl,ikl->i', (1, 2, 3), (1, 2, 4), (1, 3, 4))
input_2 = ('ij,ij,jk,kl->il', (1, 2), (1, 2), (2, 3), (3, 4))
with test.mock.patch.object(
opt_einsum, 'contract_path',
wraps=opt_einsum.contract_path) as mock_contract_path:
# explicitly clear the lru_cache contents for the method
      # special_math_ops._get_opt_einsum_contract_path
# We need to do this because other tests in this file invoke that method
# with the same input args (as input_1 and input_2 above), and if
# those tests run before this test, then the call_count for the method
# mock_contract_path will not increment.
if not six.PY2:
special_math_ops._get_opt_einsum_contract_path.cache_clear()
self.assertEqual(mock_contract_path.call_count, 0)
self._check(*input_1)
self.assertEqual(mock_contract_path.call_count, 1)
# The same input results in no extra call if we're caching the
# opt_einsum.contract_path call. We only cache in Python3.
self._check(*input_1)
self.assertEqual(mock_contract_path.call_count, 2 if six.PY2 else 1)
# New input results in another call to opt_einsum.
self._check(*input_2)
self.assertEqual(mock_contract_path.call_count, 3 if six.PY2 else 2)
# No more extra calls as the inputs should be cached.
self._check(*input_1)
self._check(*input_2)
self._check(*input_1)
self.assertEqual(mock_contract_path.call_count, 6 if six.PY2 else 2)
@test_util.disable_xla('b/131919749')
def test_long_cases_with_repeated_labels(self):
cases = [
# Tests from dask.
'fdf,cdd,ccd,afe->ae',
'fff,fae,bef,def->abd',
]
dimension_map = dict((c, ord(c) - ord('a') + 1) for c in 'abcdefghij')
for equation in cases:
inputs = equation.split('->')[0].replace(' ', '')
input_shapes = []
for input_str in inputs.split(','):
input_shapes.append(tuple([dimension_map[c] for c in input_str]))
self._check(equation, *input_shapes)
@test_util.disable_xla('b/131919749')
@test_util.run_in_graph_and_eager_modes
def test_invalid_equation(self):
r = np.random.RandomState(0)
cases = [
# invalid equation format.
('a0->a', r.randn(5, 3)),
('a->a,a', r.randn(5)),
('a->a->a', r.randn(5)),
('ijk ijk', r.randn(1, 2, 3), r.randn(1, 2, 3)),
('ij.jk->ik', r.randn(2, 3), r.randn(3, 4)),
# output label not present in input.
('a->b', r.randn(5)),
('ij,jk->im', r.randn(2, 3), r.randn(3, 4)),
# wrong shape.
('ij,jk->ik', r.randn(1, 2, 3), r.randn(3, 4)),
# inconsistent dimensions.
('ij,jk->ik', r.randn(2, 3), r.randn(4, 4)),
# output has repeated subscripts.
('ij,jk->iik', r.randn(2, 3), r.randn(3, 4)),
# too many ellipses
('...ij...,jk...->ik...', r.randn(2, 3), r.randn(3, 4)),
('...ij,jk...->...ik...', r.randn(2, 3), r.randn(3, 4)),
# invalid broadcast dimensions.
('...ij,...jk->...ik', r.randn(5, 2, 3), r.randn(7, 3, 4)),
# output should have ellipsis when broadcasting shape is non-empty.
('...ij,...jk->ik', r.randn(2, 2, 3), r.randn(3, 4)),
]
for args in cases:
with self.assertRaises((ValueError, errors.InvalidArgumentError)):
_ = special_math_ops.einsum(*args)
placeholders = [
array_ops.placeholder_with_default(x, shape=None) for x in args[1:]
]
with self.assertRaises((ValueError, errors.InvalidArgumentError)):
_ = self.evaluate(special_math_ops.einsum(args[0], *placeholders))
@test_util.disable_xla('b/131919749')
def test_empty(self):
def check(equation, input_shapes, output_shape):
# All these cases result in an output filled with zeros, so we don't call
# np.einsum. Also np.einsum doesn't support generalized diagonals which
# are needed for EinsumOp gradients.
r = np.random.RandomState(0)
inputs = [np.array(r.randn(*shape)) for shape in input_shapes]
input_tensors = [constant_op.constant(x, shape=x.shape) for x in inputs]
output = self.evaluate(special_math_ops.einsum(equation, *input_tensors))
self.assertAllClose(output, np.zeros(output_shape), atol=1e-4, rtol=1e-4)
# Contractions along zero-sized dimensions.
check('ab,bc->ac', [(0, 10), (10, 10)], (0, 10))
# From transformer xl.
check('ibnd,ijbn->jnd', [(1, 0, 5, 10), (1, 1, 0, 5)], (1, 5, 10))
# Generalized traces with zero-sized dimensions.
check('aab,bc->ac', [(0, 0, 10), (10, 10)], (0, 10))
check('aaab,bc->c', [(0, 0, 0, 3), (3, 4)], (4,))
@test_util.run_all_in_graph_and_eager_modes
class EinsumGradTest(test.TestCase):
def _check_gradient(self, s, *input_shapes):
with self.cached_session():
r = np.random.RandomState(0)
inputs = [np.array(r.randn(*shape)) for shape in input_shapes]
input_tensors = [constant_op.constant(x, shape=x.shape) for x in inputs]
analytical, numerical = gradient_checker_v2.compute_gradient(
lambda *xs: special_math_ops.einsum(s, *xs), input_tensors)
self.assertLess(
gradient_checker_v2.max_error(analytical, numerical), 1e-4)
@test_util.disable_xla('b/131919749')
def test_unary(self):
self._check_gradient('->', ())
self._check_gradient('aaa->a', (3, 3, 3))
self._check_gradient('aabcd->ad', (3, 3, 5, 4, 4))
self._check_gradient('abcd->da', (3, 5, 4, 2))
@test_util.disable_xla('b/131919749')
def test_unary_ellipsis(self):
self._check_gradient('...->...', ())
self._check_gradient('...->', ())
self._check_gradient('->...', ())
# Tests from dask
self._check_gradient('a...a->a...', (2, 2))
self._check_gradient('a...a->', (2, 2))
self._check_gradient('a...a->...', (2, 5, 1, 2))
self._check_gradient('a...a->a...', (2, 1, 2))
self._check_gradient('a...a->a...', (2, 3, 4, 5, 2))
self._check_gradient('...ijk->...ki', (3, 4, 5))
self._check_gradient('...ijk->...ki', (1, 3, 4, 5))
self._check_gradient('...ijk->...ki', (2, 2, 3, 4, 5))
self._check_gradient('ab...cd->da...', (3, 5, 2, 3, 4, 2))
def test_binary_simple(self):
# Binary cases in XLA mode must have either (a) each index appearing
# exactly once in both the inputs (batch or contraction index), or
# (b) appearing exactly once in an input and in the output (free index).
self._check_gradient(',->', (), ())
self._check_gradient('a,a->', (3,), (3,))
self._check_gradient('a,a->a', (3,), (3,))
self._check_gradient('ab,b->a', (3, 4), (4,))
self._check_gradient('ab,ab->', (3, 4), (3, 4))
self._check_gradient('ab,bc->ac', (3, 4), (4, 5))
self._check_gradient('nij,jk->nik', (5, 2, 3), (3, 4))
self._check_gradient('abc,bad->abcd', (1, 2, 3), (2, 1, 4))
# Based on https://github.com/google/jax/issues/37#issuecomment-448572187
self._check_gradient('sa,shb->shab', (2, 1), (2, 3, 4))
def test_empty(self):
# From Transformer XL.
self._check_gradient('ibnd,ijbn->jnd', (1, 0, 5, 10), (1, 1, 0, 5))
@test_util.disable_xla('b/131919749')
def test_reduced_indices(self):
self._check_gradient('ba,b->', (3, 2), (3,))
self._check_gradient('ab,ab->', (3, 4), (3, 4))
self._check_gradient('abce,badf->abcd', (1, 2, 3, 4), (2, 1, 4, 3))
@test_util.disable_xla('b/131919749')
def test_repeated_indices(self):
# Repeated indices.
self._check_gradient('aba,a->b', (3, 4, 3), (3,))
self._check_gradient('ijj,k->ik', (2, 3, 3), (4,))
self._check_gradient('ill,k->ik', (2, 3, 3), (4,))
# From https://github.com/dask/dask/pull/3412#discussion_r182413444
self._check_gradient('aab,bc->ac', (1, 1, 3), (3, 4))
self._check_gradient('aab,bcc->ac', (2, 2, 3), (3, 4, 4))
@test_util.disable_xla('b/131919749')
def test_empty_with_repeated_indices(self):
self._check_gradient('aab,bc->ac', (0, 0, 10), (10, 10))
self._check_gradient('aab,bc->ac', (1, 1, 0), (0, 10))
self._check_gradient('aaab,bc->c', (0, 0, 0, 3), (3, 4))
@test_util.disable_xla('b/131919749')
def test_broadcasting(self):
self._check_gradient('...ij,...jk->...ik', (3, 2), (2, 4))
self._check_gradient('ij...,jk...->ik...', (3, 2, 1), (2, 4))
self._check_gradient('...ij,...jk->...ik', (3, 1, 3, 2), (1, 5, 2, 4))
self._check_gradient('ij,jk...k->i...', (3, 2), (2, 4, 1, 4))
self._check_gradient('aab,b...c->a...c', (1, 1, 3), (3, 1, 1, 4))
# Tests from dask.
self._check_gradient('...i,...j,...k->...ijk', (1, 4, 1, 2), (5, 1, 1, 3),
(1, 1, 1, 1, 9))
self._check_gradient('...i,...j,...k->...ijk', (1,), (1,), (1,))
def test_long_cases(self):
cases = [
'abhe,hidj,jgba,hiab,gab->ed',
# Tests from dask.
'ea,fb,abcd,gc,hd->efgh',
]
dimension_map = dict(
(c, ((ord(c) - ord('a')) % 3) + 1) for c in 'abcdefghij')
for equation in cases:
inputs = equation.split('->')[0].replace(' ', '')
input_shapes = []
for input_str in inputs.split(','):
input_shapes.append(tuple([dimension_map[c] for c in input_str]))
self._check_gradient(equation, *input_shapes)
@test_util.disable_xla('b/131919749')
def test_long_cases_with_repeated_labels(self):
cases = [
# Tests from dask.
'fdf,cdd,ccd,afe->ae',
'fff,fae,bef,def->abd',
]
dimension_map = dict(
(c, ((ord(c) - ord('a')) % 3) + 1) for c in 'abcdefghij')
for equation in cases:
inputs = equation.split('->')[0].replace(' ', '')
input_shapes = []
for input_str in inputs.split(','):
input_shapes.append(tuple([dimension_map[c] for c in input_str]))
self._check_gradient(equation, *input_shapes)
class EinsumBenchmark(test.Benchmark):
cases = [
# Unary cases.
['ijk->i', 100],
['ijk->kji', 100],
# Regular matmul or batch matmul.
['ij,jk->ik', 500],
['ji,kj->ik', 500],
['bij,bjk->bik', 100],
['bji,bjk->bki', 100],
['ikl,kji->kl', 100],
['klj,lki->ij', 100],
['ijk,ilj->kli', 100],
['ijk,jklm->il', 50],
# Larger binary contractions.
['efabc,eabcd->efd', 20],
['fabec,abcde->fde', 20],
['efabc,edabc->efd', 20],
['eadbf,dfebc->ecfad', 20],
['abcdef,bcdfg->abcdeg', 20],
# Chain matmul.
['ij,jk,kl->il', 1000],
# Long cases. Path optimization should kick in.
['ea,fb,abcd,gc,hd->efgh', 10],
['bca,cdb,dbf,afc->', 10],
['efc,dbc,acf,fd->abe', 10],
['abhe,hidj,jgba,hiab,gab->ed', 10],
]
def benchmark_einsum(self):
for equation, dim in self.cases:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device('/cpu:0'):
r = np.random.RandomState(0)
input_subscripts = equation.split('->')[0].split(',')
input_vars = []
for subscript in input_subscripts:
input_shape = (dim,) * len(subscript)
input_vars.append(
variables.Variable(np.array(r.randn(*input_shape), np.float32)))
self.evaluate(variables.global_variables_initializer())
if len(input_vars) <= 2:
self.run_op_benchmark(
sess,
special_math_ops.einsum(equation, *input_vars),
min_iters=50,
name='einsum_cpu_({})_{}'.format(equation, dim))
else:
for optimize in ['greedy', 'auto']:
self.run_op_benchmark(
sess,
special_math_ops.einsum(
equation, *input_vars, optimize=optimize),
min_iters=50,
name='einsum_cpu_({})_{}_{}'.format(equation, optimize, dim))
if __name__ == '__main__':
test.main()
|
|
from __future__ import unicode_literals
from django.db import models
class Person(models.Model):
person_id = models.BigIntegerField(primary_key=True)
gender_concept = models.ForeignKey('Gender', null=True, blank=True)
year_of_birth = models.DecimalField(max_digits=4, decimal_places=0)
month_of_birth = models.DecimalField(null=True, max_digits=2, decimal_places=0, blank=True)
day_of_birth = models.DecimalField(null=True, max_digits=2, decimal_places=0, blank=True)
race_concept = models.ForeignKey('Race', null=True, blank=True)
ethnicity_concept = models.ForeignKey('Ethnicity', null=True, blank=True)
location = models.ForeignKey('PersonLocation', null=True, blank=True)
provider = models.ForeignKey('Provider', null=True, blank=True)
care_site = models.ForeignKey('CareSite', null=True, blank=True)
person_source_value = models.CharField(max_length=50, blank=True)
gender_source_value = models.CharField(max_length=50, blank=True)
race_source_value = models.CharField(max_length=50, blank=True)
ethnicity_source_value = models.CharField(max_length=50, blank=True)
class Meta:
db_table = 'person'
class Gender(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'gender'
class Race(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'race'
class Ethnicity(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'ethnicity'
class PersonLocation(models.Model):
location_id = models.BigIntegerField(primary_key=True)
address_1 = models.CharField(max_length=50, blank=True)
address_2 = models.CharField(max_length=50, blank=True)
city = models.CharField(max_length=50, blank=True)
state = models.CharField(max_length=2, blank=True)
zip = models.CharField(max_length=9, blank=True)
county = models.CharField(max_length=20, blank=True)
location_source_value = models.CharField(max_length=50, blank=True)
class Meta:
db_table = 'person_location'
class Provider(models.Model):
provider_id = models.BigIntegerField(primary_key=True)
npi = models.CharField(max_length=20, blank=True)
dea = models.CharField(max_length=20, blank=True)
specialty_concept = models.ForeignKey('ProviderSpecialty', null=True, blank=True)
care_site_id = models.BigIntegerField(null=True, blank=True)
provider_source_value = models.CharField(max_length=50)
specialty_source_value = models.CharField(max_length=50, blank=True)
class Meta:
db_table = 'provider'
class ProviderSpecialty(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'provider_specialty'
class CareSite(models.Model):
care_site_id = models.BigIntegerField(primary_key=True)
location = models.ForeignKey('CareSiteLocation', null=True, blank=True)
organization = models.ForeignKey('Organization', null=True, blank=True)
place_of_service_concept = models.ForeignKey('CareSitePOS', null=True, blank=True)
care_site_source_value = models.CharField(max_length=50, blank=True)
place_of_service_source_value = models.CharField(max_length=50, blank=True)
class Meta:
db_table = 'care_site'
class CareSiteLocation(models.Model):
location_id = models.BigIntegerField(primary_key=True)
address_1 = models.CharField(max_length=50, blank=True)
address_2 = models.CharField(max_length=50, blank=True)
city = models.CharField(max_length=50, blank=True)
state = models.CharField(max_length=2, blank=True)
zip = models.CharField(max_length=9, blank=True)
county = models.CharField(max_length=20, blank=True)
location_source_value = models.CharField(max_length=50, blank=True)
class Meta:
db_table = 'care_site_location'
class Organization(models.Model):
organization_id = models.BigIntegerField(primary_key=True)
place_of_service_concept_id = models.IntegerField(null=True, blank=True)
location_id = models.BigIntegerField(null=True, blank=True)
organization_source_value = models.CharField(max_length=50, blank=True)
place_of_service_source_value = models.CharField(max_length=50, blank=True)
class Meta:
db_table = 'organization'
class CareSitePOS(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'care_site_pos'
class Visit(models.Model):
visit_occurrence_id = models.BigIntegerField(primary_key=True)
person = models.ForeignKey('Person')
visit_start_date = models.DateTimeField()
visit_end_date = models.DateTimeField()
place_of_service_concept_id = models.IntegerField()
care_site_id = models.BigIntegerField(null=True, blank=True)
place_of_service_source_value = models.CharField(max_length=50, blank=True)
class Meta:
db_table = 'visit_occurrence'
class VisitCondition(models.Model):
condition_occurrence_id = models.BigIntegerField(primary_key=True)
person_id = models.BigIntegerField()
condition_concept = models.ForeignKey('VisitConditionConcept')
condition_start_date = models.DateTimeField()
condition_end_date = models.DateTimeField(null=True, blank=True)
condition_type_concept = models.ForeignKey('VisitConditionType')
stop_reason = models.CharField(max_length=20, blank=True)
associated_provider_id = models.BigIntegerField(null=True, blank=True)
visit_occurrence = models.ForeignKey('Visit', null=True, blank=True)
condition_source_value = models.CharField(max_length=50, blank=True)
class Meta:
db_table = 'visit_condition'
class VisitConditionConcept(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'visit_condition_concept'
class VisitConditionType(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'visit_condition_type'
class VisitProcedure(models.Model):
procedure_occurrence_id = models.BigIntegerField(primary_key=True)
person_id = models.BigIntegerField()
procedure_concept = models.ForeignKey('VisitProcedureConcept')
procedure_date = models.DateTimeField()
procedure_type_concept = models.ForeignKey('VisitProcedureType')
associated_provider_id = models.BigIntegerField(null=True, blank=True)
visit_occurrence = models.ForeignKey('Visit', null=True, blank=True)
relevant_condition_concept_id = models.IntegerField(null=True, blank=True)
procedure_source_value = models.CharField(max_length=50, blank=True)
class Meta:
db_table = 'visit_procedure'
class VisitProcedureConcept(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'visit_procedure_concept'
class VisitProcedureType(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'visit_procedure_type'
class VisitDrug(models.Model):
drug_exposure_id = models.BigIntegerField(primary_key=True)
person_id = models.BigIntegerField()
drug_concept = models.ForeignKey('VisitDrugConcept')
drug_exposure_start_date = models.DateTimeField()
drug_exposure_end_date = models.DateTimeField(null=True, blank=True)
drug_type_concept = models.ForeignKey('VisitDrugType')
stop_reason = models.CharField(max_length=20, blank=True)
refills = models.DecimalField(null=True, max_digits=3, decimal_places=0, blank=True)
quantity = models.DecimalField(null=True, max_digits=4, decimal_places=0, blank=True)
days_supply = models.DecimalField(null=True, max_digits=4, decimal_places=0, blank=True)
sig = models.CharField(max_length=500, blank=True)
prescribing_provider_id = models.BigIntegerField(null=True, blank=True)
visit_occurrence = models.ForeignKey('Visit', null=True, blank=True)
relevant_condition_concept_id = models.IntegerField(null=True, blank=True)
drug_source_value = models.CharField(max_length=50, blank=True)
class Meta:
db_table = 'visit_drug'
class VisitDrugConcept(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'visit_drug_concept'
class VisitDrugType(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'visit_drug_type'
class VisitObservation(models.Model):
observation_id = models.BigIntegerField(primary_key=True)
person_id = models.BigIntegerField()
observation_concept = models.ForeignKey('VisitObservationConcept')
observation_date = models.DateField()
observation_time = models.TimeField(null=True, blank=True)
value_as_number = models.DecimalField(null=True, max_digits=14, decimal_places=3, blank=True)
value_as_string = models.CharField(max_length=60, blank=True)
value_as_concept_id = models.IntegerField(null=True, blank=True)
unit_concept_id = models.IntegerField(null=True, blank=True)
range_low = models.DecimalField(null=True, max_digits=14, decimal_places=3, blank=True)
range_high = models.DecimalField(null=True, max_digits=14, decimal_places=3, blank=True)
observation_type_concept = models.ForeignKey('VisitObservationType')
associated_provider_id = models.BigIntegerField()
visit_occurrence = models.ForeignKey('Visit', null=True, blank=True)
relevant_condition_concept_id = models.IntegerField(null=True, blank=True)
observation_source_value = models.CharField(max_length=50, blank=True)
units_source_value = models.CharField(max_length=50, blank=True)
class Meta:
db_table = 'visit_observation'
class VisitObservationConcept(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'visit_observation_concept'
class VisitObservationType(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'visit_observation_type'
class PersonCondition(models.Model):
condition_occurrence_id = models.BigIntegerField(primary_key=True)
person = models.ForeignKey('Person')
condition_concept = models.ForeignKey('PersonConditionConcept')
condition_start_date = models.DateTimeField()
condition_end_date = models.DateTimeField(null=True, blank=True)
condition_type_concept = models.ForeignKey('PersonConditionType')
stop_reason = models.CharField(max_length=20, blank=True)
associated_provider_id = models.BigIntegerField(null=True, blank=True)
visit_occurrence_id = models.BigIntegerField(null=True, blank=True)
condition_source_value = models.CharField(max_length=50, blank=True)
class Meta:
db_table = 'person_condition'
class PersonConditionConcept(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'person_condition_concept'
class PersonConditionType(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'person_condition_type'
class PersonObservation(models.Model):
observation_id = models.BigIntegerField(primary_key=True)
person = models.ForeignKey('Person')
observation_concept = models.ForeignKey('PersonObservationConcept')
observation_date = models.DateField()
observation_time = models.TimeField(null=True, blank=True)
value_as_number = models.DecimalField(null=True, max_digits=14, decimal_places=3, blank=True)
value_as_string = models.CharField(max_length=60, blank=True)
value_as_concept_id = models.IntegerField(null=True, blank=True)
unit_concept_id = models.IntegerField(null=True, blank=True)
range_low = models.DecimalField(null=True, max_digits=14, decimal_places=3, blank=True)
range_high = models.DecimalField(null=True, max_digits=14, decimal_places=3, blank=True)
observation_type_concept = models.ForeignKey('PersonObservationType')
associated_provider_id = models.BigIntegerField()
visit_occurrence_id = models.BigIntegerField(null=True, blank=True)
relevant_condition_concept_id = models.IntegerField(null=True, blank=True)
observation_source_value = models.CharField(max_length=50, blank=True)
units_source_value = models.CharField(max_length=50, blank=True)
class Meta:
db_table = 'person_observation'
class PersonObservationConcept(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'person_observation_concept'
class PersonObservationType(models.Model):
concept_id = models.IntegerField(primary_key=True)
concept_name = models.CharField(max_length=256)
concept_level = models.IntegerField()
concept_class = models.CharField(max_length=60)
vocabulary_id = models.IntegerField()
concept_code = models.CharField(max_length=40)
valid_start_date = models.DateTimeField()
valid_end_date = models.DateTimeField()
invalid_reason = models.CharField(max_length=1, blank=True)
class Meta:
db_table = 'person_observation_type'
#class Observation(models.Model):
# observation_id = models.IntegerField(primary_key=True)
# person_id = models.ForeignKey('Person')
# observation_concept_id = models.IntegerField()
# observation_date = models.DateField()
# observation_time = models.TimeField(null=True, blank=True)
# value_as_number = models.DecimalField(null=True, max_digits=14, decimal_places=3, blank=True)
# value_as_string = models.CharField(max_length=60, blank=True)
# value_as_concept_id = models.IntegerField(null=True, blank=True)
# unit_concept_id = models.IntegerField(null=True, blank=True)
# range_low = models.DecimalField(null=True, max_digits=14, decimal_places=3, blank=True)
# range_high = models.DecimalField(null=True, max_digits=14, decimal_places=3, blank=True)
# observation_type_concept_id = models.IntegerField()
# associated_provider = models.ForeignKey('Provider')
# visit_occurrence = models.ForeignKey('VisitOccurrence', null=True, blank=True)
# relevant_condition_concept_id = models.IntegerField(null=True, blank=True)
# observation_source_value = models.CharField(max_length=50, blank=True)
# units_source_value = models.CharField(max_length=50, blank=True)
# class Meta:
# db_table = 'observation'
#
#class ObservationPeriod(models.Model):
# observation_period_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey('Person')
# observation_period_start_date = models.DateTimeField()
# observation_period_end_date = models.DateTimeField()
# class Meta:
# db_table = 'observation_period'
#
#class PayerPlanPeriod(models.Model):
# payer_plan_period_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey('Person')
# payer_plan_period_start_date = models.DateTimeField()
# payer_plan_period_end_date = models.DateTimeField()
# payer_source_value = models.CharField(max_length=50, blank=True)
# plan_source_value = models.CharField(max_length=50, blank=True)
# family_source_value = models.CharField(max_length=50, blank=True)
# class Meta:
# db_table = 'payer_plan_period'
#
#class Cohort(models.Model):
# cohort_id = models.IntegerField(primary_key=True)
# cohort_concept = models.ForeignKey('CohortConcept')
# cohort_start_date = models.DateTimeField()
# cohort_end_date = models.DateTimeField(null=True, blank=True)
# subject = models.ForeignKey('Person')
# stop_reason = models.CharField(max_length=20, blank=True)
# class Meta:
# db_table = 'cohort'
#
#class CohortConcept(models.Model):
# code = models.IntegerField(primary_key=True, db_column='concept_id')
# label = models.CharField(max_length=256, db_column='concept_name')
# concept_level = models.IntegerField()
# concept_class = models.CharField(max_length=60)
# vocabulary_id = models.IntegerField()
# value = models.CharField(max_length=40, db_column='concept_code')
# valid_start_date = models.DateTimeField()
# valid_end_date = models.DateTimeField()
# invalid_reason = models.CharField(max_length=1, blank=True)
# class Meta:
# db_table = 'cohort_concept'
#
#class ConditionEra(models.Model):
# condition_era_id = models.IntegerField()
# person = models.ForeignKey('Person')
# condition_concept = models.ForeignKey('ConditionEraConcept')
# condition_era_start_date = models.DateTimeField()
# condition_era_end_date = models.DateTimeField()
# condition_type_concept = models.ForeignKey('ConditionEraType')
# condition_occurrence_count = models.DecimalField(null=True, max_digits=4, decimal_places=0, blank=True)
# class Meta:
# db_table = 'condition_era'
#
#class ConditionEraConcept(models.Model):
# code = models.IntegerField(primary_key=True, db_column='concept_id')
# label = models.CharField(max_length=256, db_column='concept_name')
# concept_level = models.IntegerField()
# concept_class = models.CharField(max_length=60)
# vocabulary_id = models.IntegerField()
# value = models.CharField(max_length=40, db_column='concept_code')
# valid_start_date = models.DateTimeField()
# valid_end_date = models.DateTimeField()
# invalid_reason = models.CharField(max_length=1, blank=True)
# class Meta:
# db_table = 'condition_era_concept'
#
#class ConditionEraType(models.Model):
# code = models.IntegerField(primary_key=True, db_column='concept_id')
# label = models.CharField(max_length=256, db_column='concept_name')
# concept_level = models.IntegerField()
# concept_class = models.CharField(max_length=60)
# vocabulary_id = models.IntegerField()
# value = models.CharField(max_length=40, db_column='concept_code')
# valid_start_date = models.DateTimeField()
# valid_end_date = models.DateTimeField()
# invalid_reason = models.CharField(max_length=1, blank=True)
# class Meta:
# db_table = 'condition_era_type'
#
#class ProcedureCost(models.Model):
# procedure_cost_id = models.IntegerField(primary_key=True)
# procedure_occurrence = models.ForeignKey('ProcedureOccurrence')
# paid_copay = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# paid_coinsurance = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# paid_toward_deductible = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# paid_by_payer = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# paid_by_coordination_benefits = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# total_out_of_pocket = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# total_paid = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# disease_class_concept_id = models.IntegerField(null=True, blank=True)
# revenue_code_concept_id = models.IntegerField(null=True, blank=True)
# payer_plan_period_id = models.IntegerField(null=True, blank=True)
# disease_class_source_value = models.CharField(max_length=50, blank=True)
# revenue_code_source_value = models.CharField(max_length=50, blank=True)
# class Meta:
# db_table = 'procedure_cost'
#
#class Death(models.Model):
# person_death_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey('Person')
# death_date = models.DateTimeField()
# death_type_concept_id = models.IntegerField()
# cause_of_death_concept_id = models.IntegerField(null=True, blank=True)
# cause_of_death_source_value = models.CharField(max_length=50, blank=True)
# class Meta:
# db_table = 'death'
#
#class DrugApproval(models.Model):
# ingredient_concept_id = models.IntegerField(primary_key=True)
# approval_date = models.DateTimeField()
# approved_by = models.CharField(max_length=20)
# class Meta:
# db_table = 'drug_approval'
#
#class DrugCost(models.Model):
# drug_cost_id = models.IntegerField(primary_key=True)
# drug_exposure = models.ForeignKey('DrugExposure')
# paid_copay = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# paid_coinsurance = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# paid_toward_deductible = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# paid_by_payer = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# paid_by_coordination_benefits = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# total_out_of_pocket = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# total_paid = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# ingredient_cost = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# dispensing_fee = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# average_wholesale_price = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
# payer_plan_period = models.ForeignKey('PayerPlanPeriod', null=True, blank=True)
# class Meta:
# db_table = 'drug_cost'
#
#class DrugEra(models.Model):
# drug_era_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey('Person')
# drug_concept_id = models.IntegerField()
# drug_era_start_date = models.DateTimeField()
# drug_era_end_date = models.DateTimeField()
# drug_type_concept_id = models.IntegerField()
# drug_exposure_count = models.DecimalField(null=True, max_digits=4, decimal_places=0, blank=True)
# class Meta:
# db_table = 'drug_era'
#
#class DrugStrength(models.Model):
# drug_strength_id = models.IntegerField(primary_key=True)
# drug_concept_id = models.IntegerField()
# ingredient_concept_id = models.IntegerField()
# amount_value = models.DecimalField(null=True, max_digits=50, decimal_places=0, blank=True)
# amount_unit = models.CharField(max_length=60, blank=True)
# concentration_value = models.DecimalField(null=True, max_digits=50, decimal_places=0, blank=True)
# concentration_enum_unit = models.CharField(max_length=60, blank=True)
# concentration_denom_unit = models.CharField(max_length=60, blank=True)
# valid_start_date = models.DateTimeField()
# valid_end_date = models.DateTimeField()
# invalid_reason = models.CharField(max_length=1, blank=True)
# class Meta:
# db_table = 'drug_strength'
#
#class Relationship(models.Model):
# relationship_id = models.IntegerField(primary_key=True)
# relationship_name = models.CharField(max_length=256)
# is_hierarchical = models.IntegerField()
# defines_ancestry = models.IntegerField()
# reverse_relationship = models.IntegerField(null=True, blank=True)
# class Meta:
# db_table = 'relationship'
#
#class SourceToConceptMap(models.Model):
# source_to_concept_map_id = models.IntegerField(primary_key=True)
# source_code = models.CharField(max_length=40)
# source_vocabulary = models.ForeignKey('Vocabulary', related_name='sourcetoconceptmap_targets_set')
# source_code_description = models.CharField(max_length=256, blank=True)
# target_concept = models.ForeignKey(Concept)
# target_vocabulary = models.ForeignKey('Vocabulary', related_name='sourcetoconceptmap_sources_set')
# mapping_type = models.CharField(max_length=20, blank=True)
# primary_map = models.CharField(max_length=1, blank=True)
# valid_start_date = models.DateTimeField()
# valid_end_date = models.DateTimeField()
# invalid_reason = models.CharField(max_length=1, blank=True)
# class Meta:
# db_table = 'source_to_concept_map'
#
#class ConceptAncestor(models.Model):
# ancestry_id = models.IntegerField(primary_key=True)
# ancestor_concept = models.ForeignKey('Concept', related_name='conceptancestor_descendents_set')
# descendant_concept = models.ForeignKey('Concept', related_name='conceptancestor_ancestors_set')
# max_levels_of_separation = models.IntegerField(null=True, blank=True)
# min_levels_of_separation = models.IntegerField(null=True, blank=True)
# class Meta:
# db_table = 'concept_ancestor'
#
#class ConceptRelationship(models.Model):
# concept_relationship_id = models.IntegerField(primary_key=True)
# concept_id_1 = models.ForeignKey('Concept', db_column='concept_id_1', related_name='conceptrelationship_to_set')
# concept_id_2 = models.ForeignKey('Concept', db_column='concept_id_2', related_name='conceptrelationship_from_set')
# relationship = models.ForeignKey('Relationship')
# valid_start_date = models.DateTimeField()
# valid_end_date = models.DateTimeField()
# invalid_reason = models.CharField(max_length=1, blank=True)
# class Meta:
# db_table = 'concept_relationship'
#
#class ConceptSynonym(models.Model):
# concept_synonym_id = models.IntegerField(primary_key=True)
# concept = models.ForeignKey('Concept')
# concept_synonym_name = models.CharField(max_length=1000)
# class Meta:
# db_table = 'concept_synonym'
#
#class CareSite(models.Model):
# care_site_id = models.IntegerField(primary_key=True)
# location = models.ForeignKey('Location', null=True, blank=True)
# organization = models.ForeignKey('Organization', null=True, blank=True)
# place_of_service_concept_id = models.IntegerField(null=True, blank=True)
# care_site_source_value = models.CharField(max_length=50, blank=True)
# place_of_service_source_value = models.CharField(max_length=50, blank=True)
# class Meta:
# db_table = 'care_site'
#
#class Concept(models.Model):
# concept_id = models.IntegerField(primary_key=True)
# concept_name = models.CharField(max_length=256)
# concept_level = models.IntegerField()
# concept_class = models.CharField(max_length=60)
# vocabulary = models.ForeignKey('Vocabulary')
# concept_code = models.CharField(max_length=40)
# valid_start_date = models.DateTimeField()
# valid_end_date = models.DateTimeField()
# invalid_reason = models.CharField(max_length=1, blank=True)
# class Meta:
# db_table = 'concept'
#
#class Organization(models.Model):
# organization_id = models.IntegerField(primary_key=True)
# place_of_service_concept_id = models.IntegerField(null=True, blank=True)
# location = models.ForeignKey('Location', null=True, blank=True)
# organization_source_value = models.CharField(max_length=50, blank=True)
# place_of_service_source_value = models.CharField(max_length=50, blank=True)
# class Meta:
# db_table = 'organization'
#
#class Location(models.Model):
# location_id = models.IntegerField(primary_key=True)
# address_1 = models.CharField(max_length=50, blank=True)
# address_2 = models.CharField(max_length=50, blank=True)
# city = models.CharField(max_length=50, blank=True)
# state = models.CharField(max_length=2, blank=True)
# zip = models.CharField(max_length=9, blank=True)
# county = models.CharField(max_length=20, blank=True)
# location_source_value = models.CharField(max_length=50, blank=True)
# class Meta:
# db_table = 'location'
#
#class Vocabulary(models.Model):
# vocabulary_id = models.IntegerField(primary_key=True)
# vocabulary_name = models.CharField(max_length=256, unique=True)
# class Meta:
# db_table = 'vocabulary'
#
|
|
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Mapping of bare metal node states.
Setting the node `power_state` is handled by the conductor's power
synchronization thread. Based on the power state retrieved from the driver
for the node, the state is set to POWER_ON or POWER_OFF, accordingly.
Should this fail, the `power_state` value is left unchanged, and the node
is placed into maintenance mode.
The `power_state` can also be set manually via the API. A failure to change
the state leaves the current state unchanged. The node is NOT placed into
maintenance mode in this case.
"""
from oslo_log import log as logging
from ironic.common import fsm
LOG = logging.getLogger(__name__)
#####################
# Provisioning states
#####################
# TODO(deva): add add'l state mappings here
VERBS = {
'active': 'deploy',
'deleted': 'delete',
'manage': 'manage',
'provide': 'provide',
'inspect': 'inspect',
'abort': 'abort',
'clean': 'clean',
}
""" Mapping of state-changing events that are PUT to the REST API
This is a mapping of target states which are PUT to the API, eg,
PUT /v1/node/states/provision {'target': 'active'}
The dict format is:
{target string used by the API: internal verb}
This provides a reference set of supported actions, and in the future
may be used to support renaming these actions.
"""
NOSTATE = None
""" No state information.
This state is used with power_state to represent a lack of knowledge of
power state, and in target_*_state fields when there is no target.
"""
ENROLL = 'enroll'
""" Node is enrolled.
This state indicates that Ironic is aware of a node, but is not managing it.
"""
VERIFYING = 'verifying'
""" Node power management credentials are being verified. """
MANAGEABLE = 'manageable'
""" Node is in a manageable state.
This state indicates that Ironic has verified, at least once, that it had
sufficient information to manage the hardware. While in this state, the node
is not available for provisioning (it must be in the AVAILABLE state for that).
"""
AVAILABLE = 'available'
""" Node is available for use and scheduling.
This state is replacing the NOSTATE state used prior to Kilo.
"""
ACTIVE = 'active'
""" Node is successfully deployed and associated with an instance. """
DEPLOYWAIT = 'wait call-back'
""" Node is waiting to be deployed.
This will be the node `provision_state` while the node is waiting for
the driver to finish deployment.
"""
DEPLOYING = 'deploying'
""" Node is ready to receive a deploy request, or is currently being deployed.
A node will have its `provision_state` set to DEPLOYING briefly before it
receives its initial deploy request. It will also move to this state from
DEPLOYWAIT after the callback is triggered and deployment is continued
(disk partitioning and image copying).
"""
DEPLOYFAIL = 'deploy failed'
""" Node deployment failed. """
DEPLOYDONE = 'deploy complete'
""" Node was successfully deployed.
This is mainly a target provision state used during deployment. A successfully
deployed node should go to ACTIVE status.
"""
DELETING = 'deleting'
""" Node is actively being torn down. """
DELETED = 'deleted'
""" Node tear down was successful.
In Juno, target_provision_state was set to this value during node tear down.
In Kilo, this will be a transitory value of provision_state, and never
represented in target_provision_state.
"""
CLEANING = 'cleaning'
""" Node is being automatically cleaned to prepare it for provisioning. """
CLEANWAIT = 'clean wait'
""" Node is waiting for a clean step to be finished.
This will be the node's `provision_state` while the node is waiting for
the driver to finish a cleaning step.
"""
CLEANFAIL = 'clean failed'
""" Node failed cleaning. This requires operator intervention to resolve. """
ERROR = 'error'
""" An error occurred during node processing.
The `last_error` attribute of the node details should contain an error message.
"""
REBUILD = 'rebuild'
""" Node is to be rebuilt.
This is not used as a state, but rather as a "verb" when changing the node's
provision_state via the REST API.
"""
INSPECTING = 'inspecting'
""" Node is under inspection.
This is the provision state used when inspection is started. A successfully
inspected node shall transition to MANAGEABLE status.
"""
INSPECTFAIL = 'inspect failed'
""" Node inspection failed. """
UPDATE_ALLOWED_STATES = (DEPLOYFAIL, INSPECTING, INSPECTFAIL, CLEANFAIL, ERROR,
VERIFYING)
"""Transitional states in which we allow updating a node."""
DELETE_ALLOWED_STATES = (AVAILABLE, NOSTATE, MANAGEABLE, ENROLL)
"""States in which node deletion is allowed."""
##############
# Power states
##############
POWER_ON = 'power on'
""" Node is powered on. """
POWER_OFF = 'power off'
""" Node is powered off. """
REBOOT = 'rebooting'
""" Node is rebooting. """
#####################
# State machine model
#####################
def on_exit(old_state, event):
"""Used to log when a state is exited."""
LOG.debug("Exiting old state '%s' in response to event '%s'",
old_state, event)
def on_enter(new_state, event):
"""Used to log when entering a state."""
LOG.debug("Entering new state '%s' in response to event '%s'",
new_state, event)
watchers = {}
watchers['on_exit'] = on_exit
watchers['on_enter'] = on_enter
machine = fsm.FSM()
# Add stable states
machine.add_state(ENROLL, stable=True, **watchers)
machine.add_state(MANAGEABLE, stable=True, **watchers)
machine.add_state(AVAILABLE, stable=True, **watchers)
machine.add_state(ACTIVE, stable=True, **watchers)
machine.add_state(ERROR, stable=True, **watchers)
# Add verifying state
machine.add_state(VERIFYING, target=MANAGEABLE, **watchers)
# Add deploy* states
# NOTE(deva): Juno shows a target_provision_state of DEPLOYDONE
# this is changed in Kilo to ACTIVE
machine.add_state(DEPLOYING, target=ACTIVE, **watchers)
machine.add_state(DEPLOYWAIT, target=ACTIVE, **watchers)
machine.add_state(DEPLOYFAIL, target=ACTIVE, **watchers)
# Add clean* states
machine.add_state(CLEANING, target=AVAILABLE, **watchers)
machine.add_state(CLEANWAIT, target=AVAILABLE, **watchers)
machine.add_state(CLEANFAIL, target=AVAILABLE, **watchers)
# Add delete* states
machine.add_state(DELETING, target=AVAILABLE, **watchers)
# From AVAILABLE, a deployment may be started
machine.add_transition(AVAILABLE, DEPLOYING, 'deploy')
# Add inspect* states.
machine.add_state(INSPECTING, target=MANAGEABLE, **watchers)
machine.add_state(INSPECTFAIL, target=MANAGEABLE, **watchers)
# A deployment may fail
machine.add_transition(DEPLOYING, DEPLOYFAIL, 'fail')
# A failed deployment may be retried
# ironic/conductor/manager.py:do_node_deploy()
machine.add_transition(DEPLOYFAIL, DEPLOYING, 'rebuild')
# NOTE(deva): Juno allows a client to send "active" to initiate a rebuild
machine.add_transition(DEPLOYFAIL, DEPLOYING, 'deploy')
# A deployment may also wait on external callbacks
machine.add_transition(DEPLOYING, DEPLOYWAIT, 'wait')
machine.add_transition(DEPLOYWAIT, DEPLOYING, 'resume')
# A deployment waiting on callback may time out
machine.add_transition(DEPLOYWAIT, DEPLOYFAIL, 'fail')
# A deployment may complete
machine.add_transition(DEPLOYING, ACTIVE, 'done')
# An active instance may be re-deployed
# ironic/conductor/manager.py:do_node_deploy()
machine.add_transition(ACTIVE, DEPLOYING, 'rebuild')
# An active instance may be deleted
# ironic/conductor/manager.py:do_node_tear_down()
machine.add_transition(ACTIVE, DELETING, 'delete')
# While a deployment is waiting, it may be deleted
# ironic/conductor/manager.py:do_node_tear_down()
machine.add_transition(DEPLOYWAIT, DELETING, 'delete')
# A failed deployment may also be deleted
# ironic/conductor/manager.py:do_node_tear_down()
machine.add_transition(DEPLOYFAIL, DELETING, 'delete')
# This state can also transition to error
machine.add_transition(DELETING, ERROR, 'error')
# When finished deleting, a node will begin cleaning
machine.add_transition(DELETING, CLEANING, 'clean')
# If cleaning succeeds, it becomes available for scheduling
machine.add_transition(CLEANING, AVAILABLE, 'done')
# If cleaning fails, wait for operator intervention
machine.add_transition(CLEANING, CLEANFAIL, 'fail')
machine.add_transition(CLEANWAIT, CLEANFAIL, 'fail')
# While waiting for a clean step to be finished, cleaning may be aborted
machine.add_transition(CLEANWAIT, CLEANFAIL, 'abort')
# Cleaning may also wait on external callbacks
machine.add_transition(CLEANING, CLEANWAIT, 'wait')
machine.add_transition(CLEANWAIT, CLEANING, 'resume')
# An operator may want to move a CLEANFAIL node to MANAGEABLE, to perform
# other actions like cleaning
machine.add_transition(CLEANFAIL, MANAGEABLE, 'manage')
# From MANAGEABLE, a node may move to available after going through automated
# cleaning
machine.add_transition(MANAGEABLE, CLEANING, 'provide')
# From MANAGEABLE, a node may be manually cleaned, going back to manageable
# after cleaning is completed
machine.add_transition(MANAGEABLE, CLEANING, 'clean')
machine.add_transition(CLEANING, MANAGEABLE, 'manage')
# From AVAILABLE, a node may be made unavailable by managing it
machine.add_transition(AVAILABLE, MANAGEABLE, 'manage')
# An errored instance can be rebuilt
# ironic/conductor/manager.py:do_node_deploy()
machine.add_transition(ERROR, DEPLOYING, 'rebuild')
# or deleted
# ironic/conductor/manager.py:do_node_tear_down()
machine.add_transition(ERROR, DELETING, 'delete')
# Added transitions for inspection.
# Initiate inspection.
machine.add_transition(MANAGEABLE, INSPECTING, 'inspect')
# ironic/conductor/manager.py:inspect_hardware().
machine.add_transition(INSPECTING, MANAGEABLE, 'done')
# Inspection may fail.
machine.add_transition(INSPECTING, INSPECTFAIL, 'fail')
# Move the node to manageable state for any other
# action.
machine.add_transition(INSPECTFAIL, MANAGEABLE, 'manage')
# Reinitiate the inspect after inspectfail.
machine.add_transition(INSPECTFAIL, INSPECTING, 'inspect')
# Start power credentials verification
machine.add_transition(ENROLL, VERIFYING, 'manage')
# Verification can succeed
machine.add_transition(VERIFYING, MANAGEABLE, 'done')
# Verification can fail with setting last_error and rolling back to ENROLL
machine.add_transition(VERIFYING, ENROLL, 'fail')
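# --- Illustrative walkthrough (not part of ironic) ---
# A minimal sketch of driving the provisioning machine defined above,
# assuming ironic.common.fsm.FSM exposes initialize(), process_event()
# and current_state (the conductor uses the machine this way); adjust if
# the FSM API differs. Real callers typically operate on a copy of the
# machine; the module-level instance is reused here purely to illustrate
# the transition chain, and nothing runs at import time.
def _walk_enroll_to_active_example():
    machine.initialize(ENROLL)
    # ENROLL -manage-> VERIFYING -done-> MANAGEABLE -provide-> CLEANING
    #   -done-> AVAILABLE -deploy-> DEPLOYING -done-> ACTIVE
    for event in ('manage', 'done', 'provide', 'done', 'deploy', 'done'):
        machine.process_event(event)
    return machine.current_state  # expected: ACTIVE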
|
|
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing the various stages that a builder runs."""
from __future__ import print_function
import json
import logging
import os
from chromite.cbuildbot import commands
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot.stages import artifact_stages
from chromite.lib import cros_build_lib
from chromite.lib import gs
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import timeout_util
from chromite.lib.paygen import gspaths
from chromite.lib.paygen import paygen_build_lib
class InvalidTestConditionException(Exception):
"""Raised when pre-conditions for a test aren't met."""
class SignerTestStage(artifact_stages.ArchivingStage):
"""Run signer related tests."""
option_name = 'tests'
config_name = 'signer_tests'
# If the signer tests take longer than 30 minutes, abort. They usually take
# five minutes to run.
SIGNER_TEST_TIMEOUT = 30 * 60
def PerformStage(self):
if not self.archive_stage.WaitForRecoveryImage():
raise InvalidTestConditionException('Missing recovery image.')
with timeout_util.Timeout(self.SIGNER_TEST_TIMEOUT):
commands.RunSignerTests(self._build_root, self._current_board)
class SignerResultsTimeout(failures_lib.StepFailure):
"""The signer did not produce any results inside the expected time."""
class SignerFailure(failures_lib.StepFailure):
"""The signer returned an error result."""
class MissingInstructionException(failures_lib.StepFailure):
"""We didn't receive the list of signing instructions PushImage uploaded."""
class MalformedResultsException(failures_lib.StepFailure):
"""The Signer results aren't formatted as we expect."""
class PaygenSigningRequirementsError(failures_lib.StepFailure):
"""Paygen stage can't run if signing failed."""
class PaygenCrostoolsNotAvailableError(failures_lib.StepFailure):
"""Paygen stage can't run if signing failed."""
class PaygenNoPaygenConfigForBoard(failures_lib.StepFailure):
"""Paygen can't run with a release.conf config for the board."""
class PaygenStage(artifact_stages.ArchivingStage):
"""Stage that generates release payloads.
If this stage is created with a 'channels' argument, it can run
independently. Otherwise, it's dependent on values queued up by
the ArchiveStage (push_image).
"""
option_name = 'paygen'
config_name = 'paygen'
# Poll for new results every 30 seconds.
SIGNING_PERIOD = 30
# Timeout for the signing process. 2 hours in seconds.
SIGNING_TIMEOUT = 2 * 60 * 60
FINISHED = 'finished'
def __init__(self, builder_run, board, archive_stage, channels=None,
**kwargs):
"""Init that accepts the channels argument, if present.
Args:
builder_run: See builder_run on ArchivingStage.
board: See board on ArchivingStage.
archive_stage: See archive_stage on ArchivingStage.
channels: Explicit list of channels to generate payloads for.
If empty, will instead wait on values from push_image.
Channels is normally None in release builds, and normally set
for trybot 'payloads' builds.
"""
super(PaygenStage, self).__init__(builder_run, board, archive_stage,
**kwargs)
self.signing_results = {}
self.channels = channels
def _HandleStageException(self, exc_info):
"""Override and don't set status to FAIL but FORGIVEN instead."""
exc_type, exc_value, _exc_tb = exc_info
# If Paygen fails to find anything needed in release.conf, treat it
# as a warning, not a failure. This is common during new board bring up.
if issubclass(exc_type, PaygenNoPaygenConfigForBoard):
return self._HandleExceptionAsWarning(exc_info)
# Warn so people look at ArchiveStage for the real error.
if issubclass(exc_type, MissingInstructionException):
return self._HandleExceptionAsWarning(exc_info)
# If the exception is a TestLabFailure that means we couldn't schedule the
# test. We don't fail the build for that. We do the CompoundFailure dance,
# because that's how we'll get failures from background processes returned
# to us.
if (issubclass(exc_type, failures_lib.TestLabFailure) or
(issubclass(exc_type, failures_lib.CompoundFailure) and
exc_value.MatchesFailureType(failures_lib.TestLabFailure))):
return self._HandleExceptionAsWarning(exc_info)
return super(PaygenStage, self)._HandleStageException(exc_info)
def _JsonFromUrl(self, gs_ctx, url):
"""Fetch a GS Url, and parse it as Json.
Args:
gs_ctx: GS Context.
url: Url to fetch and parse.
Returns:
None if the Url doesn't exist.
Parsed Json structure if it did.
Raises:
MalformedResultsException if it failed to parse.
"""
try:
signer_txt = gs_ctx.Cat(url)
except gs.GSNoSuchKey:
return None
try:
return json.loads(signer_txt)
except ValueError:
# We should never see malformed Json, even for intermediate statuses.
raise MalformedResultsException(signer_txt)
def _SigningStatusFromJson(self, signer_json):
"""Extract a signing status from a signer result Json DOM.
Args:
signer_json: The parsed json status from a signer operation.
Returns:
      string with a simple status: 'passed', 'failed', 'downloading', etc.,
or '' if the json doesn't contain a status.
"""
return (signer_json or {}).get('status', {}).get('status', '')
def _CheckForResults(self, gs_ctx, instruction_urls_per_channel,
channel_notifier):
"""timeout_util.WaitForSuccess func to check a list of signer results.
Args:
gs_ctx: Google Storage Context.
instruction_urls_per_channel: Urls of the signer result files
we're expecting.
channel_notifier: BackgroundTaskRunner into which we push channels for
processing.
Returns:
      True if all expected signer results have been collected; False otherwise.
"""
COMPLETED_STATUS = ('passed', 'failed')
# Assume we are done, then try to prove otherwise.
results_completed = True
for channel in instruction_urls_per_channel.keys():
self.signing_results.setdefault(channel, {})
if (len(self.signing_results[channel]) ==
len(instruction_urls_per_channel[channel])):
continue
for url in instruction_urls_per_channel[channel]:
# Convert from instructions URL to instructions result URL.
url += '.json'
# We already have a result for this URL.
if url in self.signing_results[channel]:
continue
signer_json = self._JsonFromUrl(gs_ctx, url)
if self._SigningStatusFromJson(signer_json) in COMPLETED_STATUS:
# If we find a completed result, remember it.
self.signing_results[channel][url] = signer_json
# If we don't have full results for this channel, we aren't done
# waiting.
if (len(self.signing_results[channel]) !=
len(instruction_urls_per_channel[channel])):
results_completed = False
continue
# If we reach here, the channel has just been completed for the first
# time.
# If all results 'passed' the channel was successfully signed.
channel_success = True
for signer_result in self.signing_results[channel].values():
if self._SigningStatusFromJson(signer_result) != 'passed':
channel_success = False
# If we successfully completed the channel, inform paygen.
if channel_success:
channel_notifier(channel)
return results_completed
def _WaitForPushImage(self):
"""Block until push_image data is ready.
Returns:
Push_image results, expected to be of the form:
{ 'channel': ['gs://instruction_uri1', 'gs://signer_instruction_uri2'] }
Raises:
MissingInstructionException: If push_image sent us an error, or timed out.
"""
# This call will NEVER time out.
instruction_urls_per_channel = self.board_runattrs.GetParallel(
'instruction_urls_per_channel', timeout=None)
# A value of None signals an error in PushImage.
if instruction_urls_per_channel is None:
raise MissingInstructionException(
'ArchiveStage: PushImage failed. No images means no Paygen.')
return instruction_urls_per_channel
def _WaitForSigningResults(self,
instruction_urls_per_channel,
channel_notifier):
"""Do the work of waiting for signer results and logging them.
Args:
instruction_urls_per_channel: push_image data (see _WaitForPushImage).
channel_notifier: BackgroundTaskRunner into which we push channels for
processing.
Raises:
ValueError: If the signer result isn't valid json.
RunCommandError: If we are unable to download signer results.
"""
gs_ctx = gs.GSContext(dry_run=self._run.debug)
try:
cros_build_lib.Info('Waiting for signer results.')
timeout_util.WaitForReturnTrue(
self._CheckForResults,
func_args=(gs_ctx, instruction_urls_per_channel, channel_notifier),
timeout=self.SIGNING_TIMEOUT, period=self.SIGNING_PERIOD)
except timeout_util.TimeoutError:
msg = 'Image signing timed out.'
cros_build_lib.Error(msg)
cros_build_lib.PrintBuildbotStepText(msg)
raise SignerResultsTimeout(msg)
# Log all signer results, then handle any signing failures.
failures = []
for url_results in self.signing_results.values():
for url, signer_result in url_results.iteritems():
result_description = os.path.basename(url)
cros_build_lib.PrintBuildbotStepText(result_description)
cros_build_lib.Info('Received results for: %s', result_description)
cros_build_lib.Info(json.dumps(signer_result, indent=4))
status = self._SigningStatusFromJson(signer_result)
if status != 'passed':
failures.append(result_description)
cros_build_lib.Error('Signing failed for: %s', result_description)
if failures:
cros_build_lib.Error('Failure summary:')
for failure in failures:
cros_build_lib.Error(' %s', failure)
raise SignerFailure(', '.join([str(f) for f in failures]))
def PerformStage(self):
"""Do the work of generating our release payloads."""
# Convert to release tools naming for boards.
board = self._current_board.replace('_', '-')
version = self._run.attrs.release_tag
assert version, "We can't generate payloads without a release_tag."
logging.info("Generating payloads for: %s, %s", board, version)
# Test to see if the current board has a Paygen configuration. We do
    # this here, not in the sub-process, so we don't have to pass back a
# failure reason.
try:
paygen_build_lib.ValidateBoardConfig(board)
except paygen_build_lib.BoardNotConfigured:
raise PaygenNoPaygenConfigForBoard(
'No release.conf entry was found for board %s. Get a TPM to fix.' %
board)
with parallel.BackgroundTaskRunner(self._RunPaygenInProcess) as per_channel:
def channel_notifier(channel):
per_channel.put((channel, board, version, self._run.debug,
self._run.config.paygen_skip_testing,
self._run.config.paygen_skip_delta_payloads))
if self.channels:
logging.info("Using explicit channels: %s", self.channels)
# If we have an explicit list of channels, use it.
for channel in self.channels:
channel_notifier(channel)
else:
instruction_urls_per_channel = self._WaitForPushImage()
self._WaitForSigningResults(instruction_urls_per_channel,
channel_notifier)
def _RunPaygenInProcess(self, channel, board, version, debug,
disable_tests, skip_delta_payloads):
"""Helper for PaygenStage that invokes payload generation.
This method is intended to be safe to invoke inside a process.
Args:
channel: Channel of payloads to generate ('stable', 'beta', etc)
board: Board of payloads to generate ('x86-mario', 'x86-alex-he', etc)
version: Version of payloads to generate.
debug: Flag telling if this is a real run, or a test run.
      disable_tests: Do not generate test artifacts or run payload tests.
skip_delta_payloads: Skip generating delta payloads.
"""
# Convert to release tools naming for channels.
if not channel.endswith('-channel'):
channel += '-channel'
with osutils.TempDir(sudo_rm=True) as tempdir:
# Create the definition of the build to generate payloads for.
build = gspaths.Build(channel=channel,
board=board,
version=version)
try:
# Generate the payloads.
self._PrintLoudly('Starting %s, %s, %s' % (channel, version, board))
paygen_build_lib.CreatePayloads(build,
work_dir=tempdir,
dry_run=debug,
run_parallel=True,
run_on_builder=True,
skip_delta_payloads=skip_delta_payloads,
disable_tests=disable_tests)
except (paygen_build_lib.BuildFinished,
paygen_build_lib.BuildLocked,
paygen_build_lib.BuildSkip) as e:
# These errors are normal if it's possible for another process to
# work on the same build. This process could be a Paygen server, or
        # another builder (perhaps a trybot generating payloads on request).
#
# This means the build was finished by the other process, is already
# being processed (so the build is locked), or that it's been marked
# to skip (probably done manually).
cros_build_lib.Info('Paygen skipped because: %s', e)
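# --- Illustrative sketch (not part of chromite) ---
# PaygenStage._SigningStatusFromJson above reduces a nested signer result
# document to a simple status string. The standalone example below mirrors
# that extraction; the sample payload shape is inferred from the
# .get('status', {}).get('status', '') chain and from the terminal
# statuses ('passed', 'failed') that _CheckForResults treats as complete.
def _signer_status_example():
  sample_result = {'status': {'status': 'passed'}}  # hypothetical payload
  status = (sample_result or {}).get('status', {}).get('status', '')
  # 'passed' / 'failed' are terminal; anything else (e.g. 'downloading')
  # keeps the poller in _CheckForResults waiting.
  return status in ('passed', 'failed')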
|
|
# Copyright (c) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from contextlib import contextmanager
from base64 import b64encode
from swift.common.middleware import tempauth as auth
from swift.common.swob import Request, Response
class FakeMemcache(object):
def __init__(self):
self.store = {}
def get(self, key):
return self.store.get(key)
def set(self, key, value, timeout=0):
self.store[key] = value
return True
def incr(self, key, timeout=0):
self.store[key] = self.store.setdefault(key, 0) + 1
return self.store[key]
@contextmanager
def soft_lock(self, key, timeout=0, retries=5):
yield True
def delete(self, key):
try:
del self.store[key]
except Exception:
pass
return True
class FakeApp(object):
def __init__(self, status_headers_body_iter=None, acl=None, sync_key=None):
self.calls = 0
self.status_headers_body_iter = status_headers_body_iter
if not self.status_headers_body_iter:
self.status_headers_body_iter = iter([('404 Not Found', {}, '')])
self.acl = acl
self.sync_key = sync_key
def __call__(self, env, start_response):
self.calls += 1
self.request = Request.blank('', environ=env)
if self.acl:
self.request.acl = self.acl
if self.sync_key:
self.request.environ['swift_sync_key'] = self.sync_key
if 'swift.authorize' in env:
resp = env['swift.authorize'](self.request)
if resp:
return resp(env, start_response)
status, headers, body = self.status_headers_body_iter.next()
return Response(status=status, headers=headers,
body=body)(env, start_response)
class FakeConn(object):
def __init__(self, status_headers_body_iter=None):
self.calls = 0
self.status_headers_body_iter = status_headers_body_iter
if not self.status_headers_body_iter:
self.status_headers_body_iter = iter([('404 Not Found', {}, '')])
def request(self, method, path, headers):
self.calls += 1
self.request_path = path
self.status, self.headers, self.body = \
self.status_headers_body_iter.next()
self.status, self.reason = self.status.split(' ', 1)
self.status = int(self.status)
def getresponse(self):
return self
def read(self):
body = self.body
self.body = ''
return body
class TestAuth(unittest.TestCase):
def setUp(self):
self.test_auth = auth.filter_factory({})(FakeApp())
def _make_request(self, path, **kwargs):
req = Request.blank(path, **kwargs)
req.environ['swift.cache'] = FakeMemcache()
return req
def test_reseller_prefix_init(self):
app = FakeApp()
ath = auth.filter_factory({})(app)
self.assertEquals(ath.reseller_prefix, 'AUTH_')
ath = auth.filter_factory({'reseller_prefix': 'TEST'})(app)
self.assertEquals(ath.reseller_prefix, 'TEST_')
ath = auth.filter_factory({'reseller_prefix': 'TEST_'})(app)
self.assertEquals(ath.reseller_prefix, 'TEST_')
def test_auth_prefix_init(self):
app = FakeApp()
ath = auth.filter_factory({})(app)
self.assertEquals(ath.auth_prefix, '/auth/')
ath = auth.filter_factory({'auth_prefix': ''})(app)
self.assertEquals(ath.auth_prefix, '/auth/')
ath = auth.filter_factory({'auth_prefix': '/'})(app)
self.assertEquals(ath.auth_prefix, '/auth/')
ath = auth.filter_factory({'auth_prefix': '/test/'})(app)
self.assertEquals(ath.auth_prefix, '/test/')
ath = auth.filter_factory({'auth_prefix': '/test'})(app)
self.assertEquals(ath.auth_prefix, '/test/')
ath = auth.filter_factory({'auth_prefix': 'test/'})(app)
self.assertEquals(ath.auth_prefix, '/test/')
ath = auth.filter_factory({'auth_prefix': 'test'})(app)
self.assertEquals(ath.auth_prefix, '/test/')
def test_top_level_deny(self):
req = self._make_request('/')
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 401)
self.assertEquals(req.environ['swift.authorize'],
self.test_auth.denied_response)
def test_anon(self):
req = self._make_request('/v1/AUTH_account')
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 401)
self.assertEquals(req.environ['swift.authorize'],
self.test_auth.authorize)
def test_override_asked_for_but_not_allowed(self):
self.test_auth = \
auth.filter_factory({'allow_overrides': 'false'})(FakeApp())
req = self._make_request('/v1/AUTH_account',
environ={'swift.authorize_override': True})
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 401)
self.assertEquals(req.environ['swift.authorize'],
self.test_auth.authorize)
def test_override_asked_for_and_allowed(self):
self.test_auth = \
auth.filter_factory({'allow_overrides': 'true'})(FakeApp())
req = self._make_request('/v1/AUTH_account',
environ={'swift.authorize_override': True})
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 404)
self.assertTrue('swift.authorize' not in req.environ)
def test_override_default_allowed(self):
req = self._make_request('/v1/AUTH_account',
environ={'swift.authorize_override': True})
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 404)
self.assertTrue('swift.authorize' not in req.environ)
def test_auth_deny_non_reseller_prefix(self):
req = self._make_request('/v1/BLAH_account',
headers={'X-Auth-Token': 'BLAH_t'})
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 401)
self.assertEquals(req.environ['swift.authorize'],
self.test_auth.denied_response)
def test_auth_deny_non_reseller_prefix_no_override(self):
fake_authorize = lambda x: Response(status='500 Fake')
req = self._make_request('/v1/BLAH_account',
headers={'X-Auth-Token': 'BLAH_t'},
environ={'swift.authorize': fake_authorize}
)
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 500)
self.assertEquals(req.environ['swift.authorize'], fake_authorize)
def test_auth_no_reseller_prefix_deny(self):
# Ensures that when we have no reseller prefix, we don't deny a request
# outright but set up a denial swift.authorize and pass the request on
# down the chain.
local_app = FakeApp()
local_auth = auth.filter_factory({'reseller_prefix': ''})(local_app)
req = self._make_request('/v1/account',
headers={'X-Auth-Token': 't'})
resp = req.get_response(local_auth)
self.assertEquals(resp.status_int, 401)
self.assertEquals(local_app.calls, 1)
self.assertEquals(req.environ['swift.authorize'],
local_auth.denied_response)
def test_auth_no_reseller_prefix_no_token(self):
# Check that normally we set up a call back to our authorize.
local_auth = \
auth.filter_factory({'reseller_prefix': ''})(FakeApp(iter([])))
req = self._make_request('/v1/account')
resp = req.get_response(local_auth)
self.assertEquals(resp.status_int, 401)
self.assertEquals(req.environ['swift.authorize'],
local_auth.authorize)
# Now make sure we don't override an existing swift.authorize when we
# have no reseller prefix.
local_auth = \
auth.filter_factory({'reseller_prefix': ''})(FakeApp())
local_authorize = lambda req: Response('test')
req = self._make_request('/v1/account', environ={'swift.authorize':
local_authorize})
resp = req.get_response(local_auth)
self.assertEquals(resp.status_int, 200)
self.assertEquals(req.environ['swift.authorize'], local_authorize)
def test_auth_fail(self):
resp = self._make_request(
'/v1/AUTH_cfa',
headers={'X-Auth-Token': 'AUTH_t'}).get_response(self.test_auth)
self.assertEquals(resp.status_int, 401)
def test_authorize_bad_path(self):
req = self._make_request('/badpath')
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 401)
req = self._make_request('/badpath')
req.remote_user = 'act:usr,act,AUTH_cfa'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
def test_authorize_account_access(self):
req = self._make_request('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act,AUTH_cfa'
self.assertEquals(self.test_auth.authorize(req), None)
req = self._make_request('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
def test_authorize_acl_group_access(self):
req = self._make_request('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
req = self._make_request('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
req.acl = 'act'
self.assertEquals(self.test_auth.authorize(req), None)
req = self._make_request('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
req.acl = 'act:usr'
self.assertEquals(self.test_auth.authorize(req), None)
req = self._make_request('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
req.acl = 'act2'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
req = self._make_request('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
req.acl = 'act:usr2'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
def test_deny_cross_reseller(self):
# Tests that cross-reseller is denied, even if ACLs/group names match
req = self._make_request('/v1/OTHER_cfa')
req.remote_user = 'act:usr,act,AUTH_cfa'
req.acl = 'act'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
def test_authorize_acl_referrer_access(self):
req = self._make_request('/v1/AUTH_cfa/c')
req.remote_user = 'act:usr,act'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
req = self._make_request('/v1/AUTH_cfa/c')
req.remote_user = 'act:usr,act'
req.acl = '.r:*,.rlistings'
self.assertEquals(self.test_auth.authorize(req), None)
req = self._make_request('/v1/AUTH_cfa/c')
req.remote_user = 'act:usr,act'
req.acl = '.r:*' # No listings allowed
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
req = self._make_request('/v1/AUTH_cfa/c')
req.remote_user = 'act:usr,act'
req.acl = '.r:.example.com,.rlistings'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
req = self._make_request('/v1/AUTH_cfa/c')
req.remote_user = 'act:usr,act'
req.referer = 'http://www.example.com/index.html'
req.acl = '.r:.example.com,.rlistings'
self.assertEquals(self.test_auth.authorize(req), None)
req = self._make_request('/v1/AUTH_cfa/c')
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 401)
req = self._make_request('/v1/AUTH_cfa/c')
req.acl = '.r:*,.rlistings'
self.assertEquals(self.test_auth.authorize(req), None)
req = self._make_request('/v1/AUTH_cfa/c')
req.acl = '.r:*' # No listings allowed
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 401)
req = self._make_request('/v1/AUTH_cfa/c')
req.acl = '.r:.example.com,.rlistings'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 401)
req = self._make_request('/v1/AUTH_cfa/c')
req.referer = 'http://www.example.com/index.html'
req.acl = '.r:.example.com,.rlistings'
self.assertEquals(self.test_auth.authorize(req), None)
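        # As exercised above: '.r:*' grants referrer-based read access from any
        # Referer, '.r:.example.com' restricts it to that domain, and
        # '.rlistings' must also be present for container listing GETs.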
def test_account_put_permissions(self):
req = self._make_request('/v1/AUTH_new',
environ={'REQUEST_METHOD': 'PUT'})
req.remote_user = 'act:usr,act'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
req = self._make_request('/v1/AUTH_new',
environ={'REQUEST_METHOD': 'PUT'})
req.remote_user = 'act:usr,act,AUTH_other'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
# Even PUTs to your own account as account admin should fail
req = self._make_request('/v1/AUTH_old',
environ={'REQUEST_METHOD': 'PUT'})
req.remote_user = 'act:usr,act,AUTH_old'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
req = self._make_request('/v1/AUTH_new',
environ={'REQUEST_METHOD': 'PUT'})
req.remote_user = 'act:usr,act,.reseller_admin'
resp = self.test_auth.authorize(req)
self.assertEquals(resp, None)
# .super_admin is not something the middleware should ever see or care
# about
req = self._make_request('/v1/AUTH_new',
environ={'REQUEST_METHOD': 'PUT'})
req.remote_user = 'act:usr,act,.super_admin'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
def test_account_delete_permissions(self):
req = self._make_request('/v1/AUTH_new',
environ={'REQUEST_METHOD': 'DELETE'})
req.remote_user = 'act:usr,act'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
req = self._make_request('/v1/AUTH_new',
environ={'REQUEST_METHOD': 'DELETE'})
req.remote_user = 'act:usr,act,AUTH_other'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
# Even DELETEs to your own account as account admin should fail
req = self._make_request('/v1/AUTH_old',
environ={'REQUEST_METHOD': 'DELETE'})
req.remote_user = 'act:usr,act,AUTH_old'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
req = self._make_request('/v1/AUTH_new',
environ={'REQUEST_METHOD': 'DELETE'})
req.remote_user = 'act:usr,act,.reseller_admin'
resp = self.test_auth.authorize(req)
self.assertEquals(resp, None)
# .super_admin is not something the middleware should ever see or care
# about
req = self._make_request('/v1/AUTH_new',
environ={'REQUEST_METHOD': 'DELETE'})
req.remote_user = 'act:usr,act,.super_admin'
resp = self.test_auth.authorize(req)
self.assertEquals(resp.status_int, 403)
def test_get_token_fail(self):
resp = self._make_request('/auth/v1.0').get_response(self.test_auth)
self.assertEquals(resp.status_int, 401)
resp = self._make_request(
'/auth/v1.0',
headers={'X-Auth-User': 'act:usr',
'X-Auth-Key': 'key'}).get_response(self.test_auth)
self.assertEquals(resp.status_int, 401)
def test_get_token_fail_invalid_x_auth_user_format(self):
resp = self._make_request(
'/auth/v1/act/auth',
headers={'X-Auth-User': 'usr',
'X-Auth-Key': 'key'}).get_response(self.test_auth)
self.assertEquals(resp.status_int, 401)
def test_get_token_fail_non_matching_account_in_request(self):
resp = self._make_request(
'/auth/v1/act/auth',
headers={'X-Auth-User': 'act2:usr',
'X-Auth-Key': 'key'}).get_response(self.test_auth)
self.assertEquals(resp.status_int, 401)
def test_get_token_fail_bad_path(self):
resp = self._make_request(
'/auth/v1/act/auth/invalid',
headers={'X-Auth-User': 'act:usr',
'X-Auth-Key': 'key'}).get_response(self.test_auth)
self.assertEquals(resp.status_int, 400)
def test_get_token_fail_missing_key(self):
resp = self._make_request(
'/auth/v1/act/auth',
headers={'X-Auth-User': 'act:usr'}).get_response(self.test_auth)
self.assertEquals(resp.status_int, 401)
def test_storage_url_default(self):
self.test_auth = \
auth.filter_factory({'user_test_tester': 'testing'})(FakeApp())
req = self._make_request(
'/auth/v1.0',
headers={'X-Auth-User': 'test:tester', 'X-Auth-Key': 'testing'})
del req.environ['HTTP_HOST']
req.environ['SERVER_NAME'] = 'bob'
req.environ['SERVER_PORT'] = '1234'
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.headers['x-storage-url'],
'http://bob:1234/v1/AUTH_test')
def test_storage_url_based_on_host(self):
self.test_auth = \
auth.filter_factory({'user_test_tester': 'testing'})(FakeApp())
req = self._make_request(
'/auth/v1.0',
headers={'X-Auth-User': 'test:tester', 'X-Auth-Key': 'testing'})
req.environ['HTTP_HOST'] = 'somehost:5678'
req.environ['SERVER_NAME'] = 'bob'
req.environ['SERVER_PORT'] = '1234'
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.headers['x-storage-url'],
'http://somehost:5678/v1/AUTH_test')
    def test_storage_url_overridden_scheme(self):

self.test_auth = \
auth.filter_factory({'user_test_tester': 'testing',
'storage_url_scheme': 'fake'})(FakeApp())
req = self._make_request(
'/auth/v1.0',
headers={'X-Auth-User': 'test:tester', 'X-Auth-Key': 'testing'})
req.environ['HTTP_HOST'] = 'somehost:5678'
req.environ['SERVER_NAME'] = 'bob'
req.environ['SERVER_PORT'] = '1234'
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.headers['x-storage-url'],
'fake://somehost:5678/v1/AUTH_test')
def test_reseller_admin_is_owner(self):
orig_authorize = self.test_auth.authorize
owner_values = []
def mitm_authorize(req):
rv = orig_authorize(req)
owner_values.append(req.environ.get('swift_owner', False))
return rv
self.test_auth.authorize = mitm_authorize
req = self._make_request('/v1/AUTH_cfa',
headers={'X-Auth-Token': 'AUTH_t'})
req.remote_user = '.reseller_admin'
self.test_auth.authorize(req)
self.assertEquals(owner_values, [True])
def test_admin_is_owner(self):
orig_authorize = self.test_auth.authorize
owner_values = []
def mitm_authorize(req):
rv = orig_authorize(req)
owner_values.append(req.environ.get('swift_owner', False))
return rv
self.test_auth.authorize = mitm_authorize
req = self._make_request(
'/v1/AUTH_cfa',
headers={'X-Auth-Token': 'AUTH_t'})
req.remote_user = 'AUTH_cfa'
self.test_auth.authorize(req)
self.assertEquals(owner_values, [True])
def test_regular_is_not_owner(self):
orig_authorize = self.test_auth.authorize
owner_values = []
def mitm_authorize(req):
rv = orig_authorize(req)
owner_values.append(req.environ.get('swift_owner', False))
return rv
self.test_auth.authorize = mitm_authorize
req = self._make_request(
'/v1/AUTH_cfa/c',
headers={'X-Auth-Token': 'AUTH_t'})
req.remote_user = 'act:usr'
self.test_auth.authorize(req)
self.assertEquals(owner_values, [False])
def test_sync_request_success(self):
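        # Container-sync requests carry no auth token; they are let through
        # when the x-container-sync-key header matches the key known to the
        # backend app and an x-timestamp header is supplied.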
self.test_auth.app = FakeApp(iter([('204 No Content', {}, '')]),
sync_key='secret')
req = self._make_request(
'/v1/AUTH_cfa/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'x-container-sync-key': 'secret',
'x-timestamp': '123.456'})
req.remote_addr = '127.0.0.1'
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 204)
def test_sync_request_fail_key(self):
self.test_auth.app = FakeApp(iter([('204 No Content', {}, '')]),
sync_key='secret')
req = self._make_request(
'/v1/AUTH_cfa/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'x-container-sync-key': 'wrongsecret',
'x-timestamp': '123.456'})
req.remote_addr = '127.0.0.1'
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 401)
self.test_auth.app = FakeApp(iter([('204 No Content', {}, '')]),
sync_key='othersecret')
req = self._make_request(
'/v1/AUTH_cfa/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'x-container-sync-key': 'secret',
'x-timestamp': '123.456'})
req.remote_addr = '127.0.0.1'
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 401)
self.test_auth.app = FakeApp(iter([('204 No Content', {}, '')]),
sync_key=None)
req = self._make_request(
'/v1/AUTH_cfa/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'x-container-sync-key': 'secret',
'x-timestamp': '123.456'})
req.remote_addr = '127.0.0.1'
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 401)
def test_sync_request_fail_no_timestamp(self):
self.test_auth.app = FakeApp(iter([('204 No Content', {}, '')]),
sync_key='secret')
req = self._make_request(
'/v1/AUTH_cfa/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'x-container-sync-key': 'secret'})
req.remote_addr = '127.0.0.1'
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 401)
def test_sync_request_success_lb_sync_host(self):
self.test_auth.app = FakeApp(iter([('204 No Content', {}, '')]),
sync_key='secret')
req = self._make_request(
'/v1/AUTH_cfa/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'x-container-sync-key': 'secret',
'x-timestamp': '123.456',
'x-forwarded-for': '127.0.0.1'})
req.remote_addr = '127.0.0.2'
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 204)
self.test_auth.app = FakeApp(iter([('204 No Content', {}, '')]),
sync_key='secret')
req = self._make_request(
'/v1/AUTH_cfa/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'x-container-sync-key': 'secret',
'x-timestamp': '123.456',
'x-cluster-client-ip': '127.0.0.1'})
req.remote_addr = '127.0.0.2'
resp = req.get_response(self.test_auth)
self.assertEquals(resp.status_int, 204)
def test_options_call(self):
req = self._make_request('/v1/AUTH_cfa/c/o',
environ={'REQUEST_METHOD': 'OPTIONS'})
resp = self.test_auth.authorize(req)
self.assertEquals(resp, None)
class TestParseUserCreation(unittest.TestCase):
def test_parse_user_creation(self):
auth_filter = auth.filter_factory({
'reseller_prefix': 'ABC',
'user_test_tester3': 'testing',
'user_has_url': 'urlly .admin http://a.b/v1/DEF_has',
'user_admin_admin': 'admin .admin .reseller_admin',
})(FakeApp())
self.assertEquals(auth_filter.users, {
'admin:admin': {
'url': '$HOST/v1/ABC_admin',
'groups': ['.admin', '.reseller_admin'],
'key': 'admin'
}, 'test:tester3': {
'url': '$HOST/v1/ABC_test',
'groups': [],
'key': 'testing'
}, 'has:url': {
'url': 'http://a.b/v1/DEF_has',
'groups': ['.admin'],
'key': 'urlly'
},
})
def test_base64_encoding(self):
auth_filter = auth.filter_factory({
'reseller_prefix': 'ABC',
'user64_%s_%s' % (
b64encode('test').rstrip('='),
b64encode('tester3').rstrip('=')):
'testing .reseller_admin',
'user64_%s_%s' % (
b64encode('user_foo').rstrip('='),
b64encode('ab').rstrip('=')):
'urlly .admin http://a.b/v1/DEF_has',
})(FakeApp())
self.assertEquals(auth_filter.users, {
'test:tester3': {
'url': '$HOST/v1/ABC_test',
'groups': ['.reseller_admin'],
'key': 'testing'
}, 'user_foo:ab': {
'url': 'http://a.b/v1/DEF_has',
'groups': ['.admin'],
'key': 'urlly'
},
})
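        # The 'user64_' form exists so account/user names that are ambiguous in
        # a plain 'user_<account>_<name>' key (e.g. names containing '_') can be
        # given as unpadded base64; e.g. b64encode(b'user_foo').rstrip(b'=')
        # yields 'dXNlcl9mb28' as used above.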
def test_key_with_no_value(self):
self.assertRaises(ValueError, auth.filter_factory({
'user_test_tester3': 'testing',
'user_bob_bobby': '',
'user_admin_admin': 'admin .admin .reseller_admin',
}), FakeApp())
if __name__ == '__main__':
unittest.main()
|
|
from sympy.core import (Rational, Symbol, S, Float, Integer, Number, Pow,
Basic, I, nan)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.exponential import exp
from sympy.utilities.pytest import XFAIL
def test_issue153():
    #test that it runs:
a = sqrt(2*(1+sqrt(2)))
def test_rational():
a = Rational(1, 5)
r = sqrt(5)/5
assert sqrt(a) == r
assert 2*sqrt(a) == 2*r
r = a*a**Rational(1, 2)
assert a**Rational(3, 2) == r
assert 2*a**Rational(3, 2) == 2*r
r = a**5*a**Rational(2, 3)
assert a**Rational(17, 3) == r
assert 2 * a**Rational(17, 3) == 2*r
def test_large_rational():
e = (Rational(123712**12-1,7)+Rational(1,7))**Rational(1,3)
assert e == 234232585392159195136 * (Rational(1,7)**Rational(1,3))
def test_negative_real():
def feq(a,b):
return abs(a - b) < 1E-10
assert feq(S.One / Float(-0.5), -Integer(2))
def test_expand():
x = Symbol('x')
assert (2**(-1-x)).expand() == Rational(1,2)*2**(-x)
def test_issue350():
#test if powers are simplified correctly
#see also issue 896
x = Symbol('x')
assert ((x**Rational(1,3))**Rational(2)) == x**Rational(2,3)
assert ((x**Rational(3))**Rational(2,5)) == (x**Rational(3))**Rational(2,5)
a = Symbol('a', real=True)
b = Symbol('b', real=True)
assert (a**2)**b == (abs(a)**b)**2
assert sqrt(1/a) != 1/sqrt(a) # e.g. for a = -1
assert (a**3)**Rational(1,3) != a
assert (x**a)**b != x**(a*b) # e.g. x = -1, a=2, b=1/2
assert (x**.5)**b == x**(.5*b)
assert (x**.5)**.5 == x**.25
assert (x**2.5)**.5 != x**1.25 # e.g. for x = 5*I
k = Symbol('k',integer=True)
m = Symbol('m',integer=True)
assert (x**k)**m == x**(k*m)
assert Number(5)**Rational(2,3)==Number(25)**Rational(1,3)
assert (x**.5)**2 == x**1.0
assert (x**2)**k == (x**k)**2 == x**(2*k)
a = Symbol('a', positive=True)
assert (a**3)**Rational(2,5) == a**Rational(6,5)
assert (a**2)**b == (a**b)**2
assert (a**Rational(2, 3))**x == (a**(2*x/3)) != (a**x)**Rational(2, 3)
def test_issue767():
assert --sqrt(sqrt(5)-1)==sqrt(sqrt(5)-1)
def test_negative_one():
x = Symbol('x', complex=True)
y = Symbol('y', complex=True)
assert 1/x**y == x**(-y)
def test_issue1263():
neg = Symbol('neg', negative=True)
nonneg = Symbol('nonneg', nonnegative=True)
any = Symbol('any')
num, den = sqrt(1/neg).as_numer_denom()
assert num == sqrt(-1)
assert den == sqrt(-neg)
num, den = sqrt(1/nonneg).as_numer_denom()
assert num == 1
assert den == sqrt(nonneg)
num, den = sqrt(1/any).as_numer_denom()
assert num == sqrt(1/any)
assert den == 1
def eqn(num, den, pow):
return (num/den)**pow
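    # The checks below walk the sign matrix for (num/den)**pow: numerator and
    # denominator each positive or negative, with integer, half-integer and
    # symbolic exponents. Negative factors must be extracted as (-num) and
    # (-den) so the branch cut of the fractional power is respected.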
npos=1
nneg=-1
dpos=2-sqrt(3)
dneg=1-sqrt(3)
assert dpos > 0 and dneg < 0 and npos > 0 and nneg < 0
# pos or neg integer
eq=eqn(npos, dpos, 2);assert eq.is_Pow and eq.as_numer_denom() == (1, dpos**2)
eq=eqn(npos, dneg, 2);assert eq.is_Pow and eq.as_numer_denom() == (1, dneg**2)
eq=eqn(nneg, dpos, 2);assert eq.is_Pow and eq.as_numer_denom() == (1, dpos**2)
eq=eqn(nneg, dneg, 2);assert eq.is_Pow and eq.as_numer_denom() == (1, dneg**2)
eq=eqn(npos, dpos, -2);assert eq.is_Pow and eq.as_numer_denom() == (dpos**2, 1)
eq=eqn(npos, dneg, -2);assert eq.is_Pow and eq.as_numer_denom() == (dneg**2, 1)
eq=eqn(nneg, dpos, -2);assert eq.is_Pow and eq.as_numer_denom() == (dpos**2, 1)
eq=eqn(nneg, dneg, -2);assert eq.is_Pow and eq.as_numer_denom() == (dneg**2, 1)
# pos or neg rational
pow = S.Half
eq=eqn(npos, dpos, pow);assert eq.is_Pow and eq.as_numer_denom() == (npos**pow, dpos**pow)
eq=eqn(npos, dneg, pow);assert eq.is_Pow and eq.as_numer_denom() == ((-npos)**pow, (-dneg)**pow)
eq=eqn(nneg, dpos, pow);assert not eq.is_Pow or eq.as_numer_denom() == (nneg**pow, dpos**pow)
eq=eqn(nneg, dneg, pow);assert eq.is_Pow and eq.as_numer_denom() == ((-nneg)**pow, (-dneg)**pow)
eq=eqn(npos, dpos, -pow);assert eq.is_Pow and eq.as_numer_denom() == (dpos**pow, npos**pow)
eq=eqn(npos, dneg, -pow);assert eq.is_Pow and eq.as_numer_denom() == ((-dneg)**pow, (-npos)**pow)
eq=eqn(nneg, dpos, -pow);assert not eq.is_Pow or eq.as_numer_denom() == (dpos**pow, nneg**pow)
eq=eqn(nneg, dneg, -pow);assert eq.is_Pow and eq.as_numer_denom() == ((-dneg)**pow, (-nneg)**pow)
# unknown exponent
pow = 2*any
eq=eqn(npos, dpos, pow)
assert eq.is_Pow and eq.as_numer_denom() == (npos**pow, dpos**pow)
eq=eqn(npos, dneg, pow)
assert eq.is_Pow and eq.as_numer_denom() == ((-npos)**pow, (-dneg)**pow)
eq=eqn(nneg, dpos, pow)
assert eq.is_Pow and eq.as_numer_denom() == (nneg**pow, dpos**pow)
eq=eqn(nneg, dneg, pow)
assert eq.is_Pow and eq.as_numer_denom() == ((-nneg)**pow, (-dneg)**pow)
eq=eqn(npos, dpos, -pow)
assert eq.as_numer_denom() == (dpos**pow, npos**pow)
eq=eqn(npos, dneg, -pow)
assert eq.is_Pow and eq.as_numer_denom() == ((-dneg)**pow, (-npos)**pow)
eq=eqn(nneg, dpos, -pow)
assert eq.is_Pow and eq.as_numer_denom() == (dpos**pow, nneg**pow)
eq=eqn(nneg, dneg, -pow)
assert eq.is_Pow and eq.as_numer_denom() == ((-dneg)**pow, (-nneg)**pow)
x = Symbol('x')
y = Symbol('y')
assert ((1/(1 + x/3))**(-S.One)).as_numer_denom() == (3 + x, 3)
notp = Symbol('notp', positive=False) # not positive does not imply real
b = ((1 + x/notp)**-2)
assert (b**(-y)).as_numer_denom() == (1, b**y)
assert (b**(-S.One)).as_numer_denom() == ((notp + x)**2, notp**2)
nonp = Symbol('nonp', nonpositive=True)
assert (((1 + x/nonp)**-2)**(-S.One)).as_numer_denom() == ((-nonp - x)**2, nonp**2)
n = Symbol('n', negative=True)
assert (x**n).as_numer_denom() == (1, x**-n)
assert sqrt(1/n).as_numer_denom() == (S.ImaginaryUnit, sqrt(-n))
n = Symbol('0 or neg', nonpositive=True)
# if x and n are split up without negating each term and n is negative
# then the answer might be wrong; if n is 0 it won't matter since
# 1/oo and 1/zoo are both zero as is sqrt(0)/sqrt(-x) unless x is also
# zero (in which case the negative sign doesn't matter):
# 1/sqrt(1/-1) = -I but sqrt(-1)/sqrt(1) = I
assert (1/sqrt(x/n)).as_numer_denom() == (sqrt(-n), sqrt(-x))
c = Symbol('c', complex=True)
e = sqrt(1/c)
assert e.as_numer_denom() == (e, 1)
i = Symbol('i', integer=True)
assert (((1 + x/y)**i)).as_numer_denom() == ((x + y)**i, y**i)
def test_Pow_signs():
"""Cf. issues 1496 and 2151"""
x = Symbol('x')
y = Symbol('y')
n = Symbol('n', even=True)
assert (3-y)**2 != (y-3)**2
assert (3-y)**n != (y-3)**n
assert (-3+y-x)**2 != (3-y+x)**2
assert (y-3)**3 != -(3-y)**3
def test_power_with_noncommutative_mul_as_base():
x = Symbol('x', commutative=False)
y = Symbol('y', commutative=False)
assert not (x*y)**3 == x**3*y**3
assert (2*x*y)**3 == 8*(x*y)**3
def test_zero():
x = Symbol('x')
y = Symbol('y')
assert 0**x != 0
assert 0**(2*x) == 0**x
assert 0**(1.0*x) == 0**x
assert 0**(2.0*x) == 0**x
assert (0**(2 - x)).as_base_exp() == (0, 2 - x)
assert 0**(x - 2) != S.Infinity**(2 - x)
assert 0**(2*x*y) == 0**(x*y)
assert 0**(-2*x*y) == S.Infinity**(x*y)
assert 0**I == nan
i = Symbol('i', imaginary=True)
assert 0**i == nan
def test_pow_as_base_exp():
x = Symbol('x')
assert (S.Infinity**(2 - x)).as_base_exp() == (S.Infinity, 2 - x)
assert (S.Infinity**(x - 2)).as_base_exp() == (S.Infinity, x - 2)
p = S.Half**x
    # `assert a, b` only evaluates `a`; assert the intended as_base_exp() result:
    assert p.as_base_exp() == (S(2), -x)
def test_issue_3001():
x = Symbol('x')
y = Symbol('y')
assert x**1.0 == x
assert x == x**1.0
assert True != x**1.0
assert x**1.0 != True
assert x != True
assert x*y == (x*y)**1.0
assert (x**1.0)**1.0 == x
assert (x**1.0)**2.0 == x**2
b = Basic()
assert Pow(b, 1.0, evaluate=False) == b
    # if the following gets distributed as a Mul (x**1.0*y**1.0), then __eq__
    # methods could be added to Symbol and Pow to detect the power-of-1.0 case.
assert ((x*y)**1.0).func is Pow
def test_issue_3109():
from sympy import root, Rational
I = S.ImaginaryUnit
assert sqrt(33**(9*I/10)) == -33**(9*I/20)
assert root((6*I)**(2*I), 3).as_base_exp()[1] == Rational(1, 3) # != 2*I/3
assert root((6*I)**(I/3), 3).as_base_exp()[1] == I/9
assert sqrt(exp(3*I)) == exp(3*I/2)
assert sqrt(-sqrt(3)*(1 + 2*I)) == sqrt(sqrt(3))*sqrt(-1 - 2*I)
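# The cases below are expected failures: @XFAIL marks simplifications that are
# known not to hold yet, so the test is reported as xfail rather than an error.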
@XFAIL
def test_issue_3109_fail():
from sympy import root, Rational
I = S.ImaginaryUnit
assert sqrt(exp(5*I)) == -exp(5*I/2)
assert root(exp(5*I), 3).exp == Rational(1, 3)
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'annotation_attempt'
db.create_table('annotations_annotation_attempt', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['images.Source'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.Profile'])),
))
db.send_create_signal('annotations', ['annotation_attempt'])
# Adding model 'LabelGroup'
db.create_table('annotations_labelgroup', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=45, blank=True)),
('code', self.gf('django.db.models.fields.CharField')(max_length=5, blank=True)),
))
db.send_create_signal('annotations', ['LabelGroup'])
# Adding model 'Label'
db.create_table('annotations_label', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=45, blank=True)),
('code', self.gf('django.db.models.fields.CharField')(max_length=5, blank=True)),
('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['annotations.LabelGroup'])),
))
db.send_create_signal('annotations', ['Label'])
# Adding model 'LabelSet'
db.create_table('annotations_labelset', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('location', self.gf('django.db.models.fields.CharField')(max_length=45, blank=True)),
('attempt', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['annotations.annotation_attempt'])),
))
db.send_create_signal('annotations', ['LabelSet'])
# Adding model 'Annotation'
db.create_table('annotations_annotation', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('annotation_date', self.gf('django.db.models.fields.DateField')()),
('point', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['images.Point'])),
('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['images.Image'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.Profile'])),
('label', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['annotations.Label'])),
('attempt', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['annotations.annotation_attempt'])),
))
db.send_create_signal('annotations', ['Annotation'])
def backwards(self, orm):
# Deleting model 'annotation_attempt'
db.delete_table('annotations_annotation_attempt')
# Deleting model 'LabelGroup'
db.delete_table('annotations_labelgroup')
# Deleting model 'Label'
db.delete_table('annotations_label')
# Deleting model 'LabelSet'
db.delete_table('annotations_labelset')
# Deleting model 'Annotation'
db.delete_table('annotations_annotation')
models = {
'accounts.profile': {
'Meta': {'object_name': 'Profile'},
'about_me': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '5'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
'mugshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'privacy': ('django.db.models.fields.CharField', [], {'default': "'registered'", 'max_length': '15'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'my_profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'annotations.annotation': {
'Meta': {'object_name': 'Annotation'},
'annotation_date': ('django.db.models.fields.DateField', [], {}),
'attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['annotations.annotation_attempt']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Image']"}),
'label': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['annotations.Label']"}),
'point': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Point']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Profile']"})
},
'annotations.annotation_attempt': {
'Meta': {'object_name': 'annotation_attempt'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Profile']"})
},
'annotations.label': {
'Meta': {'object_name': 'Label'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['annotations.LabelGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
},
'annotations.labelgroup': {
'Meta': {'object_name': 'LabelGroup'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
},
'annotations.labelset': {
'Meta': {'object_name': 'LabelSet'},
'attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['annotations.annotation_attempt']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'images.camerainfo': {
'Meta': {'object_name': 'CameraInfo'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
'height': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
'photographer': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
'pixel_cm_ratio': ('django.db.models.fields.IntegerField', [], {}),
'water_quality': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {})
},
'images.image': {
'Meta': {'object_name': 'Image'},
'camera': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.CameraInfo']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'total_points': ('django.db.models.fields.IntegerField', [], {})
},
'images.point': {
'Meta': {'object_name': 'Point'},
'annotation_status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'column': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Image']"}),
'point_number': ('django.db.models.fields.IntegerField', [], {}),
'row': ('django.db.models.fields.IntegerField', [], {})
},
'images.source': {
'Meta': {'object_name': 'Source'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key1': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key2': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key3': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key4': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key5': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'latitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'longitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'visibility': ('django.db.models.fields.CharField', [], {'max_length': '1'})
}
}
complete_apps = ['annotations']
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops
def GetInceptionMaxPoolShapes():
"""Iterator for some of the max pool ops in the Inception 2015 model.
Yields:
Tuple (name, input_size, filter_size, out_size, strides, padding)
"""
names = ["maxpool2", "maxpool3", "maxpool4", "maxpool5"]
input_sizes = [[32, 71, 71, 192],
[32, 35, 35, 288], [32, 17, 17, 1248], [32, 8, 8, 2048]]
filter_sizes = [[1, 3, 3, 1], [1, 3, 3, 1],
[1, 3, 3, 1], [1, 3, 3, 1]]
output_sizes = [[32, 35, 35, 192], [32, 17, 17, 288],
[32, 8, 8, 1248], [32, 8, 8, 2048]]
strides = [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1],
[1, 1, 1, 1]]
paddings = ["VALID", "VALID", "VALID", "SAME"]
for n, i, f, o, s, p in zip(names, input_sizes, filter_sizes, output_sizes,
strides, paddings):
yield n, i, f, o, s, p
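# A minimal usage sketch (nothing beyond the generator above is assumed): each
# yielded tuple can parameterize a shape check, e.g.
#
#   for name, in_size, ksize, out_size, strides, padding in \
#       GetInceptionMaxPoolShapes():
#     assert len(in_size) == len(ksize) == len(out_size) == len(strides) == 4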
class PoolingTest(tf.test.TestCase):
def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
expected, use_gpu):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Function to be called, e.g. tf.nn.max_pool or tf.nn.avg_pool.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
expected: An array containing the expected operation outputs.
use_gpu: Whether we are running on GPU.
"""
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
t = tf.constant(x, shape=input_sizes)
t = pool_func(t, ksize=ksize, strides=strides, padding=padding)
actual = t.eval()
self.assertAllClose(expected, actual.flatten())
self.assertShapeEqual(actual, t)
def _testAvgPoolValidPadding(self, use_gpu):
expected_output = [7.0, 8.0, 9.0]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output, use_gpu=use_gpu)
def _testAvgPoolSamePadding(self, use_gpu):
expected_output = [8.5, 9.5, 10.5, 14.5, 15.5, 16.5]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 4, 3],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def _testAvgPoolSamePaddingNonSquareWindow(self, use_gpu):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
# [avg(1.0, 2.0), avg(2.0, padded0),
# avg(3.0, 4.0), avg(4.0, padded0)]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
padding="SAME",
expected=[1.5, 2.0, 3.5, 4.0], use_gpu=use_gpu)
# Window of [x,
# x] should do:
# [avg(1.0, 3.0), avg(2.0, 4.0)
# avg(3.0, padded0), avg(4.0, padded0)]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 2, 1],
ksize=[1, 2, 1, 1], strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 3.0, 3.0, 4.0], use_gpu=use_gpu)
def _testAvgPoolSamePaddingNonSquareWindowMultiBatch(self, use_gpu):
self._VerifyValues(tf.nn.avg_pool, input_sizes=[2, 2, 2, 2],
ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 3.0, 3.0, 4.0,
6.0, 7.0, 7.0, 8.0,
10.0, 11.0, 11.0, 12.0,
14.0, 15.0, 15.0, 16.0],
use_gpu=use_gpu)
self._VerifyValues(tf.nn.avg_pool, input_sizes=[2, 2, 2, 2],
ksize=[1, 2, 1, 1], strides=[1, 1, 1, 1],
padding="SAME",
expected=[3.0, 4.0, 5.0, 6.0,
5.0, 6.0, 7.0, 8.0,
11.0, 12.0, 13.0, 14.0,
13.0, 14.0, 15.0, 16.0],
use_gpu=use_gpu)
def _testAvgPoolValidPaddingUnevenStride(self, use_gpu):
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0, 16.0, 17.0, 18.0],
use_gpu=use_gpu)
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
use_gpu=use_gpu)
def _testAvgPoolSamePadding4(self, use_gpu):
expected_output = [11.0, 12.0, 13.0, 14.0, 19.0, 20.0, 21.0, 22.0, 43.0,
44.0, 45.0, 46.0, 51.0, 52.0, 53.0, 54.0]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def _testAvgPoolSamePaddingPacket4(self, use_gpu):
expected_output = [21.0, 22.0, 23.0, 24.0, 27.0, 28.0, 29.0, 30.0,
45.0, 46.0, 47.0, 48.0, 51.0, 52.0, 53.0, 54.0]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 4, 4, 4],
ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def _testAvgPoolSamePaddingPacket8(self, use_gpu):
expected_output = [73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 89.0,
90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 105.0, 106.0,
107.0, 108.0, 109.0, 110.0, 111.0, 112.0, 117.0, 118.0,
119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 201.0, 202.0,
203.0, 204.0, 205.0, 206.0, 207.0, 208.0, 217.0, 218.0,
219.0, 220.0, 221.0, 222.0, 223.0, 224.0, 233.0, 234.0,
235.0, 236.0, 237.0, 238.0, 239.0, 240.0, 245.0, 246.0,
247.0, 248.0, 249.0, 250.0, 251.0, 252.0, 329.0, 330.0,
331.0, 332.0, 333.0, 334.0, 335.0, 336.0, 345.0, 346.0,
347.0, 348.0, 349.0, 350.0, 351.0, 352.0, 361.0, 362.0,
363.0, 364.0, 365.0, 366.0, 367.0, 368.0, 373.0, 374.0,
375.0, 376.0, 377.0, 378.0, 379.0, 380.0, 425.0, 426.0,
427.0, 428.0, 429.0, 430.0, 431.0, 432.0, 441.0, 442.0,
443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 457.0, 458.0,
459.0, 460.0, 461.0, 462.0, 463.0, 464.0, 469.0, 470.0,
471.0, 472.0, 473.0, 474.0, 475.0, 476.0]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def testAvgPooling(self):
for use_gpu in True, False:
self._testAvgPoolValidPadding(use_gpu)
self._testAvgPoolSamePadding(use_gpu)
self._testAvgPoolSamePaddingNonSquareWindow(use_gpu)
self._testAvgPoolSamePaddingNonSquareWindowMultiBatch(use_gpu)
self._testAvgPoolValidPaddingUnevenStride(use_gpu)
self._testAvgPoolSamePadding4(use_gpu)
self._testAvgPoolSamePaddingPacket4(use_gpu)
self._testAvgPoolSamePaddingPacket8(use_gpu)
def _testMaxPoolValidPadding(self, use_gpu):
expected_output = [13.0, 14.0, 15.0]
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output, use_gpu=use_gpu)
def _testMaxPoolSamePadding(self, use_gpu):
expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def _testMaxPoolSamePaddingNonSquareWindow(self, use_gpu):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
#
# [max(1.0, 2.0), max(2.0, padded0),
# max(3.0, 4.0), max(4.0, padded0)]
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 2.0, 4.0, 4.0], use_gpu=use_gpu)
def _testMaxPoolValidPaddingUnevenStride(self, use_gpu):
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1],
padding="VALID",
expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],
use_gpu=use_gpu)
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1],
padding="VALID",
expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],
use_gpu=use_gpu)
def _testMaxPoolSamePaddingPacket4(self, use_gpu):
expected_output = [21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0,
54.0, 55.0, 56.0, 61.0, 62.0, 63.0, 64.0]
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def _testMaxPoolSamePaddingPacket8(self, use_gpu):
expected_output = [145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0,
161.0, 162.0, 163.0, 164.0, 165.0, 166.0, 167.0, 168.0,
177.0, 178.0, 179.0, 180.0, 181.0, 182.0, 183.0, 184.0,
185.0, 186.0, 187.0, 188.0, 189.0, 190.0, 191.0, 192.0,
273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,
289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0,
305.0, 306.0, 307.0, 308.0, 309.0, 310.0, 311.0, 312.0,
313.0, 314.0, 315.0, 316.0, 317.0, 318.0, 319.0, 320.0,
401.0, 402.0, 403.0, 404.0, 405.0, 406.0, 407.0, 408.0,
417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,
433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0,
441.0, 442.0, 443.0, 444.0, 445.0, 446.0, 447.0, 448.0,
465.0, 466.0, 467.0, 468.0, 469.0, 470.0, 471.0, 472.0,
481.0, 482.0, 483.0, 484.0, 485.0, 486.0, 487.0, 488.0,
497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,
505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0]
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def testMaxPooling(self):
for use_gpu in True, False:
self._testMaxPoolValidPadding(use_gpu)
self._testMaxPoolSamePadding(use_gpu)
self._testMaxPoolSamePaddingNonSquareWindow(use_gpu)
self._testMaxPoolValidPaddingUnevenStride(use_gpu)
self._testMaxPoolSamePaddingPacket4(use_gpu)
self._testMaxPoolSamePaddingPacket8(use_gpu)
# Tests for DepthwiseMaxPooling on CPU only.
def testDepthwiseMaxPool1x1DepthWindow1(self):
# input is:
# [1.0, ..., 10.0] along depth,
#
# We maxpool by depth in patches of 2.
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 1, 1, 10],
ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
padding="SAME",
expected=[2.0, 4.0, 6.0, 8.0, 10.0], use_gpu=False)
def testDepthwiseMaxPool2x2DepthWindow3(self):
# input is:
#
# a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
# output. Each node has contiguous values, so the depthwise max
# should be multiples of 3.0.
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 2, 6],
ksize=[1, 1, 1, 3], strides=[1, 1, 1, 3],
padding="SAME",
expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],
use_gpu=False)
def _testDepthwiseMaxPoolInvalidConfig(self, in_size, ksize, strides,
error_msg, use_gpu=False):
t = tf.constant(1.0, shape=in_size)
with self.assertRaisesRegexp(ValueError, error_msg):
t = tf.nn.max_pool(t, ksize=ksize, strides=strides, padding="SAME")
def testDepthwiseMaxPoolInvalidConfigs(self):
self._testDepthwiseMaxPoolInvalidConfig(
[1, 2, 2, 4], [1, 2, 2, 2],
[1, 1, 1, 2], "exactly one of pooling across depth")
self._testDepthwiseMaxPoolInvalidConfig(
[1, 2, 2, 4], [1, 1, 1, 2],
[1, 1, 1, 1], "depth window to equal the depth stride")
self._testDepthwiseMaxPoolInvalidConfig(
[1, 2, 2, 4], [1, 1, 1, 3],
[1, 1, 1, 3], "evenly divide")
if tf.test.IsBuiltWithCuda():
with self.test_session(use_gpu=True):
t = tf.constant(1.0, shape=[1, 2, 2, 4])
with self.assertRaisesOpError("for CPU devices"):
tf.nn.max_pool(t, ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
padding="SAME").eval()
# The following are tests that verify that the CPU and GPU implementations
  # produce the same results.
def _CompareMaxPoolingFwd(self, input_shape, ksize, strides, padding):
tensor_input = np.random.rand(*input_shape).astype(np.float32)
with self.test_session(use_gpu=True):
t = tf.constant(tensor_input, shape=input_shape)
out_op, _ = tf.nn.max_pool_with_argmax(t, ksize, strides, padding)
gpu_val = out_op.eval()
with self.test_session(use_gpu=False):
t = tf.constant(tensor_input, shape=input_shape)
out_op = tf.nn.max_pool(t, ksize, strides, padding)
cpu_val = out_op.eval()
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def _CompareMaxPoolingBk(self, input_shape, output_shape, ksize, strides,
padding):
# Generate numbers in a narrow range, so that there are many duplicates
# in the input.
tensor_input = np.random.random_integers(0, 3,
input_shape).astype(np.float32)
tensor_output = np.random.rand(*output_shape).astype(np.float32)
with self.test_session(use_gpu=True):
t = tf.constant(tensor_input, shape=input_shape)
_, argmax_op = tf.nn.max_pool_with_argmax(t, ksize, strides, padding)
argmax = argmax_op.eval()
grad_in = tf.constant(tensor_output, shape=output_shape)
out_op = gen_nn_ops._max_pool_grad_with_argmax(t, grad_in, argmax,
ksize, strides, padding)
gpu_val = out_op.eval()
self.assertShapeEqual(gpu_val, out_op)
with self.test_session(use_gpu=False):
t = tf.constant(tensor_input, shape=input_shape)
out_op = tf.nn.max_pool(t, ksize, strides, padding)
orig_out = out_op.eval()
grad_in = tf.constant(tensor_output, shape=output_shape)
out_op = gen_nn_ops._max_pool_grad(t, orig_out, grad_in, ksize,
strides, padding)
cpu_val = out_op.eval()
self.assertShapeEqual(cpu_val, out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testMaxPoolingWithArgmax(self):
# MaxPoolWithArgMax is implemented only on GPU.
if not tf.test.IsBuiltWithCuda():
return
tensor_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
with self.test_session(use_gpu=True) as sess:
t = tf.constant(tensor_input, shape=[1, 3, 3, 1])
out_op, argmax_op = tf.nn.max_pool_with_argmax(t,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
Targmax=tf.int64,
padding="VALID")
out, argmax = sess.run([out_op, argmax_op])
self.assertShapeEqual(out, out_op)
self.assertShapeEqual(argmax, argmax_op)
self.assertAllClose(out.ravel(), [1.0, 1.0, 1.0, 1.0])
self.assertAllEqual(argmax.ravel(), [0, 1, 3, 5])
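      # The argmax values are flattened offsets into the 3x3 input (row*3 +
      # col): the four 2x2 windows take their max at offsets 0, 1, 3 and 5,
      # the first maximal entry per window, skipping the 0.0 at offset 4.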
def testMaxPoolingGradWithArgmax(self):
# MaxPoolWithArgMax is implemented only on GPU.
if not tf.test.IsBuiltWithCuda():
return
orig_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
tensor_input = [11.0, 12.0, 13.0, 14.0]
tensor_argmax = list(np.array([0, 1, 3, 5], dtype=np.int64))
with self.test_session(use_gpu=True) as sess:
orig_in = tf.constant(orig_input, shape=[1, 3, 3, 1])
t = tf.constant(tensor_input, shape=[1, 2, 2, 1])
argmax = tf.constant(tensor_argmax, shape=[1, 2, 2, 1],
dtype=tf.int64)
out_op = gen_nn_ops._max_pool_grad_with_argmax(orig_in, t, argmax,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
padding="VALID")
out = out_op.eval().flatten()
self.assertAllClose(out, [11.0, 12.0, 0.0, 13.0, 0.0,
14.0, 0.0, 0.0, 0.0])
def _ConstructAndTestGradient(self, pool_func, input_sizes, output_sizes,
window_rows, window_cols, row_stride,
col_stride, padding, use_gpu,
x_init_value=None):
"""Verifies the gradients of the avg pooling function.
Args:
      pool_func: Function to be called, e.g. tf.nn.max_pool or tf.nn.avg_pool.
input_sizes: Input tensor dimensions.
output_sizes: Output tensor dimensions.
window_rows: kernel size in row dim
window_cols: kernel size in col dim
row_stride: Row Stride.
col_stride: Col Stride.
padding: Padding type.
use_gpu: whether we are running on GPU
x_init_value: Values to be passed to the gradient checker.
"""
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.test_session(use_gpu=use_gpu):
input_tensor = tf.constant(x, shape=input_sizes, name="input")
if pool_func == tf.nn.avg_pool:
func_name = "avg_pool"
err_margin = 1e-4
else:
if x_init_value is None:
x_init_value = np.asfarray(
np.arange(1, total_size + 1),
dtype=np.float32).reshape(input_sizes)
func_name = "max_pool"
err_margin = 1e-3
t = pool_func(input_tensor, ksize=[1, window_rows, window_rows, 1],
strides=[1, row_stride, col_stride, 1],
padding=padding, name=func_name)
err = tf.test.compute_gradient_error(input_tensor,
input_sizes,
t,
output_sizes,
x_init_value=x_init_value,
delta=1e-2)
print("%s gradient error = " % func_name, err)
self.assertLess(err, err_margin)
def _testMaxPoolGradValidPadding1_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[1, 3, 3, 1],
output_sizes=[1, 3, 3, 1], window_rows=1, window_cols=1, row_stride=1,
col_stride=1, padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_6(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 6, 6, 3],
output_sizes=[2, 5, 5, 3], window_rows=2, window_cols=2, row_stride=1,
col_stride=1, padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_7(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 7, 7, 3],
output_sizes=[2, 6, 6, 3], window_rows=2, window_cols=2, row_stride=1,
col_stride=1, padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_2(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 2, 2, 3],
output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2,
col_stride=2, padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradSamePadding1_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_2(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2,
col_stride=2, padding="SAME", use_gpu=use_gpu)
def _testMaxPoolGradSamePadding3_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def testMaxPoolGrad(self):
for use_gpu in True, False:
self._testMaxPoolGradValidPadding1_1(use_gpu=use_gpu)
self._testMaxPoolGradValidPadding2_1_6(use_gpu=use_gpu)
self._testMaxPoolGradValidPadding2_1_7(use_gpu=use_gpu)
self._testMaxPoolGradValidPadding2_2(use_gpu=use_gpu)
self._testMaxPoolGradSamePadding1_1(use_gpu=use_gpu)
self._testMaxPoolGradSamePadding2_1(use_gpu=use_gpu)
self._testMaxPoolGradSamePadding2_2(use_gpu=use_gpu)
self._testMaxPoolGradSamePadding3_1(use_gpu=use_gpu)
def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows,
window_cols, row_stride, col_stride, padding):
"""Max Pooling Gradient.
Args:
orig_input: A float Tensor. The original input tensor.
orig_output: A float Tensor. The original output tensor.
grad: A float Tensor.
The 4D (batch x rows x cols x depth) output backprop.
window_rows: integer. Kernel size along rows dimension.
window_cols: integer. Kernel size along cols dimension.
row_stride: integer. Stride along rows dimension
col_stride: integer. Stride along cols dimension
padding: PoolingOpDef.Padding. Padding type.
Returns:
A Tensor.
"""
return gen_nn_ops._max_pool_grad(
orig_input, orig_output, grad,
[1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1],
padding)
def _testMaxPoolGradDirect(self, input_data, output_backprop,
expected_input_backprop, input_sizes, output_sizes,
window_rows, window_cols, row_stride, col_stride,
padding, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
input_tensor = tf.constant(input_data, shape=input_sizes)
output_tensor = tf.nn.max_pool(
input_tensor, [1, window_rows, window_cols, 1],
[1, row_stride, col_stride, 1], padding)
output_backprop_tensor = tf.constant(output_backprop,
shape=output_sizes)
input_backprop_tensor = self._MaxPoolGrad(
input_tensor, output_tensor, output_backprop_tensor,
window_rows, window_cols, row_stride, col_stride, padding)
actual_input_backprop = input_backprop_tensor.eval()
self.assertShapeEqual(actual_input_backprop, input_backprop_tensor)
actual_input_backprop = actual_input_backprop.flatten()
actual_input_backprop = self._GetNdArray(actual_input_backprop)
actual_output = output_tensor.eval().flatten()
actual_output = self._GetNdArray(actual_output)
self.assertAllClose(expected_input_backprop, actual_input_backprop,
rtol=1e-6, atol=1e-6)
def _testMaxPoolGradDirect1_1(self):
input_data = [
1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0]
output_backprop = [
11.0, 12.0, 13.0,
15.0, 16.0, 17.0,
19.0, 20.0, 21.0]
expected_input_backprop = [
11.0, 12.0, 13.0, 0.0,
15.0, 16.0, 17.0, 0.0,
19.0, 20.0, 21.0, 0.0,
0.0, 0.0, 0.0, 0.0]
for use_gpu in True, False:
self._testMaxPoolGradDirect(
input_data, output_backprop, expected_input_backprop,
input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
window_rows=2, window_cols=2, row_stride=1, col_stride=1,
padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradDirect1_2(self):
input_data = [
1.0, 0.0, 1.0, 0.0,
0.0, 1.0, 0.0, 1.0,
1.0, 0.0, 1.0, 0.0,
0.0, 1.0, 0.0, 1.0]
output_backprop = [
11.0, 12.0, 13.0,
15.0, 16.0, 17.0,
19.0, 20.0, 21.0]
expected_input_backprop = [
11.0, 0.0, 25.0, 0.0,
0.0, 31.0, 0.0, 17.0,
19.0, 0.0, 41.0, 0.0,
0.0, 0.0, 0.0, 0.0]
for use_gpu in True, False:
self._testMaxPoolGradDirect(
input_data, output_backprop, expected_input_backprop,
input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
window_rows=2, window_cols=2, row_stride=1, col_stride=1,
padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradDirect1_3(self):
input_data = [
1.0, 0.0, 1.0, 0.0,
0.0, 1.0, 0.0, 1.0,
1.0, 0.0, 1.0, 0.0,
0.0, 1.0, 0.0, 1.0,]
output_backprop = [
11.0, 12.0, 13.0, 14.0,
15.0, 16.0, 17.0, 18.0,
19.0, 20.0, 21.0, 22.0,
23.0, 24.0, 25.0, 26.0]
expected_input_backprop = [
54, 0.0, 62, 0.0,
0.0, 60, 0.0, 22.0,
47, 0.0, 51, 0.0,
0.0, 0.0, 0.0, 0.0,]
for use_gpu in True, False:
self._testMaxPoolGradDirect(
input_data, output_backprop, expected_input_backprop,
input_sizes=[1, 4, 4, 1], output_sizes=[1, 4, 4, 1],
window_rows=3, window_cols=3, row_stride=1, col_stride=1,
padding="SAME", use_gpu=use_gpu)
def _testMaxPoolGradDirectWithNans2_1(self):
input_data = [float("nan")] * 16
output_backprop = [
11.0, 12.0, 13.0,
15.0, 16.0, 17.0,
19.0, 20.0, 21.0]
# Test the CPU implementation, which propagates diffs in case of NaN
expected_input_backprop_tf_cpu = [
11.0, 12.0, 13.0, 0.0,
15.0, 16.0, 17.0, 0.0,
19.0, 20.0, 21.0, 0.0,
0.0, 0.0, 0.0, 0.0]
self._testMaxPoolGradDirect(
input_data, output_backprop, expected_input_backprop_tf_cpu,
input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
window_rows=2, window_cols=2, row_stride=1, col_stride=1,
padding="VALID", use_gpu=False)
if not tf.test.IsBuiltWithCuda():
return
# Test the GPU implementation that uses cudnn for now.
# It does not propagate the diff in cases of NaNs
expected_input_backprop_cudnn = [
0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0]
self._testMaxPoolGradDirect(
input_data, output_backprop, expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
window_rows=2, window_cols=2, row_stride=1, col_stride=1,
padding="VALID", use_gpu=True)
def _testMaxPoolGradDirectWithNans2_2(self):
input_data = [float("nan")] * 16
output_backprop = [
float("nan"), 12.0, 13.0,
15.0, float("nan"), 17.0,
19.0, 20.0, float("nan")]
# Test the CPU implementation, which propagates diffs in case of NaN
expected_input_backprop_tf_cpu = [
float("nan"), 12.0, 13.0, 0.0,
15.0, float("nan"), 17.0, 0.0,
19.0, 20.0, float("nan"), 0.0,
0.0, 0.0, 0.0, 0.0]
self._testMaxPoolGradDirect(
input_data, output_backprop, expected_input_backprop_tf_cpu,
input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
window_rows=2, window_cols=2, row_stride=1, col_stride=1,
padding="VALID", use_gpu=False)
if not tf.test.IsBuiltWithCuda():
return
# Test the GPU implementation that uses cudnn for now.
# It does not propagate the diff in cases of NaNs
expected_input_backprop_cudnn = [
0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0]
self._testMaxPoolGradDirect(
input_data, output_backprop, expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
window_rows=2, window_cols=2, row_stride=1, col_stride=1,
padding="VALID", use_gpu=True)
def testMaxPoolGradDirect(self):
self._testMaxPoolGradDirect1_1()
self._testMaxPoolGradDirect1_2()
self._testMaxPoolGradDirect1_3()
self._testMaxPoolGradDirectWithNans2_1()
self._testMaxPoolGradDirectWithNans2_2()
def testAvgPoolGrad(self):
for use_gpu in False, True:
self._testAvgPoolGradValidPadding1_1(use_gpu)
self._testAvgPoolGradValidPadding2_1(use_gpu)
self._testAvgPoolGradValidPadding2_2(use_gpu)
self._testAvgPoolGradSamePadding1_1(use_gpu)
self._testAvgPoolGradSamePadding2_1(use_gpu)
self._testAvgPoolGradSamePadding2_2(use_gpu)
self._testAvgPoolGradSamePadding3_1(use_gpu)
def _testAvgPoolGradValidPadding1_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 3, 3, 3],
output_sizes=[2, 3, 3, 3], window_rows=1, window_cols=1, row_stride=1,
col_stride=1, padding="VALID", use_gpu=use_gpu)
def _testAvgPoolGradValidPadding2_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 3, 3, 3],
output_sizes=[2, 2, 2, 3], window_rows=2, window_cols=2, row_stride=1,
col_stride=1, padding="VALID", use_gpu=use_gpu)
def _testAvgPoolGradValidPadding2_2(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 2, 2, 3],
output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2,
col_stride=2, padding="VALID", use_gpu=use_gpu)
def _testAvgPoolGradSamePadding1_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def _testAvgPoolGradSamePadding2_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def _testAvgPoolGradSamePadding2_2(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2,
col_stride=2, padding="SAME", use_gpu=use_gpu)
def _testAvgPoolGradSamePadding3_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
for pool_func in [tf.nn.max_pool, tf.nn.avg_pool]:
p = tf.nn.max_pool(tf.placeholder(tf.float32),
ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], p.get_shape().as_list())
p, am = tf.nn.max_pool_with_argmax(
tf.placeholder(tf.float32),
ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], p.get_shape().as_list())
self.assertEqual([None, None, None, None], am.get_shape().as_list())
# Incorrect input shape.
for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
tf.nn.max_pool_with_argmax]:
with self.assertRaises(ValueError):
pool_func(tf.placeholder(tf.float32, shape=[1, 3]),
ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1], padding="SAME")
# Illegal strides.
for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
tf.nn.max_pool_with_argmax]:
with self.assertRaisesRegexp(ValueError, "strides in the batch"):
pool_func(tf.placeholder(tf.float32),
ksize=[1, 1, 1, 1], strides=[2, 1, 1, 1], padding="SAME")
with self.assertRaisesRegexp(ValueError, "strides in the batch and depth"):
tf.nn.avg_pool(tf.placeholder(tf.float32),
ksize=[1, 1, 1, 1], strides=[1, 1, 1, 2], padding="SAME")
# Filter larger than input.
for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
tf.nn.max_pool_with_argmax]:
with self.assertRaisesRegexp(ValueError,
"filter must not be larger than the input"):
pool_func(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
ksize=[1, 20, 21, 1], strides=[1, 1, 1, 1], padding="SAME")
with self.assertRaisesRegexp(ValueError,
"filter must not be larger than the input"):
pool_func(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
ksize=[1, 21, 20, 1], strides=[1, 1, 1, 1], padding="SAME")
# Stride larger than filter.
for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
tf.nn.max_pool_with_argmax]:
with self.assertRaisesRegexp(
ValueError, "stride must be less than or equal to filter"):
pool_func(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
ksize=[1, 5, 3, 1], strides=[1, 5, 5, 1], padding="SAME")
with self.assertRaisesRegexp(
ValueError, "stride must be less than or equal to filter"):
pool_func(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
ksize=[1, 3, 5, 1], strides=[1, 5, 5, 1], padding="SAME")
def GetMaxPoolFwdTest(input_size, filter_size, strides, padding):
def Test(self):
# MaxPoolWithArgMax is implemented only on GPU.
if not tf.test.IsBuiltWithCuda():
return
self._CompareMaxPoolingFwd(input_size, filter_size, strides, padding)
return Test
def GetMaxPoolGradTest(input_size, filter_size, output_size, strides, padding):
def Test(self):
# MaxPoolWithArgMax is implemented only on GPU.
if not tf.test.IsBuiltWithCuda():
return
self._CompareMaxPoolingBk(input_size, output_size,
filter_size, strides, padding)
return Test
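# The block below attaches one forward and one gradient test per Inception
# max-pool shape to PoolingTest at import time; GetInceptionMaxPoolShapes is
# assumed to be defined earlier in this file.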
if __name__ == "__main__":
for (name_, input_size_, filter_size_, output_size_, stride_,
padding_) in GetInceptionMaxPoolShapes():
setattr(PoolingTest, "testMaxPoolFwd_" + name_,
GetMaxPoolFwdTest(input_size_, filter_size_, stride_, padding_))
setattr(PoolingTest, "testMaxPoolGrad_" + name_,
GetMaxPoolGradTest(input_size_, filter_size_, output_size_,
stride_, padding_))
tf.test.main()
|
|
#!/usr/bin/env python
#
# Copyright 2011 Rodrigo Ancavil del Pino
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Implementation of module with classes and functions for transform python
classes in xml schema:
See the next example:
from tornadows.complextypes import ComplexType, StringProperty, IntegerProperty
class Person(ComplexType):
name = StringProperty()
age = IntegerProperty()
or you can use some python types
class Person(ComplexType):
name = str
age = int
is equivalent to:
<xsd:complexType name="Person">
<xsd:sequence>
<xsd:element name="name" type="xsd:string"/>
<xsd:element name="age" type="xsd:integer"/>
</xsd:sequence>
</xsd:complexType>
"""
import webui.server.tornadows.xmltypes
import xml.dom.minidom
import inspect
from datetime import date, datetime, time
class Property:
""" Class base for definition of properties of the attributes of a python class """
pass
class IntegerProperty(Property):
""" Class for definitions of Integer Property """
def __init__(self):
self.type = webui.server.tornadows.xmltypes.Integer
self.value = None
class DecimalProperty(Property):
""" Class for definitions of Decimal Property """
def __init__(self):
self.type = webui.server.tornadows.xmltypes.Decimal
self.value = None
class DoubleProperty(Property):
""" Class for definitions of Double Property """
def __init__(self):
self.type = webui.server.tornadows.xmltypes.Double
self.value = None
class FloatProperty(Property):
""" Class for definitions of Float Property """
def __init__(self):
self.type = webui.server.tornadows.xmltypes.Float
self.value = None
class DurationProperty(Property):
""" Class for definitions of Duration Property """
def __init__(self):
self.type = webui.server.tornadows.xmltypes.Duration
self.value = None
class DateProperty(Property):
""" Class for definitions of Date Property """
def __init__(self):
self.type = webui.server.tornadows.xmltypes.Date
self.value = None
class TimeProperty(Property):
""" Class for definitions of Time Property """
def __init__(self):
self.type = webui.server.tornadows.xmltypes.Time
self.value = None
class DateTimeProperty(Property):
""" Class for definitions of DateTime Property """
def __init__(self):
self.type = webui.server.tornadows.xmltypes.DateTime
self.value = None
class StringProperty(Property):
""" Class for definitions of String Property """
def __init__(self):
self.type = webui.server.tornadows.xmltypes.String
self.value = None
class BooleanProperty(Property):
""" Class for definitions of Boolean Property """
def __init__(self):
self.type = webui.server.tornadows.xmltypes.Boolean
self.value = None
class ArrayProperty(list):
""" For create a list of classes """
def __init__(self, object, minOccurs = 1, maxOccurs=None, data=[]):
list.__init__(self,data)
self._minOccurs = minOccurs
self._maxOccurs = maxOccurs
self._object = object
self.append(self._object)
def toXSD(self,namespace='xsd',nameelement=None):
""" Create xml complex type for ArrayProperty """
xsd = self._object.toXSD()
if self._maxOccurs is None:
xsd += '<%s:element name="%s" type="tns:%s" minOccurs="%s"/>'%(namespace,nameelement,self._object.getName(),self._minOccurs)
else:
xsd += '<%s:element name="%s" type="tns:%s" minOccurs="%s" maxOccurs="%s"/>'%(namespace,nameelement,self._object.getName(),str(self._minOccurs),str(self._maxOccurs))
return xsd
class ComplexType(object):
""" Base class for definitions of python class like xml document and schema:
from webui.server.tornadows.complextypes import ComplexType,StringProperty, IntegerProperty
class Person(ComplexType):
name = StringProperty
age = IntegerProperty
if __name__ == '__main__':
print('XML Schema : ')
print(Person.toXSD())
p = Person()
p.name.value = 'Steve J'
p.age.value = 38
print('XML Document : ')
print(p.toXML())
or, if you want to use some python types (int, str, float, bool):
from webui.server.tornadows.complextypes import ComplexType
class Person(ComplexType):
name = str
age = int
if __name__ == '__main__':
print('XML Schema : ')
print(Person.toXSD())
p = Person()
p.name = 'Steve J'
p.age = 38
print('XML Document : ')
print(p.toXML())
"""
def __init__(self):
""" Class constructor for ComplexType """
default_attr = dir(type('default',(object,),{}))
for attr in self.__class__.__dict__.keys():
if default_attr.count(attr) > 0 or inspect.isroutine(self.__class__.__dict__[attr]):
continue
else:
element = self.__class__.__dict__[attr]
typeobj = self._createAttributeType(element)
setattr(self,attr,typeobj)
def toXML(self,name=None,method=''):
""" Method that creates the XML document for the instance of python class.
Return a string with the xml document.
"""
nameroot = None
if name == None:
nameroot = self.__class__.__name__
else:
nameroot = name
nameroot += method
xml = '<%s>'%nameroot
default_attr = dir(type('default',(object,),{}))
for key in dir(self):
if default_attr.count(key) > 0:
continue
element = findElementFromDict(self.__dict__,key)
if element == None:
continue
if isinstance(element,list):
for e in element:
if isinstance(e,ComplexType):
xml += e.toXML(name=key)
else:
xml += '<%s>%s</%s>'%(key,e,key)
elif isinstance(element,Property):
xml += '<%s>%s</%s>'%(key,element.value,key)
elif isinstance(element,ComplexType):
xml += element.toXML(name=key)
else:
xml += '<%s>%s</%s>'%(key,convert(type(element).__name__,element),key)
xml += '</%s>'%nameroot
return str(xml)
@classmethod
def toXSD(cls,xmlns='http://www.w3.org/2001/XMLSchema',namespace='xsd',method='', ltype=None):
""" Class method that creates the XSD document for the python class.
Return a string with the xml schema.
"""
name = cls.__name__
xsd = cls._generateXSD(ltype=ltype)
return xsd
@classmethod
def _generateXSD(cls,xmlns='http://www.w3.org/2001/XMLSchema',namespace='xsd', ltype=None):
""" Class method for get the xml schema with the document definition.
Return a string with the xsd document.
"""
if ltype is None:
ltype = []
default_attr = dir(type('default',(object,),{}))
name = cls.__name__
xsd = '<%s:complexType name="%s" xmlns:%s="%s">'%(namespace,name,namespace,xmlns)
xsd += '<%s:sequence>'%namespace
complextype = []
for key in dir(cls):
if default_attr.count(key) > 0:
continue
element = findElementFromDict(cls.__dict__,key)
if element == None:
continue
if isinstance(element,Property):
xsd += element.type.createElement(str(key))
elif isinstance(element,ComplexType):
nameinstance = key
if ltype.count(element.getName()) == 0:
ltype.append(element.getName())
complextype.append(element._generateXSD(ltype=[]))
xsd += '<%s:element name="%s" type="tns:%s"/>'%(namespace,nameinstance,element.getName())
elif inspect.isclass(element) and issubclass(element,ComplexType):
nameinstance = key
if ltype.count(element.getName()) == 0:
ltype.append(element.getName())
complextype.append(element._generateXSD(ltype=[]))
xsd += '<%s:element name="%s" type="tns:%s"/>'%(namespace,nameinstance,element.getName())
elif isinstance(element,ArrayProperty):
if isinstance(element[0],ComplexType) or issubclass(element[0],ComplexType):
complextype.append(element[0]._generateXSD(ltype=[]))
xsd += '<%s:element name="%s" type="tns:%s" maxOccurs="unbounded"/>'%(namespace,key,element[0].__name__)
else:
typeelement = createPythonType2XMLType(element[0].__name__)
xsd += '<%s:element name="%s" type="%s:%s" maxOccurs="unbounded"/>'%(namespace,key,namespace,typeelement)
elif isinstance(element,list):
if isinstance(element[0],ComplexType) or issubclass(element[0],ComplexType):
if ltype.count(element[0].__name__) == 0:
ltype.append(element[0].__name__)
complextype.append(element[0]._generateXSD(ltype=[]))
xsd += '<%s:element name="%s" type="tns:%s" maxOccurs="unbounded"/>'%(namespace,key,element[0].__name__)
else:
typeelement = createPythonType2XMLType(element[0].__name__)
xsd += '<%s:element name="%s" type="%s:%s" maxOccurs="unbounded"/>'%(namespace,key,namespace,typeelement)
elif hasattr(element,'__name__'):
typeelement = createPythonType2XMLType(element.__name__)
xsd += '<%s:element name="%s" type="%s:%s"/>'%(namespace,str(key),namespace,typeelement)
xsd += '</%s:sequence>'%namespace
xsd += '</%s:complexType>'%namespace
if len(complextype) > 0:
for ct in complextype:
xsd += ct
return xsd
@classmethod
def getName(cls):
""" Class method return the name of the class """
return cls.__name__
@classmethod
def _createAttributeType(self,element):
""" Class method to create the types of the attributes of a ComplexType """
if isinstance(element,list):
return list()
elif isinstance(element,IntegerProperty):
return IntegerProperty()
elif isinstance(element,DecimalProperty):
return DecimalProperty()
elif isinstance(element,DoubleProperty):
return DoubleProperty()
elif isinstance(element,FloatProperty):
return FloatProperty()
elif isinstance(element,DurationProperty):
return DurationProperty()
elif isinstance(element,DateProperty):
return DateProperty()
elif isinstance(element,TimeProperty):
return TimeProperty()
elif isinstance(element,DateTimeProperty):
return DateTimeProperty()
elif isinstance(element,StringProperty):
return StringProperty()
elif isinstance(element,BooleanProperty):
return BooleanProperty()
elif issubclass(element,ComplexType):
return element()
else:
if element.__name__ == 'int':
return int
elif element.__name__ == 'decimal':
return float
elif element.__name__ == 'double':
return float
elif element.__name__ == 'float':
return float
elif element.__name__ == 'duration':
return str
elif element.__name__ == 'date':
return date
elif element.__name__ == 'time':
return time
elif element.__name__ == 'dateTime':
return datetime
elif element.__name__ == 'str':
return str
elif element.__name__ == 'bool':
return bool
def xml2object(xml,xsd,complex,method=''):
""" Function that converts a XML document in a instance of a python class """
namecls = complex.getName()
types = xsd2dict(xsd)
lst = xml2list(xml,namecls,types,method=method)
tps = cls2dict(complex)
obj = generateOBJ(lst,namecls,tps)
return obj
def cls2dict(complex):
""" Function that creates a dictionary from a ComplexType class with the attributes and types """
default_attr = dir(type('default',(object,),{}))
dct = {}
for attr in dir(complex):
if default_attr.count(attr) > 0 or callable(attr):
continue
else:
elem = findElementFromDict(complex.__dict__,attr)
if elem != None:
dct[attr] = elem
return dct
def xsd2dict(xsd,namespace='xsd'):
""" Function that creates a dictionary from a xml schema with the type of element """
types = ['xsd:integer','xsd:decimal','xsd:double','xsd:float','xsd:duration','xsd:date','xsd:time','xsd:dateTime','xsd:string','xsd:boolean']
dct = {}
element = '%s:element'%namespace
elems = xsd.getElementsByTagName(element)
for e in elems:
val = 'complexType'
typ = str(e.getAttribute('type'))
lst = e.hasAttribute('maxOccurs')
if types.count(typ) > 0:
val = 'element'
dct[str(e.getAttribute('name'))] = (val,typ,lst)
return dct
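# Illustrative example (hypothetical element): for
# <xsd:element name="age" type="xsd:integer"/> this yields
# {'age': ('element', 'xsd:integer', False)}; non-primitive types are tagged
# 'complexType' and the boolean records whether maxOccurs is present.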
def xml2list(xmldoc,name,types,method=''):
""" Function that creates a list from xml documento with a tuple element and value """
name = name+method
x = xml.dom.minidom.parseString(xmldoc)
c = None
if x.documentElement.prefix != None:
c = x.getElementsByTagName(x.documentElement.prefix+':'+name)
else:
c = x.getElementsByTagName(name)
attrs = genattr(c)
lst = []
for a in attrs:
t = types[a.nodeName]
typ = t[0]
typxml = t[1]
isarray = t[2]
if typ == 'complexType' or typ == 'list':
l = xml2list(a.toxml(),str(a.nodeName),types)
lst.append((str(a.nodeName),l,isarray))
else:
val = None
if len(a.childNodes) > 0:
val = convert(typxml,str(a.childNodes[0].nodeValue))
# Convert str to bool.
if val == 'true':
val = True
elif val == 'false':
val = False
lst.append((str(a.nodeName),val,isarray))
return lst
def generateOBJ(d,namecls,types):
""" Function that creates a object from a xml document """
dct = {}
lst = []
for a in d:
name = a[0]
value = a[1]
isarray = a[2]
if isinstance(value,list):
o = generateOBJ(value,name,types)
if isarray:
lst.append(o)
dct[name] = lst
else:
dct[name] = o
else:
typ = findElementFromDict(types,name)
if isinstance(typ,Property):
dct[name] = createProperty(typ,value)
else:
dct[name] = value
return type(namecls,(ComplexType,),dct)
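# Note: generateOBJ returns a class created dynamically with type(), carrying
# the parsed values as class attributes, rather than an instance of namecls.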
def createProperty(typ,value):
""" Function that creates a Property class instance, with the value """
ct = None
if isinstance(typ,IntegerProperty):
ct = IntegerProperty()
ct.value = webui.server.tornadows.xmltypes.Integer.genType(value)
elif isinstance(typ,DecimalProperty):
ct = DecimalProperty()
ct.value = webui.server.tornadows.xmltypes.Decimal.genType(value)
elif isinstance(typ,DoubleProperty):
ct = DoubleProperty()
ct.value = webui.server.tornadows.xmltypes.Double.genType(value)
elif isinstance(typ,FloatProperty):
ct = FloatProperty()
ct.value = webui.server.tornadows.xmltypes.Float.genType(value)
elif isinstance(typ,DurationProperty):
ct = DurationProperty()
ct.value = webui.server.tornadows.xmltypes.Duration.genType(value)
elif isinstance(typ,DateProperty):
ct = DateProperty()
ct.value = webui.server.tornadows.xmltypes.Date.genType(value)
elif isinstance(typ,TimeProperty):
ct = TimeProperty()
ct.value = webui.server.tornadows.xmltypes.Time.genType(value)
elif isinstance(typ,DateTimeProperty):
ct = DateTimeProperty()
ct.value = webui.server.tornadows.xmltypes.DateTime.genType(value)
elif isinstance(typ,StringProperty):
ct = StringProperty()
ct.value = webui.server.tornadows.xmltypes.String.genType(value)
elif isinstance(typ,BooleanProperty):
ct = BooleanProperty()
ct.value = webui.server.tornadows.xmltypes.Boolean.genType(value)
return ct
def genattr(elems):
""" Function that generates a list with the childnodes of a xml element """
d = []
for e in elems[0].childNodes:
if e.nodeType == e.ELEMENT_NODE:
d.append(e)
return d
def findElementFromDict(dictionary,key):
""" Function to find a element into a dictionary for the key """
element = None
try:
element = dictionary[key]
return element
except KeyError:
return None
def convert(typeelement,value):
""" Function that converts a value depending his type """
if typeelement == 'xsd:integer' or typeelement == 'int':
return int(value)
elif typeelement == 'xsd:decimal':
return float(value)
elif typeelement == 'xsd:double':
return float(value)
elif typeelement == 'xsd:float' or typeelement == 'float':
return float(value)
elif typeelement == 'xsd:duration':
return str(value)
elif typeelement == 'xsd:date' or typeelement == 'date':
sdate = str(value).split('-')
return date(int(sdate[0]),int(sdate[1]),int(sdate[2]))
elif typeelement == 'xsd:time' or typeelement == 'time':
stime = str(value).split(':')
hour = stime[0]
min = stime[1]
seg = '00'
if len(stime) >= 3:
seg = stime[2].split('.')[0]
return time(int(hour),int(min),int(seg))
elif typeelement == 'xsd:dateTime' or typeelement == 'datetime':
sdatetime = str(value).replace('T','-').replace(' ','-').replace('+','-').split('-')
year = sdatetime[0]
mon = sdatetime[1]
day = sdatetime[2]
stime = sdatetime[3].split(':')
hour = stime[0]
min = stime[1]
seg = '00'
if len(stime) >= 3:
seg = stime[2].split('.')[0]
return datetime(int(year),int(mon),int(day),int(hour),int(min),int(seg)).isoformat('T')
elif typeelement == 'xsd:string' or typeelement == 'str' or typeelement == 'unicode':
return str(value)
elif typeelement == 'xsd:boolean' or typeelement == 'bool':
return str(value).lower()
def createPythonType2XMLType(pyType):
""" Function that creates a xml type from a python type """
xmlType = None
if pyType == 'int':
xmlType = 'integer'
elif pyType == 'decimal':
xmlType = 'decimal'
elif pyType == 'double':
xmlType = 'float'
elif pyType == 'float':
xmlType = 'float'
elif pyType == 'duration':
xmlType = 'duration'
elif pyType == 'date':
xmlType = 'date'
elif pyType == 'time':
xmlType = 'time'
elif pyType == 'datetime':
xmlType = 'dateTime'
elif pyType == 'str':
xmlType = 'string'
elif pyType == 'bool':
xmlType = 'boolean'
return xmlType
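# Illustrative sketch (comments only, not executed; names are hypothetical):
# deserializing an XML document back into an object with xml2object. The xsd
# argument must be a document already parsed with xml.dom.minidom.
#
# schema_dom = xml.dom.minidom.parseString(Person.toXSD())
# obj = xml2object('<Person><age>38</age><name>Steve</name></Person>',
# schema_dom, Person)
# print(obj.age) # 38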
|
|
#!/usr/bin/env python3
import sys
from os import walk
import os
import re
import yaml
import pprint
from devicetree import parse_file
# globals
compatibles = {}
phandles = {}
aliases = {}
chosen = {}
reduced = {}
def convert_string_to_label(s):
# Transmute ,- to _
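# e.g. a hypothetical compatible string "st,stm32-gpio" becomes "st_stm32_gpio"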
s = s.replace("-", "_");
s = s.replace(",", "_");
return s
def get_all_compatibles(d, name, comp_dict):
if 'props' in d:
compat = d['props'].get('compatible')
enabled = d['props'].get('status')
if enabled == "disabled":
return comp_dict
if compat != None:
comp_dict[name] = compat
if name != '/':
name += '/'
if isinstance(d,dict):
if d['children']:
for k,v in d['children'].items():
get_all_compatibles(v, name + k, comp_dict)
return comp_dict
def get_aliases(root):
if 'children' in root:
if 'aliases' in root['children']:
for k,v in root['children']['aliases']['props'].items():
aliases[v] = k
return
def get_compat(node):
compat = None
if 'props' in node:
compat = node['props'].get('compatible')
if isinstance(compat, list):
compat = compat[0]
return compat
def get_chosen(root):
if 'children' in root:
if 'chosen' in root['children']:
for k,v in root['children']['chosen']['props'].items():
chosen[k] = v
return
def get_phandles(root, name, handles):
if 'props' in root:
handle = root['props'].get('phandle')
enabled = root['props'].get('status')
if enabled == "disabled":
return
if handle != None:
phandles[handle] = name
if name != '/':
name += '/'
if isinstance(root, dict):
if root['children']:
for k,v in root['children'].items():
get_phandles(v, name + k, handles)
return
class Loader(yaml.Loader):
def __init__(self, stream):
self._root = os.path.realpath(stream.name)
super(Loader, self).__init__(stream)
Loader.add_constructor('!include', Loader.include)
Loader.add_constructor('!import', Loader.include)
def include(self, node):
if isinstance(node, yaml.ScalarNode):
return self.extractFile(self.construct_scalar(node))
elif isinstance(node, yaml.SequenceNode):
result = []
for filename in self.construct_sequence(node):
result += self.extractFile(filename)
return result
elif isinstance(node, yaml.MappingNode):
result = {}
for k,v in self.construct_mapping(node).items():
result[k] = self.extractFile(v)
return result
else:
print("Error:: unrecognised node type in !include statement")
raise yaml.constructor.ConstructorError
def extractFile(self, filename):
filepath = os.path.join(os.path.dirname(self._root), filename)
if not os.path.isfile(filepath):
# we need to look in common directory
# take path and back up 2 directories and tack on '/common/yaml'
filepath = os.path.dirname(self._root).split('/')
filepath = '/'.join(filepath[:-2])
filepath = os.path.join(filepath + '/common/yaml', filename)
with open(filepath, 'r') as f:
return yaml.load(f, Loader)
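# Illustrative use of the constructors above (hypothetical file name): a device
# binding can pull in a shared fragment with a line such as
# inherits: !include uart.yaml
# which is resolved relative to the binding file, falling back to
# ../../common/yaml relative to the binding's directory.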
def insert_defs(node_address, defs, new_defs, new_aliases):
if node_address in defs:
if 'aliases' in defs[node_address]:
defs[node_address]['aliases'].update(new_aliases)
else:
defs[node_address]['aliases'] = new_aliases
defs[node_address].update(new_defs)
else:
new_defs['aliases'] = new_aliases
defs[node_address] = new_defs
return
def find_node_by_path(nodes, path):
d = nodes
for k in path[1:].split('/'):
d = d['children'][k]
return d
def compress_nodes(nodes, path):
if 'props' in nodes:
status = nodes['props'].get('status')
if status == "disabled":
return
if isinstance(nodes, dict):
reduced[path] = dict(nodes)
reduced[path].pop('children', None)
if path != '/':
path += '/'
if nodes['children']:
for k,v in nodes['children'].items():
compress_nodes(v, path + k)
return
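# After compress_nodes runs, `reduced` maps full node paths (e.g. a hypothetical
# '/soc/uart@40011000') to copies of the node dicts with their 'children'
# removed; nodes whose status is "disabled" are skipped entirely.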
def find_parent_irq_node(node_address):
address = ''
for comp in node_address.split('/')[1:]:
address += '/' + comp
if 'interrupt-parent' in reduced[address]['props']:
interrupt_parent = reduced[address]['props'].get('interrupt-parent')
return reduced[phandles[interrupt_parent]]
def extract_interrupts(node_address, yaml, y_key, names, defs, def_label):
node = reduced[node_address]
try:
props = list(node['props'].get(y_key))
except:
props = [node['props'].get(y_key)]
irq_parent = find_parent_irq_node(node_address)
l_base = def_label.split('/')
index = 0
while props:
prop_def = {}
prop_alias = {}
l_idx = [str(index)]
if y_key == 'interrupts-extended':
cell_parent = reduced[phandles[props.pop(0)]]
name = []
else:
try:
name = [names.pop(0).upper()]
except:
name = []
cell_parent = irq_parent
cell_yaml = yaml[get_compat(cell_parent)]
l_cell_prefix = [yaml[get_compat(irq_parent)].get('cell_string', '').upper()]
for i in range(cell_parent['props']['#interrupt-cells']):
l_cell_name = [cell_yaml['#cells'][i].upper()]
if l_cell_name == l_cell_prefix:
l_cell_name = []
l_fqn = '_'.join(l_base + l_cell_prefix + l_idx + l_cell_name)
prop_def[l_fqn] = props.pop(0)
if len(name):
prop_alias['_'.join(l_base + name + l_cell_prefix)] = l_fqn
index += 1
insert_defs(node_address, defs, prop_def, prop_alias)
return
def extract_reg_prop(node_address, names, defs, def_label, div):
node = reduced[node_address]
props = list(reduced[node_address]['props']['reg'])
address_cells = reduced['/']['props'].get('#address-cells')
size_cells = reduced['/']['props'].get('#size-cells')
address = ''
for comp in node_address.split('/')[1:]:
address += '/' + comp
address_cells = reduced[address]['props'].get('#address-cells', address_cells)
size_cells = reduced[address]['props'].get('#size-cells', size_cells)
index = 0
l_base = def_label.split('/')
l_addr = ["BASE_ADDRESS"]
l_size = ["SIZE"]
while props:
prop_def = {}
prop_alias = {}
addr = 0
size = 0
l_idx = [str(index)]
try:
name = [names.pop(0).upper()]
except:
name = []
for x in range(address_cells):
addr += props.pop(0) << (32 * x)
for x in range(size_cells):
size += props.pop(0) << (32 * x)
l_addr_fqn = '_'.join(l_base + l_addr + l_idx)
l_size_fqn = '_'.join(l_base + l_size + l_idx)
prop_def[l_addr_fqn] = hex(addr)
prop_def[l_size_fqn] = int(size / div)
if len(name):
prop_alias['_'.join(l_base + name + l_addr)] = l_addr_fqn
prop_alias['_'.join(l_base + name + l_size)] = l_size_fqn
if index == 0:
prop_alias['_'.join(l_base + l_addr)] = l_addr_fqn
prop_alias['_'.join(l_base + l_size)] = l_size_fqn
insert_defs(node_address, defs, prop_def, prop_alias)
# increment index for definition creation
index += 1
return
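# Illustrative output (hypothetical node): with def_label 'ST_STM32_USART_40011000',
# reg = <0x40011000 0x400> and div == 1 this emits
# ST_STM32_USART_40011000_BASE_ADDRESS_0 = 0x40011000 and
# ST_STM32_USART_40011000_SIZE_0 = 1024, plus unindexed aliases for region 0.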
def extract_cells(node_address, yaml, y_key, names, index, prefix, defs, def_label):
try:
props = list(reduced[node_address]['props'].get(y_key))
except:
props = [reduced[node_address]['props'].get(y_key)]
cell_parent = reduced[phandles[props.pop(0)]]
try:
cell_yaml = yaml[get_compat(cell_parent)]
except:
raise Exception("Could not find yaml description for " + cell_parent['name'])
try:
name = [names.pop(0).upper()]
except:
name = []
l_cell = [str(cell_yaml.get('cell_string',''))]
l_base = def_label.split('/')
l_base += prefix
l_idx = [str(index)]
prop_def = {}
prop_alias = {}
for k in cell_parent['props'].keys():
if k[0] == '#' and '-cells' in k:
for i in range(cell_parent['props'].get(k)):
l_cellname = [str(cell_yaml['#cells'][i]).upper()]
if l_cell == l_cellname:
label = l_base + l_cell + l_idx
else:
label = l_base + l_cell + l_cellname + l_idx
label_name = l_base + name + l_cellname
prop_def['_'.join(label)] = props.pop(0)
if len(name):
prop_alias['_'.join(label_name)] = '_'.join(label)
if index == 0:
prop_alias['_'.join(label[:-1])] = '_'.join(label)
insert_defs(node_address, defs, prop_def, prop_alias)
# recurse if we have anything left
if len(props):
extract_cells(node_address, yaml, y_key, names, index + 1, prefix, defs, def_label)
return
def extract_pinctrl(node_address, yaml, pinconf, names, index, defs, def_label):
prop_list = []
if not isinstance(pinconf,list):
prop_list.append(pinconf)
else:
prop_list = list(pinconf)
def_prefix = def_label.split('_')
target_node = node_address
prop_def = {}
for p in prop_list:
pin_node_address = phandles[p]
pin_entry = reduced[pin_node_address]
parent_address = '/'.join(pin_node_address.split('/')[:-1])
pin_parent = reduced[parent_address]
cell_yaml = yaml[get_compat(pin_parent)]
cell_prefix = cell_yaml.get('cell_string', None)
post_fix = []
if cell_prefix != None:
post_fix.append(cell_prefix)
for subnode in reduced.keys():
if pin_node_address in subnode and pin_node_address != subnode:
# found a subnode underneath the pinmux handle
node_label = subnode.split('/')[-2:]
pin_label = def_prefix + post_fix + subnode.split('/')[-2:]
for i, pin in enumerate(reduced[subnode]['props']['pins']):
key_label = list(pin_label) + [cell_yaml['#cells'][0]] + [str(i)]
func_label = key_label[:-2] + [cell_yaml['#cells'][1]] + [str(i)]
key_label = convert_string_to_label('_'.join(key_label)).upper()
func_label = convert_string_to_label('_'.join(func_label)).upper()
prop_def[key_label] = pin
prop_def[func_label] = reduced[subnode]['props']['function']
insert_defs(node_address, defs, prop_def, {})
def extract_single(node_address, yaml, prop, key, prefix, defs, def_label):
prop_def = {}
if isinstance(prop, list):
for i, p in enumerate(prop):
k = convert_string_to_label(key).upper()
label = def_label + '_' + k
prop_def[label + '_' + str(i)] = p
else:
k = convert_string_to_label(key).upper()
label = def_label + '_' + k
prop_def[label] = prop
if node_address in defs:
defs[node_address].update(prop_def)
else:
defs[node_address] = prop_def
return
def extract_property(yaml, node_address, y_key, y_val, names, prefix, defs):
node = reduced[node_address]
def_label = convert_string_to_label(get_compat(node)).upper()
def_label += '_' + node_address.split('@')[-1].upper()
if y_key == 'reg':
extract_reg_prop(node_address, names, defs, def_label, 1)
elif y_key == 'interrupts' or y_key == 'interrupts-extended':
extract_interrupts(node_address, yaml, y_key, names, defs, def_label)
elif 'pinctrl-' in y_key:
p_index = int(y_key.split('-')[1])
extract_pinctrl(node_address, yaml, reduced[node_address]['props'][y_key],
names[p_index], p_index, defs, def_label)
elif 'clocks' in y_key:
extract_cells(node_address, yaml, y_key,
names, 0, prefix, defs, def_label)
else:
extract_single(node_address, yaml[get_compat(reduced[node_address])],
reduced[node_address]['props'][y_key], y_key,
prefix, defs, def_label)
return
def extract_node_include_info(reduced, node_address, yaml, defs, structs):
node = reduced[node_address]
node_compat = get_compat(node)
if not node_compat in yaml.keys():
return {}, {}
y_node = yaml[node_compat]
# check to see if we need to process the properties
for yp in y_node['properties']:
for k,v in yp.items():
if 'generation' in v:
if v['generation'] == 'define':
label = v.get('define_string')
storage = defs
else:
label = v.get('structures_string')
storage = structs
prefix = []
if v.get('use-name-prefix') != None:
prefix = [convert_string_to_label(k.upper())]
for c in node['props'].keys():
if c.endswith("-names"):
pass
if re.match(k + '$', c):
if 'pinctrl-' in c:
names = node['props'].get('pinctrl-names', [])
else:
names = node['props'].get(c[:-1] + '-names', [])
if not names:
names = node['props'].get(c + '-names', [])
if not isinstance(names, list):
names = [names]
extract_property(yaml, node_address, c, v, names, prefix, defs)
return
def yaml_collapse(yaml_list):
collapsed = dict(yaml_list)
for k,v in collapsed.items():
props = set()
if 'properties' in v:
for entry in v['properties']:
for key in entry:
props.add(key)
if 'inherits' in v:
for inherited in v['inherits']:
for prop in inherited['properties']:
for key in prop:
if key not in props:
v['properties'].append(prop)
v.pop('inherits')
return collapsed
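# yaml_collapse folds each binding's 'inherits' entries into its own
# 'properties' list, keeping the child's definition when both declare the same
# property key, and then drops the 'inherits' key.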
def print_key_value(k, v, tabstop):
label = "#define " + k
# calculate the name's tabs
if len(label) % 8:
tabs = (len(label) + 7) >> 3
else:
tabs = (len(label) >> 3) + 1
sys.stdout.write(label)
for i in range(0, tabstop - tabs + 1):
sys.stdout.write('\t')
sys.stdout.write(str(v))
sys.stdout.write("\n")
return
def generate_include_file(defs):
compatible = reduced['/']['props']['compatible'][0]
sys.stdout.write("/**************************************************\n")
sys.stdout.write(" * Generated include file for " + compatible)
sys.stdout.write("\n")
sys.stdout.write(" * DO NOT MODIFY\n");
sys.stdout.write(" */\n")
sys.stdout.write("\n")
sys.stdout.write("#ifndef _DEVICE_TREE_BOARD_H" + "\n");
sys.stdout.write("#define _DEVICE_TREE_BOARD_H" + "\n");
sys.stdout.write("\n")
node_keys = sorted(defs.keys())
for node in node_keys:
sys.stdout.write('/* ' + node.split('/')[-1] + ' */')
sys.stdout.write("\n")
maxlength = max(len(s + '#define ') for s in defs[node].keys())
if maxlength % 8:
maxtabstop = (maxlength + 7) >> 3
else:
maxtabstop = (maxlength >> 3) + 1
if (maxtabstop * 8 - maxlength) <= 2:
maxtabstop += 1
prop_keys = sorted(defs[node].keys())
for prop in prop_keys:
if prop == 'aliases':
for entry in sorted(defs[node][prop]):
print_key_value(entry, defs[node][prop].get(entry), maxtabstop)
else:
print_key_value(prop, defs[node].get(prop), maxtabstop)
sys.stdout.write("\n")
sys.stdout.write("#endif\n");
def main(args):
if len(args) < 2:
print('Usage: %s filename.dts path_to_yaml' % args[0])
return 1
try:
with open(args[1], "r") as fd:
d = parse_file(fd)
except:
raise Exception("Input file " + os.path.abspath(args[1]) + " does not exist.")
# compress list to nodes w/ paths, add interrupt parent
compress_nodes(d['/'], '/')
# build up useful lists
compatibles = get_all_compatibles(d['/'], '/', {})
get_phandles(d['/'], '/', {})
get_aliases(d['/'])
get_chosen(d['/'])
# find unique set of compatibles across all active nodes
s = set()
for k,v in compatibles.items():
if isinstance(v,list):
for item in v:
s.add(item)
else:
s.add(v)
# scan YAML files and find the ones we are interested in
yaml_files = []
for (dirpath, dirnames, filenames) in walk(args[2]):
yaml_files.extend([f for f in filenames if re.match(r'.*\.yaml\Z', f)])
yaml_files = [dirpath + '/' + t for t in yaml_files]
break
yaml_list = {}
file_load_list = set()
for file in yaml_files:
for line in open(file, 'r'):
if re.search(r'^\s+constraint:*', line):
c = line.split(':')[1].strip()
c = c.strip('"')
if c in s:
if not file in file_load_list:
file_load_list.add(file)
with open(file, 'r') as yf:
yaml_list[c] = yaml.load(yf, Loader)
if yaml_list == {}:
raise Exception("Missing YAML information. Check YAML sources")
# collapse the yaml inherited information
yaml_list = yaml_collapse(yaml_list)
# load zephyr specific nodes
flash = {}
console = {}
sram = {}
if 'zephyr,flash' in chosen:
flash = reduced[chosen['zephyr,flash']]
if 'zephyr,console' in chosen:
console = reduced[chosen['zephyr,console']]
if 'zephyr,sram' in chosen:
sram = reduced[chosen['zephyr,sram']]
defs = {}
structs = {}
for k, v in reduced.items():
node_compat = get_compat(v)
if node_compat != None and node_compat in yaml_list:
extract_node_include_info(reduced, k, yaml_list, defs, structs)
if defs == {}:
raise Exception("No information parsed from dts file.")
if flash:
extract_reg_prop(chosen['zephyr,flash'], None, defs, "CONFIG_FLASH", 1024)
else:
# We will add address and size of 0 for systems with no flash controller
# This is what they already do in the Kconfig options anyway
defs['dummy-flash'] = { 'CONFIG_FLASH_BASE_ADDRESS': 0, 'CONFIG_FLASH_SIZE': 0 }
if sram:
extract_reg_prop(chosen['zephyr,sram'], None, defs, "CONFIG_SRAM", 1024)
# generate include file
generate_include_file(defs)
if __name__ == '__main__':
# run the generator when executed as a script
sys.exit(main(sys.argv))
|
|
import albow
from albow.dialogs import Dialog
from config import config
import pygame
from albow.translate import _, buildTemplate
import sys
import os
import logging
import traceback
import directories
old_lang = None
old_fprop = None
class OptionsPanel(Dialog):
anchor = 'wh'
def __init__(self, mcedit):
Dialog.__init__(self)
self.mcedit = mcedit
self.langs = {}
self.sgnal = {}
self.portableVar = albow.AttrRef(self, 'portableLabelText')
self.saveOldPortable = self.portableVar.get()
self.saveOldConfig = {
config.controls.autobrake: config.controls.autobrake.get(),
config.controls.swapAxes: config.controls.swapAxes.get(),
config.controls.cameraAccel: config.controls.cameraAccel.get(),
config.controls.cameraDrag: config.controls.cameraDrag.get(),
config.controls.cameraMaxSpeed: config.controls.cameraMaxSpeed.get(),
config.controls.cameraBrakingSpeed: config.controls.cameraBrakingSpeed.get(),
config.controls.mouseSpeed: config.controls.mouseSpeed.get(),
config.settings.undoLimit: config.settings.undoLimit.get(),
config.settings.maxCopies: config.settings.maxCopies.get(),
config.controls.invertMousePitch: config.controls.invertMousePitch.get(),
config.settings.spaceHeight: config.settings.spaceHeight.get(),
albow.AttrRef(self, 'blockBuffer'): albow.AttrRef(self, 'blockBuffer').get(),
config.settings.setWindowPlacement: config.settings.setWindowPlacement.get(),
config.settings.rotateBlockBrush: config.settings.rotateBlockBrush.get(),
config.settings.shouldResizeAlert: config.settings.shouldResizeAlert.get(),
config.settings.superSecretSettings: config.settings.superSecretSettings.get(),
config.settings.longDistanceMode: config.settings.longDistanceMode.get(),
config.settings.flyMode: config.settings.flyMode.get(),
config.settings.langCode: config.settings.langCode.get(),
config.settings.compassToggle: config.settings.compassToggle.get(),
config.settings.compassSize: config.settings.compassSize.get(),
config.settings.fontProportion: config.settings.fontProportion.get(),
config.settings.fogIntensity: config.settings.fogIntensity.get(),
}
global old_lang
if old_lang == None:
old_lang = config.settings.langCode.get()
global old_fprop
if old_fprop == None:
old_fprop = config.settings.fontProportion.get()
def initComponents(self):
"""Initilize the window components. Call this after translation hs been loaded."""
autoBrakeRow = albow.CheckBoxLabel("Autobrake",
ref=config.controls.autobrake,
tooltipText="Apply brake when not pressing movement keys")
swapAxesRow = albow.CheckBoxLabel("Swap Axes Looking Down",
ref=config.controls.swapAxes,
tooltipText="Change the direction of the Forward and Backward keys when looking down")
cameraAccelRow = albow.FloatInputRow("Camera Acceleration: ",
ref=config.controls.cameraAccel, width=100, min=5.0)
cameraDragRow = albow.FloatInputRow("Camera Drag: ",
ref=config.controls.cameraDrag, width=100, min=1.0)
cameraMaxSpeedRow = albow.FloatInputRow("Camera Max Speed: ",
ref=config.controls.cameraMaxSpeed, width=100, min=1.0)
cameraBrakeSpeedRow = albow.FloatInputRow("Camera Braking Speed: ",
ref=config.controls.cameraBrakingSpeed, width=100,
min=1.0)
mouseSpeedRow = albow.FloatInputRow("Mouse Speed: ",
ref=config.controls.mouseSpeed, width=100, min=0.1,
max=20.0)
undoLimitRow = albow.IntInputRow("Undo Limit: ",
ref=config.settings.undoLimit, width=100, min=0)
maxCopiesRow = albow.IntInputRow("Copy Stack Size: ",
ref=config.settings.maxCopies, width=100, min=0,
tooltipText="Maximum number of copied objects.")
compassSizeRow = albow.IntInputRow("Compass Size (%): ",
ref=config.settings.compassSize, width=100, min=0, max=100)
fontProportion = albow.IntInputRow("Fonts Proportion (%): ",
ref=config.settings.fontProportion, width=100, min=0,
tooltipText="Fonts sizing proportion. The number is a percentage.\nRestart needed!")
albow.resource.font_proportion = config.settings.fontProportion.get()
fogIntensityRow = albow.IntInputRow("Fog Intensity (%): ",
ref=config.settings.fogIntensity, width=100, min=0, max=100)
invertRow = albow.CheckBoxLabel("Invert Mouse",
ref=config.controls.invertMousePitch,
tooltipText="Reverse the up and down motion of the mouse.")
spaceHeightRow = albow.IntInputRow("Low Detail Height",
ref=config.settings.spaceHeight,
tooltipText="When you are this far above the top of the world, move fast and use low-detail mode.")
blockBufferRow = albow.IntInputRow("Block Buffer (MB):",
ref=albow.AttrRef(self, 'blockBuffer'), min=1,
tooltipText="Amount of memory used for temporary storage. When more than this is needed, the disk is used instead.")
setWindowPlacementRow = albow.CheckBoxLabel("Set Window Placement",
ref=config.settings.setWindowPlacement,
tooltipText="Try to save and restore the window position.")
rotateBlockBrushRow = albow.CheckBoxLabel("Rotate block with brush",
ref=config.settings.rotateBlockBrush,
tooltipText="When rotating your brush, also rotate the orientation of the block your brushing with")
compassToggleRow = albow.CheckBoxLabel("Toggle compass",
ref=config.settings.compassToggle)
windowSizeRow = albow.CheckBoxLabel("Window Resize Alert",
ref=config.settings.shouldResizeAlert,
tooltipText="Reminds you that the cursor won't work correctly after resizing the window.")
superSecretSettingsRow = albow.CheckBoxLabel("Super Secret Settings",
ref=config.settings.superSecretSettings,
tooltipText="Weird stuff happen!")
longDistanceRow = albow.CheckBoxLabel("Long-Distance Mode",
ref=config.settings.longDistanceMode,
tooltipText="Always target the farthest block under the cursor, even in mouselook mode.")
flyModeRow = albow.CheckBoxLabel("Fly Mode",
ref=config.settings.flyMode,
tooltipText="Moving forward and Backward will not change your altitude in Fly Mode.")
showCommandsRow = albow.CheckBoxLabel("Show Commands",
ref=config.settings.showCommands,
tooltipText="Show the command in a Command Block when hovering over it.")
lng = config.settings.langCode.get()
langs = sorted(self.getLanguageChoices().items())
langNames = [k for k, v in langs]
self.languageButton = albow.ChoiceButton(langNames, choose=self.changeLanguage, doNotTranslate=True)
if self.sgnal[lng] in self.languageButton.choices:
self.languageButton.selectedChoice = self.sgnal[lng]
langButtonRow = albow.Row((albow.Label("Language", tooltipText="Choose your language."), self.languageButton))
portableList = ["Portable", "Fixed"]
self.goPortableButton = goPortableButton = albow.ChoiceButton(portableList, choose=self.togglePortable)
goPortableButton.selectedChoice = self.saveOldPortable
goPortableButton.tooltipText = self.portableButtonTooltip()
goPortableRow = albow.Row((albow.Label("Install Mode"), goPortableButton))
# Disabled Crash Reporting Option
# reportRow = albow.CheckBoxLabel("Report Errors",
# ref=config.settings.reportCrashes,
# tooltipText="Automatically report errors to the developer.")
self.inputs = (
spaceHeightRow,
cameraAccelRow,
cameraDragRow,
cameraMaxSpeedRow,
cameraBrakeSpeedRow,
blockBufferRow,
mouseSpeedRow,
undoLimitRow,
maxCopiesRow,
compassSizeRow,
fontProportion,
fogIntensityRow,
)
options = (
longDistanceRow,
flyModeRow,
autoBrakeRow,
swapAxesRow,
invertRow,
superSecretSettingsRow,
rotateBlockBrushRow,
compassToggleRow,
showCommandsRow,
langButtonRow,
) + (
((sys.platform == "win32" and pygame.version.vernum == (1, 9, 1)) and (windowSizeRow,) or ())
) + (
(sys.platform == "win32") and (setWindowPlacementRow,) or ()
) + (
(not sys.platform == "darwin") and (goPortableRow,) or ()
)
rightcol = albow.Column(options, align='r')
leftcol = albow.Column(self.inputs, align='r')
optionsColumn = albow.Column((albow.Label("Options"),
albow.Row((leftcol, rightcol), align="t")))
settingsRow = albow.Row((optionsColumn,))
buttonsRow = albow.Row((albow.Button("OK", action=self.dismiss), albow.Button("Cancel", action=self.cancel)))
resetToDefaultRow = albow.Row((albow.Button("Reset to default", action=self.resetDefault),))
optionsColumn = albow.Column((settingsRow, buttonsRow, resetToDefaultRow))
optionsColumn.key_down = self.key_down
self.add(optionsColumn)
self.shrink_wrap()
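# blockBuffer is shown in MB in the options dialog but stored in bytes in the
# config; the property below converts using 1048576 bytes per MB.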
@property
def blockBuffer(self):
return config.settings.blockBuffer.get() / 1048576
@blockBuffer.setter
def blockBuffer(self, val):
config.settings.blockBuffer.set(int(val * 1048576))
def getLanguageChoices(self, current=None):
files = os.listdir(albow.translate.langPath)
langs = {}
sgnal = {}
for file in files:
name, ext = os.path.splitext(file)
if ext == ".trn" and len(name) == 5 and name[2] == "_":
langName = albow.translate.getLangName(file)
langs[langName] = name
sgnal[name] = langName
if "English (US)" not in langs.keys():
langs[u"English (US)"] = "en_US"
sgnal["en_US"] = u"English (US)"
self.langs = langs
self.sgnal = sgnal
logging.debug("Detected languages: %s"%self.langs)
return langs
def changeLanguage(self):
if albow.translate.buildTemplate:
self.languageButton.selectedChoice = 'English (US)'
return
langName = self.languageButton.selectedChoice
if langName not in self.langs:
lng = "en_US"
else:
lng = self.langs[langName]
config.settings.langCode.set(lng)
#-# Translation live update preparation
logging.debug('*** Language change detected.')
logging.debug(' Former language: %s.'%albow.translate.getLang())
logging.debug(' New language: %s.'%lng)
albow.translate.langPath = os.sep.join((directories.getDataDir(), "lang"))
update = albow.translate.setLang(lng)[2]
logging.debug(' Update done? %s (Magic %s)'%(update, update or lng == 'en_US'))
self.mcedit.root.set_update_translation(update or lng == 'en_US')
self.mcedit.root.set_update_translation(False)
self.mcedit.editor.set_update_translation(update or lng == 'en_US')
self.mcedit.editor.set_update_translation(False)
#-#
@staticmethod
def portableButtonTooltip():
return (
"Click to make your MCEdit install self-contained by moving the settings and schematics into the program folder",
"Click to make your MCEdit install persistent by moving the settings and schematics into your Documents folder")[
directories.portable]
@property
def portableLabelText(self):
return ("Portable", "Fixed")[1 - directories.portable]
@portableLabelText.setter
def portableLabelText(self, *args, **kwargs):
pass
def togglePortable(self):
if sys.platform == "darwin":
return False
textChoices = [
_("This will make your MCEdit \"portable\" by moving your settings and schematics into the same folder as {0}. Continue?").format(
(sys.platform == "darwin" and _("the MCEdit application") or _("MCEditData"))),
_("This will move your settings and schematics to your Documents folder. Continue?"),
]
alertText = textChoices[directories.portable]
if albow.ask(alertText) == "OK":
try:
[directories.goPortable, directories.goFixed][directories.portable]()
except Exception, e:
traceback.print_exc()
albow.alert(_(u"Error while moving files: {0}").format(repr(e)))
else:
self.goPortableButton.selectedChoice = self.saveOldPortable
self.goPortableButton.tooltipText = self.portableButtonTooltip()
return True
def dismiss(self, *args, **kwargs):
"""Used to change the language and the font proportion"""
lang = config.settings.langCode.get() == old_lang or config.settings.langCode.get() == self.saveOldConfig[config.settings.langCode]
font = config.settings.fontProportion.get() == old_fprop or config.settings.fontProportion.get() == self.saveOldConfig[config.settings.fontProportion]
#-# The following lines will be used for the language and font dynamic changes
#-# The restart boxes will be suppressed.
# lang = config.settings.langCode.get() == self.saveOldConfig[config.settings.langCode]
# font = config.settings.fontProportion.get() == self.saveOldConfig[config.settings.fontProportion]
# self.changeLanguage()
# if not font or not lang:
# editor = self.mcedit.editor
# if editor and editor.unsavedEdits:
# result = albow.ask("You must restart MCEdit to see language changes", ["Save and Restart", "Restart", "Later"])
# else:
# result = albow.ask("You must restart MCEdit to see language changes", ["Restart", "Later"])
# if result == "Save and Restart":
# editor.saveFile()
# self.mcedit.restart()
# elif result == "Restart":
# self.mcedit.restart()
# elif result == "Later":
# pass
self.reshowNumberFields()
for key in self.saveOldConfig.keys():
self.saveOldConfig[key] = key.get()
config.save()
Dialog.dismiss(self, *args, **kwargs)
def cancel(self, *args, **kwargs):
Changes = False
self.reshowNumberFields()
for key in self.saveOldConfig.keys():
if key.get() != self.saveOldConfig[key]:
Changes = True
oldLanguage = self.saveOldConfig[config.settings.langCode]
if config.settings.langCode.get() != oldLanguage:
Changes = True
newPortable = self.portableVar.get()
if newPortable != self.saveOldPortable:
Changes = True
if not Changes:
Dialog.dismiss(self, *args, **kwargs)
return
result = albow.ask("Do you want to save your changes?", ["Save", "Don't Save", "Cancel"])
if result == "Cancel":
return
if result == "Save":
self.dismiss(*args, **kwargs)
return
if config.settings.langCode.get() != oldLanguage:
self.languageButton.selectedChoice = self.sgnal[oldLanguage]
self.changeLanguage()
if _(newPortable) != _(self.saveOldPortable):
self.portableVar.set(newPortable)
self.togglePortable()
for key in self.saveOldConfig.keys():
key.set(self.saveOldConfig[key])
config.save()
Dialog.dismiss(self, *args, **kwargs)
def resetDefault(self):
self.reshowNumberFields()
for key in self.saveOldConfig.keys():
if "AttrRef" in str(key):
key.set(config.settings.blockBuffer.default / 1048576)
elif "lang" not in str(key):
key.set(key.default)
if config.settings.langCode.get() != "en_US":
config.settings.langCode.set("en_US")
self.changeLanguage()
if "Fixed" != self.portableVar.get():
self.portableVar.set("Fixed")
self.togglePortable()
config.save()
def reshowNumberFields(self):
for key in self.inputs:
key.subwidgets[1].editing = False
def dispatch_key(self, name, evt):
super(OptionsPanel, self).dispatch_key(name, evt)
if name == "key_down":
keyname = self.get_root().getKey(evt)
if keyname == 'Escape':
self.cancel()
|
|
import os
from staccato.tests import utils
from staccato.common import config
import staccato.common.exceptions as exceptions
import staccato.xfer.constants as constants
class TestDB(utils.TempFileCleanupBaseTest):
def setUp(self):
super(TestDB, self).setUp()
self.owner = 'someperson'
self.tmp_db = self.get_tempfile()
self.db_url = 'sqlite:///%s' % (self.tmp_db)
conf_d = {'sql_connection': self.db_url,
'protocol_policy': ''}
self.conf_file = self.make_confile(conf_d)
self.conf = config.get_config_object(
args=[],
default_config_files=[self.conf_file])
self.db = self.make_db(self.conf)
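# setUp builds an isolated per-test SQLite database: the sqlite:/// URL above
# points at a temp file obtained from the TempFileCleanupBaseTest helper.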
def test_db_creation(self):
self.assertTrue(os.path.exists(self.tmp_db))
def test_db_new_xfer(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
self.assertEqual(src, xfer.srcurl)
self.assertEqual(dst, xfer.dsturl)
self.assertEqual(sm, xfer.src_module_name)
self.assertEqual(dm, xfer.dst_module_name)
def test_db_xfer_lookup(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.lookup_xfer_request_by_id(xfer1.id)
self.assertEqual(xfer1.id, xfer2.id)
def test_db_xfer_lookup_with_owner(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.lookup_xfer_request_by_id(xfer1.id, owner=self.owner)
self.assertEqual(xfer1.id, xfer2.id)
def test_db_xfer_lookup_with_wrong_owner(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
self.assertRaises(exceptions.StaccatoNotFoundInDBException,
self.db.lookup_xfer_request_by_id,
xfer1.id, **{'owner': 'someoneelse'})
def test_db_xfer_lookup_not_there(self):
self.assertRaises(exceptions.StaccatoNotFoundInDBException,
self.db.lookup_xfer_request_by_id,
"notthere")
def test_db_xfer_update(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer1.next_ndx = 10
self.db.save_db_obj(xfer1)
xfer2 = self.db.lookup_xfer_request_by_id(xfer1.id)
self.assertEqual(xfer2.next_ndx, 10)
def test_lookup_all_no_owner(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer_list = self.db.lookup_xfer_request_all()
id_list = [x.id for x in xfer_list]
self.assertEqual(len(xfer_list), 2)
self.assertTrue(xfer1.id in id_list)
self.assertTrue(xfer2.id in id_list)
def test_lookup_all_owner(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer_list = self.db.lookup_xfer_request_all(owner=self.owner)
id_list = [x.id for x in xfer_list]
self.assertEqual(len(xfer_list), 2)
self.assertTrue(xfer1.id in id_list)
self.assertTrue(xfer2.id in id_list)
def test_lookup_all_wrong_owner(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer_list = self.db.lookup_xfer_request_all(owner='notme')
self.assertEqual(len(xfer_list), 0)
def test_lookup_all_many_owners(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer3 = self.db.get_new_xfer('notme', src, dst, sm, dm)
xfer4 = self.db.get_new_xfer('notme', src, dst, sm, dm)
xfer5 = self.db.get_new_xfer('someoneelse', src, dst, sm, dm)
xfer_list = self.db.lookup_xfer_request_all(owner=self.owner)
id_list = [x.id for x in xfer_list]
self.assertEqual(len(xfer_list), 2)
self.assertTrue(xfer1.id in id_list)
self.assertTrue(xfer2.id in id_list)
def test_get_all_ready_new_no_owner(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer_list = self.db.get_all_ready()
id_list = [x.id for x in xfer_list]
self.assertEqual(len(xfer_list), 2)
self.assertTrue(xfer1.id in id_list)
self.assertTrue(xfer2.id in id_list)
def test_get_all_ready_new_owner(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer_list = self.db.get_all_ready(owner=self.owner)
id_list = [x.id for x in xfer_list]
self.assertEqual(len(xfer_list), 2)
self.assertTrue(xfer1.id in id_list)
self.assertTrue(xfer2.id in id_list)
def test_get_all_ready_wrong_owner(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer_list = self.db.get_all_ready(owner='notme')
self.assertEqual(len(xfer_list), 0)
def test_get_all_ready_some_not(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer3 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer3.state = constants.States.STATE_RUNNING
self.db.save_db_obj(xfer3)
xfer_list = self.db.get_all_ready()
id_list = [x.id for x in xfer_list]
self.assertEqual(len(xfer_list), 2)
self.assertTrue(xfer1.id in id_list)
self.assertTrue(xfer2.id in id_list)
def test_get_all_ready_some_error(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2.state = constants.States.STATE_ERROR
self.db.save_db_obj(xfer2)
xfer_list = self.db.get_all_ready()
id_list = [x.id for x in xfer_list]
self.assertEqual(len(xfer_list), 2)
self.assertTrue(xfer1.id in id_list)
self.assertTrue(xfer2.id in id_list)
def test_get_all_running(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer3 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
for x in [xfer1, xfer2, xfer3]:
x.state = constants.States.STATE_RUNNING
self.db.save_db_obj(x)
xfer_list = self.db.get_all_running()
id_list = [x.id for x in xfer_list]
self.assertEqual(len(xfer_list), 3)
self.assertTrue(xfer1.id in id_list)
self.assertTrue(xfer2.id in id_list)
self.assertTrue(xfer3.id in id_list)
def test_get_all_running_some_not(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer3 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
for x in [xfer1, xfer3]:
x.state = constants.States.STATE_RUNNING
self.db.save_db_obj(x)
xfer_list = self.db.get_all_running()
id_list = [x.id for x in xfer_list]
self.assertEqual(len(xfer_list), 2)
self.assertTrue(xfer1.id in id_list)
self.assertTrue(xfer3.id in id_list)
def test_delete_from_db(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer3 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
self.db.delete_db_obj(xfer2)
xfer_list = self.db.get_all_ready()
id_list = [x.id for x in xfer_list]
self.assertEqual(len(xfer_list), 2)
self.assertTrue(xfer1.id in id_list)
self.assertTrue(xfer3.id in id_list)
def test_get_many_requests_no_owner(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer3 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
id_list = [xfer1.id, xfer2.id, xfer3.id]
xfer_list = self.db.get_xfer_requests(ids=id_list)
self.assertEqual(len(xfer_list), 3)
self.assertTrue(xfer1.id in id_list)
self.assertTrue(xfer2.id in id_list)
self.assertTrue(xfer3.id in id_list)
def test_get_many_requests_owner(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer3 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
id_list = [xfer1.id, xfer2.id, xfer3.id]
xfer_list = self.db.get_xfer_requests(ids=id_list, owner=self.owner)
self.assertEqual(len(xfer_list), 3)
self.assertTrue(xfer1.id in id_list)
self.assertTrue(xfer2.id in id_list)
self.assertTrue(xfer3.id in id_list)
def test_get_many_requests_owner_subset(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer3 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
id_list = [xfer1.id, xfer3.id]
xfer_list = self.db.get_xfer_requests(ids=id_list, owner=self.owner)
self.assertEqual(len(xfer_list), 2)
self.assertTrue(xfer1.id in id_list)
self.assertTrue(xfer3.id in id_list)
def test_get_many_requests_some_wrong_owner_subset(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer3 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer4 = self.db.get_new_xfer('notme', src, dst, sm, dm)
id_list = [xfer1.id, xfer3.id]
xfer_list = self.db.get_xfer_requests(ids=id_list, owner=self.owner)
self.assertEqual(len(xfer_list), 2)
self.assertTrue(xfer1.id in id_list)
self.assertTrue(xfer3.id in id_list)
def test_get_many_requests_get_invalid(self):
src = "src://url"
dst = "dst://url"
sm = "src.module"
dm = "dst.module"
xfer1 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
xfer2 = self.db.get_new_xfer(self.owner, src, dst, sm, dm)
id_list = [xfer1.id, xfer2.id, 'nothereatall']
xfer_list = self.db.get_xfer_requests(ids=id_list, owner=self.owner)
self.assertEqual(len(xfer_list), 2)
self.assertTrue(xfer1.id in id_list)
self.assertTrue(xfer2.id in id_list)
|
|
#!/usr/bin/env python2
from __future__ import division
import os
import sys
import json
import re
import uuid
import pprint; pp = pprint.pprint
from lxml import etree
import mp4check
# ----------------------------------------------------------------------
# movmark: takes trecmarkers output and patches it into the XMP_ box of a .mov file
#
# the .mov file has to have:
# * moov.udta.XMP_ box
# * ... at the end of the file
# * "Chapters" track in the XMP data
#
# add a *chapter* marker to the mov file within Premiere to make this happen.
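#
# Example input (a sketch, not taken from trecmarkers documentation): the
# markers JSON consumed below is assumed to look like
#
#   {"chapters": [{"name": "Intro", "start": 0.0},
#                 {"name": "Demo",  "start": 93.5}]}
#
# i.e. a "chapters" list whose items carry a "name" and a "start" time in
# seconds; those are the only fields this script reads.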
# ----------------------------------------------------------------------
# definitions and utils
xpacket_start = u'<?xpacket begin="\ufeff" id="W5M0MpCehiHzreSzNTczkc9d"?>'.encode('utf8')
xpacket_end = u'<?xpacket end="w"?>'.encode('utf8')
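# xmppad(n, w) returns n characters of whitespace padding split into lines of
# width w; it is used below to pad the rewritten XMP packet up to (at least)
# the size of the original one.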
def xmppad(n, w=100):
res = []
while n >= w:
res.append(' ' * (w-1) + '\n')
n -= w
res.append(' ' * n)
return ''.join(res)
# http://effbot.org/zone/element-namespaces.htm
# http://lxml.de/tutorial.html#using-xpath-to-find-text
# my own definitions, *coincidentally* the same as in the XMP data, but logically they're distinct
nsmap = {
"x": "adobe:ns:meta/",
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"xmp": "http://ns.adobe.com/xap/1.0/",
"xmpDM": "http://ns.adobe.com/xmp/1.0/DynamicMedia/",
"stDim": "http://ns.adobe.com/xap/1.0/sType/Dimensions#",
"xmpMM": "http://ns.adobe.com/xap/1.0/mm/",
"stEvt": "http://ns.adobe.com/xap/1.0/sType/ResourceEvent#",
"stRef": "http://ns.adobe.com/xap/1.0/sType/ResourceRef#",
"bext": "http://ns.adobe.com/bwf/bext/1.0/",
"creatorAtom": "http://ns.adobe.com/creatorAtom/1.0/",
"dc": "http://purl.org/dc/elements/1.1/",
}
def deNS(text):
for key in nsmap:
prefix = key + ":"
text = text.replace(prefix, "{%s}" % nsmap[key])
return text
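# For example, with the nsmap above:
#   deNS("xmpDM:name") -> "{http://ns.adobe.com/xmp/1.0/DynamicMedia/}name"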
def iround(x):
return int(round(x))
# ----------------------------------------------------------------------
if __name__ == '__main__':
movfname = sys.argv[1]
markersfname = '-' # TODO: parse args
assert movfname.endswith('.mov')
markertype = "Chapter"
if len(sys.argv) >= 3:
markertype = sys.argv[2]
# read markers (json) from stdin or file
markers = json.load(sys.stdin if (markersfname == '-') else open(markersfname))
# ----------------------------------------------------------------------
# parse, check box positions
# open output file
filebuf = mp4check.FileBuffer(movfname, 'r+b')
root = mp4check.parse(filebuf)
# locate moov
assert root[-1].type == 'moov'
moovbox = root[-1]
moovcontent = moovbox.content
# locate udta
assert moovbox.content[-1].type == 'udta'
udtabox = moovbox.content[-1]
# locate XMP_
assert udtabox.content[-1].type == 'XMP_'
xmpbox = udtabox.content[-1]
# XMP data really is at end of file
xmpbuf = xmpbox.content
assert xmpbuf.stop == filebuf.stop, "there must not be more data after the XMP_ atom!"
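    # The asserts above enforce this layout (sketch):
    #   [ ...file... ][ moov [ ... ][ udta [ ... ][ XMP_ <xmp packet> ]]] <EOF>
    # i.e. moov is the last top-level box, udta its last child, XMP_ the last
    # child of udta, and the XMP packet runs to the end of the file.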
# get at the XML
xmpdata = xmpbuf.str()
xmptree = etree.XML(xmpdata)
# reset instance ID
(node,) = xmptree.xpath("/x:xmpmeta/rdf:RDF/rdf:Description", namespaces=nsmap)
node.set(
deNS("xmpMM:InstanceID"),
"xmp.iid:{0}".format(uuid.uuid4())) # random UUID
# find a track with given marker type
chaptertracks = xmptree.xpath("/x:xmpmeta/rdf:RDF/rdf:Description/xmpDM:Tracks/rdf:Bag/rdf:li/rdf:Description[@xmpDM:trackName='{0}']".format(markertype), namespaces=nsmap)
assert chaptertracks
(chaptertrack,) = chaptertracks
# TODO: create chapters track if not found
(framerate,) = chaptertrack.xpath('@xmpDM:frameRate', namespaces=nsmap)
framerate = int(re.match(r'f(\d+)$', framerate).group(1))
# this is the list of markers within the chapters track
(chapterseq,) = chaptertrack.xpath('xmpDM:markers/rdf:Seq', namespaces=nsmap)
# to prevent duplication
existing = {
(
int(node.get(deNS('xmpDM:startTime'))),
node.get(deNS('xmpDM:name'))
)
for node
in chapterseq.xpath("rdf:li/rdf:Description", namespaces=nsmap)
}
# ----------------------------------------------------------------------
# add markers
for marker in markers['chapters']:
markername = marker['name']
markertime = marker['start']
timeindex = iround(markertime * framerate)
#error = timeindex / framerate - markertime
if (timeindex, markername) in existing:
print "exists:", marker
continue
# insert marker
item = etree.SubElement(chapterseq, deNS("rdf:li"))
descr = etree.SubElement(item, deNS("rdf:Description"))
descr.set(deNS('xmpDM:startTime'), str(timeindex))
descr.set(deNS('xmpDM:name'), markername)
existing.add((timeindex, markername))
# ----------------------------------------------------------------------
# serialize and patch
xmpdata = etree.tostring(xmptree, encoding='utf8')
    # old payload size is len(xmpbuf); new payload size before padding:
    payload = len(xmpdata) + len(xpacket_start) + len(xpacket_end)
    # pad so the new payload is never smaller than the old one
    padlen = 0
if payload < len(xmpbuf):
padlen = len(xmpbuf) - payload
padlen = max(8000, padlen)
payload += padlen
# for adjusting moov+udta+XMP_ box lengths
delta = payload - len(xmpbuf)
assert delta >= 0
# if not, padding must have gone wrong
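    # Worked example (illustrative numbers): if the regenerated packet is 120
    # bytes smaller than the old XMP_ payload, then padlen = max(8000, 120) =
    # 8000, payload grows by 8000, and delta = 8000 - 120 = 7880, so each of
    # the moov/udta/XMP_ lengths patched below grows by 7880 bytes.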
# this will be written
xmpdata = xpacket_start + xmpdata + xmppad(padlen) + xpacket_end
    # only handle plain 32-bit box lengths: a size of 1 means a 64-bit length
    # follows the tag, and a size of 0 means the box extends to the end of the
    # file; neither special case is supported here
    assert moovbox.buf[">I"] >= 8
    assert udtabox.buf[">I"] >= 8
    assert xmpbox.buf[">I"] >= 8
# patch moov length
moovbox.buf[">I"] += delta
# patch udta length
udtabox.buf[">I"] += delta
# patch XMP_ length
xmpbox.buf[">I"] += delta
filebuf.fp.seek(xmpbuf.start)
filebuf.fp.write(xmpdata)
filebuf.fp.flush()
|
|
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import datetime
from datetime import timedelta
import luigi.date_interval
import luigi
import luigi.interface
from worker_test import EmailTest
import luigi.notifications
from luigi.parameter import UnknownConfigException
luigi.notifications.DEBUG = True
import unittest
from helpers import with_config
EMAIL_CONFIG = {"core": {"error-email": "not-a-real-email-address-for-test-only"}}
class A(luigi.Task):
p = luigi.IntParameter()
class WithDefault(luigi.Task):
x = luigi.Parameter(default='xyz')
class Foo(luigi.Task):
bar = luigi.Parameter()
p2 = luigi.IntParameter()
multi = luigi.Parameter(is_list=True)
not_a_param = "lol"
class Bar(luigi.Task):
multibool = luigi.BooleanParameter(is_list=True)
def run(self):
Bar._val = self.multibool
class Baz(luigi.Task):
bool = luigi.BooleanParameter()
def run(self):
Baz._val = self.bool
class ForgotParam(luigi.Task):
param = luigi.Parameter()
def run(self):
pass
class ForgotParamDep(luigi.Task):
def requires(self):
return ForgotParam()
def run(self):
pass
class HasGlobalParam(luigi.Task):
x = luigi.Parameter()
global_param = luigi.IntParameter(is_global=True, default=123) # global parameters need default values
global_bool_param = luigi.BooleanParameter(is_global=True, default=False)
def run(self):
self.complete = lambda: True
def complete(self):
return False
class HasGlobalParamDep(luigi.Task):
x = luigi.Parameter()
def requires(self):
return HasGlobalParam(self.x)
_shared_global_param = luigi.Parameter(is_global=True, default='123')
class SharedGlobalParamA(luigi.Task):
shared_global_param = _shared_global_param
class SharedGlobalParamB(luigi.Task):
shared_global_param = _shared_global_param
class ParameterTest(EmailTest):
def setUp(self):
super(ParameterTest, self).setUp()
        # Need to restore some defaults for the global params since they are overridden
HasGlobalParam.global_param.set_default(123)
HasGlobalParam.global_bool_param.set_default(False)
def test_default_param(self):
self.assertEquals(WithDefault().x, 'xyz')
def test_missing_param(self):
def create_a():
return A()
self.assertRaises(luigi.parameter.MissingParameterException, create_a)
def test_unknown_param(self):
def create_a():
return A(p=5, q=4)
self.assertRaises(luigi.parameter.UnknownParameterException, create_a)
def test_unknown_param_2(self):
def create_a():
return A(1, 2, 3)
self.assertRaises(luigi.parameter.UnknownParameterException, create_a)
def test_duplicated_param(self):
def create_a():
return A(5, p=7)
self.assertRaises(luigi.parameter.DuplicateParameterException, create_a)
def test_parameter_registration(self):
self.assertEquals(len(Foo.get_params()), 3)
def test_task_creation(self):
f = Foo("barval", p2=5, multi=('m1', 'm2'))
self.assertEquals(len(f.get_params()), 3)
self.assertEquals(f.bar, "barval")
self.assertEquals(f.p2, 5)
self.assertEquals(f.multi, ('m1', 'm2'))
self.assertEquals(f.not_a_param, "lol")
def test_multibool(self):
luigi.run(['--local-scheduler', 'Bar', '--multibool', 'true', '--multibool', 'false'])
self.assertEquals(Bar._val, (True, False))
def test_multibool_empty(self):
luigi.run(['--local-scheduler', 'Bar'])
self.assertEquals(Bar._val, tuple())
def test_bool_false(self):
luigi.run(['--local-scheduler', 'Baz'])
self.assertEquals(Baz._val, False)
def test_bool_true(self):
luigi.run(['--local-scheduler', 'Baz', '--bool'])
self.assertEquals(Baz._val, True)
def test_forgot_param(self):
self.assertRaises(luigi.parameter.MissingParameterException, luigi.run, ['--local-scheduler', 'ForgotParam'],)
@with_config(EMAIL_CONFIG)
def test_forgot_param_in_dep(self):
# A programmatic missing parameter will cause an error email to be sent
luigi.run(['--local-scheduler', 'ForgotParamDep'])
self.assertNotEquals(self.last_email, None)
def test_default_param_cmdline(self):
luigi.run(['--local-scheduler', 'WithDefault'])
self.assertEquals(WithDefault().x, 'xyz')
def test_global_param_defaults(self):
h = HasGlobalParam(x='xyz')
self.assertEquals(h.global_param, 123)
self.assertEquals(h.global_bool_param, False)
def test_global_param_cmdline(self):
luigi.run(['--local-scheduler', 'HasGlobalParam', '--x', 'xyz', '--global-param', '124'])
h = HasGlobalParam(x='xyz')
self.assertEquals(h.global_param, 124)
self.assertEquals(h.global_bool_param, False)
def test_global_param_override(self):
def f():
return HasGlobalParam(x='xyz', global_param=124)
self.assertRaises(luigi.parameter.ParameterException, f) # can't override a global parameter
def test_global_param_dep_cmdline(self):
luigi.run(['--local-scheduler', 'HasGlobalParamDep', '--x', 'xyz', '--global-param', '124'])
h = HasGlobalParam(x='xyz')
self.assertEquals(h.global_param, 124)
self.assertEquals(h.global_bool_param, False)
def test_global_param_dep_cmdline_optparse(self):
luigi.run(['--local-scheduler', '--task', 'HasGlobalParamDep', '--x', 'xyz', '--global-param', '124'], use_optparse=True)
h = HasGlobalParam(x='xyz')
self.assertEquals(h.global_param, 124)
self.assertEquals(h.global_bool_param, False)
def test_global_param_dep_cmdline_bool(self):
luigi.run(['--local-scheduler', 'HasGlobalParamDep', '--x', 'xyz', '--global-bool-param'])
h = HasGlobalParam(x='xyz')
self.assertEquals(h.global_param, 123)
self.assertEquals(h.global_bool_param, True)
def test_global_param_shared(self):
luigi.run(['--local-scheduler', 'SharedGlobalParamA', '--shared-global-param', 'abc'])
b = SharedGlobalParamB()
self.assertEquals(b.shared_global_param, 'abc')
def test_insignificant_parameter(self):
class InsignificantParameterTask(luigi.Task):
foo = luigi.Parameter(significant=False)
bar = luigi.Parameter()
t = InsignificantParameterTask(foo='x', bar='y')
self.assertEquals(t.task_id, 'InsignificantParameterTask(bar=y)')
class TestParamWithDefaultFromConfig(unittest.TestCase):
def testNoSection(self):
self.assertRaises(UnknownConfigException, lambda: luigi.Parameter(default_from_config=dict(section="foo", name="bar")).default)
@with_config({"foo": {}})
def testNoValue(self):
self.assertRaises(UnknownConfigException, lambda: luigi.Parameter(default_from_config=dict(section="foo", name="bar")).default)
@with_config({"foo": {"bar": "baz"}})
def testDefault(self):
class A(luigi.Task):
p = luigi.Parameter(default_from_config=dict(section="foo", name="bar"))
self.assertEquals("baz", A().p)
self.assertEquals("boo", A(p="boo").p)
@with_config({"foo": {"bar": "2001-02-03T04"}})
def testDateHour(self):
p = luigi.DateHourParameter(default_from_config=dict(section="foo", name="bar"))
self.assertEquals(datetime.datetime(2001, 2, 3, 4, 0, 0), p.default)
@with_config({"foo": {"bar": "2001-02-03"}})
def testDate(self):
p = luigi.DateParameter(default_from_config=dict(section="foo", name="bar"))
self.assertEquals(datetime.date(2001, 2, 3), p.default)
@with_config({"foo": {"bar": "123"}})
def testInt(self):
p = luigi.IntParameter(default_from_config=dict(section="foo", name="bar"))
self.assertEquals(123, p.default)
@with_config({"foo": {"bar": "true"}})
def testBool(self):
p = luigi.BooleanParameter(default_from_config=dict(section="foo", name="bar"))
self.assertEquals(True, p.default)
@with_config({"foo": {"bar": "2001-02-03-2001-02-28"}})
def testDateInterval(self):
p = luigi.DateIntervalParameter(default_from_config=dict(section="foo", name="bar"))
expected = luigi.date_interval.Custom.parse("2001-02-03-2001-02-28")
self.assertEquals(expected, p.default)
@with_config({"foo": {"bar": "1 day"}})
def testTimeDelta(self):
p = luigi.TimeDeltaParameter(default_from_config=dict(section="foo", name="bar"))
self.assertEquals(timedelta(days = 1), p.default)
@with_config({"foo": {"bar": "2 seconds"}})
def testTimeDeltaPlural(self):
p = luigi.TimeDeltaParameter(default_from_config=dict(section="foo", name="bar"))
self.assertEquals(timedelta(seconds = 2), p.default)
@with_config({"foo": {"bar": "3w 4h 5m"}})
def testTimeDeltaMultiple(self):
p = luigi.TimeDeltaParameter(default_from_config=dict(section="foo", name="bar"))
self.assertEquals(timedelta(weeks = 3, hours = 4, minutes = 5), p.default)
@with_config({"foo": {"bar": "P4DT12H30M5S"}})
def testTimeDelta8601(self):
p = luigi.TimeDeltaParameter(default_from_config=dict(section="foo", name="bar"))
self.assertEquals(timedelta(days = 4, hours = 12, minutes = 30, seconds = 5), p.default)
@with_config({"foo": {"bar": "P5D"}})
def testTimeDelta8601NoTimeComponent(self):
p = luigi.TimeDeltaParameter(default_from_config=dict(section="foo", name="bar"))
self.assertEquals(timedelta(days = 5), p.default)
@with_config({"foo": {"bar": "P5W"}})
def testTimeDelta8601Weeks(self):
p = luigi.TimeDeltaParameter(default_from_config=dict(section="foo", name="bar"))
self.assertEquals(timedelta(weeks = 5), p.default)
@with_config({"foo": {"bar": "P3Y6M4DT12H30M5S"}})
def testTimeDelta8601YearMonthNotSupported(self):
def f():
return luigi.TimeDeltaParameter(default_from_config=dict(section="foo", name="bar")).default
self.assertRaises(luigi.parameter.ParameterException, f) # ISO 8601 durations with years or months are not supported
@with_config({"foo": {"bar": "PT6M"}})
def testTimeDelta8601MAfterT(self):
p = luigi.TimeDeltaParameter(default_from_config=dict(section="foo", name="bar"))
self.assertEquals(timedelta(minutes = 6), p.default)
@with_config({"foo": {"bar": "P6M"}})
def testTimeDelta8601MBeforeT(self):
def f():
return luigi.TimeDeltaParameter(default_from_config=dict(section="foo", name="bar")).default
self.assertRaises(luigi.parameter.ParameterException, f) # ISO 8601 durations with months are not supported
def testTwoDefaults(self):
self.assertRaises(luigi.parameter.ParameterException, lambda: luigi.Parameter(default="baz", default_from_config=dict(section="foo", name="bar")))
def testHasDefaultNoSection(self):
luigi.Parameter(default_from_config=dict(section="foo", name="bar")).has_default
self.assertFalse(luigi.Parameter(default_from_config=dict(section="foo", name="bar")).has_default)
@with_config({"foo": {}})
def testHasDefaultNoValue(self):
self.assertFalse(luigi.Parameter(default_from_config=dict(section="foo", name="bar")).has_default)
@with_config({"foo": {"bar": "baz"}})
def testHasDefaultWithBoth(self):
self.assertTrue(luigi.Parameter(default_from_config=dict(section="foo", name="bar")).has_default)
@with_config({"foo": {"bar": "one\n\ttwo\n\tthree\n"}})
def testDefaultList(self):
p = luigi.Parameter(is_list=True, default_from_config=dict(section="foo", name="bar"))
self.assertEquals(('one', 'two', 'three'), p.default)
@with_config({"foo": {"bar": "1\n2\n3"}})
def testDefaultIntList(self):
p = luigi.IntParameter(is_list=True, default_from_config=dict(section="foo", name="bar"))
self.assertEquals((1, 2, 3), p.default)
if __name__ == '__main__':
luigi.run(use_optparse=True)
|
|
import ctypes
from pyglet import com
lib = ctypes.oledll.dinput8
LPVOID = ctypes.c_void_p
WORD = ctypes.c_uint16
DWORD = ctypes.c_uint32
LPDWORD = ctypes.POINTER(DWORD)
BOOL = ctypes.c_int
WCHAR = ctypes.c_wchar
UINT = ctypes.c_uint
HWND = ctypes.c_uint32
HANDLE = LPVOID
MAX_PATH = 260
DIENUM_STOP = 0
DIENUM_CONTINUE = 1
DIEDFL_ALLDEVICES = 0x00000000
DIEDFL_ATTACHEDONLY = 0x00000001
DIEDFL_FORCEFEEDBACK = 0x00000100
DIEDFL_INCLUDEALIASES = 0x00010000
DIEDFL_INCLUDEPHANTOMS = 0x00020000
DIEDFL_INCLUDEHIDDEN = 0x00040000
DI8DEVCLASS_ALL = 0
DI8DEVCLASS_DEVICE = 1
DI8DEVCLASS_POINTER = 2
DI8DEVCLASS_KEYBOARD = 3
DI8DEVCLASS_GAMECTRL = 4
DI8DEVTYPE_DEVICE = 0x11
DI8DEVTYPE_MOUSE = 0x12
DI8DEVTYPE_KEYBOARD = 0x13
DI8DEVTYPE_JOYSTICK = 0x14
DI8DEVTYPE_GAMEPAD = 0x15
DI8DEVTYPE_DRIVING = 0x16
DI8DEVTYPE_FLIGHT = 0x17
DI8DEVTYPE_1STPERSON = 0x18
DI8DEVTYPE_DEVICECTRL = 0x19
DI8DEVTYPE_SCREENPOINTER = 0x1A
DI8DEVTYPE_REMOTE = 0x1B
DI8DEVTYPE_SUPPLEMENTAL = 0x1C
DI8DEVTYPEMOUSE_UNKNOWN = 1
DI8DEVTYPEMOUSE_TRADITIONAL = 2
DI8DEVTYPEMOUSE_FINGERSTICK = 3
DI8DEVTYPEMOUSE_TOUCHPAD = 4
DI8DEVTYPEMOUSE_TRACKBALL = 5
DI8DEVTYPEMOUSE_ABSOLUTE = 6
DI8DEVTYPEKEYBOARD_UNKNOWN = 0
DI8DEVTYPEKEYBOARD_PCXT = 1
DI8DEVTYPEKEYBOARD_OLIVETTI = 2
DI8DEVTYPEKEYBOARD_PCAT = 3
DI8DEVTYPEKEYBOARD_PCENH = 4
DI8DEVTYPEKEYBOARD_NOKIA1050 = 5
DI8DEVTYPEKEYBOARD_NOKIA9140 = 6
DI8DEVTYPEKEYBOARD_NEC98 = 7
DI8DEVTYPEKEYBOARD_NEC98LAPTOP = 8
DI8DEVTYPEKEYBOARD_NEC98106 = 9
DI8DEVTYPEKEYBOARD_JAPAN106 = 10
DI8DEVTYPEKEYBOARD_JAPANAX = 11
DI8DEVTYPEKEYBOARD_J3100 = 12
DI8DEVTYPE_LIMITEDGAMESUBTYPE = 1
DI8DEVTYPEJOYSTICK_LIMITED = DI8DEVTYPE_LIMITEDGAMESUBTYPE
DI8DEVTYPEJOYSTICK_STANDARD = 2
DI8DEVTYPEGAMEPAD_LIMITED = DI8DEVTYPE_LIMITEDGAMESUBTYPE
DI8DEVTYPEGAMEPAD_STANDARD = 2
DI8DEVTYPEGAMEPAD_TILT = 3
DI8DEVTYPEDRIVING_LIMITED = DI8DEVTYPE_LIMITEDGAMESUBTYPE
DI8DEVTYPEDRIVING_COMBINEDPEDALS = 2
DI8DEVTYPEDRIVING_DUALPEDALS = 3
DI8DEVTYPEDRIVING_THREEPEDALS = 4
DI8DEVTYPEDRIVING_HANDHELD = 5
DI8DEVTYPEFLIGHT_LIMITED = DI8DEVTYPE_LIMITEDGAMESUBTYPE
DI8DEVTYPEFLIGHT_STICK = 2
DI8DEVTYPEFLIGHT_YOKE = 3
DI8DEVTYPEFLIGHT_RC = 4
DI8DEVTYPE1STPERSON_LIMITED = DI8DEVTYPE_LIMITEDGAMESUBTYPE
DI8DEVTYPE1STPERSON_UNKNOWN = 2
DI8DEVTYPE1STPERSON_SIXDOF = 3
DI8DEVTYPE1STPERSON_SHOOTER = 4
DI8DEVTYPESCREENPTR_UNKNOWN = 2
DI8DEVTYPESCREENPTR_LIGHTGUN = 3
DI8DEVTYPESCREENPTR_LIGHTPEN = 4
DI8DEVTYPESCREENPTR_TOUCH = 5
DI8DEVTYPEREMOTE_UNKNOWN = 2
DI8DEVTYPEDEVICECTRL_UNKNOWN = 2
DI8DEVTYPEDEVICECTRL_COMMSSELECTION = 3
DI8DEVTYPEDEVICECTRL_COMMSSELECTION_HARDWIRED = 4
DI8DEVTYPESUPPLEMENTAL_UNKNOWN = 2
DI8DEVTYPESUPPLEMENTAL_2NDHANDCONTROLLER = 3
DI8DEVTYPESUPPLEMENTAL_HEADTRACKER = 4
DI8DEVTYPESUPPLEMENTAL_HANDTRACKER = 5
DI8DEVTYPESUPPLEMENTAL_SHIFTSTICKGATE = 6
DI8DEVTYPESUPPLEMENTAL_SHIFTER = 7
DI8DEVTYPESUPPLEMENTAL_THROTTLE = 8
DI8DEVTYPESUPPLEMENTAL_SPLITTHROTTLE = 9
DI8DEVTYPESUPPLEMENTAL_COMBINEDPEDALS = 10
DI8DEVTYPESUPPLEMENTAL_DUALPEDALS = 11
DI8DEVTYPESUPPLEMENTAL_THREEPEDALS = 12
DI8DEVTYPESUPPLEMENTAL_RUDDERPEDALS = 13
DIDC_ATTACHED = 0x00000001
DIDC_POLLEDDEVICE = 0x00000002
DIDC_EMULATED = 0x00000004
DIDC_POLLEDDATAFORMAT = 0x00000008
DIDC_FORCEFEEDBACK = 0x00000100
DIDC_FFATTACK = 0x00000200
DIDC_FFFADE = 0x00000400
DIDC_SATURATION = 0x00000800
DIDC_POSNEGCOEFFICIENTS = 0x00001000
DIDC_POSNEGSATURATION = 0x00002000
DIDC_DEADBAND = 0x00004000
DIDC_STARTDELAY = 0x00008000
DIDC_ALIAS = 0x00010000
DIDC_PHANTOM = 0x00020000
DIDC_HIDDEN = 0x00040000
def DIDFT_GETINSTANCE(n):
return (n >> 8) & 0xffff
DIDFT_ALL = 0x00000000
DIDFT_RELAXIS = 0x00000001
DIDFT_ABSAXIS = 0x00000002
DIDFT_AXIS = 0x00000003
DIDFT_PSHBUTTON = 0x00000004
DIDFT_TGLBUTTON = 0x00000008
DIDFT_BUTTON = 0x0000000C
DIDFT_POV = 0x00000010
DIDFT_COLLECTION = 0x00000040
DIDFT_NODATA = 0x00000080
DIDFT_ANYINSTANCE = 0x00FFFF00
DIDFT_INSTANCEMASK = DIDFT_ANYINSTANCE
DIDFT_FFACTUATOR = 0x01000000
DIDFT_FFEFFECTTRIGGER = 0x02000000
DIDFT_OUTPUT = 0x10000000
DIDFT_VENDORDEFINED = 0x04000000
DIDFT_ALIAS = 0x08000000
DIDFT_OPTIONAL = 0x80000000
DIDFT_NOCOLLECTION = 0x00FFFF00
DIA_FORCEFEEDBACK = 0x00000001
DIA_APPMAPPED = 0x00000002
DIA_APPNOMAP = 0x00000004
DIA_NORANGE = 0x00000008
DIA_APPFIXED = 0x00000010
DIAH_UNMAPPED = 0x00000000
DIAH_USERCONFIG = 0x00000001
DIAH_APPREQUESTED = 0x00000002
DIAH_HWAPP = 0x00000004
DIAH_HWDEFAULT = 0x00000008
DIAH_DEFAULT = 0x00000020
DIAH_ERROR = 0x80000000
DIAFTS_NEWDEVICELOW = 0xFFFFFFFF
DIAFTS_NEWDEVICEHIGH = 0xFFFFFFFF
DIAFTS_UNUSEDDEVICELOW = 0x00000000
DIAFTS_UNUSEDDEVICEHIGH = 0x00000000
DIDBAM_DEFAULT = 0x00000000
DIDBAM_PRESERVE = 0x00000001
DIDBAM_INITIALIZE = 0x00000002
DIDBAM_HWDEFAULTS = 0x00000004
DIDSAM_DEFAULT = 0x00000000
DIDSAM_NOUSER = 0x00000001
DIDSAM_FORCESAVE = 0x00000002
DICD_DEFAULT = 0x00000000
DICD_EDIT = 0x00000001
DIDOI_FFACTUATOR = 0x00000001
DIDOI_FFEFFECTTRIGGER = 0x00000002
DIDOI_POLLED = 0x00008000
DIDOI_ASPECTPOSITION = 0x00000100
DIDOI_ASPECTVELOCITY = 0x00000200
DIDOI_ASPECTACCEL = 0x00000300
DIDOI_ASPECTFORCE = 0x00000400
DIDOI_ASPECTMASK = 0x00000F00
DIDOI_GUIDISUSAGE = 0x00010000
DIPH_DEVICE = 0
DIPH_BYOFFSET = 1
DIPH_BYID = 2
DIPH_BYUSAGE = 3
DISCL_EXCLUSIVE = 0x00000001
DISCL_NONEXCLUSIVE = 0x00000002
DISCL_FOREGROUND = 0x00000004
DISCL_BACKGROUND = 0x00000008
DISCL_NOWINKEY = 0x00000010
DIPROP_BUFFERSIZE = 1
GUID_XAxis = \
com.GUID(0xA36D02E0, 0xC9F3, 0x11CF, 0xBF, 0xC7,
0x44, 0x45, 0x53, 0x54, 0x00, 0x00)
class DIDEVICEINSTANCE(ctypes.Structure):
_fields_ = (
('dwSize', DWORD),
('guidInstance', com.GUID),
('guidProduct', com.GUID),
('dwDevType', DWORD),
('tszInstanceName', WCHAR * MAX_PATH),
('tszProductName', WCHAR * MAX_PATH),
('guidFFDriver', com.GUID),
('wUsagePage', WORD),
('wUsage', WORD)
)
LPDIDEVICEINSTANCE = ctypes.POINTER(DIDEVICEINSTANCE)
LPDIENUMDEVICESCALLBACK = ctypes.WINFUNCTYPE(BOOL, LPDIDEVICEINSTANCE, LPVOID)
class DIDEVICEOBJECTINSTANCE(ctypes.Structure):
_fields_ = (
('dwSize', DWORD),
('guidType', com.GUID),
('dwOfs', DWORD),
('dwType', DWORD),
('dwFlags', DWORD),
('tszName', WCHAR * MAX_PATH),
('dwFFMaxForce', DWORD),
('dwFFForceResolution', DWORD),
('wCollectionNumber', WORD),
('wDesignatorIndex', WORD),
('wUsagePage', WORD),
('wUsage', WORD),
('dwDimension', DWORD),
('wExponent', WORD),
('wReportId', WORD)
)
LPDIDEVICEOBJECTINSTANCE = ctypes.POINTER(DIDEVICEOBJECTINSTANCE)
LPDIENUMDEVICEOBJECTSCALLBACK = \
ctypes.WINFUNCTYPE(BOOL, LPDIDEVICEOBJECTINSTANCE, LPVOID)
class DIOBJECTDATAFORMAT(ctypes.Structure):
_fields_ = (
('pguid', ctypes.POINTER(com.GUID)),
('dwOfs', DWORD),
('dwType', DWORD),
('dwFlags', DWORD)
)
__slots__ = [n for n, t in _fields_]
LPDIOBJECTDATAFORMAT = ctypes.POINTER(DIOBJECTDATAFORMAT)
class DIDATAFORMAT(ctypes.Structure):
_fields_ = (
('dwSize', DWORD),
('dwObjSize', DWORD),
('dwFlags', DWORD),
('dwDataSize', DWORD),
('dwNumObjs', DWORD),
('rgodf', LPDIOBJECTDATAFORMAT)
)
__slots__ = [n for n, t in _fields_]
LPDIDATAFORMAT = ctypes.POINTER(DIDATAFORMAT)
class DIDEVICEOBJECTDATA(ctypes.Structure):
_fields_ = (
('dwOfs', DWORD),
('dwData', DWORD),
('dwTimeStamp', DWORD),
('dwSequence', DWORD),
('uAppData', ctypes.POINTER(UINT))
)
LPDIDEVICEOBJECTDATA = ctypes.POINTER(DIDEVICEOBJECTDATA)
class DIPROPHEADER(ctypes.Structure):
_fields_ = (
('dwSize', DWORD),
('dwHeaderSize', DWORD),
('dwObj', DWORD),
('dwHow', DWORD)
)
LPDIPROPHEADER = ctypes.POINTER(DIPROPHEADER)
class DIPROPDWORD(ctypes.Structure):
_fields_ = (
('diph', DIPROPHEADER),
('dwData', DWORD)
)
# All method names in the interfaces are filled in, but unused (so far)
# methods have no parameters; they'll crash when we try to use them, at
# which point we can go in and fill them in.
# IDirect* interfaces are all Unicode (e.g. IDirectInputDevice8W).
class IDirectInputDevice8(com.IUnknown):
_methods_ = [
('GetCapabilities',
com.STDMETHOD()),
('EnumObjects',
com.STDMETHOD(LPDIENUMDEVICEOBJECTSCALLBACK, LPVOID, DWORD)),
('GetProperty',
com.STDMETHOD()),
('SetProperty',
com.STDMETHOD(LPVOID, LPDIPROPHEADER)),
('Acquire',
com.STDMETHOD()),
('Unacquire',
com.STDMETHOD()),
('GetDeviceState',
com.STDMETHOD()),
('GetDeviceData',
com.STDMETHOD(DWORD, LPDIDEVICEOBJECTDATA, LPDWORD, DWORD)),
('SetDataFormat',
com.STDMETHOD(LPDIDATAFORMAT)),
('SetEventNotification',
com.STDMETHOD(HANDLE)),
('SetCooperativeLevel',
com.STDMETHOD(HWND, DWORD)),
('GetObjectInfo',
com.STDMETHOD()),
('GetDeviceInfo',
com.STDMETHOD()),
('RunControlPanel',
com.STDMETHOD()),
('Initialize',
com.STDMETHOD()),
('CreateEffect',
com.STDMETHOD()),
('EnumEffects',
com.STDMETHOD()),
('GetEffectInfo',
com.STDMETHOD()),
('GetForceFeedbackState',
com.STDMETHOD()),
('SendForceFeedbackCommand',
com.STDMETHOD()),
('EnumCreatedEffectObjects',
com.STDMETHOD()),
('Escape',
com.STDMETHOD()),
('Poll',
com.STDMETHOD()),
('SendDeviceData',
com.STDMETHOD()),
('EnumEffectsInFile',
com.STDMETHOD()),
('WriteEffectToFile',
com.STDMETHOD()),
('BuildActionMap',
com.STDMETHOD()),
('SetActionMap',
com.STDMETHOD()),
('GetImageInfo',
com.STDMETHOD()),
]
class IDirectInput8(com.IUnknown):
_methods_ = [
('CreateDevice',
com.STDMETHOD(ctypes.POINTER(com.GUID),
ctypes.POINTER(IDirectInputDevice8),
ctypes.c_void_p)),
('EnumDevices',
com.STDMETHOD(DWORD, LPDIENUMDEVICESCALLBACK, LPVOID, DWORD)),
('GetDeviceStatus',
com.STDMETHOD()),
('RunControlPanel',
com.STDMETHOD()),
('Initialize',
com.STDMETHOD()),
('FindDevice',
com.STDMETHOD()),
('EnumDevicesBySemantics',
com.STDMETHOD()),
('ConfigureDevices',
com.STDMETHOD()),
]
IID_IDirectInput8W = \
com.GUID(0xBF798031, 0x483A, 0x4DA2, 0xAA, 0x99,
0x5D, 0x64, 0xED, 0x36, 0x97, 0x00)
DIRECTINPUT_VERSION = 0x0800
DirectInput8Create = lib.DirectInput8Create
DirectInput8Create.argtypes = \
(ctypes.c_void_p, DWORD, com.LPGUID, ctypes.c_void_p, ctypes.c_void_p)
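# Usage sketch (not part of the original module; obtaining the module handle
# via kernel32.GetModuleHandleW is an assumption about the calling code):
#
#   i_dinput = IDirectInput8()
#   module = ctypes.windll.kernel32.GetModuleHandleW(None)
#   DirectInput8Create(module, DIRECTINPUT_VERSION, IID_IDirectInput8W,
#                      ctypes.byref(i_dinput), None)
#   # i_dinput.EnumDevices(DI8DEVCLASS_GAMECTRL, callback, None,
#   #                      DIEDFL_ATTACHEDONLY) then enumerates attached game
#   #                      controllers via an LPDIENUMDEVICESCALLBACK.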
|
|
# -*- coding: utf-8 -*-
# Copyright 2012-2013 UNED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from datetime import datetime, timedelta
from django.utils.timezone import utc
from django.utils import simplejson
from moocng.api.tests.outputs import BASIC_UNITS, BASIC_UNIT, BASIC_UNIT_PK
from moocng.api.tests.utils import ApiTestCase
from moocng.courses.models import Unit
class UnitsTestCase(ApiTestCase):
def test_get_units_annonymous(self):
# TODO: Check not "public" course
owner = self.create_test_user_owner()
# Test public course
course = self.create_test_basic_course(owner)
self.check_test_get_units(course, is_possible=False)
def test_get_units_user(self):
owner = self.create_test_user_owner()
user = self.create_test_user_user()
self.client = self.django_login_user(self.client, user)
# Test public course
course = self.create_test_basic_course(owner)
self.check_test_get_units(course, is_possible=True)
def test_get_units_alum(self):
owner = self.create_test_user_owner()
alum1 = self.create_test_user_alum1()
self.client = self.django_login_user(self.client, alum1)
# Test public course
course = self.create_test_basic_course(owner)
self.check_test_get_units(course, is_possible=True)
def test_get_units_teacher(self):
owner = self.create_test_user_owner()
teacher1 = self.create_test_user_teacher1()
self.client = self.django_login_user(self.client, teacher1)
# Test public course
course = self.create_test_basic_course(owner, teacher=teacher1)
self.check_test_get_units(course, is_possible=True)
def test_get_units_owner(self):
owner = self.create_test_user_owner()
self.client = self.django_login_user(self.client, owner)
        # Test public course
course = self.create_test_basic_course(owner)
self.check_test_get_units(course, is_possible=True)
def test_get_units_admin(self):
owner = self.create_test_user_owner()
admin = self.create_test_user_admin()
self.client = self.django_login_user(self.client, admin)
# Test public course
course = self.create_test_basic_course(owner)
self.check_test_get_units(course, is_possible=True)
def test_get_units_userkey(self):
owner = self.create_test_user_owner()
user = self.create_test_user_user()
key = str(uuid.uuid4())
self.generate_apikeyuser(user, key)
# Test public course
course = self.create_test_basic_course(owner)
self.check_test_get_units(course, is_possible=False, key=key)
# Auxiliary function
def check_test_get_units(self, course, is_possible=False, key=None):
# Test units with no start, no deadline (only normal units)
unit = self.create_test_basic_unit(course, 'n')
if key:
response = self.client.get('/api/%s/unit/%s&key=%s' % (self.api_name, self.format_append, key))
else:
response = self.client.get('/api/%s/unit/%s' % (self.api_name, self.format_append))
if is_possible:
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, BASIC_UNITS)
else:
self.assertEqual(response.status_code, 401)
now = datetime.utcnow().replace(tzinfo=utc)
aux_basic_units = simplejson.loads(BASIC_UNITS)
# Test units with start and deadline, referenced date before start
# strftime('%Y-%m-%dT%H:%M:%S%z')
start = now + timedelta(days=1)
deadline = now + timedelta(days=2)
unit.unittype = 'h'
unit.start = start
unit.deadline = deadline
unit.save()
aux_basic_units['objects'][0]['unittype'] = u'h'
aux_basic_units['objects'][0]['start'] = unicode(start.isoformat())
aux_basic_units['objects'][0]['deadline'] = unicode(deadline.isoformat())
if key:
response = self.client.get('/api/%s/unit/%s&key=%s' % (self.api_name, self.format_append, key))
else:
response = self.client.get('/api/%s/unit/%s' % (self.api_name, self.format_append))
if is_possible:
self.assertEqual(response.status_code, 200)
self.assertEqual(simplejson.loads(response.content), aux_basic_units)
else:
self.assertEqual(response.status_code, 401)
unit.unittype = 'e'
aux_basic_units['objects'][0]['unittype'] = u'e'
unit.save()
if key:
response = self.client.get('/api/%s/unit/%s&key=%s' % (self.api_name, self.format_append, key))
else:
response = self.client.get('/api/%s/unit/%s' % (self.api_name, self.format_append))
if is_possible:
self.assertEqual(response.status_code, 200)
self.assertEqual(simplejson.loads(response.content), aux_basic_units)
else:
self.assertEqual(response.status_code, 401)
# Test units with start and deadline, referenced date between start, deadline
start = now - timedelta(days=1)
deadline = now + timedelta(days=1)
unit.unittype = 'h'
unit.start = start
unit.deadline = deadline
unit.save()
aux_basic_units['objects'][0]['unittype'] = u'h'
aux_basic_units['objects'][0]['start'] = unicode(start.isoformat())
aux_basic_units['objects'][0]['deadline'] = unicode(deadline.isoformat())
if key:
response = self.client.get('/api/%s/unit/%s&key=%s' % (self.api_name, self.format_append, key))
else:
response = self.client.get('/api/%s/unit/%s' % (self.api_name, self.format_append))
if is_possible:
self.assertEqual(response.status_code, 200)
self.assertEqual(simplejson.loads(response.content), aux_basic_units)
else:
self.assertEqual(response.status_code, 401)
unit.unittype = 'e'
aux_basic_units['objects'][0]['unittype'] = u'e'
unit.save()
if key:
response = self.client.get('/api/%s/unit/%s&key=%s' % (self.api_name, self.format_append, key))
else:
response = self.client.get('/api/%s/unit/%s' % (self.api_name, self.format_append))
if is_possible:
self.assertEqual(response.status_code, 200)
self.assertEqual(simplejson.loads(response.content), aux_basic_units)
else:
self.assertEqual(response.status_code, 401)
# Test units with start and deadline, referenced date after deadline
start = now - timedelta(days=2)
deadline = now - timedelta(days=1)
unit.unittype = 'h'
unit.start = start
unit.deadline = deadline
unit.save()
aux_basic_units['objects'][0]['unittype'] = u'h'
aux_basic_units['objects'][0]['start'] = unicode(start.isoformat())
aux_basic_units['objects'][0]['deadline'] = unicode(deadline.isoformat())
if key:
response = self.client.get('/api/%s/unit/%s&key=%s' % (self.api_name, self.format_append, key))
else:
response = self.client.get('/api/%s/unit/%s' % (self.api_name, self.format_append))
if is_possible:
self.assertEqual(response.status_code, 200)
self.assertEqual(simplejson.loads(response.content), aux_basic_units)
else:
self.assertEqual(response.status_code, 401)
unit.unittype = 'e'
aux_basic_units['objects'][0]['unittype'] = u'e'
unit.save()
if key:
response = self.client.get('/api/%s/unit/%s&key=%s' % (self.api_name, self.format_append, key))
else:
response = self.client.get('/api/%s/unit/%s' % (self.api_name, self.format_append))
if is_possible:
self.assertEqual(response.status_code, 200)
self.assertEqual(simplejson.loads(response.content), aux_basic_units)
else:
self.assertEqual(response.status_code, 401)
class UnitTestCase(ApiTestCase):
def test_get_unit_annonymous(self):
# TODO: Check not "public" course
owner = self.create_test_user_owner()
# Test public course
course = self.create_test_basic_course(owner)
self.check_test_get_unit(course, is_possible=False)
def test_get_unit_user(self):
owner = self.create_test_user_owner()
user = self.create_test_user_user()
self.client = self.django_login_user(self.client, user)
# Test public course
course = self.create_test_basic_course(owner)
self.check_test_get_unit(course, is_possible=True)
def test_get_unit_alum(self):
owner = self.create_test_user_owner()
alum1 = self.create_test_user_alum1()
self.client = self.django_login_user(self.client, alum1)
# Test public course
course = self.create_test_basic_course(owner)
self.check_test_get_unit(course, is_possible=True)
def test_get_unit_teacher(self):
owner = self.create_test_user_owner()
teacher1 = self.create_test_user_teacher1()
self.client = self.django_login_user(self.client, teacher1)
# Test public course
course = self.create_test_basic_course(owner, teacher=teacher1)
self.check_test_get_unit(course, is_possible=True)
def test_get_unit_owner(self):
owner = self.create_test_user_owner()
self.client = self.django_login_user(self.client, owner)
# Test public course
course = self.create_test_basic_course(owner)
self.check_test_get_unit(course, is_possible=True)
def test_get_unit_admin(self):
owner = self.create_test_user_owner()
admin = self.create_test_user_admin()
self.client = self.django_login_user(self.client, admin)
# Test public course
course = self.create_test_basic_course(owner)
self.check_test_get_unit(course, is_possible=True)
def test_get_unit_userkey(self):
owner = self.create_test_user_owner()
user = self.create_test_user_user()
key = str(uuid.uuid4())
self.generate_apikeyuser(user, key)
# Test public course
course = self.create_test_basic_course(owner)
self.check_test_get_unit(course, is_possible=False, key=key)
# Auxiliary function
def check_test_get_unit(self, course, is_possible=False, key=None):
# Test unit with no start, no deadline (normal unit)
unit = self.create_test_basic_unit(course, 'n')
        if key:
            response = self.client.get('/api/%s/unit/1/%s&key=%s' % (self.api_name, self.format_append, key))
        else:
            response = self.client.get('/api/%s/unit/1/%s' % (self.api_name, self.format_append))
if is_possible:
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, BASIC_UNIT)
else:
self.assertEqual(response.status_code, 401)
now = datetime.utcnow().replace(tzinfo=utc)
aux_basic_unit = simplejson.loads(BASIC_UNIT)
# Test unit with start and deadline, referenced date before start
# strftime('%Y-%m-%dT%H:%M:%S%z')
start = now + timedelta(days=1)
deadline = now + timedelta(days=2)
unit.unittype = 'h'
unit.start = start
unit.deadline = deadline
unit.save()
aux_basic_unit['unittype'] = u'h'
aux_basic_unit['start'] = unicode(start.isoformat())
aux_basic_unit['deadline'] = unicode(deadline.isoformat())
        if key:
            response = self.client.get('/api/%s/unit/1/%s&key=%s' % (self.api_name, self.format_append, key))
        else:
            response = self.client.get('/api/%s/unit/1/%s' % (self.api_name, self.format_append))
if is_possible:
self.assertEqual(response.status_code, 200)
self.assertEqual(simplejson.loads(response.content), aux_basic_unit)
else:
self.assertEqual(response.status_code, 401)
unit.unittype = 'e'
aux_basic_unit['unittype'] = u'e'
unit.save()
        if key:
            response = self.client.get('/api/%s/unit/1/%s&key=%s' % (self.api_name, self.format_append, key))
        else:
            response = self.client.get('/api/%s/unit/1/%s' % (self.api_name, self.format_append))
if is_possible:
self.assertEqual(response.status_code, 200)
self.assertEqual(simplejson.loads(response.content), aux_basic_unit)
else:
self.assertEqual(response.status_code, 401)
# Test unit with start and deadline, referenced date between start, deadline
start = now - timedelta(days=1)
deadline = now + timedelta(days=1)
unit.unittype = 'h'
unit.start = start
unit.deadline = deadline
unit.save()
aux_basic_unit['unittype'] = u'h'
aux_basic_unit['start'] = unicode(start.isoformat())
aux_basic_unit['deadline'] = unicode(deadline.isoformat())
        if key:
            response = self.client.get('/api/%s/unit/1/%s&key=%s' % (self.api_name, self.format_append, key))
        else:
            response = self.client.get('/api/%s/unit/1/%s' % (self.api_name, self.format_append))
if is_possible:
self.assertEqual(response.status_code, 200)
self.assertEqual(simplejson.loads(response.content), aux_basic_unit)
else:
self.assertEqual(response.status_code, 401)
unit.unittype = 'e'
aux_basic_unit['unittype'] = u'e'
unit.save()
        if key:
            response = self.client.get('/api/%s/unit/1/%s&key=%s' % (self.api_name, self.format_append, key))
        else:
            response = self.client.get('/api/%s/unit/1/%s' % (self.api_name, self.format_append))
if is_possible:
self.assertEqual(response.status_code, 200)
self.assertEqual(simplejson.loads(response.content), aux_basic_unit)
else:
self.assertEqual(response.status_code, 401)
# Test unit with start and deadline, referenced date after deadline
start = now - timedelta(days=2)
deadline = now - timedelta(days=1)
unit.unittype = 'h'
unit.start = start
unit.deadline = deadline
unit.save()
aux_basic_unit['unittype'] = u'h'
aux_basic_unit['start'] = unicode(start.isoformat())
aux_basic_unit['deadline'] = unicode(deadline.isoformat())
        if key:
            response = self.client.get('/api/%s/unit/1/%s&key=%s' % (self.api_name, self.format_append, key))
        else:
            response = self.client.get('/api/%s/unit/1/%s' % (self.api_name, self.format_append))
if is_possible:
self.assertEqual(response.status_code, 200)
self.assertEqual(simplejson.loads(response.content), aux_basic_unit)
else:
self.assertEqual(response.status_code, 401)
unit.unittype = 'e'
aux_basic_unit['unittype'] = u'e'
unit.save()
        if key:
            response = self.client.get('/api/%s/unit/1/%s&key=%s' % (self.api_name, self.format_append, key))
        else:
            response = self.client.get('/api/%s/unit/1/%s' % (self.api_name, self.format_append))
if is_possible:
self.assertEqual(response.status_code, 200)
self.assertEqual(simplejson.loads(response.content), aux_basic_unit)
else:
self.assertEqual(response.status_code, 401)
# Create Unit
def test_create_unit_annonymous(self):
owner = self.create_test_user_owner()
course = self.create_test_basic_course(owner)
response = self.client.post('/api/%s/unit/%s' % (self.api_name, self.format_append),
BASIC_UNIT, content_type='application/json')
self.assertEqual(response.status_code, 401)
def test_create_unit_user(self):
owner = self.create_test_user_owner()
user = self.create_test_user_user()
self.client = self.django_login_user(self.client, user)
course = self.create_test_basic_course(owner)
response = self.client.post('/api/%s/unit/%s' % (self.api_name, self.format_append),
BASIC_UNIT, content_type='application/json')
self.assertEqual(response.status_code, 401)
def test_create_unit_alum(self):
owner = self.create_test_user_owner()
alum1 = self.create_test_user_alum1()
self.client = self.django_login_user(self.client, alum1)
course = self.create_test_basic_course(owner=owner, student=alum1)
response = self.client.post('/api/%s/unit/%s' % (self.api_name, self.format_append),
BASIC_UNIT, content_type='application/json')
self.assertEqual(response.status_code, 401)
def test_create_unit_teacher(self):
owner = self.create_test_user_owner()
teacher1 = self.create_test_user_teacher1()
self.client = self.django_login_user(self.client, teacher1)
course = self.create_test_basic_course(owner=owner, teacher=teacher1)
response = self.client.post('/api/%s/unit/%s' % (self.api_name, self.format_append),
BASIC_UNIT, content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content, BASIC_UNIT)
created_unit = Unit.objects.filter(id=1)
self.assertEqual(len(created_unit), 1)
def test_create_unit_owner(self):
owner = self.create_test_user_owner()
self.client = self.django_login_user(self.client, owner)
course = self.create_test_basic_course(owner=owner)
response = self.client.post('/api/%s/unit/%s' % (self.api_name, self.format_append),
BASIC_UNIT, content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content, BASIC_UNIT)
created_unit = Unit.objects.filter(id=1)
self.assertEqual(len(created_unit), 1)
def test_create_unit__admin(self):
owner = self.create_test_user_owner()
admin = self.create_test_user_admin()
self.client = self.django_login_user(self.client, admin)
course = self.create_test_basic_course(owner=owner)
response = self.client.post('/api/%s/unit/%s' % (self.api_name, self.format_append),
BASIC_UNIT, content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content, BASIC_UNIT)
created_unit = Unit.objects.filter(id=1)
self.assertEqual(len(created_unit), 1)
def test_create_unit_userkey(self):
owner = self.create_test_user_owner()
course = self.create_test_basic_course(owner=owner)
user = self.create_test_user_user()
key = str(uuid.uuid4())
self.generate_apikeyuser(user, key)
response = self.client.post('/api/%s/unit/%s&key=%s' % (self.api_name, self.format_append, key),
BASIC_UNIT, content_type='application/json')
self.assertEqual(response.status_code, 401)
# Update Unit
def test_put_unit_annonymous(self):
owner = self.create_test_user_owner()
course = self.create_test_basic_course(owner)
self.create_test_basic_unit(course, 'n')
response = self.client.put('/api/%s/unit/1/%s' % (self.api_name, self.format_append),
BASIC_UNIT, content_type='application/json')
self.assertEqual(response.status_code, 401)
def test_put_unit_user(self):
owner = self.create_test_user_owner()
course = self.create_test_basic_course(owner)
user = self.create_test_user_user()
self.client = self.django_login_user(self.client, user)
self.create_test_basic_unit(course, 'n')
response = self.client.put('/api/%s/unit/1/%s' % (self.api_name, self.format_append),
BASIC_UNIT, content_type='application/json')
self.assertEqual(response.status_code, 401)
def test_put_unit_alum(self):
owner = self.create_test_user_owner()
alum1 = self.create_test_user_alum1()
self.client = self.django_login_user(self.client, alum1)
course = self.create_test_basic_course(owner=owner, student=alum1)
self.create_test_basic_unit(course, 'n')
response = self.client.put('/api/%s/unit/1/%s' % (self.api_name, self.format_append),
BASIC_UNIT, content_type='application/json')
self.assertEqual(response.status_code, 401)
def test_put_unit_teacher(self):
owner = self.create_test_user_owner()
teacher1 = self.create_test_user_teacher1()
self.client = self.django_login_user(self.client, teacher1)
course = self.create_test_basic_course(owner=owner, teacher=teacher1)
self.create_test_basic_unit(course, 'n')
response = self.client.put('/api/%s/unit/1/%s' % (self.api_name, self.format_append),
BASIC_UNIT, content_type='application/json')
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, BASIC_UNIT_PK)
def test_put_unit_owner(self):
owner = self.create_test_user_owner()
self.client = self.django_login_user(self.client, owner)
course = self.create_test_basic_course(owner=owner)
self.create_test_basic_unit(course, 'n')
response = self.client.put('/api/%s/unit/1/%s' % (self.api_name, self.format_append),
BASIC_UNIT, content_type='application/json')
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, BASIC_UNIT_PK)
def test_put_unit_admin(self):
owner = self.create_test_user_owner()
admin = self.create_test_user_admin()
self.client = self.django_login_user(self.client, admin)
course = self.create_test_basic_course(owner=owner)
self.create_test_basic_unit(course, 'n')
response = self.client.put('/api/%s/unit/1/%s' % (self.api_name, self.format_append),
BASIC_UNIT, content_type='application/json')
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, BASIC_UNIT_PK)
def test_put_unit_userkey(self):
owner = self.create_test_user_owner()
course = self.create_test_basic_course(owner=owner)
self.create_test_basic_unit(course, 'n')
user = self.create_test_user_user()
key = str(uuid.uuid4())
self.generate_apikeyuser(user, key)
response = self.client.put('/api/%s/unit/1/%s&key=%s' % (self.api_name, self.format_append, key),
BASIC_UNIT, content_type='application/json')
self.assertEqual(response.status_code, 401)
# Delete Unit
def test_delete_unit_annonymous(self):
owner = self.create_test_user_owner()
course = self.create_test_basic_course(owner)
self.create_test_basic_unit(course, 'n')
response = self.client.delete('/api/%s/unit/1/%s' % (self.api_name, self.format_append))
self.assertEqual(response.status_code, 401)
def test_delete_unit_user(self):
owner = self.create_test_user_owner()
course = self.create_test_basic_course(owner)
user = self.create_test_user_user()
self.client = self.django_login_user(self.client, user)
self.create_test_basic_unit(course, 'n')
response = self.client.delete('/api/%s/unit/1/%s' % (self.api_name, self.format_append))
self.assertEqual(response.status_code, 401)
def test_delete_unit_alum(self):
owner = self.create_test_user_owner()
alum1 = self.create_test_user_alum1()
self.client = self.django_login_user(self.client, alum1)
course = self.create_test_basic_course(owner=owner, student=alum1)
self.create_test_basic_unit(course, 'n')
response = self.client.delete('/api/%s/unit/1/%s' % (self.api_name, self.format_append))
self.assertEqual(response.status_code, 401)
def test_delete_unit_teacher(self):
owner = self.create_test_user_owner()
teacher1 = self.create_test_user_teacher1()
self.client = self.django_login_user(self.client, teacher1)
course = self.create_test_basic_course(owner=owner, teacher=teacher1)
self.create_test_basic_unit(course, 'n')
response = self.client.delete('/api/%s/unit/1/%s' % (self.api_name, self.format_append))
self.assertEqual(response.status_code, 204)
unit = Unit.objects.filter(id=1)
self.assertEqual(len(unit), 0)
def test_delete_unit_owner(self):
owner = self.create_test_user_owner()
self.client = self.django_login_user(self.client, owner)
course = self.create_test_basic_course(owner=owner)
self.create_test_basic_unit(course, 'n')
response = self.client.delete('/api/%s/unit/1/%s' % (self.api_name, self.format_append))
self.assertEqual(response.status_code, 204)
unit = Unit.objects.filter(id=1)
self.assertEqual(len(unit), 0)
def test_delete_unit_admin(self):
owner = self.create_test_user_owner()
admin = self.create_test_user_admin()
self.client = self.django_login_user(self.client, admin)
course = self.create_test_basic_course(owner=owner)
self.create_test_basic_unit(course, 'n')
response = self.client.delete('/api/%s/unit/1/%s' % (self.api_name, self.format_append))
self.assertEqual(response.status_code, 204)
unit = Unit.objects.filter(id=1)
self.assertEqual(len(unit), 0)
def test_delete_unit_userkey(self):
owner = self.create_test_user_owner()
course = self.create_test_basic_course(owner=owner)
self.create_test_basic_unit(course, 'n')
user = self.create_test_user_user()
key = str(uuid.uuid4())
self.generate_apikeyuser(user, key)
response = self.client.delete('/api/%s/unit/1/%s&key=%s' % (self.api_name, self.format_append, key))
self.assertEqual(response.status_code, 401)
|
|
#!/usr/bin/env python
''' Python DB API 2.0 driver compliance unit test suite.
This software is Public Domain and may be used without restrictions.
"Now we have booze and barflies entering the discussion, plus rumours of
DBAs on drugs... and I won't tell you what flashes through my mind each
time I read the subject line with 'Anal Compliance' in it. All around
this is turning out to be a thoroughly unwholesome unit test."
-- Ian Bicking
'''
__rcs_id__ = '$Id: dbapi20.py,v 1.11 2005/01/02 02:41:01 zenzen Exp $'
__version__ = '$Revision: 1.12 $'[11:-2]
__author__ = 'Stuart Bishop <[email protected]>'
import unittest
import time
import sys
# Revision 1.12 2009/02/06 03:35:11 kf7xm
# Tested okay with Python 3.0, includes last minute patches from Mark H.
#
# Revision 1.1.1.1.2.1 2008/09/20 19:54:59 rupole
# Include latest changes from main branch
# Updates for py3k
#
# Revision 1.11 2005/01/02 02:41:01 zenzen
# Update author email address
#
# Revision 1.10 2003/10/09 03:14:14 zenzen
# Add test for DB API 2.0 optional extension, where database exceptions
# are exposed as attributes on the Connection object.
#
# Revision 1.9 2003/08/13 01:16:36 zenzen
# Minor tweak from Stefan Fleiter
#
# Revision 1.8 2003/04/10 00:13:25 zenzen
# Changes, as per suggestions by M.-A. Lemburg
# - Add a table prefix, to ensure namespace collisions can always be avoided
#
# Revision 1.7 2003/02/26 23:33:37 zenzen
# Break out DDL into helper functions, as per request by David Rushby
#
# Revision 1.6 2003/02/21 03:04:33 zenzen
# Stuff from Henrik Ekelund:
# added test_None
# added test_nextset & hooks
#
# Revision 1.5 2003/02/17 22:08:43 zenzen
# Implement suggestions and code from Henrik Eklund - test that cursor.arraysize
# defaults to 1 & generic cursor.callproc test added
#
# Revision 1.4 2003/02/15 00:16:33 zenzen
# Changes, as per suggestions and bug reports by M.-A. Lemburg,
# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar
# - Class renamed
# - Now a subclass of TestCase, to avoid requiring the driver stub
# to use multiple inheritance
# - Reversed the polarity of buggy test in test_description
# - Test exception hierarchy correctly
# - self.populate is now self._populate(), so if a driver stub
# overrides self.ddl1 this change propagates
# - VARCHAR columns now have a width, which will hopefully make the
# DDL even more portible (this will be reversed if it causes more problems)
# - cursor.rowcount being checked after various execute and fetchXXX methods
# - Check for fetchall and fetchmany returning empty lists after results
# are exhausted (already checking for empty lists if select retrieved
# nothing
# - Fix bugs in test_setoutputsize_basic and test_setinputsizes
#
def str2bytes(sval):
if sys.version_info < (3,0) and isinstance(sval, str):
sval = sval.decode("latin1")
return sval.encode("latin1")
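# For example, str2bytes('Victoria Bitter') yields b'Victoria Bitter' on both
# Python 2 and 3; the extra latin1 decode only applies to Python 2 byte
# strings so that the encode call always operates on text.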
class DatabaseAPI20Test(unittest.TestCase):
''' Test a database self.driver for DB API 2.0 compatibility.
This implementation tests Gadfly, but the TestCase
is structured so that other self.drivers can subclass this
        test case to ensure compliance with the DB-API. It is
expected that this TestCase may be expanded in the future
if ambiguities or edge conditions are discovered.
The 'Optional Extensions' are not yet being tested.
self.drivers should subclass this test, overriding setUp, tearDown,
self.driver, connect_args and connect_kw_args. Class specification
should be as follows:
import dbapi20
class mytest(dbapi20.DatabaseAPI20Test):
[...]
Don't 'import DatabaseAPI20Test from dbapi20', or you will
confuse the unit tester - just 'import dbapi20'.
'''
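    # A minimal sketch of such a subclass (sqlite3 is used purely as an
    # illustration; substitute your own driver module and connection
    # arguments):
    #
    #     import sqlite3
    #     import dbapi20
    #
    #     class test_Sqlite3(dbapi20.DatabaseAPI20Test):
    #         driver = sqlite3
    #         connect_args = (':memory:',)
    #         connect_kw_args = {}
    #         def test_nextset(self): pass        # driver specific, see below
    #         def test_setoutputsize(self): pass  # driver specific, see below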
# The self.driver module. This should be the module where the 'connect'
# method is to be found
driver = None
connect_args = () # List of arguments to pass to connect
connect_kw_args = {} # Keyword arguments for connect
table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables
ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix
ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix
xddl1 = 'drop table %sbooze' % table_prefix
xddl2 = 'drop table %sbarflys' % table_prefix
lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase
# Some drivers may need to override these helpers, for example adding
# a 'commit' after the execute.
def executeDDL1(self,cursor):
cursor.execute(self.ddl1)
def executeDDL2(self,cursor):
cursor.execute(self.ddl2)
def setUp(self):
''' self.drivers should override this method to perform required setup
if any is necessary, such as creating the database.
'''
pass
def tearDown(self):
''' self.drivers should override this method to perform required cleanup
if any is necessary, such as deleting the test database.
The default drops the tables that may be created.
'''
con = self._connect()
try:
cur = con.cursor()
for ddl in (self.xddl1,self.xddl2):
try:
cur.execute(ddl)
con.commit()
except self.driver.Error:
# Assume table didn't exist. Other tests will check if
# execute is busted.
pass
finally:
con.close()
def _connect(self):
try:
return self.driver.connect(
*self.connect_args,**self.connect_kw_args
)
except AttributeError:
self.fail("No connect method found in self.driver module")
def test_connect(self):
con = self._connect()
con.close()
def test_apilevel(self):
try:
# Must exist
apilevel = self.driver.apilevel
# Must equal 2.0
self.assertEqual(apilevel,'2.0')
except AttributeError:
self.fail("Driver doesn't define apilevel")
def test_threadsafety(self):
try:
# Must exist
threadsafety = self.driver.threadsafety
# Must be a valid value
self.failUnless(threadsafety in (0,1,2,3))
except AttributeError:
self.fail("Driver doesn't define threadsafety")
def test_paramstyle(self):
try:
# Must exist
paramstyle = self.driver.paramstyle
# Must be a valid value
self.failUnless(paramstyle in (
'qmark','numeric','named','format','pyformat'
))
except AttributeError:
self.fail("Driver doesn't define paramstyle")
def test_Exceptions(self):
# Make sure required exceptions exist, and are in the
# defined hierarchy.
        if sys.version[0] == '3': #under Python 3 StandardError no longer exists
self.failUnless(issubclass(self.driver.Warning,Exception))
self.failUnless(issubclass(self.driver.Error,Exception))
else:
self.failUnless(issubclass(self.driver.Warning,StandardError))
self.failUnless(issubclass(self.driver.Error,StandardError))
self.failUnless(
issubclass(self.driver.InterfaceError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.DatabaseError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.OperationalError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.IntegrityError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.InternalError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.ProgrammingError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.NotSupportedError,self.driver.Error)
)
def test_ExceptionsAsConnectionAttributes(self):
# OPTIONAL EXTENSION
# Test for the optional DB API 2.0 extension, where the exceptions
# are exposed as attributes on the Connection object
# I figure this optional extension will be implemented by any
# driver author who is using this test suite, so it is enabled
# by default.
con = self._connect()
drv = self.driver
self.failUnless(con.Warning is drv.Warning)
self.failUnless(con.Error is drv.Error)
self.failUnless(con.InterfaceError is drv.InterfaceError)
self.failUnless(con.DatabaseError is drv.DatabaseError)
self.failUnless(con.OperationalError is drv.OperationalError)
self.failUnless(con.IntegrityError is drv.IntegrityError)
self.failUnless(con.InternalError is drv.InternalError)
self.failUnless(con.ProgrammingError is drv.ProgrammingError)
self.failUnless(con.NotSupportedError is drv.NotSupportedError)
def test_commit(self):
con = self._connect()
try:
# Commit must work, even if it doesn't do anything
con.commit()
finally:
con.close()
def test_rollback(self):
con = self._connect()
# If rollback is defined, it should either work or throw
# the documented exception
if hasattr(con,'rollback'):
try:
con.rollback()
except self.driver.NotSupportedError:
pass
def test_cursor(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
def test_cursor_isolation(self):
con = self._connect()
try:
# Make sure cursors created from the same connection have
# the documented transaction isolation level
cur1 = con.cursor()
cur2 = con.cursor()
self.executeDDL1(cur1)
cur1.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
cur2.execute("select name from %sbooze" % self.table_prefix)
booze = cur2.fetchall()
self.assertEqual(len(booze),1)
self.assertEqual(len(booze[0]),1)
self.assertEqual(booze[0][0],'Victoria Bitter')
finally:
con.close()
def test_description(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
self.assertEqual(cur.description,None,
'cursor.description should be none after executing a '
'statement that can return no rows (such as DDL)'
)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(len(cur.description),1,
'cursor.description describes too many columns'
)
self.assertEqual(len(cur.description[0]),7,
'cursor.description[x] tuples must have 7 elements'
)
self.assertEqual(cur.description[0][0].lower(),'name',
'cursor.description[x][0] must return column name'
)
self.assertEqual(cur.description[0][1],self.driver.STRING,
'cursor.description[x][1] must return column type. Got %r'
% cur.description[0][1]
)
# Make sure self.description gets reset
self.executeDDL2(cur)
self.assertEqual(cur.description,None,
'cursor.description not being set to None when executing '
'no-result statements (eg. DDL)'
)
finally:
con.close()
def test_rowcount(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
self.assertEqual(cur.rowcount,-1,
'cursor.rowcount should be -1 after executing no-result '
'statements'
)
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.failUnless(cur.rowcount in (-1,1),
                'cursor.rowcount should == number of rows inserted, or '
'set to -1 after executing an insert statement'
)
cur.execute("select name from %sbooze" % self.table_prefix)
self.failUnless(cur.rowcount in (-1,1),
'cursor.rowcount should == number of rows returned, or '
'set to -1 after executing a select statement'
)
self.executeDDL2(cur)
self.assertEqual(cur.rowcount,-1,
'cursor.rowcount not being reset to -1 after executing '
'no-result statements'
)
finally:
con.close()
lower_func = 'lower'
def test_callproc(self):
con = self._connect()
try:
cur = con.cursor()
if self.lower_func and hasattr(cur,'callproc'):
r = cur.callproc(self.lower_func,('FOO',))
self.assertEqual(len(r),1)
self.assertEqual(r[0],'FOO')
r = cur.fetchall()
self.assertEqual(len(r),1,'callproc produced no result set')
self.assertEqual(len(r[0]),1,
'callproc produced invalid result set'
)
self.assertEqual(r[0][0],'foo',
'callproc produced invalid results'
)
finally:
con.close()
def test_close(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
# cursor.execute should raise an Error if called after connection
# closed
self.assertRaises(self.driver.Error,self.executeDDL1,cur)
        # connection.commit should raise an Error if called after connection
        # is closed.
self.assertRaises(self.driver.Error,con.commit)
# connection.close should raise an Error if called more than once
        # Issue discussed on DB-SIG: consensus seems to be that close() should not
        # raise an Error if called on already-closed objects. Issue reported back to Stuart.
# self.assertRaises(self.driver.Error,con.close)
def test_execute(self):
con = self._connect()
try:
cur = con.cursor()
self._paraminsert(cur)
finally:
con.close()
def _paraminsert(self,cur):
self.executeDDL1(cur)
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.failUnless(cur.rowcount in (-1,1))
if self.driver.paramstyle == 'qmark':
cur.execute(
'insert into %sbooze values (?)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'numeric':
cur.execute(
'insert into %sbooze values (:1)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'named':
cur.execute(
'insert into %sbooze values (:beer)' % self.table_prefix,
{'beer':"Cooper's"}
)
elif self.driver.paramstyle == 'format':
cur.execute(
'insert into %sbooze values (%%s)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'pyformat':
cur.execute(
'insert into %sbooze values (%%(beer)s)' % self.table_prefix,
{'beer':"Cooper's"}
)
else:
self.fail('Invalid paramstyle')
self.failUnless(cur.rowcount in (-1,1))
cur.execute('select name from %sbooze' % self.table_prefix)
res = cur.fetchall()
self.assertEqual(len(res),2,'cursor.fetchall returned too few rows')
beers = [res[0][0],res[1][0]]
beers.sort()
self.assertEqual(beers[0],"Cooper's",
'cursor.fetchall retrieved incorrect data, or data inserted '
'incorrectly'
)
self.assertEqual(beers[1],"Victoria Bitter",
'cursor.fetchall retrieved incorrect data, or data inserted '
'incorrectly'
)
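    # For reference, the placeholder syntax exercised above for each
    # paramstyle (<prefix> stands for self.table_prefix):
    #   qmark     insert into <prefix>booze values (?)          with a sequence
    #   numeric   insert into <prefix>booze values (:1)         with a sequence
    #   named     insert into <prefix>booze values (:beer)      with a mapping
    #   format    insert into <prefix>booze values (%s)         with a sequence
    #   pyformat  insert into <prefix>booze values (%(beer)s)   with a mapping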
def test_executemany(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
largs = [ ("Cooper's",) , ("Boag's",) ]
margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ]
if self.driver.paramstyle == 'qmark':
cur.executemany(
'insert into %sbooze values (?)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'numeric':
cur.executemany(
'insert into %sbooze values (:1)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'named':
cur.executemany(
'insert into %sbooze values (:beer)' % self.table_prefix,
margs
)
elif self.driver.paramstyle == 'format':
cur.executemany(
'insert into %sbooze values (%%s)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'pyformat':
cur.executemany(
'insert into %sbooze values (%%(beer)s)' % (
self.table_prefix
),
margs
)
else:
self.fail('Unknown paramstyle')
self.failUnless(cur.rowcount in (-1,2),
'insert using cursor.executemany set cursor.rowcount to '
'incorrect value %r' % cur.rowcount
)
cur.execute('select name from %sbooze' % self.table_prefix)
res = cur.fetchall()
self.assertEqual(len(res),2,
'cursor.fetchall retrieved incorrect number of rows'
)
beers = [res[0][0],res[1][0]]
beers.sort()
self.assertEqual(beers[0],"Boag's",'incorrect data retrieved')
self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved')
finally:
con.close()
def test_fetchone(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchone should raise an Error if called before
# executing a select-type query
self.assertRaises(self.driver.Error,cur.fetchone)
# cursor.fetchone should raise an Error if called after
# executing a query that cannot return rows
self.executeDDL1(cur)
self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if a query retrieves '
'no rows'
)
self.failUnless(cur.rowcount in (-1,0))
# cursor.fetchone should raise an Error if called after
# executing a query that cannot return rows
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchone()
self.assertEqual(len(r),1,
'cursor.fetchone should have retrieved a single row'
)
self.assertEqual(r[0],'Victoria Bitter',
'cursor.fetchone retrieved incorrect data'
)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if no more rows available'
)
self.failUnless(cur.rowcount in (-1,1))
finally:
con.close()
samples = [
'Carlton Cold',
'Carlton Draft',
'Mountain Goat',
'Redback',
'Victoria Bitter',
'XXXX'
]
def _populate(self):
''' Return a list of sql commands to setup the DB for the fetch
tests.
'''
populate = [
"insert into %sbooze values ('%s')" % (self.table_prefix,s)
for s in self.samples
]
return populate
def test_fetchmany(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchmany should raise an Error if called without
#issuing a query
self.assertRaises(self.driver.Error,cur.fetchmany,4)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany()
self.assertEqual(len(r),1,
'cursor.fetchmany retrieved incorrect number of rows, '
'default of arraysize is one.'
)
cur.arraysize=10
r = cur.fetchmany(3) # Should get 3 rows
self.assertEqual(len(r),3,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should get 2 more
self.assertEqual(len(r),2,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should be an empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence after '
'results are exhausted'
)
self.failUnless(cur.rowcount in (-1,6))
# Same as above, using cursor.arraysize
cur.arraysize=4
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany() # Should get 4 rows
self.assertEqual(len(r),4,
'cursor.arraysize not being honoured by fetchmany'
)
r = cur.fetchmany() # Should get 2 more
self.assertEqual(len(r),2)
r = cur.fetchmany() # Should be an empty sequence
self.assertEqual(len(r),0)
self.failUnless(cur.rowcount in (-1,6))
cur.arraysize=6
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchmany() # Should get all rows
self.failUnless(cur.rowcount in (-1,6))
            self.assertEqual(len(rows),6)
rows = [r[0] for r in rows]
rows.sort()
# Make sure we get the right data back out
for i in range(0,6):
self.assertEqual(rows[i],self.samples[i],
'incorrect data retrieved by cursor.fetchmany'
)
rows = cur.fetchmany() # Should return an empty list
self.assertEqual(len(rows),0,
'cursor.fetchmany should return an empty sequence if '
'called after the whole result set has been fetched'
)
self.failUnless(cur.rowcount in (-1,6))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
r = cur.fetchmany() # Should get empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence if '
'query retrieved no rows'
)
self.failUnless(cur.rowcount in (-1,0))
finally:
con.close()
def test_fetchall(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchall should raise an Error if called
# without executing a query that may return rows (such
# as a select)
self.assertRaises(self.driver.Error, cur.fetchall)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
# cursor.fetchall should raise an Error if called
            # after executing a statement that cannot return rows
self.assertRaises(self.driver.Error,cur.fetchall)
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchall()
self.failUnless(cur.rowcount in (-1,len(self.samples)))
self.assertEqual(len(rows),len(self.samples),
'cursor.fetchall did not retrieve all rows'
)
rows = [r[0] for r in rows]
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'cursor.fetchall retrieved incorrect rows'
)
rows = cur.fetchall()
self.assertEqual(
len(rows),0,
'cursor.fetchall should return an empty list if called '
'after the whole result set has been fetched'
)
self.failUnless(cur.rowcount in (-1,len(self.samples)))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
rows = cur.fetchall()
self.failUnless(cur.rowcount in (-1,0))
self.assertEqual(len(rows),0,
'cursor.fetchall should return an empty list if '
'a select query returns no rows'
)
finally:
con.close()
def test_mixedfetch(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute('select name from %sbooze' % self.table_prefix)
rows1 = cur.fetchone()
rows23 = cur.fetchmany(2)
rows4 = cur.fetchone()
rows56 = cur.fetchall()
self.failUnless(cur.rowcount in (-1,6))
self.assertEqual(len(rows23),2,
'fetchmany returned incorrect number of rows'
)
self.assertEqual(len(rows56),2,
'fetchall returned incorrect number of rows'
)
rows = [rows1[0]]
rows.extend([rows23[0][0],rows23[1][0]])
rows.append(rows4[0])
rows.extend([rows56[0][0],rows56[1][0]])
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'incorrect data retrieved or inserted'
)
finally:
con.close()
def help_nextset_setUp(self,cur):
''' Should create a procedure called deleteme
that returns two result sets, first the
number of rows in booze then "name from booze"
'''
raise NotImplementedError('Helper not implemented')
#sql="""
# create procedure deleteme as
# begin
# select count(*) from booze
# select name from booze
# end
#"""
#cur.execute(sql)
def help_nextset_tearDown(self,cur):
'If cleaning up is needed after nextSetTest'
raise NotImplementedError('Helper not implemented')
#cur.execute("drop procedure deleteme")
def test_nextset(self):
con = self._connect()
try:
cur = con.cursor()
if not hasattr(cur,'nextset'):
return
try:
self.executeDDL1(cur)
sql=self._populate()
for sql in self._populate():
cur.execute(sql)
self.help_nextset_setUp(cur)
cur.callproc('deleteme')
numberofrows=cur.fetchone()
assert numberofrows[0]== len(self.samples)
assert cur.nextset()
names=cur.fetchall()
assert len(names) == len(self.samples)
s=cur.nextset()
assert s == None,'No more return sets, should return None'
finally:
self.help_nextset_tearDown(cur)
finally:
con.close()
def test_nextset(self):
raise NotImplementedError('Drivers need to override this test')
def test_arraysize(self):
# Not much here - rest of the tests for this are in test_fetchmany
con = self._connect()
try:
cur = con.cursor()
self.failUnless(hasattr(cur,'arraysize'),
'cursor.arraysize must be defined'
)
finally:
con.close()
def test_setinputsizes(self):
con = self._connect()
try:
cur = con.cursor()
cur.setinputsizes( (25,) )
self._paraminsert(cur) # Make sure cursor still works
finally:
con.close()
def test_setoutputsize_basic(self):
# Basic test is to make sure setoutputsize doesn't blow up
con = self._connect()
try:
cur = con.cursor()
cur.setoutputsize(1000)
cur.setoutputsize(2000,0)
self._paraminsert(cur) # Make sure the cursor still works
finally:
con.close()
def test_setoutputsize(self):
# Real test for setoutputsize is driver dependent
        raise NotImplementedError('Drivers need to override this test')
def test_None(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
cur.execute('insert into %sbooze values (NULL)' % self.table_prefix)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchall()
self.assertEqual(len(r),1)
self.assertEqual(len(r[0]),1)
self.assertEqual(r[0][0],None,'NULL value not returned as None')
finally:
con.close()
def test_Date(self):
d1 = self.driver.Date(2002,12,25)
d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(d1),str(d2))
def test_Time(self):
t1 = self.driver.Time(13,45,30)
t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Timestamp(self):
t1 = self.driver.Timestamp(2002,12,25,13,45,30)
t2 = self.driver.TimestampFromTicks(
time.mktime((2002,12,25,13,45,30,0,0,0))
)
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Binary(self):
b = self.driver.Binary(str2bytes('Something'))
b = self.driver.Binary(str2bytes(''))
def test_STRING(self):
self.failUnless(hasattr(self.driver,'STRING'),
'module.STRING must be defined'
)
def test_BINARY(self):
self.failUnless(hasattr(self.driver,'BINARY'),
'module.BINARY must be defined.'
)
def test_NUMBER(self):
self.failUnless(hasattr(self.driver,'NUMBER'),
'module.NUMBER must be defined.'
)
def test_DATETIME(self):
self.failUnless(hasattr(self.driver,'DATETIME'),
'module.DATETIME must be defined.'
)
def test_ROWID(self):
self.failUnless(hasattr(self.driver,'ROWID'),
'module.ROWID must be defined.'
)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
from tensorflow.python.ops import math_ops as math_ops_lib
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Utility device function to use for testing
def test_device_func_pin_variable_to_cpu(op):
if op.device:
return op.device
return "/cpu:0" if op.node_def.op in ["Variable", "VariableV2"] else op.device
class DeviceFunctionsTest(test.TestCase):
def testTwoDeviceFunctions(self):
with ops.Graph().as_default() as g:
var_0 = gen_state_ops._variable(
shape=[1],
dtype=dtypes.float32,
name="var_0",
container="",
shared_name="")
with g.device(test_device_func_pin_variable_to_cpu):
var_1 = gen_state_ops._variable(
shape=[1],
dtype=dtypes.float32,
name="var_1",
container="",
shared_name="")
var_2 = gen_state_ops._variable(
shape=[1],
dtype=dtypes.float32,
name="var_2",
container="",
shared_name="")
var_3 = gen_state_ops._variable(
shape=[1],
dtype=dtypes.float32,
name="var_3",
container="",
shared_name="")
with g.device(test_device_func_pin_variable_to_cpu):
var_4 = gen_state_ops._variable(
shape=[1],
dtype=dtypes.float32,
name="var_4",
container="",
shared_name="")
with g.device("/device:GPU:0"):
var_5 = gen_state_ops._variable(
shape=[1],
dtype=dtypes.float32,
name="var_5",
container="",
shared_name="")
var_6 = gen_state_ops._variable(
shape=[1],
dtype=dtypes.float32,
name="var_6",
container="",
shared_name="")
self.assertDeviceEqual(var_0.device, None)
self.assertDeviceEqual(var_1.device, "/device:CPU:0")
self.assertDeviceEqual(var_2.device, None)
self.assertDeviceEqual(var_3.device, None)
self.assertDeviceEqual(var_4.device, "/device:CPU:0")
self.assertDeviceEqual(var_5.device, "/device:GPU:0")
self.assertDeviceEqual(var_6.device, "/device:CPU:0")
def testNestedDeviceFunctions(self):
with ops.Graph().as_default():
var_0 = variables.Variable(0)
with ops.device(test_device_func_pin_variable_to_cpu):
var_1 = variables.Variable(1)
with ops.device(lambda op: "/device:GPU:0"):
var_2 = variables.Variable(2)
with ops.device("/device:GPU:0"): # Implicit merging device function.
var_3 = variables.Variable(3)
self.assertDeviceEqual(var_0.device, None)
self.assertDeviceEqual(var_1.device, "/device:CPU:0")
self.assertDeviceEqual(var_2.device, "/device:GPU:0")
self.assertDeviceEqual(var_3.device, "/device:GPU:0")
def testExplicitDevice(self):
with ops.Graph().as_default() as g:
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/job:ps"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, None)
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/job:ps")
def testDefaultDevice(self):
with ops.Graph().as_default() as g, g.device(
test_device_func_pin_variable_to_cpu):
with g.device("/job:ps"):
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/replica:0"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, "/job:ps")
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/replica:0")
def testExtractSubGraph(self):
graph_def = graph_pb2.GraphDef()
n1 = graph_def.node.add()
n1.name = "n1"
n1.input.extend(["n5"])
n2 = graph_def.node.add()
n2.name = "n2"
# Take the first output of the n1 node as the input.
n2.input.extend(["n1:0"])
n3 = graph_def.node.add()
n3.name = "n3"
# Add a control input (which isn't really needed by the kernel, but
# rather to enforce execution order between nodes).
n3.input.extend(["^n2"])
n4 = graph_def.node.add()
n4.name = "n4"
    # It is fine to have loops in the graph as well.
n5 = graph_def.node.add()
n5.name = "n5"
n5.input.extend(["n1"])
sub_graph = graph_util.extract_sub_graph(graph_def, ["n3"])
self.assertEqual("n1", sub_graph.node[0].name)
self.assertEqual("n2", sub_graph.node[1].name)
self.assertEqual("n3", sub_graph.node[2].name)
self.assertEqual("n5", sub_graph.node[3].name)
def testConvertVariablesToConstsWithFunctions(self):
@function.Defun(dtypes.float32)
def plus_one(x):
return x + 1.0
with ops.Graph().as_default():
variable_node = variables.Variable(1.0, name="variable_node")
_ = variables.Variable(1.0, name="unused_variable_node")
defun_node = plus_one(variable_node)
output_node = math_ops_lib.multiply(
defun_node, 2.0, name="output_node")
with session.Session() as sess:
init = variables.initialize_variables([variable_node])
sess.run(init)
output = sess.run(output_node)
self.assertNear(4.0, output, 0.00001)
variable_graph_def = sess.graph.as_graph_def()
# First get the constant_graph_def when variable_names_whitelist is set,
# note that if variable_names_whitelist is not set an error will be
# thrown because unused_variable_node is not initialized.
constant_graph_def = graph_util.convert_variables_to_constants(
sess,
variable_graph_def, ["output_node"],
variable_names_whitelist=set(["variable_node"]))
self.assertEqual(variable_graph_def.library,
constant_graph_def.library)
def testConvertVariablesToConsts(self):
with ops.Graph().as_default():
variable_node = variables.Variable(1.0, name="variable_node")
_ = variables.Variable(1.0, name="unused_variable_node")
output_node = math_ops_lib.multiply(
variable_node, 2.0, name="output_node")
with session.Session() as sess:
init = variables.initialize_variables([variable_node])
sess.run(init)
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
variable_graph_def = sess.graph.as_graph_def()
# First get the constant_graph_def when variable_names_whitelist is set,
# note that if variable_names_whitelist is not set an error will be
# thrown because unused_variable_node is not initialized.
constant_graph_def = graph_util.convert_variables_to_constants(
sess,
variable_graph_def, ["output_node"],
variable_names_whitelist=set(["variable_node"]))
# Then initialize the unused variable, and get another
# constant_graph_def when variable_names_whitelist is not set.
sess.run(variables.global_variables_initializer())
constant_graph_def_without_variable_whitelist = (
graph_util.convert_variables_to_constants(sess, variable_graph_def,
["output_node"]))
# The unused variable should be cleared so the two graphs should be
# equivalent.
self.assertEqual(
str(constant_graph_def),
str(constant_graph_def_without_variable_whitelist))
# Test variable name black list. This should result in the variable not
# being a const.
sess.run(variables.global_variables_initializer())
constant_graph_def_with_blacklist = (
graph_util.convert_variables_to_constants(
sess,
variable_graph_def, ["output_node"],
variable_names_blacklist=set(["variable_node"])))
variable_node = None
for node in constant_graph_def_with_blacklist.node:
if node.name == "variable_node":
variable_node = node
self.assertIsNotNone(variable_node)
self.assertEqual(variable_node.op, "VariableV2")
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with ops.Graph().as_default():
_ = importer.import_graph_def(constant_graph_def, name="")
self.assertEqual(4, len(constant_graph_def.node))
for node in constant_graph_def.node:
self.assertNotEqual("Variable", node.op)
self.assertNotEqual("VariableV2", node.op)
with session.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
def create_node_def(self, op, name, inputs):
new_node = node_def_pb2.NodeDef()
new_node.op = op
new_node.name = name
for input_name in inputs:
new_node.input.extend([input_name])
return new_node
def create_constant_node_def(self, name, value, dtype, shape=None):
node = self.create_node_def("Const", name, [])
self.set_attr_dtype(node, "dtype", dtype)
self.set_attr_tensor(node, "value", value, dtype, shape)
return node
def set_attr_dtype(self, node, key, value):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(type=value.as_datatype_enum))
def set_attr_tensor(self, node, key, value, dtype, shape=None):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
value, dtype=dtype, shape=shape)))
def testRemoveTrainingNodes(self):
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = self.create_node_def("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = self.create_node_def(
"Identity", a_identity_name, [a_constant_name, "^" + a_check_name])
graph_def.node.extend([a_identity_node])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = self.create_node_def("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = self.create_node_def(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = self.create_node_def("Add", add_name,
[a_identity_name, b_identity_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
expected_output = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = self.create_node_def("Add", add_name,
[a_constant_name, b_constant_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
output = graph_util.remove_training_nodes(graph_def)
self.assertProtoEquals(expected_output, output)
def testRemoveIdentityChains(self):
"""Check that chains of Identity nodes are correctly pruned.
Create a chain of four nodes, A, B, C, and D where A inputs B, B inputs C,
and C inputs D. Nodes B and C are "Identity" and should be pruned, resulting
in the nodes A and D, where A inputs D.
"""
graph_def = graph_pb2.GraphDef()
graph_def.node.extend([
self.create_node_def("Aop", "A", ["B"]), self.create_node_def(
"Identity", "B", ["C"]), self.create_node_def(
"Identity", "C", ["D"]), self.create_node_def("Dop", "D", [])
])
expected_graph_def = graph_pb2.GraphDef()
expected_graph_def.node.extend([
self.create_node_def("Aop", "A", ["D"]), self.create_node_def(
"Dop", "D", [])
])
self.assertProtoEquals(expected_graph_def,
graph_util.remove_training_nodes(graph_def))
if __name__ == "__main__":
test.main()
|
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 26 16:19:12 2013
@author: Janwillem van Dijk
@email: [email protected]
Module for generating skew normal random numbers (Adelchi Azzalini)
===================================================================
http://azzalini.stat.unipd.it/SN/
Licensing:
This code is distributed under the GNU LGPL license.
- rnd_skewnormal: returns random values for the sn distribution with given
    location, scale and shape
- random_skewnormal: returns random values for the sn distribution with given
    mean, stdev and skewness
- skewnormal_parms: returns location, scale and shape given
    mean, stdev and skewness of the sn distribution
- skewnormal_stats: returns mean, stdev and skewness given
    location, scale and shape
- pdf_skewnormal: returns values for the pdf of a skew normal distribution
- cdf_skewnormal: returns values for the cdf of a skew normal distribution
- T_Owen: returns values for Owen's T as used by cdf_skewnormal
- skew_max: returns the maximum skewness of a sn distribution
"""
from math import sqrt, copysign, pi
import numpy.random as random
from numpy import where, zeros, ones, float64, array, arange
from numpy import inner, kron
from numpy import exp as np_exp
from numpy import arctan as np_arctan
from scipy.stats import norm
from scipy.special import gamma as sp_gamma
try:
    """
    Try to use owens.f90 compiled into a python module with
    f2py -c -m owens owens.f90
    giving owens.so
    http://people.sc.fsu.edu/~jburkardt/f_src/owens/owens.f90
    """
    owens = None
    #import owens
except ImportError:
    print('owens not found')
def T_Owen_int(h, a, jmax=50, cut_point=6):
"""
Return Owens T
==============
@param: h the h parameter of Owen's T
@param: a the a parameter of Owen's T (-1 <= a <= 1)
Python-numpy-scipy version for Owen's T translated from matlab version
T_owen.m of R module sn.T_int
"""
if type(h) in (float, float64):
h = array([h])
low = where(h <= cut_point)[0]
high = where(h > cut_point)[0]
n_low = low.size
n_high = high.size
irange = arange(0, jmax)
series = zeros(h.size)
if n_low > 0:
h_low = h[low].reshape(n_low, 1)
b = fui(h_low, irange)
cumb = b.cumsum(axis=1)
b1 = np_exp(-0.5 * h_low ** 2) * cumb
matr = ones((jmax, n_low)) - b1.transpose() # matlab ' means transpose
jk = kron(ones(jmax), [1.0, -1.0])
jk = jk[0: jmax] / (2 * irange + 1)
matr = inner((jk.reshape(jmax, 1) * matr).transpose(),
a ** (2 * irange + 1))
series[low] = (np_arctan(a) - matr.flatten(1)) / (2 * pi)
if n_high > 0:
h_high = h[high]
atana = np_arctan(a)
series[high] = (atana * np_exp(-0.5 * (h_high ** 2) * a / atana) *
(1.0 + 0.00868 * (h_high ** 4) * a ** 4) / (2.0 * pi))
return series
def fui(h, i):
return (h ** (2 * i)) / ((2 ** i) * sp_gamma(i + 1))
def T_Owen_series(h, a, jmax=50, cut_point=6):
"""
Return Owens T
==============
@param: h the h parameter of Owen's T
@param: a the a parameter of Owen's T
Python-numpy-scipy version for Owen's T translated from matlab version
T_owen.m of R module sn.T_Owen
"""
if abs(a) <= 1.0:
return T_Owen_int(h, a, jmax=jmax, cut_point=cut_point)
else:
"""D.B. Owen Ann. Math. Stat. Vol 27, #4 (1956), 1075-1090
eqn 2.3, 2.4 and 2.5
Available at: http://projecteuclid.org/DPubS/Repository/1.0/
Disseminate?view=body&id=pdf_1&handle=euclid.aoms/1177728074"""
signt = copysign(1.0, a)
a = abs(a)
h = abs(h)
ha = a * h
gh = norm.cdf(h)
gha = norm.cdf(ha)
t = 0.5 * gh + 0.5 * gha - gh * gha - \
T_Owen_int(ha, 1.0 / a, jmax=jmax, cut_point=cut_point)
return signt * t
def T_Owen(h, a):
"""
Return Owens T
==============
@param: h the h parameter of Owens T
@param: a the a parameter of Owens T
Try to use owens.f90 version else python version
owens.f90 is approximately a factor 100 faster
"""
if owens:
"""Owen's T using owens.f90 by Patefield and Brown
http://www.jstatsoft.org/v05/a05/paper
Fortran source by Burkhard
http://people.sc.fsu.edu/~jburkardt/f_src/owens/owens.f90"""
if type(h) in [float, float64]:
return owens.t(h, a)
else:
t = zeros(h.size)
for i in range(h.size):
t[i] = owens.t(h[i], a)
return t
else:
"""
Owens T after sn.T_Owen(H, a) D.B. Owen (1956)
"""
return T_Owen_series(h, a)
def cdf_skewnormal(x, location=0.0, scale=1.0, shape=0.0):
"""
Return skew normal cdf
======================
@param location: location of sn distribution
@param scale: scale of sn distribution
@param shape: shape of sn distribution
http://azzalini.stat.unipd.it/SN/
"""
xi = (x - location) / scale
return norm.cdf(xi) - 2.0 * T_Owen(xi, shape)
def pdf_skewnormal(x, location=0.0, scale=1.0, shape=0.0):
"""
Return skew normal pdf
======================
@param location: location of sn distribution
@param scale: scale of sn distribution
@param shape: shape of sn distribution
http://azzalini.stat.unipd.it/SN/
"""
t = (x - location) / scale
return 2.0 / scale * norm.pdf(t) * norm.cdf(shape * t)
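# The pdf above is Azzalini's f(x) = (2 / omega) * phi((x - xi) / omega) *
# Phi(alpha * (x - xi) / omega), with phi and Phi the standard normal pdf
# and cdf, and (xi, omega, alpha) the location, scale and shape.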
def rnd_skewnormal(location=0.0, scale=1.0, shape=0.0, size=1):
"""
Return skew normal random values
================================
with given location, scale and shape
@param location: location of sn distribution
@param scale: scale of sn distribution
@param shape: shape of sn distribution
@param size: number of values to generate
http://azzalini.stat.unipd.it/SN/ Matlab source rsn.m in sn-matlab.zip
"""
u1 = random.normal(0.0, 1.0, size=size)
u2 = random.normal(0.0, 1.0, size=size)
i = where(u2 > shape * u1)
u1[i] *= -1.0
return location + scale * u1
def skewnormal_parms(mean=0.0, stdev=1.0, skew=0.0):
"""
Return parameters for a skew normal distribution function
=========================================================
@param mean: mean of sn distribution
@param stdev: standard deviation of sn distribution
@param skew: skewness of sn distribution
http://azzalini.stat.unipd.it/SN/Intro/intro.html
location (xi), scale (omega) and shape (alpha)
"""
if abs(skew) > skew_max():
print('Skewness must be between %.8f and %.8f' % (
-skew_max(), skew_max()))
print('None, None, None returned')
return None, None, None
beta = (2.0 - pi / 2.0)
skew_23 = pow(skew * skew, 1.0 / 3.0)
beta_23 = pow(beta * beta, 1.0 / 3.0)
eps2 = skew_23 / (skew_23 + beta_23)
eps = copysign(sqrt(eps2), skew)
delta = eps * sqrt(pi / 2.0)
alpha = delta / sqrt(1.0 - delta * delta)
omega = stdev / sqrt(1.0 - eps * eps)
xi = mean - omega * eps
return xi, omega, alpha
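# Sanity check (illustrative): the returned parameters round-trip through
# skewnormal_stats, e.g.
#     xi, omega, alpha = skewnormal_parms(0.0, 1.0, 0.5)
#     skewnormal_stats(xi, omega, alpha)   # approximately (0.0, 1.0, 0.5)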
def skewnormal_stats(location=0.0, scale=1.0, shape=0.0):
"""
Return statistics of a skew normal distribution function
========================================================
@param location: location of sn distribution
@param scale: scale of sn distribution
@param shape: shape of sn distribution
http://azzalini.stat.unipd.it/SN/Intro/intro.html
"""
beta = 2.0 - pi / 2.0
delta = shape / sqrt(1.0 + shape * shape)
eps = delta * sqrt(2.0 / pi)
mean = location + scale * eps
stdev = scale * sqrt(1.0 - eps * eps)
skew = beta * pow(eps, 3.0) / pow(1.0 - eps * eps, 3.0 / 2.0)
return mean, stdev, skew
def skew_max():
"""
Return maximum skewness of a skew normal distribution
=====================================================
skewness for shape --> infinity
"""
beta = 2.0 - pi / 2.0
#lim(delta, shape-> inf) = 1.0
eps = sqrt(2.0 / pi)
return beta * pow(eps, 3.0) / pow(1.0 - eps * eps, 3.0 / 2.0) - 1e-16
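# Note: this limit evaluates to roughly 0.9953, the maximum skewness the
# skew normal family can attain; the 1e-16 margin keeps callers strictly
# inside the admissible range for skewnormal_parms.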
def random_skewnormal(mean=0.0, stdev=1.0, skew=0.0, size=1):
"""
Return random numbers from a skew normal distribution
=====================================================
    with given mean, stdev and skewness
    @param mean: mean of sn distribution
    @param stdev: stdev of sn distribution
    @param skew: skewness of sn distribution
    @param size: number of values to generate
"""
loc, scale, shape = skewnormal_parms(mean, stdev, skew)
if loc is not None:
return rnd_skewnormal(loc, scale, shape, size=size)
else:
return None
"""
Test routine (can all be deleted if not needed)
"""
if __name__ == '__main__':
from numpy import linspace, median, arange, take, sort
import scipy.stats as stats
import matplotlib.pyplot as plt
"""
skew between -skew_max() and skew_max()
un-comment one of values for skew below
"""
#skew = 0.0
skew = 0.75
#skew = skew_max()
def text_in_plot(fig):
xtxt = 0.10
ytxt = 0.87
dtxt = 0.03
txt = r'$\mu:\,%.2f$' % mean
fig.text(xtxt, ytxt, txt, horizontalalignment='left', fontsize=14)
ytxt -= dtxt
txt = r'$\sigma:\,%.2f$' % stdev
fig.text(xtxt, ytxt, txt, horizontalalignment='left', fontsize=14)
ytxt -= dtxt
txt = r'$\gamma_1:\,%.2f,\,%.2f,\,%.2f$' % (skew, 0.0, -skew)
fig.text(xtxt, ytxt, txt, horizontalalignment='left', fontsize=14)
ytxt -= 2.0 * dtxt
txt = r'$\xi:\,%.2f,\,%.2f,\,%.2f$' % (locp, loc, locm)
fig.text(xtxt, ytxt, txt, horizontalalignment='left', fontsize=14)
ytxt -= dtxt
txt = r'$\omega:\,%.2f,\,%.2f,\,%.2f$' % (scalep, scale, scalem)
fig.text(xtxt, ytxt, txt, horizontalalignment='left', fontsize=14)
ytxt -= dtxt
txt = r'$\alpha:\,%.2f,\,%.2f,\,%.2f$' % (shapep, shape, shapem)
fig.text(xtxt, ytxt, txt, horizontalalignment='left', fontsize=14)
mean = 0.0
stdev = 1.0
n_rand = 100000
n_plot = 200
data_plus = random_skewnormal(mean, stdev, skew, n_rand)
print('skew normal distribution: positive skewness')
print('mean: %.3f' % data_plus.mean())
print('median: %.3f' % median(data_plus))
print('stdev: %.3f' % data_plus.std())
print('skew: %.3f' % stats.skew(data_plus))
locp, scalep, shapep = skewnormal_parms(mean, stdev, skew)
print('loc: %.3f' % locp)
print('scale: %.3f' % scalep)
print('shape: %.3f' % shapep)
mu, sigma, gamma = skewnormal_stats(locp, scalep, shapep)
print('mean: %.3f' % mu)
print('stdev: %.3f' % sigma)
print('skew: %.3f' % gamma)
data_sym = random_skewnormal(mean, stdev, 0.0, n_rand)
print('\nskew normal distribution: zero skewness')
print('mean: %.3f' % data_sym.mean())
print('median: %.3f' % median(data_sym))
print('stdev: %.3f' % data_sym.std())
print('skew: %.3f' % stats.skew(data_sym))
loc, scale, shape = skewnormal_parms(mean, stdev, 0.0)
print('loc: %.3f' % loc)
print('scale: %.3f' % scale)
print('shape: %.3f' % shape)
mu, sigma, gamma = skewnormal_stats(loc, scale, shape)
print('mean: %.3f' % mu)
print('stdev: %.3f' % sigma)
print('skew: %.3f' % gamma)
data_min = random_skewnormal(mean, stdev, -skew, n_rand)
print('\nskew normal distribution: negative skewness')
print('mean: %.3f' % data_min.mean())
print('median: %.3f' % median(data_min))
print('stdev: %.3f' % data_min.std())
print('skew: %.3f' % stats.skew(data_min))
locm, scalem, shapem = skewnormal_parms(mean, stdev, -skew)
print('loc: %.3f' % locm)
print('scale: %.3f' % scalem)
print('shape: %.3f' % shapem)
mu, sigma, gamma = skewnormal_stats(locm, scalem, shapem)
print('mean: %.3f' % mu)
print('stdev: %.3f' % sigma)
print('skew: %.3f' % gamma)
xpdf = linspace(mean - 4.0 * stdev, mean + 4.0 * stdev, n_plot)
    print('Finding kde of Monte Carlo samples')
ykde_plus = stats.gaussian_kde(data_plus)
ypdf_plus = ykde_plus(xpdf)
y_plus = pdf_skewnormal(xpdf, locp, scalep, shapep)
ykde_sym = stats.gaussian_kde(data_sym)
ypdf_sym = ykde_sym(xpdf)
y_sym = pdf_skewnormal(xpdf, loc, scale, shape)
ykde_min = stats.gaussian_kde(data_min)
ypdf_min = ykde_min(xpdf)
y_min = pdf_skewnormal(xpdf, locm, scalem, shapem)
    print('Making pdf plots')
figpdf = plt.figure()
subpdf = figpdf.add_subplot(1, 1, 1)
txt = r'$\mathrm{Skew-normal\,distribution\,of\,data}$'
subpdf.set_title(txt, fontsize=18)
text_in_plot(figpdf)
subpdf.axes.set_xlim(xpdf[0], xpdf[-1])
subpdf.plot(xpdf, ypdf_plus, 'r')
subpdf.plot(xpdf, ypdf_sym, 'g')
subpdf.plot(xpdf, ypdf_min, 'b')
subpdf.plot(xpdf, y_plus, ':k')
subpdf.plot(xpdf, y_sym, ':k')
subpdf.plot(xpdf, y_min, ':k')
figpdf.tight_layout()
figpdf.savefig('skewnormal_pdf.svg')
figpdf.savefig('skewnormal_pdf.pdf')
    print('Making cdf plots')
figcdf = plt.figure()
subcdf = figcdf.add_subplot(1, 1, 1)
xcdf = linspace(mean - 5.0 * stdev, mean + 5.0 * stdev, n_plot)
#select n_plot samples from data
step = int(n_rand / n_plot)
i_sel = arange(0, n_rand, step)
p = i_sel * 1.0 / n_rand
ycdf_min = cdf_skewnormal(xcdf, locm, scalem, shapem)
ycdf_sym = cdf_skewnormal(xcdf, loc, scale, shape)
ycdf_plus = cdf_skewnormal(xcdf, locp, scalep, shapep)
data_plus = take(sort(data_plus), i_sel)
data_sym = take(sort(data_sym), i_sel)
data_min = take(sort(data_min), i_sel)
subcdf.axes.set_xlim(xcdf[0], xcdf[-1])
subcdf.axes.set_ylim(0.0, 1.05)
subcdf.plot(data_plus, p, 'r')
subcdf.plot(data_sym, p, 'g')
subcdf.plot(data_min, p, 'b')
subcdf.plot(xcdf, ycdf_plus, ':k')
subcdf.plot(xcdf, ycdf_sym, ':k')
subcdf.plot(xcdf, ycdf_min, ':k')
txt = r'$\mathrm{Skew-normal\,distribution\,of\,data}$'
subcdf.set_title(txt, fontsize=18)
text_in_plot(figcdf)
figcdf.tight_layout()
figcdf.savefig('skewnormal_cdf.svg')
figcdf.savefig('skewnormal_cdf.pdf')
    print('Show plots')
plt.show()
    print('Finished')
|
|
# -*- coding: utf-8 -*-
"""This file contains a class to provide a parsing framework to plaso.
This class contains a base framework class for parsing file-like objects, and
also some implementations that extend it to provide a more comprehensive
parser.
"""
from __future__ import unicode_literals
import abc
import pyparsing
from dfvfs.helpers import text_file
from plaso.lib import errors
from plaso.parsers import interface
from plaso.parsers import logger
# Pylint complains about some functions not being implemented that shouldn't
# be since they need to be implemented by children.
# pylint: disable=abstract-method
# TODO: determine if this method should be merged with PyParseIntCast.
# pylint: disable=unused-argument
def ConvertTokenToInteger(string, location, tokens):
"""Pyparsing parse action callback to convert a token into an integer value.
Args:
string (str): original string.
location (int): location in the string where the token was found.
tokens (list[str]): tokens.
Returns:
int: integer value or None.
"""
try:
return int(tokens[0], 10)
except ValueError:
pass
def PyParseIntCast(string, location, tokens):
"""Return an integer from a string.
This is a pyparsing callback method that converts the matched
string into an integer.
The method modifies the content of the tokens list and converts
them all to an integer value.
Args:
string (str): original string.
location (int): location in the string where the match was made.
tokens (list[str]): extracted tokens, where the string to be converted
is stored.
"""
# Cast the regular tokens.
for index, token in enumerate(tokens):
try:
tokens[index] = int(token)
except ValueError:
logger.error('Unable to cast [{0:s}] to an int, setting to 0'.format(
token))
tokens[index] = 0
# We also need to cast the dictionary built tokens.
for key in tokens.keys():
try:
tokens[key] = int(tokens[key], 10)
except ValueError:
logger.error(
'Unable to cast [{0:s} = {1:d}] to an int, setting to 0'.format(
key, tokens[key]))
tokens[key] = 0
class PyparsingConstants(object):
"""Constants for pyparsing-based parsers."""
# Numbers.
INTEGER = pyparsing.Word(pyparsing.nums).setParseAction(PyParseIntCast)
IPV4_ADDRESS = pyparsing.pyparsing_common.ipv4_address
IPV6_ADDRESS = pyparsing.pyparsing_common.ipv6_address
IP_ADDRESS = (IPV4_ADDRESS | IPV6_ADDRESS)
# TODO: deprecate and remove, use THREE_LETTERS instead.
# TODO: fix Python 3 compatibility of .uppercase and .lowercase.
# pylint: disable=no-member
MONTH = pyparsing.Word(
pyparsing.string.ascii_uppercase, pyparsing.string.ascii_lowercase,
exact=3)
# Define date structures.
HYPHEN = pyparsing.Literal('-').suppress()
ONE_OR_TWO_DIGITS = pyparsing.Word(
pyparsing.nums, min=1, max=2).setParseAction(PyParseIntCast)
TWO_DIGITS = pyparsing.Word(pyparsing.nums, exact=2).setParseAction(
PyParseIntCast)
THREE_DIGITS = pyparsing.Word(pyparsing.nums, exact=3).setParseAction(
PyParseIntCast)
FOUR_DIGITS = pyparsing.Word(pyparsing.nums, exact=4).setParseAction(
PyParseIntCast)
THREE_LETTERS = pyparsing.Word(pyparsing.alphas, exact=3)
DATE_ELEMENTS = (
FOUR_DIGITS.setResultsName('year') + pyparsing.Suppress('-') +
TWO_DIGITS.setResultsName('month') + pyparsing.Suppress('-') +
TWO_DIGITS.setResultsName('day_of_month'))
TIME_ELEMENTS = (
TWO_DIGITS.setResultsName('hours') + pyparsing.Suppress(':') +
TWO_DIGITS.setResultsName('minutes') + pyparsing.Suppress(':') +
TWO_DIGITS.setResultsName('seconds'))
TIME_MSEC_ELEMENTS = (
TIME_ELEMENTS + pyparsing.Word('.,', exact=1).suppress() +
INTEGER.setResultsName('microseconds'))
# Date structures defined as a single group.
DATE = pyparsing.Group(DATE_ELEMENTS)
DATE_TIME = pyparsing.Group(DATE_ELEMENTS + TIME_ELEMENTS)
DATE_TIME_MSEC = pyparsing.Group(DATE_ELEMENTS + TIME_MSEC_ELEMENTS)
TIME = pyparsing.Group(TIME_ELEMENTS)
TIME_MSEC = TIME + pyparsing.Suppress('.') + INTEGER
# TODO: replace by
# TIME_MSEC = pyparsing.Group(TIME_MSEC_ELEMENTS)
COMMENT_LINE_HASH = pyparsing.Literal('#') + pyparsing.SkipTo(
pyparsing.LineEnd())
# TODO: Add more commonly used structs that can be used by parsers.
PID = pyparsing.Word(
pyparsing.nums, min=1, max=5).setParseAction(PyParseIntCast)
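# Illustrative use of the constants above, e.g. parsing an ISO-like timestamp
# prefix (the names here are examples, not part of the class):
#
#   _TIMESTAMP = PyparsingConstants.DATE_TIME.setResultsName('date_time')
#   tokens = _TIMESTAMP.parseString('2013-01-26 16:19:12')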
class PyparsingSingleLineTextParser(interface.FileObjectParser):
"""Single line text parser interface based on pyparsing."""
# The actual structure, this needs to be defined by each parser.
  # This is defined as a list of tuples so that more than a single line
# structure can be defined. That way the parser can support more than a
# single type of log entry, despite them all having in common the constraint
# that each log entry is a single line.
# The tuple should have two entries, a key and a structure. This is done to
# keep the structures in an order of priority/preference.
# The key is a comment or an identification that is passed to the ParseRecord
# function so that the developer can identify which structure got parsed.
# The value is the actual pyparsing structure.
LINE_STRUCTURES = []
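  # A sketch of what a concrete parser might declare (purely illustrative):
  #
  #   _LOG_LINE = (
  #       PyparsingConstants.DATE_TIME.setResultsName('date_time') +
  #       pyparsing.restOfLine.setResultsName('body'))
  #   LINE_STRUCTURES = [('logline', _LOG_LINE)]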
  # To avoid reading too much data into a buffer when determining whether or
  # not this parser is the right one for a file, we define a maximum number of
  # bytes a single line can occupy. Implementations can override this constant
  # if their format might have lines longer than 400 bytes.
MAX_LINE_LENGTH = 400
# The maximum number of consecutive lines that don't match known line
# structures to encounter before aborting parsing.
MAXIMUM_CONSECUTIVE_LINE_FAILURES = 20
_ENCODING = None
_EMPTY_LINES = frozenset(['\n', '\r', '\r\n'])
# Allow for a maximum of 40 empty lines before we bail out.
_MAXIMUM_DEPTH = 40
def __init__(self):
"""Initializes a parser."""
super(PyparsingSingleLineTextParser, self).__init__()
self._current_offset = 0
# TODO: self._line_structures is a work-around and this needs
# a structural fix.
self._line_structures = list(self.LINE_STRUCTURES)
def _GetValueFromStructure(self, structure, name, default_value=None):
"""Retrieves a token value from a Pyparsing structure.
This method ensures the token value is set to the default value when
the token is not present in the structure. Instead of returning
the Pyparsing default value of an empty byte stream (b'').
Args:
structure (pyparsing.ParseResults): tokens from a parsed log line.
name (str): name of the token.
default_value (Optional[object]): default value.
Returns:
object: value in the token or default value if the token is not available
in the structure.
"""
value = structure.get(name, default_value)
if isinstance(value, pyparsing.ParseResults) and not value:
# Ensure the return value is not an empty pyparsing.ParseResults, otherwise
# copy.deepcopy() will fail on Python 3.8 with: "TypeError: 'str' object
# is not callable" due to pyparsing.ParseResults overriding __getattr__
# with a function that returns an empty string when a named token does not
# exist.
return None
return value
# Pylint is confused by the formatting of the bytes_in argument.
# pylint: disable=missing-param-doc,missing-type-doc
def _IsText(self, bytes_in, encoding=None):
"""Examine the bytes in and determine if they are indicative of text.
Parsers need a quick and at least semi-reliable method of discovering
whether a particular byte stream is text or resembles text. This can be
used in text parsers, for instance to determine whether a file is a text
file.
The method assumes the byte sequence is either ASCII, UTF-8, UTF-16 or the
method-supplied character encoding. Otherwise it assumes the byte sequence
is not text, but an arbitrary byte sequence.
Args:
bytes_in (bytes|str): byte stream to examine.
encoding (Optional[str]): encoding to test, if not defined ASCII and
UTF-8 are tried.
Returns:
bool: True if the bytes stream contains text.
"""
# TODO: Improve speed and accuracy of this method.
# Start with the assumption we are dealing with text.
is_text = True
if isinstance(bytes_in, str):
return is_text
# Check if this is an ASCII text string.
for value in bytes_in:
if not 31 < value < 128:
is_text = False
break
# We have an ASCII string.
if is_text:
return is_text
# Check if this is UTF-8
try:
bytes_in.decode('utf-8')
return True
except UnicodeDecodeError:
pass
if encoding:
try:
bytes_in.decode(encoding)
return True
except LookupError:
logger.error('Unsupported encoding: {0:s}'.format(encoding))
except UnicodeDecodeError:
pass
return False
def _ReadLine(self, text_file_object, max_len=None, depth=0):
"""Reads a line from a text file.
Args:
text_file_object (dfvfs.TextFile): text file.
max_len (Optional[int]): maximum number of bytes a single line can take,
where None means all remaining bytes should be read.
depth (Optional[int]): number of new lines the parser encountered.
Returns:
str: single line read from the file-like object, or at most max_len
characters if the line is longer than the defined maximum size.
Raises:
UnicodeDecodeError: if the text cannot be decoded using the specified
encoding.
"""
line = text_file_object.readline(size=max_len)
if not line:
return ''
if line in self._EMPTY_LINES:
if depth == self._MAXIMUM_DEPTH:
return ''
return self._ReadLine(text_file_object, max_len=max_len, depth=depth + 1)
return line
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a text file-like object using a pyparsing definition.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
# TODO: self._line_structures is a work-around and this needs
# a structural fix.
if not self._line_structures:
raise errors.UnableToParseFile(
'Line structure undeclared, unable to proceed.')
encoding = self._ENCODING or parser_mediator.codepage
text_file_object = text_file.TextFile(file_object, encoding=encoding)
try:
line = self._ReadLine(text_file_object, max_len=self.MAX_LINE_LENGTH)
except UnicodeDecodeError:
raise errors.UnableToParseFile(
'Not a text file or encoding not supported.')
if not line:
raise errors.UnableToParseFile('Not a text file.')
if len(line) == self.MAX_LINE_LENGTH or len(
line) == self.MAX_LINE_LENGTH - 1:
logger.debug((
'Trying to read a line and reached the maximum allowed length of '
'{0:d}. The last few bytes of the line are: {1:s} [parser '
'{2:s}]').format(
self.MAX_LINE_LENGTH, repr(line[-10:]), self.NAME))
if not self._IsText(line):
raise errors.UnableToParseFile('Not a text file, unable to proceed.')
if not self.VerifyStructure(parser_mediator, line):
raise errors.UnableToParseFile('Wrong file structure.')
consecutive_line_failures = 0
index = None
# Set the offset to the beginning of the file.
self._current_offset = 0
# Read every line in the text file.
while line:
if parser_mediator.abort:
break
parsed_structure = None
use_key = None
# Try to parse the line using all the line structures.
for index, (key, structure) in enumerate(self._line_structures):
try:
parsed_structure = structure.parseString(line)
except pyparsing.ParseException:
pass
if parsed_structure:
use_key = key
break
if parsed_structure:
self.ParseRecord(parser_mediator, use_key, parsed_structure)
consecutive_line_failures = 0
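# Move the matching key, structure pair to the front of the list, so that
# structures that are more likely to match are tried first.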
if index is not None and index != 0:
key_structure = self._line_structures.pop(index)
self._line_structures.insert(0, key_structure)
else:
if len(line) > 80:
line = '{0:s}...'.format(line[:77])
parser_mediator.ProduceExtractionWarning(
'unable to parse log line: "{0:s}" at offset: {1:d}'.format(
line, self._current_offset))
consecutive_line_failures += 1
if (consecutive_line_failures >
self.MAXIMUM_CONSECUTIVE_LINE_FAILURES):
raise errors.UnableToParseFile(
'more than {0:d} consecutive failures to parse lines.'.format(
self.MAXIMUM_CONSECUTIVE_LINE_FAILURES))
self._current_offset = text_file_object.get_offset()
try:
line = self._ReadLine(text_file_object, max_len=self.MAX_LINE_LENGTH)
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning(
'unable to read and decode log line at offset {0:d}'.format(
self._current_offset))
break
# pylint: disable=redundant-returns-doc
@abc.abstractmethod
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a log record structure and produces events.
This function takes as an input a parsed pyparsing structure
and produces an EventObject if possible from that structure.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): tokens from a parsed log line.
"""
# pylint: disable=redundant-returns-doc
@abc.abstractmethod
def VerifyStructure(self, parser_mediator, line):
"""Verify the structure of the file and return boolean based on that check.
This function should read enough text from the text file to confirm
that the file is the correct one for this particular parser.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): single line from the text file.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
class EncodedTextReader(object):
"""Encoded text reader."""
def __init__(self, encoding, buffer_size=2048):
"""Initializes the encoded text reader object.
Args:
encoding (str): encoding.
buffer_size (Optional[int]): buffer size.
"""
super(EncodedTextReader, self).__init__()
self._buffer = ''
self._buffer_size = buffer_size
self._current_offset = 0
self._encoding = encoding
self.lines = ''
def _ReadLine(self, file_object):
"""Reads a line from the file object.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
str: line read from the file-like object.
"""
if len(self._buffer) < self._buffer_size:
content = file_object.read(self._buffer_size)
content = content.decode(self._encoding)
self._buffer = ''.join([self._buffer, content])
line, new_line, self._buffer = self._buffer.partition('\n')
if not line and not new_line:
line = self._buffer
self._buffer = ''
self._current_offset += len(line)
# Strip carriage returns from the text.
if line.endswith('\r'):
line = line[:-len('\r')]
if new_line:
line = ''.join([line, '\n'])
self._current_offset += len('\n')
return line
def ReadLine(self, file_object):
"""Reads a line.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
str: line read from the lines buffer.
"""
line, _, self.lines = self.lines.partition('\n')
if not line:
self.ReadLines(file_object)
line, _, self.lines = self.lines.partition('\n')
return line
def ReadLines(self, file_object):
"""Reads lines into the lines buffer.
Args:
file_object (dfvfs.FileIO): file-like object.
"""
lines_size = len(self.lines)
if lines_size < self._buffer_size:
lines_size = self._buffer_size - lines_size
while lines_size > 0:
line = self._ReadLine(file_object)
if not line:
break
self.lines = ''.join([self.lines, line])
lines_size -= len(line)
def Reset(self):
"""Resets the encoded text reader."""
self._buffer = ''
self._current_offset = 0
self.lines = ''
def SkipAhead(self, file_object, number_of_characters):
"""Skips ahead a number of characters.
Args:
file_object (dfvfs.FileIO): file-like object.
number_of_characters (int): number of characters.
"""
lines_size = len(self.lines)
while number_of_characters >= lines_size:
number_of_characters -= lines_size
self.lines = ''
self.ReadLines(file_object)
lines_size = len(self.lines)
if lines_size == 0:
return
self.lines = self.lines[number_of_characters:]
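# --- Illustrative sketches, not part of the original module. The names, log
# format and sample data below are assumptions for demonstration only.
#
# First, a minimal PyparsingSingleLineTextParser subclass showing how
# LINE_STRUCTURES pairs a key with a pyparsing grammar and how ParseRecord()
# and VerifyStructure() fit together, assuming the DATE_TIME helper and
# logger are reachable as module-level names as they appear in this listing.
# A real parser would produce events from the tokens instead of logging them.
class _ExampleSingleLineParser(PyparsingSingleLineTextParser):
  """Sketch of a parser for lines such as: 2020-12-31 23:59:59 message."""

  NAME = 'example_single_line'

  _LINE_GRAMMAR = (
      DATE_TIME.setResultsName('date_time') +
      pyparsing.restOfLine.setResultsName('body'))

  LINE_STRUCTURES = [('example_line', _LINE_GRAMMAR)]

  def ParseRecord(self, parser_mediator, key, structure):
    """Logs the body token of a parsed example_line structure."""
    body = self._GetValueFromStructure(structure, 'body', default_value='')
    logger.debug('example_line body: {0!s}'.format(body))

  def VerifyStructure(self, parser_mediator, line):
    """Accepts the file when the first line matches the grammar."""
    try:
      self._LINE_GRAMMAR.parseString(line)
    except pyparsing.ParseException:
      return False
    return True


# Second, exercising EncodedTextReader against an in-memory byte stream, with
# io.BytesIO standing in for the dfvfs.FileIO object the parsers normally
# pass in.
def _ExampleEncodedTextReader():
  """Reads and skips lines from a small in-memory UTF-8 stream."""
  import io

  file_object = io.BytesIO(b'first line\nsecond line\nthird line\n')
  reader = EncodedTextReader('utf-8', buffer_size=16)

  # Fill the lines buffer, then consume one line from it.
  reader.ReadLines(file_object)
  first_line = reader.ReadLine(file_object)

  # Skip past the second line; the lines buffer is refilled from the file
  # object as needed and now starts at the third line.
  reader.SkipAhead(file_object, len('second line\n'))
  return first_line, reader.lines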
class PyparsingMultiLineTextParser(PyparsingSingleLineTextParser):
"""Multi line text parser interface based on pyparsing."""
BUFFER_SIZE = 2048
def __init__(self):
"""Initializes a parser."""
super(PyparsingMultiLineTextParser, self).__init__()
self._buffer_size = self.BUFFER_SIZE
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a text file-like object using a pyparsing definition.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
if not self.LINE_STRUCTURES:
raise errors.UnableToParseFile('Missing line structures.')
encoding = self._ENCODING or parser_mediator.codepage
text_reader = EncodedTextReader(
encoding, buffer_size=self.BUFFER_SIZE)
text_reader.Reset()
try:
text_reader.ReadLines(file_object)
except UnicodeDecodeError as exception:
raise errors.UnableToParseFile(
'Not a text file, with error: {0!s}'.format(exception))
if not self.VerifyStructure(parser_mediator, text_reader.lines):
raise errors.UnableToParseFile('Wrong file structure.')
# Using parseWithTabs() overrides Pyparsing's default replacement of tabs
# with spaces so that SkipAhead() skips the correct number of bytes after a
# match.
for key, structure in self.LINE_STRUCTURES:
structure.parseWithTabs()
consecutive_line_failures = 0
# Read every line in the text file.
while text_reader.lines:
if parser_mediator.abort:
break
# Initialize pyparsing objects.
tokens = None
start = 0
end = 0
key = None
index = None
# Try to parse the line using all the line structures.
for index, (key, structure) in enumerate(self._line_structures):
try:
structure_generator = structure.scanString(
text_reader.lines, maxMatches=1)
parsed_structure = next(structure_generator, None)
except pyparsing.ParseException:
parsed_structure = None
if not parsed_structure:
continue
tokens, start, end = parsed_structure
# Only want to parse the structure if it starts
# at the beginning of the buffer.
if start == 0:
break
if tokens and start == 0:
# Move matching key, structure pair to the front of the list, so that
# structures that are more likely to match are tried first.
if index is not None and index != 0:
key_structure = self._line_structures.pop(index)
self._line_structures.insert(0, key_structure)
try:
self.ParseRecord(parser_mediator, key, tokens)
consecutive_line_failures = 0
except (errors.ParseError, errors.TimestampError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse record: {0:s} with error: {1!s}'.format(
key, exception))
text_reader.SkipAhead(file_object, end)
else:
odd_line = text_reader.ReadLine(file_object)
if odd_line:
if len(odd_line) > 80:
odd_line = '{0:s}...'.format(odd_line[:77])
parser_mediator.ProduceExtractionWarning(
'unable to parse log line: {0:s}'.format(repr(odd_line)))
consecutive_line_failures += 1
if (consecutive_line_failures >
self.MAXIMUM_CONSECUTIVE_LINE_FAILURES):
raise errors.UnableToParseFile(
'more than {0:d} consecutive failures to parse lines.'.format(
self.MAXIMUM_CONSECUTIVE_LINE_FAILURES))
try:
text_reader.ReadLines(file_object)
except UnicodeDecodeError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to read lines with error: {0!s}'.format(exception))
# pylint: disable=redundant-returns-doc
@abc.abstractmethod
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a log record structure and produces events.
This function takes as an input a parsed pyparsing structure
and produces an EventObject if possible from that structure.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): tokens from a parsed log line.
Returns:
EventObject: event or None.
"""
# pylint: disable=arguments-differ,redundant-returns-doc
@abc.abstractmethod
def VerifyStructure(self, parser_mediator, lines):
"""Verify the structure of the file and return boolean based on that check.
This function should read enough text from the text file to confirm
that the file is the correct one for this particular parser.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
lines (str): one or more lines from the text file.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import os
import sys
import io
import unittest
from telemetry.testing import system_stub
from telemetry.internal.testing import system_stub_test_module
if sys.version_info.major == 3:
OPEN_FILE_TYPE = io.TextIOWrapper
else:
OPEN_FILE_TYPE = file
class CloudStorageTest(unittest.TestCase):
SUCCESS_FILE_HASH = 'success'.zfill(40)
PUBLIC_FILE_HASH = 'public'.zfill(40)
PARTNER_FILE_HASH = 'partner'.zfill(40)
INTERNAL_FILE_HASH = 'internal'.zfill(40)
UPDATED_HASH = 'updated'.zfill(40)
def setUp(self):
self.cloud_storage = system_stub.CloudStorageModuleStub()
# Files in Cloud Storage.
self.remote_files = ['preset_public_file.wpr',
'preset_partner_file.wpr',
'preset_internal_file.wpr']
self.remote_paths = {
self.cloud_storage.PUBLIC_BUCKET:
{'preset_public_file.wpr':CloudStorageTest.PUBLIC_FILE_HASH},
self.cloud_storage.PARTNER_BUCKET:
{'preset_partner_file.wpr':CloudStorageTest.PARTNER_FILE_HASH},
self.cloud_storage.INTERNAL_BUCKET:
{'preset_internal_file.wpr':CloudStorageTest.INTERNAL_FILE_HASH}}
# Local data files and hashes.
self.data_files = [
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'),
os.path.join(os.path.sep, 'path', 'to', 'wrong_hash.wpr'),
os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr'),
os.path.join(os.path.sep, 'path', 'to', 'preset_partner_file.wpr'),
os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr')]
self.local_file_hashes = {
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'):
CloudStorageTest.SUCCESS_FILE_HASH,
os.path.join(os.path.sep, 'path', 'to', 'wrong_hash.wpr'):
CloudStorageTest.SUCCESS_FILE_HASH,
os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr'):
CloudStorageTest.PUBLIC_FILE_HASH,
os.path.join(os.path.sep, 'path', 'to', 'preset_partner_file.wpr'):
CloudStorageTest.PARTNER_FILE_HASH,
os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr'):
CloudStorageTest.INTERNAL_FILE_HASH,
}
self.cloud_storage.SetCalculatedHashesForTesting(self.local_file_hashes)
# Local hash files and their contents.
local_hash_files = {
os.path.join(os.path.sep, 'path', 'to', 'success.wpr.sha1'):
CloudStorageTest.SUCCESS_FILE_HASH,
os.path.join(os.path.sep, 'path', 'to', 'wrong_hash.wpr.sha1'):
'wronghash'.zfill(40),
os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr.sha1'):
CloudStorageTest.PUBLIC_FILE_HASH,
os.path.join(os.path.sep, 'path', 'to', 'preset_partner_file.wpr.sha1'):
CloudStorageTest.PARTNER_FILE_HASH,
os.path.join(os.path.sep, 'path', 'to',
'preset_internal_file.wpr.sha1'):
CloudStorageTest.INTERNAL_FILE_HASH,
}
self.cloud_storage.SetHashFileContentsForTesting(local_hash_files)
def testSetup(self):
self.assertEqual(self.local_file_hashes,
self.cloud_storage.local_file_hashes)
self.assertEqual(set(self.data_files),
set(self.cloud_storage.GetLocalDataFiles()))
self.assertEqual(self.cloud_storage.default_remote_paths,
self.cloud_storage.GetRemotePathsForTesting())
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.assertEqual(self.remote_paths,
self.cloud_storage.GetRemotePathsForTesting())
def testExistsEmptyCloudStorage(self):
# Test empty remote files dictionary.
self.assertFalse(self.cloud_storage.Exists(self.cloud_storage.PUBLIC_BUCKET,
'preset_public_file.wpr'))
self.assertFalse(self.cloud_storage.Exists(
self.cloud_storage.PARTNER_BUCKET, 'preset_partner_file.wpr'))
self.assertFalse(self.cloud_storage.Exists(
self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr'))
def testExistsNonEmptyCloudStorage(self):
# Test non-empty remote files dictionary.
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.assertTrue(self.cloud_storage.Exists(
self.cloud_storage.PUBLIC_BUCKET, 'preset_public_file.wpr'))
self.assertTrue(self.cloud_storage.Exists(
self.cloud_storage.PARTNER_BUCKET, 'preset_partner_file.wpr'))
self.assertTrue(self.cloud_storage.Exists(
self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr'))
self.assertFalse(self.cloud_storage.Exists(
self.cloud_storage.PUBLIC_BUCKET, 'fake_file'))
self.assertFalse(self.cloud_storage.Exists(
self.cloud_storage.PARTNER_BUCKET, 'fake_file'))
self.assertFalse(self.cloud_storage.Exists(
self.cloud_storage.INTERNAL_BUCKET, 'fake_file'))
# Reset state.
self.cloud_storage.SetRemotePathsForTesting()
def testNonEmptyInsertAndExistsPublic(self):
# Test non-empty remote files dictionary.
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.assertFalse(self.cloud_storage.Exists(self.cloud_storage.PUBLIC_BUCKET,
'success.wpr'))
self.cloud_storage.Insert(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
self.assertTrue(self.cloud_storage.Exists(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
# Reset state.
self.cloud_storage.SetRemotePathsForTesting()
def testEmptyInsertAndExistsPublic(self):
# Test empty remote files dictionary.
self.assertFalse(self.cloud_storage.Exists(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
self.cloud_storage.Insert(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
self.assertTrue(self.cloud_storage.Exists(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
def testEmptyInsertAndGet(self):
self.assertRaises(self.cloud_storage.NotFoundError, self.cloud_storage.Get,
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
self.cloud_storage.Insert(self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to',
'success.wpr'))
self.assertTrue(self.cloud_storage.Exists(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
self.assertEqual(CloudStorageTest.SUCCESS_FILE_HASH, self.cloud_storage.Get(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr')))
def testNonEmptyInsertAndGet(self):
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.assertRaises(self.cloud_storage.NotFoundError, self.cloud_storage.Get,
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
self.cloud_storage.Insert(self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to',
'success.wpr'))
self.assertTrue(self.cloud_storage.Exists(self.cloud_storage.PUBLIC_BUCKET,
'success.wpr'))
self.assertEqual(
CloudStorageTest.SUCCESS_FILE_HASH, self.cloud_storage.Get(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr')))
# Reset state.
self.cloud_storage.SetRemotePathsForTesting()
def testGetIfChanged(self):
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.assertRaises(
self.cloud_storage.NotFoundError, self.cloud_storage.Get,
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
self.assertFalse(self.cloud_storage.GetIfChanged(
os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr'),
self.cloud_storage.PUBLIC_BUCKET))
self.cloud_storage.ChangeRemoteHashForTesting(
self.cloud_storage.PUBLIC_BUCKET, 'preset_public_file.wpr',
CloudStorageTest.UPDATED_HASH)
self.assertTrue(self.cloud_storage.GetIfChanged(
os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr'),
self.cloud_storage.PUBLIC_BUCKET))
self.assertFalse(self.cloud_storage.GetIfChanged(
os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr'),
self.cloud_storage.PUBLIC_BUCKET))
# Reset state.
self.cloud_storage.SetRemotePathsForTesting()
def testList(self):
self.assertEqual([],
self.cloud_storage.List(self.cloud_storage.PUBLIC_BUCKET))
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.assertEqual(['preset_public_file.wpr'],
self.cloud_storage.List(self.cloud_storage.PUBLIC_BUCKET))
# Reset state.
self.cloud_storage.SetRemotePathsForTesting()
def testPermissionError(self):
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.cloud_storage.SetPermissionLevelForTesting(
self.cloud_storage.PUBLIC_PERMISSION)
self.assertRaises(
self.cloud_storage.PermissionError, self.cloud_storage.Get,
self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr',
os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr'))
self.assertRaises(
self.cloud_storage.PermissionError, self.cloud_storage.GetIfChanged,
os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr'),
self.cloud_storage.INTERNAL_BUCKET)
self.assertRaises(
self.cloud_storage.PermissionError, self.cloud_storage.List,
self.cloud_storage.INTERNAL_BUCKET)
self.assertRaises(
self.cloud_storage.PermissionError, self.cloud_storage.Exists,
self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr')
self.assertRaises(
self.cloud_storage.PermissionError, self.cloud_storage.Insert,
self.cloud_storage.INTERNAL_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
# Reset state.
self.cloud_storage.SetRemotePathsForTesting()
def testCredentialsError(self):
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.cloud_storage.SetPermissionLevelForTesting(
self.cloud_storage.CREDENTIALS_ERROR_PERMISSION)
self.assertRaises(
self.cloud_storage.CredentialsError, self.cloud_storage.Get,
self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr',
os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr'))
self.assertRaises(
self.cloud_storage.CredentialsError, self.cloud_storage.GetIfChanged,
self.cloud_storage.INTERNAL_BUCKET,
os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr'))
self.assertRaises(
self.cloud_storage.CredentialsError, self.cloud_storage.List,
self.cloud_storage.INTERNAL_BUCKET)
self.assertRaises(
self.cloud_storage.CredentialsError, self.cloud_storage.Exists,
self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr')
self.assertRaises(
self.cloud_storage.CredentialsError, self.cloud_storage.Insert,
self.cloud_storage.INTERNAL_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
# Reset state.
self.cloud_storage.SetRemotePathsForTesting()
def testOpenRestoresCorrectly(self):
file_path = os.path.realpath(__file__)
stubs = system_stub.Override(system_stub_test_module, ['open'])
stubs.open.files = {file_path:'contents'}
f = system_stub_test_module.SystemStubTest.TestOpen(file_path)
self.assertEqual(type(f), system_stub.OpenFunctionStub.FileStub)
stubs.open.files = {}
stubs.Restore()
# This will throw an error if the open stub wasn't restored correctly.
f = system_stub_test_module.SystemStubTest.TestOpen(file_path)
self.assertEqual(type(f), OPEN_FILE_TYPE)
|
|
#!/usr/bin/env python
# encoding: utf-8
"""
test_doc.py
"""
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from json import loads, dumps
from unittest import TestCase
from mail_safe_test import app
from mail_safe_test.auth import UserModel
from mail_safe_test.resources.doc import DocListAPI, DocAPI, DocModel
def common_setUp(self):
# Flask apps testing. See: http://flask.pocoo.org/docs/testing/
app.config['TESTING'] = True
app.config['CSRF_ENABLED'] = False
self.app = app.test_client()
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_user_stub()
self.testbed.init_memcache_stub()
def setup_documents(self):
# Provision two valid users
self.user1_id = '1'
self.user1_token = "valid_user"
self.user2_id = '2'
self.user2_token = "valid_user2"
args = {
"id": self.user1_id,
"first_name": "Testy",
"last_name": "McTest",
"email": "[email protected]"
}
user = UserModel(**args)
user.put()
args["id"] = self.user2_id
user = UserModel(**args)
user.put()
# Provision a document for both users
self.d_id = '12345'
self.d_title = "A Title"
self.d_content = "Hello. This is a test document. With content."
self.d_status = 0
document_fields = {
'id': self.d_id,
'title': self.d_title,
'content': self.d_content,
'status': self.d_status
}
document = DocModel(parent=ndb.Key(UserModel, self.user1_id), **document_fields)
document.put()
document_fields = {
'id': '67890',
'title': "Another Title",
'content': "Howdy. This is a different test document. With other content. And exclamation points!",
'status': 1
}
document = DocModel(parent=ndb.Key(UserModel, self.user2_id), **document_fields)
document.put()
self.document_count = 2
def verify_document_count(self, document_count):
docs = DocModel.query().fetch()
self.assertEqual(document_count, len(docs))
def verify_user_document_count(self, user_id, document_count):
docs = DocModel.query(ancestor=ndb.Key(UserModel, user_id)).fetch()
self.assertEqual(document_count, len(docs))
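# --- Illustrative sketch, not part of the original tests: why the ancestor
# query above only sees a single user's documents. Entities created with a
# parent key belong to that user's entity group, so the ancestor-scoped query
# excludes other users' documents. The ids and field values are assumptions,
# and the snippet expects an activated testbed (as set up in common_setUp).
def example_ancestor_scoping():
    parent_key = ndb.Key(UserModel, '1')
    document = DocModel(parent=parent_key, id='55555',
                        title="A Title", content="Some content", status=0)
    document.put()
    # Only documents stored under parent_key are returned here.
    return DocModel.query(ancestor=parent_key).fetch()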
class NonAuthContactTestCases(TestCase):
def setUp(self):
common_setUp(self)
setup_documents(self)
verify_document_count(self, self.document_count)
self.post_data = {
'title': "Titleist",
'content': "This is a short test document",
'status': 0
}
self.put_data = {"status": "Published"}
self.verifications = {}
self.verifications[("GET", "/user/doc/")] = 404
self.verifications[("PUT", "/user/doc/")] = 404
self.verifications[("POST", "/user/doc/")] = 404
self.verifications[("DELETE", "/user/doc/")] = 404
self.verifications[("GET", "/user/doc/12/")] = 403
self.verifications[("PUT", "/user/doc/12/")] = 403
self.verifications[("POST", "/user/doc/12/")] = 405
self.verifications[("DELETE", "/user/doc/12/")] = 403
self.verifications[("GET", "/user/doc/12345/")] = 403
self.verifications[("PUT", "/user/doc/12345/")] = 403
self.verifications[("POST", "/user/doc/12345/")] = 405
self.verifications[("DELETE", "/user/doc/12345/")] = 403
self.verifications[("GET", '/user/docs/')] = 403
self.verifications[("PUT", '/user/docs/')] = 405
self.verifications[("POST", '/user/docs/')] = 403
self.verifications[("DELETE", '/user/docs/')] = 403
def tearDown(self):
self.testbed.deactivate()
def test_doc_create(self):
rv = self.app.post('/user/docs/',
data=dumps({"content": "This is my revised testing document."}),
content_type='application/json',
headers={'Authorization': self.user1_token}
)
self.assertEqual(200, rv.status_code)
def test_doc_none_put(self):
rv = self.app.put('/user/doc/25/',
data=dumps({"content": "This is my revised testing document."}),
content_type='application/json',
headers={'Authorization': self.user1_token})
# A document that does not exist is expected to return 404, matching
# test_document_id_none_put in AuthUserDocTestCases.
self.assertEqual(404, rv.status_code)
def test_document_no_auth(self):
errors=[]
for request in self.verifications:
method = request[0]
url = request[1]
response = self.verifications[request]
if "GET" == method:
rv = self.app.get(url)
verify_document_count(self, self.document_count)
elif "POST" == method:
rv = self.app.post(url, data=dumps(self.post_data),
content_type='application/json')
verify_document_count(self, self.document_count)
elif "PUT" == method:
rv = self.app.put(url, data=dumps(self.put_data),
content_type='application/json')
verify_document_count(self, self.document_count)
elif "DELETE" == method:
rv = self.app.delete(url)
verify_document_count(self, self.document_count)
else:
self.fail("This HTTP method is unsupported")
if (response != rv.status_code):
errors.append("%s %s returned %d" % (method, url, rv.status_code))
self.assertFalse(len(errors) > 0, errors)
def test_document_invalid_auth(self):
auth = {'headers': {'Authorization': 'invalid'}}
errors=[]
for request in self.verifications:
method = request[0]
url = request[1]
response = self.verifications[request]
if "GET" == method:
rv = self.app.get(url, **auth)
verify_document_count(self, self.document_count)
elif "POST" == method:
rv = self.app.post(url, data=dumps(self.post_data),
content_type='application/json', **auth)
verify_document_count(self, self.document_count)
elif "PUT" == method:
rv = self.app.put(url, data=dumps(self.put_data),
content_type='application/json', **auth)
verify_document_count(self, self.document_count)
elif "DELETE" == method:
rv = self.app.delete(url, **auth)
verify_document_count(self, self.document_count)
else:
self.fail("This HTTP method is unsupported")
if (response != rv.status_code):
errors.append("%s %s returned %d" % (method, url, rv.status_code))
self.assertFalse(len(errors) > 0, errors)
class AuthUserDocTestCases(TestCase):
def setUp(self):
common_setUp(self)
setup_documents(self)
self.post_data = {
'title': "Titleist",
'content': "This is a short test document",
'status': 0
}
self.put_data = {"status": "Published"}
def tearDown(self):
self.testbed.deactivate()
def test_document_id_none_get(self):
rv = self.app.get('/user/doc/25/',
headers={'Authorization': self.user1_token})
self.assertEqual(404, rv.status_code)
def test_document_id_get(self):
rv = self.app.get('/user/doc/12345/',
headers={'Authorization': self.user1_token})
self.assertEqual(200, rv.status_code)
data = loads(rv.data)
self.assertEqual(self.d_content, data['content'])
self.assertEqual(self.d_status, data['status'])
def test_document_id_get_other_user(self):
rv = self.app.get('/user/doc/67890/',
headers={'Authorization': self.user1_token})
self.assertEqual(404, rv.status_code)
rv = self.app.get('/user/doc/12345/',
headers={'Authorization': self.user2_token})
self.assertEqual(404, rv.status_code)
def test_document_id_none_put(self):
rv = self.app.put('/user/doc/25/',
data=dumps(self.put_data),
headers={'Authorization': self.user1_token})
self.assertEqual(404, rv.status_code)
def test_document_id_none_delete(self):
rv = self.app.delete('/user/doc/25/',
headers={'Authorization': self.user1_token})
self.assertEqual(404, rv.status_code)
def test_document_id_post(self):
rv = self.app.post('/user/doc/00101010/',
data=dumps(self.post_data),
content_type='application/json',
headers={'Authorization': self.user1_token})
# Posts are only allowed for the list endpoint.
self.assertEqual(405, rv.status_code)
def test_document_post(self):
verify_document_count(self, 2)
verify_user_document_count(self, self.user1_id, 1)
rv = self.app.post('/user/docs/',
data=dumps(self.post_data),
content_type='application/json',
headers={'Authorization': self.user1_token})
self.assertEqual(200, rv.status_code)
data = loads(rv.data)
self.assertEqual("Titleist", data['title'])
self.assertEqual("This is a short test document", data['content'])
self.assertEqual(0, data['status'])
verify_document_count(self, 3)
verify_user_document_count(self, self.user1_id, 2)
def test_document_post_duplicate(self):
verify_document_count(self, 2)
verify_user_document_count(self, self.user1_id, 1)
rv = self.app.post('/user/docs/',
data=dumps(self.post_data),
content_type='application/json',
headers = {'Authorization': self.user1_token})
self.assertEqual(200, rv.status_code)
verify_document_count(self, 3)
verify_user_document_count(self, self.user1_id, 2)
def test_document_post_missing_title(self):
verify_document_count(self, self.document_count)
verify_user_document_count(self, self.user1_id, 1)
post_data = self.post_data.copy()
del(post_data['title'])
rv = self.app.post('/user/docs/',
data=dumps(post_data),
content_type='application/json',
headers={'Authorization': self.user1_token})
self.assertEqual(200, rv.status_code)
verify_document_count(self, 3)
verify_user_document_count(self, self.user1_id, 2)
def test_document_post_missing_content(self):
verify_document_count(self, self.document_count)
verify_user_document_count(self, self.user1_id, 1)
post_data = self.post_data.copy()
del(post_data['content'])
rv = self.app.post('/user/docs/',
data=dumps(post_data),
content_type='application/json',
headers={'Authorization': self.user1_token})
self.assertEqual(200, rv.status_code)
verify_document_count(self, 3)
verify_user_document_count(self, self.user1_id, 2)
def test_document_post_missing_status(self):
verify_document_count(self, self.document_count)
verify_user_document_count(self, self.user1_id, 1)
post_data = self.post_data.copy()
del(post_data['status'])
rv = self.app.post('/user/docs/',
data=dumps(post_data),
content_type='application/json',
headers={'Authorization': self.user1_token})
self.assertEqual(200, rv.status_code)
verify_document_count(self, 3)
verify_user_document_count(self, self.user1_id, 2)
def test_document_put(self):
rv = self.app.put('/user/doc/12345/',
data='{"content": "Changed"}',
content_type='application/json',
headers={'Authorization': self.user1_token})
self.assertEqual(200, rv.status_code)
data = loads(rv.data)
self.assertEqual(self.d_title, data['title'])
self.assertEqual("Changed", data['content'])
self.assertEqual(self.d_status, data['status'])
def test_document_put_all_fields(self):
rv = self.app.put('/user/doc/12345/',
data='{"content": "Changed", "title": "A Different Title", "status": 1}',
content_type='application/json',
headers = {'Authorization': self.user1_token})
self.assertEqual(200, rv.status_code)
data = loads(rv.data)
self.assertEqual("A Different Title", data['title'])
self.assertEqual("Changed", data['content'])
self.assertEqual(1, data['status'])
def test_document_list_get(self):
rv = self.app.get('/user/docs/',
headers={'Authorization': self.user1_token})
self.assertEqual(200, rv.status_code)
data = loads(rv.data)
self.assertEqual(self.d_title, data['docs'][0]['title'])
self.assertEqual(self.d_content, data['docs'][0]['content'])
self.assertEqual(self.d_status, data['docs'][0]['status'])
rv = self.app.get('/user/docs/',
headers={'Authorization': self.user2_token})
self.assertEqual(200, rv.status_code)
data = loads(rv.data)
self.assertEqual("Another Title", data['docs'][0]['title'])
self.assertEqual("Howdy. This is a different test document. With other content. And exclamation points!", data['docs'][0]['content'])
self.assertEqual(1, data['docs'][0]['status'])
def test_document_list_delete(self):
verify_document_count(self, 2)
verify_user_document_count(self, self.user1_id, 1)
verify_user_document_count(self, self.user2_id, 1)
rv = self.app.delete('/user/docs/',
headers={'Authorization': self.user1_token})
self.assertEqual(200, rv.status_code)
data = loads(rv.data)
self.assertEqual([], data['docs'])
verify_document_count(self, 1)
verify_user_document_count(self, self.user1_id, 0)
verify_user_document_count(self, self.user2_id, 1)
|
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for integrating a Django project with the appengine infrastructure.
This requires Django 1.0beta1 or greater.
This module enables you to use the Django manage.py utility and *some* of its
subcommands. View the help of manage.py for exact details.
Additionally this module takes care of initialising the datastore (and a test
datastore) so that the Django test infrastructure can be used for your
appengine project.
To use this module add the following two lines to your main.py and manage.py
scripts at the end of your imports:
from appengine_django import InstallAppengineHelperForDjango
InstallAppengineHelperForDjango()
If you would like to use a version of Django other than that provided by the
system all you need to do is include it in a directory just above this helper,
eg:
appengine_django/__init__.py - This file
django/... - your private copy of Django.
"""
import logging
import os
import re
import sys
import unittest
import zipfile
DIR_PATH = os.path.abspath(os.path.dirname(__file__))
PARENT_DIR = os.path.dirname(DIR_PATH)
# Add this project to the start of sys path to enable direct imports.
sys.path = [PARENT_DIR,] + sys.path
# Try to import the appengine code from the system path.
try:
from google.appengine.api import apiproxy_stub_map
except ImportError, e:
# Not on the system path. Build a list of alternative paths where it may be.
# First look within the project for a local copy, then look for where the Mac
# OS SDK installs it.
paths = [os.path.join(PARENT_DIR, '.google_appengine'),
os.path.join(PARENT_DIR, 'google_appengine'),
'/usr/local/google_appengine']
# Then if on windows, look for where the Windows SDK installed it.
for path in os.environ.get('PATH', '').split(';'):
path = path.rstrip('\\')
if path.endswith('google_appengine'):
paths.append(path)
try:
from win32com.shell import shell
from win32com.shell import shellcon
id_list = shell.SHGetSpecialFolderLocation(
0, shellcon.CSIDL_PROGRAM_FILES)
program_files = shell.SHGetPathFromIDList(id_list)
paths.append(os.path.join(program_files, 'Google',
'google_appengine'))
except ImportError, e:
# Not windows.
pass
# Loop through all possible paths and look for the SDK dir.
SDK_PATH = None
for sdk_path in paths:
if os.path.exists(sdk_path):
SDK_PATH = sdk_path
break
if SDK_PATH is None:
# The SDK could not be found in any known location.
sys.stderr.write("The Google App Engine SDK could not be found!\n")
sys.stderr.write("See README for installation instructions.\n")
sys.exit(1)
if SDK_PATH == os.path.join(PARENT_DIR, 'google_appengine'):
logging.warn('Loading the SDK from the \'google_appengine\' subdirectory '
'is now deprecated!')
logging.warn('Please move the SDK to a subdirectory named '
'\'.google_appengine\' instead.')
logging.warn('See README for further details.')
# Add the SDK and the libraries within it to the system path.
EXTRA_PATHS = [
SDK_PATH,
os.path.join(SDK_PATH, 'lib', 'django'),
os.path.join(SDK_PATH, 'lib', 'webob'),
os.path.join(SDK_PATH, 'lib', 'yaml', 'lib'),
]
# Add SDK paths at the start of sys.path, but after the local directory which
# was added to the start of sys.path on line 50 above. The local directory
# must come first to allow the local imports to override the SDK and
# site-packages directories.
sys.path = sys.path[0:1] + EXTRA_PATHS + sys.path[1:]
from google.appengine.api import apiproxy_stub_map
# Look for a zipped copy of Django.
have_django_zip = False
django_zip_path = os.path.join(PARENT_DIR, 'django.zip')
if os.path.exists(django_zip_path):
have_django_zip = True
sys.path.insert(1, django_zip_path)
# Remove the standard version of Django if a local copy has been provided.
if have_django_zip or os.path.exists(os.path.join(PARENT_DIR, 'django')):
for k in [k for k in sys.modules if k.startswith('django')]:
del sys.modules[k]
from django import VERSION
from django.conf import settings
# Flags made available by this module
appid = None
appconfig = None
have_appserver = False
# Hide everything other than the flags above and the install function.
__all__ = ("appid", "appconfig", "have_appserver", "have_django_zip",
"django_zip_path", "InstallAppengineHelperForDjango")
INCOMPATIBLE_COMMANDS = ["adminindex", "createcachetable", "dbshell",
"inspectdb", "runfcgi", "syncdb", "validate"]
def LoadAppengineEnvironment():
"""Loads the appengine environment.
Returns:
This function has no return value, but it sets the following parameters on
this package:
- appid: The name of the application as read from the config file.
- appconfig: The appserver configuration dictionary for the application, as
read from the config file.
- have_appserver: Boolean parameter which is True if the code is being run
from within the appserver environment.
"""
global appid, appconfig, have_appserver
# Load the application configuration.
try:
from google.appengine.tools import dev_appserver
appconfig, unused_matcher = dev_appserver.LoadAppConfig(".", {})
appid = appconfig.application
except ImportError:
# Running under the real appserver.
appconfig = {}
appid = "unknown"
# Detect if we are running under an appserver.
have_appserver = False
stub = apiproxy_stub_map.apiproxy.GetStub("datastore_v3")
if stub:
have_appserver = True
logging.debug("Loading application '%s' %s an appserver" %
(appid, have_appserver and "with" or "without"))
def InstallAppengineDatabaseBackend():
"""Installs the appengine database backend into Django.
The appengine database lives in the db/ subdirectory of this package, but is
known as "appengine" to Django. This function installs the module where
Django expects to find its database backends.
"""
from appengine_django import db
sys.modules['django.db.backends.appengine'] = db
logging.debug("Installed appengine database backend")
def InstallGoogleMemcache():
"""Installs the Google memcache into Django.
By default Django tries to import the standard memcache module.
Because the App Engine memcache is API compatible with the Python memcache
module, we can trick Django into thinking it is installed and into using it.
Now you can use CACHE_BACKEND = 'memcached://' in settings.py. IP address
and port number are not required.
"""
from google.appengine.api import memcache
sys.modules['memcache'] = memcache
logging.debug("Installed App Engine memcache backend")
def InstallDjangoModuleReplacements():
"""Replaces internal Django modules with App Engine compatible versions."""
# Replace the session module with a partial replacement overlay using
# __path__ so that portions not replaced will fall through to the original
# implementation.
try:
from django.contrib import sessions
orig_path = sessions.__path__[0]
sessions.__path__.insert(0, os.path.join(DIR_PATH, 'sessions'))
from django.contrib.sessions import backends
backends.__path__.append(os.path.join(orig_path, 'backends'))
except ImportError:
logging.debug("No Django session support available")
# Replace incompatible dispatchers.
import django.core.signals
import django.db
import django.dispatch.dispatcher
# Rollback occurs automatically on Google App Engine. Disable the Django
# rollback handler.
try:
# pre 1.0
from django.dispatch import errors
CheckedException = errors.DispatcherKeyError
def _disconnectSignal():
django.dispatch.dispatcher.disconnect(
django.db._rollback_on_exception,
django.core.signals.got_request_exception)
except ImportError:
CheckedException = KeyError
def _disconnectSignal():
django.core.signals.got_request_exception.disconnect(
django.db._rollback_on_exception)
try:
_disconnectSignal()
except CheckedException, e:
logging.debug("Django rollback handler appears to be already disabled.")
def PatchDjangoSerializationModules():
"""Monkey patches the Django serialization modules.
The standard Django serialization modules do not correctly handle the
datastore models provided by this package. This method installs replacements
for selected modules and methods to give Django the capability to correctly
serialize and deserialize datastore models.
"""
# These can't be imported until InstallAppengineDatabaseBackend has run.
from django.core.serializers import python
from appengine_django.serializer.python import Deserializer
if not hasattr(settings, "SERIALIZATION_MODULES"):
settings.SERIALIZATION_MODULES = {}
base_module = "appengine_django"
settings.SERIALIZATION_MODULES["xml"] = "%s.serializer.xml" % base_module
python.Deserializer = Deserializer
PatchDeserializedObjectClass()
DisableModelValidation()
logging.debug("Installed appengine json and python serialization modules")
def PatchDeserializedObjectClass():
"""Patches the DeserializedObject class.
The default implementation calls save directly on the django Model base
class to avoid pre-save handlers. The model class provided by this package
is not derived from the Django Model class and therefore must be called
directly.
Additionally we need to clear the internal _parent attribute as it may
contain a FakeParent class that is used to deserialize instances without
needing to load the parent instance itself. See the PythonDeserializer for
more details.
"""
# This can't be imported until InstallAppengineDatabaseBackend has run.
from django.core.serializers import base
class NewDeserializedObject(base.DeserializedObject):
def save(self, save_m2m=True):
self.object.save()
self.object._parent = None
base.DeserializedObject = NewDeserializedObject
logging.debug("Replacement DeserializedObject class installed")
def DisableModelValidation():
"""Disables Django's model validation routines.
The model validation is primarily concerned with validating foreign key
references. There is no equivalent checking code for datastore References at
this time.
Validation needs to be disabled or serialization/deserialization will fail.
"""
from django.core.management import validation
validation.get_validation_errors = lambda x, y=0: 0
logging.debug("Django SQL model validation disabled")
def CleanupDjangoSettings():
"""Removes incompatible entries from the django settings module."""
# Ensure this module is installed as an application.
apps = getattr(settings, "INSTALLED_APPS", ())
found = False
for app in apps:
if app.endswith("appengine_django"):
found = True
break
if not found:
logging.warn("appengine_django module is not listed as an application!")
apps += ("appengine_django",)
setattr(settings, "INSTALLED_APPS", apps)
logging.info("Added 'appengine_django' as an application")
# Ensure the database backend is appropriately configured.
dbe = getattr(settings, "DATABASE_ENGINE", "")
if dbe != "appengine":
settings.DATABASE_ENGINE = "appengine"
logging.warn("DATABASE_ENGINE is not configured as 'appengine'. "
"Value overridden!")
for var in ["NAME", "USER", "PASSWORD", "HOST", "PORT"]:
val = getattr(settings, "DATABASE_%s" % var, "")
if val:
setattr(settings, "DATABASE_%s" % var, "")
logging.warn("DATABASE_%s should be blank. Value overridden!" % var)
# Remove incompatible middleware modules.
mw_mods = list(getattr(settings, "MIDDLEWARE_CLASSES", ()))
disallowed_middleware_mods = (
'django.middleware.doc.XViewMiddleware',)
for modname in mw_mods[:]:
if modname in disallowed_middleware_mods:
# Currently only the CommonMiddleware has been ported. As other base
# modules are converted, remove from the disallowed_middleware_mods
# tuple.
mw_mods.remove(modname)
logging.warn("Middleware module '%s' is not compatible. Removed!" %
modname)
setattr(settings, "MIDDLEWARE_CLASSES", tuple(mw_mods))
# Remove incompatible application modules
app_mods = list(getattr(settings, "INSTALLED_APPS", ()))
disallowed_apps = (
'django.contrib.contenttypes',
'django.contrib.sites',)
for app in app_mods[:]:
if app in disallowed_apps:
app_mods.remove(app)
logging.warn("Application module '%s' is not compatible. Removed!" % app)
setattr(settings, "INSTALLED_APPS", tuple(app_mods))
# Remove incompatible session backends.
session_backend = getattr(settings, "SESSION_ENGINE", "")
if session_backend.endswith("file"):
logging.warn("File session backend is not compatible. Overridden "
"to use db backend!")
setattr(settings, "SESSION_ENGINE", "django.contrib.sessions.backends.db")
def ModifyAvailableCommands():
"""Removes incompatible commands and installs replacements where possible."""
if have_appserver:
# Commands are not used when running from an appserver.
return
from django.core import management
project_directory = os.path.join(__path__[0], "../")
if have_django_zip:
FindCommandsInZipfile.orig = management.find_commands
management.find_commands = FindCommandsInZipfile
management.get_commands()
# Replace startapp command which is set by previous call to get_commands().
from appengine_django.management.commands.startapp import ProjectCommand
management._commands['startapp'] = ProjectCommand(project_directory)
RemoveCommands(management._commands)
logging.debug("Removed incompatible Django manage.py commands")
def FindCommandsInZipfile(management_dir):
"""
Given a path to a management directory, returns a list of all the command
names that are available.
This implementation also works when Django is loaded from a zip.
Returns an empty list if no commands are defined.
"""
zip_marker = ".zip%s" % os.sep
if zip_marker not in management_dir:
return FindCommandsInZipfile.orig(management_dir)
# Django is sourced from a zipfile, ask zip module for a list of files.
filename, path = management_dir.split(zip_marker)
zipinfo = zipfile.ZipFile("%s.zip" % filename)
# The zipfile module returns paths in the format of the operating system
# that created the zipfile! This may not match the path to the zipfile
# itself. Convert operating system specific characters to a standard
# character (#) to compare paths to work around this.
path_normalise = re.compile(r"[/\\]")
path = path_normalise.sub("#", path)
def _IsCmd(t):
"""Returns true if t matches the criteria for a command module."""
t = path_normalise.sub("#", t)
if not t.startswith(path):
return False
if t.startswith("_") or not t.endswith(".py"):
return False
return True
return [os.path.basename(f)[:-3] for f in zipinfo.namelist() if _IsCmd(f)]
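# --- Illustrative sketch, not part of the original helper: the separator
# normalisation FindCommandsInZipfile relies on. A zipfile created on Windows
# stores backslash-separated paths while the local management_dir may use
# forward slashes; mapping both to '#' makes the prefix comparison work. The
# sample paths are assumptions for demonstration only.
def _ExampleZipPathNormalisation():
  path_normalise = re.compile(r"[/\\]")
  stored = path_normalise.sub(
      "#", r"django\core\management\commands\runserver.py")
  local = path_normalise.sub("#", "django/core/management/commands")
  # Both now read 'django#core#management#commands...', so startswith() works.
  return stored.startswith(local)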
def RemoveCommands(command_dict):
"""Removes incompatible commands from the specified command dictionary."""
for cmd in command_dict.keys():
if cmd.startswith("sql"):
del command_dict[cmd]
elif cmd in INCOMPATIBLE_COMMANDS:
del command_dict[cmd]
def InstallReplacementImpModule():
"""Install a replacement for the imp module removed by the appserver.
This is only used to find management modules provided by applications.
"""
if not have_appserver:
return
modname = 'appengine_django.replacement_imp'
imp_mod = __import__(modname, {}, [], [''])
sys.modules['imp'] = imp_mod
logging.debug("Installed replacement imp module")
def InstallAppengineHelperForDjango():
"""Installs and Patches all of the classes/methods required for integration.
If the variable DEBUG_APPENGINE_DJANGO is set in the environment verbose
logging of the actions taken will be enabled.
"""
if VERSION < (1, 0, None):
logging.error("Django 1.0 or greater is required!")
sys.exit(1)
if os.getenv("DEBUG_APPENGINE_DJANGO"):
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
logging.debug("Loading the Google App Engine Helper for Django...")
# Force Django to reload its settings.
settings._target = None
# Must set this env var *before* importing any more of Django.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
LoadAppengineEnvironment()
InstallReplacementImpModule()
InstallAppengineDatabaseBackend()
InstallModelForm()
InstallGoogleMemcache()
InstallDjangoModuleReplacements()
PatchDjangoSerializationModules()
CleanupDjangoSettings()
ModifyAvailableCommands()
InstallGoogleSMTPConnection()
InstallAuthentication()
logging.debug("Successfully loaded the Google App Engine Helper for Django.")
def InstallGoogleSMTPConnection():
from appengine_django import mail as gmail
from django.core import mail
logging.debug("Installing Google Email Adapter for Django")
mail.SMTPConnection = gmail.GoogleSMTPConnection
mail.mail_admins = gmail.mail_admins
mail.mail_managers = gmail.mail_managers
def InstallAuthentication():
try:
from appengine_django.auth import models as helper_models
from django.contrib.auth import models
models.User = helper_models.User
models.Group = helper_models.Group
models.Permission = helper_models.Permission
models.Message = helper_models.Message
from django.contrib.auth import middleware as django_middleware
from appengine_django.auth.middleware import AuthenticationMiddleware
django_middleware.AuthenticationMiddleware = AuthenticationMiddleware
from django.contrib.auth import decorators as django_decorators
from appengine_django.auth.decorators import login_required
django_decorators.login_required = login_required
from django.contrib import auth as django_auth
from django.contrib.auth import tests as django_tests
django_auth.suite = unittest.TestSuite
django_tests.suite = unittest.TestSuite
logging.debug("Installing authentication framework")
except ImportError:
logging.debug("No Django authentication support available")
def InstallModelForm():
"""Replace Django ModelForm with the AppEngine ModelForm."""
# This MUST happen as early as possible, but after any auth model patching.
from google.appengine.ext.db import djangoforms as aeforms
try:
# pre 1.0
from django import newforms as forms
except ImportError:
from django import forms
forms.ModelForm = aeforms.ModelForm
# Extend ModelForm with support for EmailProperty
# TODO: This should be submitted to the main App Engine SDK.
from google.appengine.ext.db import EmailProperty
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for an email property."""
defaults = {'form_class': forms.EmailField}
defaults.update(kwargs)
return super(EmailProperty, self).get_form_field(**defaults)
EmailProperty.get_form_field = get_form_field
|
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import calendar
from zope.interface import implements
from twisted.python import log
from twisted.internet import defer
from buildbot import interfaces, sourcestamp
from buildbot.process import properties
from buildbot.status.results import FAILURE
from buildbot.db import buildrequests
class BuildRequest(object):
"""
A rolled-up encapsulation of all of the data relevant to a build request.
This class is used by the C{nextBuild} and C{mergeRequests} configuration
parameters, as well as in starting a build. Construction of a BuildRequest
object is a heavyweight process involving a lot of database queries, so
it should be avoided where possible. See bug #1894.
Build requests have a SourceStamp which specifies what sources to build.
This may specify a specific revision of the source tree (so source.branch,
source.revision, and source.patch are used). The .patch attribute is either
None or a tuple of (patchlevel, diff), consisting of a number to use in
'patch -pN', and a unified-format context diff.
Alternatively, the SourceStamp may specify a set of Changes to be built,
contained in source.changes. In this case, the request may be mergeable
with other BuildRequests on the same branch.
@type source: L{buildbot.sourcestamp.SourceStamp}
@ivar source: the source stamp that this BuildRequest uses
@type reason: string
@ivar reason: the reason this Build is being requested. Schedulers provide
this, but for forced builds the user requesting the build will provide a
string. It comes from the buildsets table.
@type properties: L{properties.Properties}
@ivar properties: properties that should be applied to this build, taken
from the buildset containing this build request
@ivar submittedAt: a timestamp (seconds since epoch) when this request was
submitted to the Builder. This is used by the CVS step to compute a
checkout timestamp, as well as by the master to prioritize build requests
from oldest to newest.
@ivar buildername: name of the requested builder
@ivar priority: request priority
@ivar id: build request ID
@ivar bsid: ID of the parent buildset
"""
source = None
sources = None
submittedAt = None
@classmethod
def fromBrdict(cls, master, brdict):
"""
Construct a new L{BuildRequest} from a dictionary as returned by
L{BuildRequestsConnectorComponent.getBuildRequest}.
This method uses a cache, which may result in return of stale objects;
for the most up-to-date information, use the database connector
methods.
@param master: current build master
@param brdict: build request dictionary
@returns: L{BuildRequest}, via Deferred
"""
cache = master.caches.get_cache("BuildRequests", cls._make_br)
return cache.get(brdict['brid'], brdict=brdict, master=master)
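# Illustrative sketch (not part of the original class): a caller usually fetches a
# build request dictionary from the database connector and resolves it through the
# cache-backed constructor. The 'master' and 'brid' names below are hypothetical.
# >>> d = master.db.buildrequests.getBuildRequest(brid)
# >>> d.addCallback(lambda brdict: BuildRequest.fromBrdict(master, brdict))
# >>> d.addCallback(lambda breq: ...)  # breq is a fully populated BuildRequest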
@classmethod
@defer.inlineCallbacks
def _make_br(cls, brid, brdict, master):
buildrequest = cls()
buildrequest.id = brid
buildrequest.bsid = brdict['buildsetid']
buildrequest.buildername = brdict['buildername']
buildrequest.priority = brdict['priority']
dt = brdict['submitted_at']
buildrequest.submittedAt = dt and calendar.timegm(dt.utctimetuple())
buildrequest.master = master
# fetch the buildset to get the reason
buildset = yield master.db.buildsets.getBuildset(brdict['buildsetid'])
assert buildset # schema should guarantee this
buildrequest.reason = buildset['reason']
# fetch the buildset properties, and convert to Properties
buildset_properties = yield master.db.buildsets.getBuildsetProperties(brdict['buildsetid'])
buildrequest.properties = properties.Properties.fromDict(buildset_properties)
# fetch the sourcestamp dictionary
sslist = yield master.db.sourcestamps.getSourceStamps(buildset['sourcestampsetid'])
assert len(sslist) > 0, "Empty sourcestampset: the db schema enforces that the set exists but cannot enforce that it is non-empty"
# and turn it into a SourceStamps
buildrequest.sources = {}
def store_source(source):
buildrequest.sources[source.codebase] = source
dlist = []
for ssdict in sslist:
d = sourcestamp.SourceStamp.fromSsdict(master, ssdict)
d.addCallback(store_source)
dlist.append(d)
yield defer.gatherResults(dlist)
if buildrequest.sources:
buildrequest.source = buildrequest.sources.values()[0]
defer.returnValue(buildrequest)
def requestsHaveSameCodebases(self, other):
self_codebases = set(self.sources.iterkeys())
other_codebases = set(other.sources.iterkeys())
return self_codebases == other_codebases
def requestsHaveChangesForSameCodebases(self, other):
# A merge can only be done if both requests have sourcestamp sets containing
# comparable sourcestamps, i.e. sourcestamps with the same codebase. This
# means that both requests must have exactly the same set of codebases; if
# not, the merge cannot be performed.
# The second requirement is that both requests have their changes in the
# same codebases.
#
# Normally a scheduler always delivers the same set of codebases:
# sourcestamps with and without changes. When a scheduler is not configured
# with a set of codebases, it delivers only sourcestamps that have changes.
self_codebases = set(self.sources.iterkeys())
other_codebases = set(other.sources.iterkeys())
if self_codebases != other_codebases:
return False
for c in self_codebases:
# Check either both or neither have changes
if ((len(self.sources[c].changes) > 0)
!= (len(other.sources[c].changes) > 0)):
return False
# all codebases tested, no differences found
return True
def canBeMergedWith(self, other):
"""
Returns if both requests can be merged
"""
if not self.requestsHaveChangesForSameCodebases(other):
return False
# get codebases from self; they are equal to other's
self_codebases = set(self.sources.iterkeys())
for c in self_codebases:
# check to prevent exception
if c not in other.sources:
return False
if not self.sources[c].canBeMergedWith(other.sources[c]):
return False
return True
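# Illustrative sketch (not part of the original class): a mergeRequests callable in
# the master configuration can delegate directly to this check; the function name
# below is hypothetical.
# >>> def mergeRequests(builder, req1, req2):
# ...     return req1.canBeMergedWith(req2)
# >>> # c['mergeRequests'] = mergeRequests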
def mergeSourceStampsWith(self, others):
""" Returns one merged sourcestamp for every codebase """
#get all codebases from all requests
all_codebases = set(self.sources.iterkeys())
for other in others:
all_codebases |= set(other.sources.iterkeys())
all_merged_sources = {}
# walk along the codebases
for codebase in all_codebases:
all_sources = []
if codebase in self.sources:
all_sources.append(self.sources[codebase])
for other in others:
if codebase in other.sources:
all_sources.append(other.sources[codebase])
assert len(all_sources) > 0, "each codebase should have at least one sourcestamp"
all_merged_sources[codebase] = all_sources[0].mergeWith(all_sources[1:])
return [source for source in all_merged_sources.itervalues()]
def mergeReasons(self, others):
"""Return a reason for the merged build request."""
reasons = []
for req in [self] + others:
if req.reason and req.reason not in reasons:
reasons.append(req.reason)
return ", ".join(reasons)
def getSubmitTime(self):
return self.submittedAt
@defer.inlineCallbacks
def cancelBuildRequest(self):
# first, try to claim the request; if this fails, then it's too late to
# cancel the build anyway
try:
yield self.master.db.buildrequests.claimBuildRequests([self.id])
except buildrequests.AlreadyClaimedError:
log.msg("build request already claimed; cannot cancel")
return
# then complete it with 'FAILURE'; this is the closest we can get to
# cancelling a request without running into trouble with dangling
# references.
yield self.master.db.buildrequests.completeBuildRequests([self.id],
FAILURE)
# and let the master know that the enclosing buildset may be complete
yield self.master.maybeBuildsetComplete(self.bsid)
class BuildRequestControl:
implements(interfaces.IBuildRequestControl)
def __init__(self, builder, request):
self.original_builder = builder
self.original_request = request
self.brid = request.id
def subscribe(self, observer):
raise NotImplementedError
def unsubscribe(self, observer):
raise NotImplementedError
def cancel(self):
d = self.original_request.cancelBuildRequest()
d.addErrback(log.err, 'while cancelling build request')
|
|
__author__ = 'rogerjiang'
'''
This file performs inference based on the learned model
parameters (checkpoint). Each image is cut into 4 quarters due
to the limit of GPU memory. Vertical and horizontal reflections
and [0, 90, 180, 270] degree rotations are performed for each
quarter, and the outputs of the sigmoid predictions are later
arithmetically averaged.
'''
import tensorflow as tf
import simplejson
from utils import data_utils, train_utils
import os
import numpy as np
import cv2
import train
import pandas as pd
from shapely import wkt
import time
import sys
def test_input(img, img_size, H):
'''
Cut each image into 4 quarters ([upper, lower] * [left, right]), then
perform vertical and horizontal reflections and [0, 90, 180, 270] degree
rotations for each quarter. It yields (4 * 2 * 4 =) 32 padded images.
:param img: input image array of shape [img_width, img_height, channels].
:param img_size: [img_width, img_height] of the input image.
:param H: hyperparameter dict (uses 'crop_size', 'pad', 'x_width', 'x_height').
:return: generator of (crop_shape, padded_crop) tuples.
'''
[img_width, img_height] = img_size
[crop_width, crop_height] = H['crop_size']
pad = H['pad']
x_width = H['x_width']
x_height = H['x_height']
for [x_start, x_end, y_start, y_end] in [
[0, crop_width, 0, crop_height],
[0, crop_width, crop_height, img_height],
[crop_width, img_width, 0, crop_height],
[crop_width, img_width, crop_height, img_height],
]:
feature = img[x_start: x_end, y_start:y_end, :]
for feat_trans in [feature, np.rollaxis(feature, 1, 0)]:
for [x_step, y_step] in [[1, 1], [-1, 1], [1, -1], [-1, -1]]:
feature_w_padding = cv2.copyMakeBorder(
feat_trans[::x_step, ::y_step, :],
pad, x_width - pad - feat_trans.shape[0],
pad, x_height - pad - feat_trans.shape[1],
cv2.BORDER_REFLECT_101)
yield feat_trans.shape, feature_w_padding
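# Illustrative sketch (not part of the original script): consuming the generator
# yields the 32 padded crops per image (4 quarters * 2 transpositions * 4 flips);
# 'img', 'H' and the image size below are hypothetical.
# >>> crops = list(test_input(img, [3348, 3396], H))
# >>> len(crops)
# 32
# >>> crop_shape, padded = crops[0]
# >>> padded.shape[:2] == (H['x_width'], H['x_height'])
# True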
def pred_for_each_quarter(sess, img_in, pred, img_data, H):
'''
Run the network on every padded crop produced by test_input.
:param sess: active tf.Session with restored weights.
:param img_in: input placeholder tensor.
:param pred: prediction (sigmoid output) tensor.
:param img_data: data_utils.ImageData instance holding the image.
:param H: hyperparameter dict.
:return: (mask_stack, shape_stack), the per-crop predictions and crop shapes.
'''
num_channel = H['num_channel']
x_width = H['x_width']
x_height = H['x_height']
batch_size = H['batch_size']
mask_stack, shape_stack = [], []
for feat_shape, img in test_input(
img_data.train_feature, img_data.image_size, H):
predictions, = sess.run(
[pred],
feed_dict={img_in: np.reshape(img,
[batch_size,
x_width,
x_height,
num_channel])})
mask_stack.append(predictions)
shape_stack.append(feat_shape)
return mask_stack, shape_stack
def stitch_mask(img_stack, img_size, feat_shape, H):
'''
img_stack is the stack of pixel-wise predictions for the crops produced by
test_input. This function reverts the reflections and rotations and
stitches the 4 quarters back together.
:param img_stack: list of per-crop prediction arrays.
:param img_size: [img_width, img_height] of the original image.
:param feat_shape: list of crop shapes returned by pred_for_each_quarter.
:param H: hyperparameter dict.
:return: binary mask of shape [img_width, img_height].
'''
mask = np.zeros([8, img_size[0], img_size[1]])
[img_width, img_height] = img_size
[crop_width, crop_height] = H['crop_size']
pad = H['pad']
idx = 0
for [x_start, x_end, y_start, y_end] in [
[0, crop_width, 0, crop_height],
[0, crop_width, crop_height, img_height],
[crop_width, img_width, 0, crop_height],
[crop_width, img_width, crop_height, img_height],
]:
quarter = 0
for feat_trans in range(2):
for [x_step, y_step] in [[1, 1], [-1, 1], [1, -1], [-1, -1]]:
img_stack[idx] = img_stack[idx] \
[pad: pad + feat_shape[idx][0],
pad: pad + feat_shape[idx][1]]
img_stack[idx] = img_stack[idx][::x_step, ::y_step]
if feat_trans == 1:
img_stack[idx] = np.rollaxis(img_stack[idx], 1, 0)
mask[quarter, x_start: x_end, y_start: y_end] = img_stack[idx]
quarter += 1
idx += 1
return np.squeeze((np.mean(mask, axis=0) > 0.5).astype(np.int))
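# Illustrative sketch (not part of the original script): the final mask is the
# per-pixel arithmetic mean over the 8 transformed predictions of each quarter,
# thresholded at 0.5, i.e. a soft vote. For a single pixel:
# >>> preds = np.array([0.9, 0.8, 0.7, 0.6, 0.4, 0.3, 0.9, 0.8])
# >>> int(np.mean(preds) > 0.5)  # mean is 0.675
# 1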
if __name__ == '__main__':
hypes = './hypes/hypes.json'
with open(hypes, 'r') as f:
H = simplejson.load(f)
H['batch_size'] = 1
H['pad'] = 100
H['x_width'] = 1920
H['x_height'] = 1920
H['print_iter'] = 100
H['save_iter'] = 500
H['crop_size'] = [1700, 1700]
print_iter = H['print_iter']
num_channel = H['num_channel']
x_width = H['x_width']
x_height = H['x_height']
batch_size = H['batch_size']
class_type = H['class_type']
pad = H['pad']
class_type = H['class_type']
log_dir = H['log_dir']
save_iter = H['save_iter']
# Crop area for each inference; this is limited by the memory of a K80 GPU.
[crop_width, crop_height] = H['crop_size']
img_in = tf.placeholder(dtype=tf.float32,
shape=[batch_size, x_width, x_height, 16])
logits, pred = train.build_pred(img_in, H, 'test')
sys.stdout.write('\n')
sys.stdout.write('#' * 80 + '\n')
sys.stdout.write("Preparing submission file for class type {}".\
format(class_type).ljust(55, '#').rjust(80, '#') + '\n')
sys.stdout.write('#' * 80 + '\n')
sys.stdout.write('\n')
sys.stdout.flush()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
saver = tf.train.Saver()
df = pd.read_csv('data/sample_submission.csv')
if not os.path.exists('./submission'):
os.makedirs('./submission')
with tf.Session(config=config) as sess:
saver.restore(sess, save_path='log_dir/path_to_ckpt/ckpt/ckpt-9000')
start_time = time.time()
sys.stdout.write('\n')
for idx, row in df.iterrows():
if row[1] == class_type + 1:
img_id = data_utils.test_IDs_dict_r[row[0]]
img_data = data_utils.ImageData(img_id, phase='test')
img_data.load_image()
img_data.create_train_feature()
mask_stack, shape_stack = pred_for_each_quarter(
sess, img_in, pred, img_data, H)
mask = stitch_mask(mask_stack, img_data.image_size, shape_stack, H)
polygons = data_utils.mask_to_polygons(
mask=mask, img_id=img_id, test=True, epsilon=1)
df.iloc[idx, 2] = \
wkt.dumps(polygons) if len(polygons) else 'MULTIPOLYGON EMPTY'
if idx % print_iter == 0:
str1 = 'Working on Image No. {} Class {}: '.format(idx, class_type)
str2 = 'Time / image: {0:.2f} (mins); '. \
format((time.time() - start_time) / 60. / print_iter \
if idx else 0)
sys.stdout.write(str1 + str2 + '\n')
sys.stdout.flush()
start_time = time.time()
# Save some intermediate results in case of interruption.
if idx % save_iter == 0:
df.to_csv(
os.path.join('.','submission/class_{}.csv'.format(class_type)),
index=False)
sys.stdout.write('\n')
print(df.head())
df.to_csv(
os.path.join('.', 'submission/class_{}.csv'.format(class_type)),
index=False)
|
|
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import helpers
from intel import custom_resource, uninstall, third_party
from kubernetes.client.rest import ApiException as K8sApiException
import os
import pytest
import tempfile
from unittest.mock import patch, MagicMock
FAKE_REASON = "fake reason"
FAKE_MESSAGE = "{\"message\":\"fake message\"}"
PATCH_NODE_STATUS = 'intel.discover.patch_k8s_node_status'
KUBERNETES_LOAD_INCLUSTER = 'kubernetes.config.load_incluster_config'
KUBERNETES_CLIENT_EXTENSIONS = 'kubernetes.client.ExtensionsV1beta1Api'
CMK_NODEREPORT_REMOVED = "\"cmk-nodereport\" for node \"{}\" removed."
REASON_NOT_FOUND = "{\"reason\":\"NotFound\"}"
WRONG_REASON = "{\"reason\":\"WrongReason\"}"
DISCOVER_PATCH_K8S_NODE = 'intel.discover.patch_k8s_node'
NON_EXISTANT_MGS = "{\"message\":\"nonexistant\"}"
MATADATA_LABELS_CMK_NODE = '/metadata/labels/cmk.intel.com~1cmk-node'
NODEREPORT_REMOVED = "\"NodeReport\" for node \"{}\" removed."
class FakeHTTPResponse:
def __init__(self, status=None, reason=None, data=None):
self.status = status
self.reason = reason
self.data = data
def getheaders(self):
return {"fakekey": "fakeval"}
def get_expected_log_error(err_msg, http_response):
return "{}: ({})\n" \
"Reason: {}\n" \
"HTTP response headers: {}\n" \
"HTTP response body: {}\n".format(
err_msg,
str(http_response.status),
http_response.reason,
str(http_response.getheaders()),
http_response.data)
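# Illustrative sketch (not part of the original tests): the helper reproduces the
# log format asserted against in the tests below, e.g.:
# >>> resp = FakeHTTPResponse(500, "fake reason", "fake body")
# >>> print(get_expected_log_error("Some error", resp))
# Some error: (500)
# Reason: fake reason
# HTTP response headers: {'fakekey': 'fakeval'}
# HTTP response body: fake body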
def test_uninstall_remove_node_cmk_oir_failure(caplog):
fake_http_resp = FakeHTTPResponse(500, FAKE_REASON,
FAKE_MESSAGE)
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
with patch(PATCH_NODE_STATUS,
MagicMock(side_effect=fake_api_exception)):
with pytest.raises(SystemExit):
uninstall.remove_node_cmk_oir()
patch_path = "/status/capacity/" \
"pod.alpha.kubernetes.io~1opaque-int-resource-cmk"
exp_err = "Aborting uninstall: " \
"Exception when removing OIR \"{}\"".format(patch_path)
exp_log_err = get_expected_log_error(exp_err, fake_http_resp)
caplog_tuple = caplog.record_tuples
assert caplog_tuple[-1][2] == exp_log_err
@patch('intel.k8s.get_kube_version', MagicMock(return_value="v1.6.3"))
def test_remove_all_report_tpr_success(caplog):
mock = MagicMock()
mock.remove.return_value = 0
with patch(KUBERNETES_LOAD_INCLUSTER,
MagicMock(return_value=0)), \
patch(KUBERNETES_CLIENT_EXTENSIONS,
MagicMock(return_value=0)), \
patch.object(third_party.ThirdPartyResourceType,
'create',
MagicMock(return_value=mock)):
uninstall.remove_all_report()
caplog_tuple = caplog.record_tuples
assert caplog_tuple[-1][2] == "\"Reconcilereport\" for node \"{}\" " \
"removed.".format(os.getenv("NODE_NAME"))
assert caplog_tuple[-3][2] == "\"Nodereport\" for node \"{}\" " \
"removed.".format(os.getenv("NODE_NAME"))
@patch('intel.k8s.get_kube_version', MagicMock(return_value="v1.7.4"))
def test_remove_all_report_crd_success(caplog):
mock = MagicMock()
mock.remove.return_value = 0
with patch(KUBERNETES_LOAD_INCLUSTER,
MagicMock(return_value=0)), \
patch(KUBERNETES_CLIENT_EXTENSIONS,
MagicMock(return_value=0)), \
patch.object(custom_resource.CustomResourceDefinitionType,
'create',
MagicMock(return_value=mock)):
uninstall.remove_all_report()
caplog_tuple = caplog.record_tuples
assert caplog_tuple[-1][2] == "\"cmk-reconcilereport\" for node " \
"\"{}\" removed."\
.format(os.getenv("NODE_NAME"))
assert caplog_tuple[-3][2] == \
CMK_NODEREPORT_REMOVED.format(os.getenv("NODE_NAME"))
def test_remove_report_tpr_success(caplog):
fake_tpr_report = MagicMock()
fake_tpr_report.remove.return_value = 0
with patch(KUBERNETES_LOAD_INCLUSTER,
MagicMock(return_value=0)),\
patch(KUBERNETES_CLIENT_EXTENSIONS,
MagicMock(return_value=0)), \
patch.object(third_party.ThirdPartyResourceType, 'create',
MagicMock(return_value=fake_tpr_report)):
uninstall.remove_report_tpr("NodeReport")
caplog_tuple = caplog.record_tuples
assert caplog_tuple[-1][2] == \
NODEREPORT_REMOVED.format(os.getenv("NODE_NAME"))
# Removal succeeds because the report does not exist
def test_remove_report_tpr_success2(caplog):
fake_http_resp = FakeHTTPResponse(500, FAKE_MESSAGE,
REASON_NOT_FOUND)
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
fake_tpr_report = MagicMock()
fake_tpr_report.remove.side_effect = fake_api_exception
with patch(KUBERNETES_LOAD_INCLUSTER,
MagicMock(return_value=0)),\
patch(KUBERNETES_CLIENT_EXTENSIONS,
MagicMock(return_value=0)), \
patch.object(third_party.ThirdPartyResourceType, 'create',
MagicMock(return_value=fake_tpr_report)):
uninstall.remove_report_tpr("NodeReport")
caplog_tuple = caplog.record_tuples
assert \
caplog_tuple[-2][2] == "\"NodeReport\" for node \"{}\" does" \
" not exist.".format(os.getenv("NODE_NAME"))
assert \
caplog_tuple[-1][2] == \
NODEREPORT_REMOVED.format(os.getenv("NODE_NAME"))
def test_remove_report_tpr_failure(caplog):
fake_http_resp = FakeHTTPResponse(500, FAKE_MESSAGE, WRONG_REASON)
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
fake_tpr_report = MagicMock()
fake_tpr_report.remove.side_effect = fake_api_exception
with patch(KUBERNETES_LOAD_INCLUSTER,
MagicMock(return_value=0)),\
patch(KUBERNETES_CLIENT_EXTENSIONS,
MagicMock(return_value=0)), \
patch.object(third_party.ThirdPartyResourceType, 'create',
MagicMock(return_value=fake_tpr_report)):
with pytest.raises(SystemExit):
uninstall.remove_report_tpr("NodeReport")
caplog_tuple = caplog.record_tuples
exp_err = "Aborting uninstall: " \
"Exception when removing third party resource \"NodeReport\""
exp_log_err = get_expected_log_error(exp_err, fake_http_resp)
assert caplog_tuple[-1][2] == exp_log_err
def test_remove_report_crd_success(caplog):
fake_crd_report = MagicMock()
fake_crd_report.remove.return_value = 0
with patch(KUBERNETES_LOAD_INCLUSTER,
MagicMock(return_value=0)),\
patch(KUBERNETES_CLIENT_EXTENSIONS,
MagicMock(return_value=0)), \
patch.object(custom_resource.CustomResourceDefinitionType,
'create',
MagicMock(return_value=fake_crd_report)):
uninstall.remove_report_crd("cmk-nodereport", ["cmk-nr"])
caplog_tuple = caplog.record_tuples
assert caplog_tuple[-1][2] == \
CMK_NODEREPORT_REMOVED.format(os.getenv("NODE_NAME"))
# Removal succeeds because the report does not exist
def test_remove_report_crd_success2(caplog):
fake_http_resp = FakeHTTPResponse(500, FAKE_MESSAGE,
REASON_NOT_FOUND)
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
fake_crd_report = MagicMock()
fake_crd_report.remove.side_effect = fake_api_exception
with patch(KUBERNETES_LOAD_INCLUSTER,
MagicMock(return_value=0)),\
patch(KUBERNETES_CLIENT_EXTENSIONS,
MagicMock(return_value=0)), \
patch.object(custom_resource.CustomResourceDefinitionType,
'create',
MagicMock(return_value=fake_crd_report)):
uninstall.remove_report_crd("cmk-nodereport", ["cmk-nr"])
caplog_tuple = caplog.record_tuples
assert \
caplog_tuple[-2][2] == "\"cmk-nodereport\" for node \"{}\" does "\
"not exist.".format(os.getenv("NODE_NAME"))
assert \
caplog_tuple[-1][2] == \
CMK_NODEREPORT_REMOVED.format(os.getenv("NODE_NAME"))
def test_remove_report_crd_failure(caplog):
fake_http_resp = FakeHTTPResponse(500, FAKE_MESSAGE,
WRONG_REASON)
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
fake_crd_report = MagicMock()
fake_crd_report.remove.side_effect = fake_api_exception
with patch(KUBERNETES_LOAD_INCLUSTER,
MagicMock(return_value=0)),\
patch(KUBERNETES_CLIENT_EXTENSIONS,
MagicMock(return_value=0)), \
patch.object(custom_resource.CustomResourceDefinitionType,
'create',
MagicMock(return_value=fake_crd_report)):
with pytest.raises(SystemExit):
uninstall.remove_report_crd("cmk-nodereport", ["cmk-nr"])
caplog_tuple = caplog.record_tuples
exp_err = "Aborting uninstall: " \
"Exception when removing custom resource definition " \
"\"cmk-nodereport\""
exp_log_err = get_expected_log_error(exp_err, fake_http_resp)
assert caplog_tuple[-1][2] == exp_log_err
def test_uninstall_remove_node_taint_failure1(caplog):
fake_http_resp = FakeHTTPResponse(500, FAKE_REASON, "fake body")
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
node_name = os.getenv("NODE_NAME")
with patch('intel.discover.get_k8s_node',
MagicMock(side_effect=fake_api_exception)):
with pytest.raises(SystemExit):
uninstall.remove_node_taint()
exp_err = "Aborting uninstall: Exception when getting the " \
"node \"{}\" obj".format(node_name)
exp_log_err = get_expected_log_error(exp_err, fake_http_resp)
caplog_tuple = caplog.record_tuples
assert caplog_tuple[-1][2] == exp_log_err
@patch('intel.k8s.get_kube_version', MagicMock(return_value="v1.5.2"))
def test_uninstall_remove_node_taint_failure2(caplog):
fake_node_resp = {
"metadata": {
"annotations": {
}
}
}
fake_http_resp = FakeHTTPResponse(500, FAKE_REASON, "fake body")
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
with patch('intel.discover.get_k8s_node',
MagicMock(return_value=fake_node_resp)), \
patch(DISCOVER_PATCH_K8S_NODE,
MagicMock(side_effect=fake_api_exception)):
with pytest.raises(SystemExit):
uninstall.remove_node_taint()
patch_path = '/metadata/annotations/' \
'scheduler.alpha.kubernetes.io~1taints'
exp_err = "Aborting uninstall: " \
"Exception when removing taint \"{}\"".format(patch_path)
exp_log_err = get_expected_log_error(exp_err, fake_http_resp)
caplog_tuple = caplog.record_tuples
assert caplog_tuple[-1][2] == exp_log_err
# Test removing a non-existent label
def test_uninstall_remove_node_label_success(caplog):
fake_http_resp = FakeHTTPResponse(500, FAKE_REASON,
NON_EXISTANT_MGS)
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
with patch(DISCOVER_PATCH_K8S_NODE,
MagicMock(side_effect=fake_api_exception)):
uninstall.remove_node_label()
caplog_tuple = caplog.record_tuples
patch_path = MATADATA_LABELS_CMK_NODE
exp_str = "Removed node label \"{}\".".format(patch_path)
exp_str2 = "Label \"{}\" does not exist.".format(patch_path)
assert caplog_tuple[-2][2] == exp_str2
assert caplog_tuple[-1][2] == exp_str
# Test removing an existing label
def test_uninstall_remove_node_label_success2(caplog):
with patch(DISCOVER_PATCH_K8S_NODE,
MagicMock(return_value=0)):
uninstall.remove_node_label()
caplog_tuple = caplog.record_tuples
patch_path = MATADATA_LABELS_CMK_NODE
exp_str = "Removed node label \"{}\".".format(patch_path)
assert caplog_tuple[-1][2] == exp_str
def test_uninstall_remove_node_label_failure(caplog):
fake_http_resp = FakeHTTPResponse(500, FAKE_REASON,
FAKE_MESSAGE)
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
with patch(DISCOVER_PATCH_K8S_NODE,
MagicMock(side_effect=fake_api_exception)):
with pytest.raises(SystemExit):
uninstall.remove_node_label()
patch_path = MATADATA_LABELS_CMK_NODE
exp_err = "Aborting uninstall: Exception when removing node label" \
" \"{}\"".format(patch_path)
exp_log_err = get_expected_log_error(exp_err, fake_http_resp)
caplog_tuple = caplog.record_tuples
assert caplog_tuple[-1][2] == exp_log_err
# Test removing a non-existent OIR
def test_uninstall_remove_node_oir_success(caplog):
fake_http_resp = FakeHTTPResponse(500, FAKE_REASON,
NON_EXISTANT_MGS)
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
with patch(PATCH_NODE_STATUS,
MagicMock(side_effect=fake_api_exception)):
uninstall.remove_node_cmk_oir()
caplog_tuple = caplog.record_tuples
patch_path = '/status/capacity/pod.alpha.kubernetes.' \
'io~1opaque-int-resource-cmk'
assert \
caplog_tuple[-2][2] == "CMK oir \"{}\" does not " \
"exist.".format(patch_path)
assert \
caplog_tuple[-1][2] == "Removed node oir " \
"\"{}\".".format(patch_path)
# Test removing an existing OIR
def test_uninstall_remove_node_oir_success2(caplog):
with patch(PATCH_NODE_STATUS,
MagicMock(return_value=0)):
uninstall.remove_node_cmk_oir()
caplog_tuple = caplog.record_tuples
patch_path = '/status/capacity/pod.alpha.kubernetes.' \
'io~1opaque-int-resource-cmk'
assert \
caplog_tuple[-1][2] == "Removed node oir " \
"\"{}\".".format(patch_path)
def test_remove_binary_sucess(caplog):
temp_dir = tempfile.mkdtemp()
fake_binary_path = os.path.join(temp_dir, "cmk")
helpers.execute(
"touch",
[fake_binary_path]
)
uninstall.remove_binary(temp_dir)
with pytest.raises(Exception):
helpers.execute(
"stat",
[fake_binary_path]
)
caplog_tuple = caplog.record_tuples
exp_log = "cmk binary from \"{}\" removed successfully.".format(
temp_dir)
assert caplog_tuple[-1][2] == exp_log
def test_remove_binary_failure(caplog):
temp_dir = tempfile.mkdtemp()
fake_binary_path = os.path.join(temp_dir, "cmk_wrong_name")
helpers.execute(
"touch",
[fake_binary_path]
)
uninstall.remove_binary(temp_dir)
caplog_tuple = caplog.record_tuples
exp_log = "Could not found cmk binary in \"{}\"."\
.format(temp_dir)
exp_log2 = "Wrong path or file has already been removed."
assert caplog_tuple[-2][2] == exp_log
assert caplog_tuple[-1][2] == exp_log2
def test_delete_cmk_pod_failure(caplog):
fake_http_resp = FakeHTTPResponse(500, FAKE_MESSAGE,
WRONG_REASON)
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
pod_base_name = "cmk-some-cmd-pod"
with patch('intel.k8s.delete_pod',
MagicMock(side_effect=fake_api_exception)):
with pytest.raises(SystemExit):
uninstall.delete_cmk_pod(pod_base_name,
postfix=str(os.getenv("NODE_NAME")),
namespace="default")
caplog_tuple = caplog.record_tuples
exp_err = "Aborting uninstall: " \
"Exception when removing pod \"{}-{}\""\
.format(pod_base_name, str(os.getenv("NODE_NAME")))
exp_log_err = get_expected_log_error(exp_err, fake_http_resp)
assert caplog_tuple[-1][2] == exp_log_err
@patch('intel.k8s.get_kube_version', MagicMock(return_value="v1.8.0"))
def test_delete_cmk_pod_failure2(caplog):
fake_http_resp = FakeHTTPResponse(500, FAKE_MESSAGE,
WRONG_REASON)
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
pod_base_name = "cmk-some-cmd-ds"
with patch('intel.k8s.delete_ds',
MagicMock(side_effect=fake_api_exception)):
with pytest.raises(SystemExit):
uninstall.delete_cmk_pod(pod_base_name,
postfix=str(os.getenv("NODE_NAME")),
namespace="default")
caplog_tuple = caplog.record_tuples
exp_err = "Aborting uninstall: " \
"Exception when removing pod \"{}-{}\""\
.format(pod_base_name, str(os.getenv("NODE_NAME")))
exp_log_err = get_expected_log_error(exp_err, fake_http_resp)
assert caplog_tuple[-1][2] == exp_log_err
@patch('intel.k8s.get_kube_version', MagicMock(return_value="v1.8.0"))
def test_delete_cmk_pod_success(caplog):
fake_http_resp = FakeHTTPResponse(500, FAKE_MESSAGE,
REASON_NOT_FOUND)
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
pod_base_name = "cmk-some-cmd-ds"
with patch('intel.k8s.delete_ds',
MagicMock(side_effect=fake_api_exception)):
uninstall.delete_cmk_pod(pod_base_name,
postfix=str(os.getenv("NODE_NAME")),
namespace="default")
caplog_tuple = caplog.record_tuples
assert \
caplog_tuple[-2][2] == "\"{}-{}\" does not exist".format(
pod_base_name, str(os.getenv("NODE_NAME")))
assert \
caplog_tuple[-1][2] == "\"{}-{}\" deleted".format(
pod_base_name, str(os.getenv("NODE_NAME")))
def test_delete_cmk_pod_success2(caplog):
fake_http_resp = FakeHTTPResponse(500, FAKE_MESSAGE,
REASON_NOT_FOUND)
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
pod_base_name = "cmk-some-cmd-pod"
with patch('intel.k8s.delete_pod',
MagicMock(side_effect=fake_api_exception)):
uninstall.delete_cmk_pod(pod_base_name,
postfix=str(os.getenv("NODE_NAME")),
namespace="default")
caplog_tuple = caplog.record_tuples
assert caplog_tuple[-2][2] == "\"{}-{}\" does not exist".format(
pod_base_name, str(os.getenv("NODE_NAME")))
assert caplog_tuple[-1][2] == "\"{}-{}\" deleted".format(
pod_base_name, str(os.getenv("NODE_NAME")))
@patch('intel.k8s.get_kube_version', MagicMock(return_value="v1.10.0"))
def test_remove_resource_tracking_er_removed(caplog):
mock = MagicMock()
with patch('intel.uninstall.remove_node_cmk_er', mock):
uninstall.remove_resource_tracking()
assert mock.called
@patch('intel.k8s.get_kube_version', MagicMock(return_value="v1.6.0"))
def test_remove_resource_tracking_oir_removed(caplog):
mock = MagicMock()
with patch('intel.uninstall.remove_node_cmk_oir', mock):
uninstall.remove_resource_tracking()
assert mock.called
@patch('intel.k8s.get_kube_version', MagicMock(return_value="v1.8.0"))
def test_remove_resource_tracking_unsupported(caplog):
uninstall.remove_resource_tracking()
caplog_tuple = caplog.record_tuples
assert caplog_tuple[-1][2] == "Unsupported Kubernetes version"
def test_remove_node_cmk_er_success(caplog):
with patch(PATCH_NODE_STATUS, MagicMock()):
uninstall.remove_node_cmk_er()
caplog_tuple = caplog.record_tuples
assert caplog_tuple[-1][2] == "Removed node ERs"
def test_remove_node_cmk_er_failure(caplog):
fake_http_resp = FakeHTTPResponse(500, "{\"reason\":\"fake reason\"}",
NON_EXISTANT_MGS)
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
with patch(PATCH_NODE_STATUS,
MagicMock(side_effect=fake_api_exception)):
uninstall.remove_node_cmk_er()
caplog_tuple = caplog.record_tuples
assert caplog_tuple[-2][2] == "CMK ER does not exist."
assert caplog_tuple[-1][2] == "Removed node ERs"
def test_remove_node_cmk_er_failure2(caplog):
fake_http_resp = FakeHTTPResponse(500, "{\"reason\":\"fake reason\"}",
FAKE_MESSAGE)
fake_api_exception = K8sApiException(http_resp=fake_http_resp)
with patch(PATCH_NODE_STATUS,
MagicMock(side_effect=fake_api_exception)):
with pytest.raises(SystemExit):
uninstall.remove_node_cmk_er()
caplog_tuple = caplog.record_tuples
exp_err = "Aborting uninstall: Exception when removing ER: " \
"{}".format(fake_api_exception)
assert caplog_tuple[-1][2] == exp_err
def test_check_remove_conf_dir_failure2(caplog):
temp_dir = tempfile.mkdtemp()
conf_dir = os.path.join(temp_dir, "ok")
fake_exception = Exception('fake')
with patch('intel.config.Config', MagicMock(side_effect=fake_exception)):
with pytest.raises(SystemExit):
uninstall.check_remove_conf_dir(conf_dir)
caplog_tuple = caplog.record_tuples
exp_err = "Aborting uninstall: Unable to read the CMK configuration " \
"directory at \"{}\": {}.".format(conf_dir, fake_exception)
assert caplog_tuple[-1][2] == exp_err
|
|
# Lint as: python2, python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for deformation augmentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from multidim_image_augmentation import augmentation_ops
def vectorized_random_uniform(minvals, maxvals, name=None):
"""creates a tensor with uniform random values.
Args:
minvals: 1-D Tensor with minimum values.
maxvals: 1-D Tensor with maximum values.
name: (optional) Name for the operation.
Returns:
1-D Tensor with uniform random values.
"""
with tf.variable_scope(name, "vectorized_random_uniform", [minvals, maxvals]):
ranges = tf.subtract(maxvals, minvals, name="ranges")
samples = tf.random.uniform(
ranges.shape, dtype=ranges.dtype, name="samples")
samples_scaled = tf.multiply(ranges, samples, name="samples_scaled")
samples_scaled_offset = tf.add(samples_scaled,
minvals,
name="samples_scaled_offset")
return samples_scaled_offset
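# Illustrative sketch (not part of the original module): drawing per-component
# uniform samples, e.g. a rotation angle in [-0.1, 0.1] and a scale in [0.9, 1.1];
# the values are hypothetical.
# >>> angle_and_scale = vectorized_random_uniform(
# ...     minvals=tf.constant([-0.1, 0.9]), maxvals=tf.constant([0.1, 1.1]))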
def create_centered_identity_transformation_field(shape, spacings):
"""Create 2D or 3D centered identity transformation field.
Args:
shape: 2- or 3-element list. The shape of the transformation field.
spacings: 2- or 3-element list. The spacings of the transformation field.
Returns:
2D case: 3-D Tensor (x0, x1, comp) describing a 2D vector field
3D case: 4-D Tensor (x0, x1, x2, comp) describing a 3D vector field
"""
coords = []
for i, size in enumerate(shape):
spacing = spacings[i]
coords.append(tf.linspace(
-(size - 1) / 2 * spacing,
(size - 1) / 2 * spacing,
size))
permutation = np.roll(np.arange(len(coords) + 1), -1)
return tf.transpose(tf.meshgrid(*coords, indexing="ij"), permutation)
def create_control_grid_for_cubic_interp(transformed_image_shape,
transformed_image_spacings_um,
control_grid_spacings_pix):
"""Create a control grid with optimal size for cubic interpolation.
The control grid will have two extra points in every direction to allow an
interpolation without border artefacts.
Args:
transformed_image_shape: 2- or 3-element list describing the shape of the
target image.
transformed_image_spacings_um: 2- or 3-element tensor describing the spacing
of the target image.
control_grid_spacings_pix: 2- or 3-element list describing the control grid
spacings.
Returns:
2D case: 3-D Tensor (x0, x1, comp) describing a 2D vector field.
3D case: 4-D Tensor (x0, x1, x2, comp) describing a 3D vector field.
"""
grid_shape = np.zeros(len(transformed_image_shape), dtype=int)
for comp in range(len(transformed_image_shape)):
spacing_pix = float(control_grid_spacings_pix[comp])
num_elem = float(transformed_image_shape[comp])
if num_elem % 2 == 0:
grid_shape[comp] = np.ceil((num_elem - 1) / (2 * spacing_pix) +
0.5) * 2 + 2
else:
grid_shape[comp] = np.ceil((num_elem - 1) / (2 * spacing_pix)) * 2 + 3
control_grid_spacings_um = tf.multiply(
tf.constant(control_grid_spacings_pix, dtype=tf.float32),
transformed_image_spacings_um)
control_grid = create_centered_identity_transformation_field(
grid_shape, control_grid_spacings_um)
control_grid.set_shape(np.append(grid_shape, len(control_grid_spacings_pix)))
return control_grid
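# Illustrative worked example (not part of the original module), assuming a 2D
# target image of 100 x 101 pixels and a control grid spacing of 10 pixels:
#   comp 0 (even, 100 px): ceil(99 / 20 + 0.5) * 2 + 2 = 6 * 2 + 2 = 14 points
#   comp 1 (odd,  101 px): ceil(100 / 20) * 2 + 3      = 5 * 2 + 3 = 13 points
# i.e. the grid covers the image plus the two extra points per direction needed
# for artefact-free cubic interpolation.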
def create_2x2_rotation_matrix(radians):
"""Creates a 2D rotation matrix.
For an angle a this is
[[cos(a), -sin(a)],
[sin(a), cos(a)]]
Args:
radians: rotation angle in radians.
Returns:
2-D Tensor with 2x2 elements, the rotation matrix.
"""
rotation = [[tf.cos(radians), -tf.sin(radians)],
[tf.sin(radians), tf.cos(radians)]]
rotation = tf.convert_to_tensor(rotation, name="rotation_matrix")
return rotation
def create_2x2_shearing_matrix(shearing_coefs):
"""Creates a 2D shearing matrix.
Args:
shearing_coefs: 2-element list with the shearing coefficients
(off-diagonal elements of the matrix: s01, s10) to create the matrix
[[ 1 , s01],
[s10, 1 ]]
Returns:
2-D Tensor with 2x2 elements, the shearing matrix
"""
shearing = [[1, shearing_coefs[0]], [shearing_coefs[1], 1]]
shearing = tf.convert_to_tensor(shearing, name="shearing_matrix")
return shearing
def create_2d_deformation_field(
raw_image_center_pos_pix, raw_image_element_size_um,
net_input_spatial_shape, net_input_element_size_um,
control_grid_spacings_pix, deformations_magnitudes_um, rotation_angle,
scale_factors, mirror_factors, shearing_coefs, cropping_offset_pix):
"""Creates a 2D deformation field.
Creates a dense 2D deformation field for affine and elastic deformations. The
created 2D vector field (represented as a 3-D Tensor with (x0, x1, comp))
has the same spatial shape as the output (net_input) image and contains the
absolute positions of the corresponding pixels in the input (raw) image. The
process of creating the deformation field has four steps:
1. Setup a grid of control points.
2. Add a random offset to each control point drawn from a normal
distribution to model the random elastic deformation.
3. Apply the affine transformation to the control points.
4. Compute a dense transformation field using cubic bspline interpolation.
A more detailed description of the process can be found in the doc directory.
Args:
raw_image_center_pos_pix: 1-D Tensor with 2 elements of type tf.float32. The
position of the center of the raw image in pixels from the upper, left
corner.
raw_image_element_size_um: 1-D Tensor with 2 elements of type tf.float32.
The pixel spacing (in micrometers) of the raw image.
net_input_spatial_shape: List with 2 elements. The shape of the image that
will be fed into the network (excluding channel dimension).
net_input_element_size_um: Tensor with 2 elements. The pixel spacing (in
micrometers) of the image that will be fed into the network.
control_grid_spacings_pix: List with 2 elements. The control grid spacing in
pixels.
deformations_magnitudes_um: 1-D Tensor with 2 elements. The magnitudes for
the random deformations. Will set the standard deviation (in micrometers)
of a random normal distribution from which deformations will be generated.
rotation_angle: Rotation angle in radians as a float (or single element
Tensor of floating point type). In the absence of mirroring, a positive
angle produces a counter-clockwise rotation of image contents.
scale_factors: 1-D Tensor with 2 elements of type tf.float32. Scale factors
in x0, x1 directions.
mirror_factors: 1-D Tensor with 2 elements. Mirror factors in x0, x1
directions. Each factor should be 1 or -1.
shearing_coefs: 1-D Tensor with 2 elements of type tf.float32. The shearing
coefficients (s01, s10) to create the shearing matrix:
[[ 1 , s01], [s10, 1]].
cropping_offset_pix: 1-D Tensor with 2 elements of type tf.float32. Cropping
position (center of the cropped patch in the raw image) in pixels relative
to the image origin (the origin is specified above as
raw_image_center_pos_pix).
Returns:
3-D Tensor (x0, x1, comp) containing a 2D vector field.
"""
# Set up the centered control grid for identity transform in real world
# coordinates.
control_grid = create_control_grid_for_cubic_interp(
transformed_image_shape=net_input_spatial_shape,
transformed_image_spacings_um=net_input_element_size_um,
control_grid_spacings_pix=control_grid_spacings_pix)
# Add random deformation.
control_grid += deformations_magnitudes_um * tf.random.normal(
shape=control_grid.shape)
# Apply affine transformation and transform units to raw image pixels.
scale_to_pix = 1. / raw_image_element_size_um
affine = tf.matmul(
create_2x2_rotation_matrix(rotation_angle),
tf.diag(scale_factors * tf.to_float(mirror_factors) * scale_to_pix))
affine_shearing = tf.matmul(affine,
create_2x2_shearing_matrix(shearing_coefs))
control_grid = tf.reshape(
tf.matmul(tf.reshape(control_grid, [-1, 2]), affine_shearing),
control_grid.get_shape().as_list())
# Translate to cropping position.
control_grid += raw_image_center_pos_pix + cropping_offset_pix
# Create the dense deformation field for the image.
dense_deformation_field = augmentation_ops.cubic_interpolation2d(
control_grid, control_grid_spacings_pix, net_input_spatial_shape)
return dense_deformation_field
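# Illustrative sketch (not part of the original module): a call for a 300 x 300
# network input sampled around the centre of a larger raw image; every value
# below is hypothetical.
# >>> field = create_2d_deformation_field(
# ...     raw_image_center_pos_pix=tf.constant([500., 500.]),
# ...     raw_image_element_size_um=tf.constant([1., 1.]),
# ...     net_input_spatial_shape=[300, 300],
# ...     net_input_element_size_um=tf.constant([1., 1.]),
# ...     control_grid_spacings_pix=[50, 50],
# ...     deformations_magnitudes_um=tf.constant([5., 5.]),
# ...     rotation_angle=0.1,
# ...     scale_factors=tf.constant([1., 1.]),
# ...     mirror_factors=tf.constant([1, 1]),
# ...     shearing_coefs=tf.constant([0., 0.]),
# ...     cropping_offset_pix=tf.constant([0., 0.]))
# >>> # field is a (300, 300, 2) tensor of absolute raw-image pixel coordinates.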
def create_3x3_rotation_matrix(radians):
"""Creates a 3D rotation matrix.
Args:
radians: 1-D Tensor with 3 elements, (a0, a1, a2) with the 3 rotation
angles in radians, where a0 is the rotation around the x0 axis, etc.
Returns:
2-D Tensor with 3x3 elements, the rotation matrix.
"""
with tf.variable_scope("rotation_dim_0"):
rotation_dim_0 = [[1.0, 0.0, 0.0],
[0.0, tf.cos(radians[0]), -tf.sin(radians[0])],
[0.0, tf.sin(radians[0]), tf.cos(radians[0])]]
rotation_dim_0 = tf.convert_to_tensor(
rotation_dim_0, name="rotation_matrix")
with tf.variable_scope("rotation_dim_1"):
rotation_dim_1 = [[tf.cos(radians[1]), 0.0, tf.sin(radians[1])],
[0.0, 1.0, 0.0],
[-tf.sin(radians[1]), 0.0, tf.cos(radians[1])]]
rotation_dim_1 = tf.convert_to_tensor(
rotation_dim_1, name="rotation_matrix")
with tf.variable_scope("rotation_dim_2"):
rotation_dim_2 = [[tf.cos(radians[2]), -tf.sin(radians[2]), 0.0],
[tf.sin(radians[2]), tf.cos(radians[2]), 0.0],
[0.0, 0.0, 1.0]]
rotation_dim_2 = tf.convert_to_tensor(
rotation_dim_2, name="rotation_matrix")
with tf.variable_scope("rotation"):
rotation = tf.matmul(rotation_dim_0, rotation_dim_1)
rotation = tf.matmul(rotation, rotation_dim_2)
return rotation
def create_3x3_shearing_matrix(shearing_coefs):
"""Creates a 3D shearing matrix.
Args:
shearing_coefs: 6-element list with the shearing coefficients
(off-diagonal elements of the matrix: s01, s02, s10, s12, s20, s21) to
create the matrix
[[ 1 , s01, s02],
[s10, 1 , s12],
[s20, s21, 1 ]]
Returns:
2-D Tensor with 3x3 elements, the shearing matrix.
"""
shearing = [[1., shearing_coefs[0], shearing_coefs[1]],
[shearing_coefs[2], 1., shearing_coefs[3]],
[shearing_coefs[4], shearing_coefs[5], 1.]]
shearing = tf.convert_to_tensor(shearing, name="shearing_matrix")
return shearing
def create_3d_deformation_field(
raw_image_center_pos_pix, raw_image_element_size_um,
net_input_spatial_shape, net_input_element_size_um,
control_grid_spacings_pix, deformations_magnitudes_um, rotation_angles,
scale_factors, mirror_factors, shearing_coefs, cropping_offset_pix):
"""Create a 3D deformation field.
Creates a dense 3D deformation field for affine and elastic deformations. The
created 3D vector field (represented as a 4-D Tensor with (x0, x1, x2, comp))
has the same spatial shape as the output image and contains the absolute
position of the corresponding voxel in the input (raw) image. The process of
creating the deformation field has four steps:
1. Setup a grid of control points
2. Add a random offset to each control point drawn from a normal
distribution to model the random elastic deformation
3. Apply the affine transformation to the control points
4. Compute a dense transformation field using cubic bspline interpolation
A more detailed description of the process can be found in the doc
directory.
Args:
raw_image_center_pos_pix: 1-D Tensor with 3 elements. The position of the
origin in the raw image in pixels from the upper, left, front corner.
raw_image_element_size_um: 1-D Tensor with 3 elements. The pixel spacing
(in micrometers) of the raw image.
net_input_spatial_shape: 1-D Tensor with 3 elements. The shape of the
image that will be fed into the network.
net_input_element_size_um: 1-D Tensor with 3 elements. The pixel spacing
(in micrometers) of the image that will be fed into the network.
control_grid_spacings_pix: 1-D Tensor with 3 elements. The control grid
spacing in pixels.
deformations_magnitudes_um: 1-D Tensor with 3 elements. The magnitudes
for the random deformations, the standard deviation (in micrometers) of a
random normal distribution.
rotation_angles: 1-D Tensor with 3 elements, (a0, a1, a2) with the 3
rotation angles in radians, where a0 is the rotation around the x0 axis,
etc.
scale_factors: 1-D Tensor with 3 elements. Scale factors in x0, x1, and x2
directions.
mirror_factors: 1-D Tensor with 3 elements. Mirror factors in x0, x1, and
x2 direction. Each factor should be 1 or -1.
shearing_coefs: 1-D Tensor with 6 elements. The shearing coefficients
(off-diagonal elements of the matrix: s01, s02, s10, s12, s20, s21) to
create the shearing matrix
[[ 1 , s01, s02],
[s10, 1 , s12],
[s20, s21, 1 ]]
cropping_offset_pix: 1-D Tensor with 3 elements. Cropping position (center
of the cropped patch in the raw image) in pixels relative to the image
origin (the origin is specified above as raw_image_center_pos_pix).
Returns:
4-D Tensor (x0, x1, x2, comp) describing a 3D vector field.
"""
# Set up the centered control grid for identity transform in real world
# coordinates.
control_grid = create_control_grid_for_cubic_interp(
net_input_spatial_shape, net_input_element_size_um,
control_grid_spacings_pix)
# Add random deformation.
control_grid += deformations_magnitudes_um * tf.random.normal(
shape=control_grid.shape)
# Apply affine transformation and transform units to raw image pixels.
scale_to_pix = 1. / raw_image_element_size_um
affine = tf.matmul(
create_3x3_rotation_matrix(rotation_angles),
tf.diag(scale_factors * mirror_factors * scale_to_pix))
affine_shearing = tf.matmul(
affine, create_3x3_shearing_matrix(shearing_coefs))
control_grid = tf.reshape(
tf.matmul(tf.reshape(control_grid, [-1, 3]), affine_shearing),
control_grid.shape)
# Translate to cropping position.
control_grid += raw_image_center_pos_pix + cropping_offset_pix
# Create the dense deformation field for the image.
dense_deformation_field = augmentation_ops.cubic_interpolation3d(
control_grid, control_grid_spacings_pix, net_input_spatial_shape)
return dense_deformation_field
|
|
# UNITTEST.PY
# Copyright (c) 2013 Pilgrim Beart <[email protected]>
#
# Simple unit-tests for hypercat.py
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from nose.tools import *
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from hypercat import hypercat
def test_empty_catalogue():
print("Running unit tests")
print("\nTEST: Create minimal empty Catalogue, and render to a string, minimally, then with pretty-printing")
h = hypercat.Hypercat("")
s = h.asJSONstr()
print(s)
assert_equal(s, """{"catalogue-metadata":[{"rel":"urn:X-hypercat:rels:isContentType","val":"application/vnd.hypercat.catalogue+json"},{"rel":"urn:X-hypercat:rels:hasDescription:en","val":""}],"items":[]}""")
result = h.prettyprint()
print(result)
assert_equal(result, """{
"catalogue-metadata": [
{
"rel": "urn:X-hypercat:rels:isContentType",
"val": "application/vnd.hypercat.catalogue+json"
},
{
"rel": "urn:X-hypercat:rels:hasDescription:en",
"val": ""
}
],
"items": []
}""")
def test_minimal_catalogue():
print("\nTEST: Create a catalogue containing 1 catalogue and 1 resource, held as data")
h = hypercat.Hypercat("CatalogueContainingOneCatalogueAndOneResource")
h2 = hypercat.Hypercat("ChildCatalogue")
print("about to add child catalogue")
h.addItem(h2, "http://FIXMEcat")
r = hypercat.Resource("resource1", "application/vnd.hypercat.sensordata+json")
print("about to add child resource")
h.addItem(r, "http://FIXMEresource")
result = h.asJSON()
print(result)
print(h.prettyprint())
assert_equal(result, {'items': [{'item-metadata': [{'val': 'application/vnd.hypercat.catalogue+json', 'rel': 'urn:X-hypercat:rels:isContentType'}, {'val': 'ChildCatalogue', 'rel': 'urn:X-hypercat:rels:hasDescription:en'}], 'href': 'http://FIXMEcat'}, {'item-metadata': [{'val': 'application/vnd.hypercat.sensordata+json', 'rel': 'urn:X-hypercat:rels:isContentType'}, {'val': 'resource1', 'rel': 'urn:X-hypercat:rels:hasDescription:en'}], 'href': 'http://FIXMEresource'}], 'catalogue-metadata': [{'val': 'application/vnd.hypercat.catalogue+json', 'rel': 'urn:X-hypercat:rels:isContentType'}, {'val': 'CatalogueContainingOneCatalogueAndOneResource', 'rel': 'urn:X-hypercat:rels:hasDescription:en'}]})
def test_two_deep_catalogue():
print("\nTEST: Create a catalogue 2 deep (and output each level)")
h1 = hypercat.Hypercat("Top")
h2 = hypercat.Hypercat("Middle")
h3 = hypercat.Hypercat("Bottom")
h1.addItem(h2, "http://FIXMEcat2")
h2.addItem(h3, "http://FIXMEcat3")
print("Top:")
print(h1.asJSON())
print(h1.prettyprint())
print("Middle:")
print(h2.asJSON())
print(h2.prettyprint())
print("Bottom:")
print(h3.asJSON())
print(h3.prettyprint())
def test_deeper_catalogue():
print("\nTEST: Creating more than 2 levels of catalogue, then outputting different levels")
h1 = hypercat.Hypercat("Top")
h1.addRelation("name","top")
h2 = hypercat.Hypercat("Middle")
h2.addRelation("name","middle")
h3 = hypercat.Hypercat("Bottom")
h3.addRelation("name","bottom")
h1.addItem(h2, "http://FIXMEcat2")
h2.addItem(h3, "http://FIXMEcat3")
print("Find top catalogue:")
hN = h1.findByPath("name", "/")
print(hN.prettyprint())
assert_equal(hN.values("name")[0], "top")
print("Find middle catalogue:")
hN = h1.findByPath("name", "/middle/")
print(hN.prettyprint())
assert_equal(hN.values("name")[0], "middle")
print("Find bottom catalogue:")
hN = h1.findByPath("name", "/middle/bottom")
print(hN.prettyprint())
assert_equal(hN.values("name")[0], "bottom")
def test_fancy_catalogue():
print("\nTEST: Create a fancy Catalogue with optional metadata")
h2 = hypercat.Hypercat("Fancy Catalogue")
h2.supportsSimpleSearch()
h2.hasHomepage("http://www.FIXME.com")
h2.containsContentType("application/vnd.hypercat.FIXME+json")
result = h2.prettyprint()
print(result)
assert_equal(result, """{
"catalogue-metadata": [
{
"rel": "urn:X-hypercat:rels:isContentType",
"val": "application/vnd.hypercat.catalogue+json"
},
{
"rel": "urn:X-hypercat:rels:hasDescription:en",
"val": "Fancy Catalogue"
},
{
"rel": "urn:X-hypercat:rels:supportsSearch",
"val": "urn:X-hypercat:search:simple"
},
{
"rel": "urn:X-hypercat:rels:hasHomepage",
"val": "http://www.FIXME.com"
},
{
"rel": "urn:X-hypercat:rels:containsContentType",
"val": "application/vnd.hypercat.FIXME+json"
}
],
"items": []
}""")
def test_multiple_rels():
print("\nTEST: Add multiple RELS to a catalogue")
h = hypercat.Hypercat("cat")
assert_equal(h.values("relation"), [])
h.addRelation("relation","value1")
h.addRelation("relation","value2")
assert_equal(h.values("relation"), ["value1","value2"])
print(h.prettyprint())
print("\nTEST: Load a catalogue from a string")
inString = """{
"catalogue-metadata": [
{
"rel": "urn:X-hypercat:rels:isContentType",
"val": "application/vnd.hypercat.catalogue+json"
},
{
"rel": "urn:X-hypercat:rels:hasDescription:en",
"val": "ingestiontestcat"
}
],
"items": [
{
"href": "http://FIXME",
"item-metadata": [
{
"rel": "urn:X-hypercat:rels:isContentType",
"val": "application/vnd.hypercat.catalogue+json"
},
{
"rel": "urn:X-hypercat:rels:hasDescription:en",
"val": "resource1"
}
]
},
{
"href": "http://FIXME2",
"item-metadata": [
{
"rel": "urn:X-hypercat:rels:isContentType",
"val": "application/vnd.hypercat.catalogue+json"
},
{
"rel": "urn:X-hypercat:rels:hasDescription:en",
"val": "resource2"
}
]
},
{
"href": "http://RESOURCEURL",
"item-metadata": [
{
"rel": "urn:X-hypercat:rels:isContentType",
"val": "resourcecontenttype"
},
{
"rel": "urn:X-hypercat:rels:hasDescription:en",
"val": "A resource"
}
]
}
]
}"""
h = hypercat.loads(inString)
outString = h.prettyprint()
assert_equal(inString, outString)
print(inString)
print("\nUnit tests all passed OK")
|
|
"""
The :mod:`dataset` module defines some tools for managing datasets.
Users may use both *built-in* and user-defined datasets (see the
:ref:`getting_started` page for examples). Right now, three built-in datasets
are available:
* The `movielens-100k <http://grouplens.org/datasets/movielens/>`_ dataset.
* The `movielens-1m <http://grouplens.org/datasets/movielens/>`_ dataset.
* The `Jester <http://eigentaste.berkeley.edu/dataset/>`_ dataset 2.
Built-in datasets can all be loaded (or downloaded if you haven't already)
using the :meth:`Dataset.load_builtin` method. For each built-in dataset,
Surprise also provides predefined :class:`readers <Reader>` which are useful if
you want to use a custom dataset that has the same format as a built-in one.
Summary:
.. autosummary::
:nosignatures:
Dataset.load_builtin
Dataset.load_from_file
Dataset.load_from_folds
Dataset.folds
DatasetAutoFolds.split
Reader
Trainset
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import defaultdict
from collections import namedtuple
import sys
import os
import zipfile
import itertools
import random
import numpy as np
from six.moves import input
from six.moves.urllib.request import urlretrieve
from six.moves import range
from six import iteritems
# directory where builtin datasets are stored. For now it's in the home
# directory under .surprise_data. Maybe ask the user to define it?
DATASETS_DIR = os.path.expanduser('~') + '/.surprise_data/'
# a builtin dataset has
# - a URL (where to download it)
# - a path (where it is located on the filesystem)
# - the parameters of the corresponding reader
BuiltinDataset = namedtuple('BuiltinDataset', ['url', 'path', 'reader_params'])
BUILTIN_DATASETS = {
'ml-100k':
BuiltinDataset(
url='http://files.grouplens.org/datasets/movielens/ml-100k.zip',
path=DATASETS_DIR + 'ml-100k/ml-100k/u.data',
reader_params=dict(line_format='user item rating timestamp',
rating_scale=(1, 5),
sep='\t')
),
'ml-1m':
BuiltinDataset(
url='http://files.grouplens.org/datasets/movielens/ml-1m.zip',
path=DATASETS_DIR + 'ml-1m/ml-1m/ratings.dat',
reader_params=dict(line_format='user item rating timestamp',
rating_scale=(1, 5),
sep='::')
),
'jester':
BuiltinDataset(
url='http://eigentaste.berkeley.edu/dataset/jester_dataset_2.zip',
path=DATASETS_DIR + 'jester/jester_ratings.dat',
reader_params=dict(line_format='user item rating',
rating_scale=(-10, 10))
)
}
class Dataset:
"""Base class for loading datasets.
Note that you should never instantiate the :class:`Dataset` class directly
(same goes for its derived classes), but instead use one of the three
available methods for loading datasets."""
def __init__(self, reader):
self.reader = reader
@classmethod
def load_builtin(cls, name='ml-100k'):
"""Load a built-in dataset.
If the dataset has not already been loaded, it will be downloaded and
saved. You will have to split your dataset using the :meth:`split
<DatasetAutoFolds.split>` method. See an example in the :ref:`User
Guide <load_builtin_example>`.
Args:
name(:obj:`string`): The name of the built-in dataset to load.
Accepted values are 'ml-100k', 'ml-1m', and 'jester'.
Default is 'ml-100k'.
Returns:
A :obj:`Dataset` object.
Raises:
ValueError: If the ``name`` parameter is incorrect.
"""
try:
dataset = BUILTIN_DATASETS[name]
except KeyError:
raise ValueError('unknown dataset ' + name +
'. Accepted values are ' +
', '.join(BUILTIN_DATASETS.keys()) + '.')
# if dataset does not exist, offer to download it
if not os.path.isfile(dataset.path):
answered = False
while not answered:
print('Dataset ' + name + ' could not be found. Do you want '
'to download it? [Y/n] ', end='')
choice = input().lower()
if choice in ['yes', 'y', '', 'omg this is so nice of you!!']:
answered = True
elif choice in ['no', 'n', 'hell no why would i want that?!']:
answered = True
print("Ok then, I'm out!")
sys.exit()
if not os.path.exists(DATASETS_DIR):
os.makedirs(DATASETS_DIR)
print('Trying to download dataset from ' + dataset.url + '...')
urlretrieve(dataset.url, DATASETS_DIR + 'tmp.zip')
with zipfile.ZipFile(DATASETS_DIR + 'tmp.zip', 'r') as tmp_zip:
tmp_zip.extractall(DATASETS_DIR + name)
os.remove(DATASETS_DIR + 'tmp.zip')
print('Done! Dataset', name, 'has been saved to', DATASETS_DIR +
name)
reader = Reader(**dataset.reader_params)
return cls.load_from_file(file_path=dataset.path, reader=reader)
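# Illustrative sketch (not part of the original module): loading a built-in
# dataset and iterating over its folds (5 shuffled folds by default).
# >>> data = Dataset.load_builtin('ml-100k')
# >>> for trainset, testset in data.folds():
# ...     pass  # train and evaluate an algorithm on each fold here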
@classmethod
def load_from_file(cls, file_path, reader):
"""Load a dataset from a (custom) file.
Use this if you want to use a custom dataset and all of the ratings are
stored in one file. You will have to split your dataset using the
:meth:`split <DatasetAutoFolds.split>` method. See an example in the
:ref:`User Guide <load_from_file_example>`.
Args:
file_path(:obj:`string`): The path to the file containing ratings.
reader(:obj:`Reader`): A reader to read the file.
"""
return DatasetAutoFolds(ratings_file=file_path, reader=reader)
@classmethod
def load_from_folds(cls, folds_files, reader):
"""Load a dataset where folds (for cross-validation) are predefined by
some files.
The purpose of this method is to cover a common use case where a
dataset is already split into predefined folds, such as the
movielens-100k dataset which defines files u1.base, u1.test, u2.base,
u2.test, etc... It can also be used when you don't want to perform
cross-validation but still want to specify your training and testing
data (which comes down to 1-fold cross-validation anyway). See an
example in the :ref:`User Guide <load_from_folds_example>`.
Args:
folds_files(:obj:`iterable` of :obj:`tuples`): The list of the
folds. A fold is a tuple of the form ``(path_to_train_file,
path_to_test_file)``.
reader(:obj:`Reader`): A reader to read the files.
"""
return DatasetUserFolds(folds_files=folds_files, reader=reader)
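# Illustrative sketch (not part of the original module): using the predefined
# u1..u5 folds shipped with movielens-100k; the paths below are hypothetical.
# >>> files_dir = os.path.expanduser('~/.surprise_data/ml-100k/ml-100k/')
# >>> reader = Reader('ml-100k')
# >>> folds_files = [(files_dir + 'u%d.base' % i, files_dir + 'u%d.test' % i)
# ...                for i in (1, 2, 3, 4, 5)]
# >>> data = Dataset.load_from_folds(folds_files, reader=reader)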
@classmethod
def load_from_df(cls, df, reader):
"""Load a dataset from a pandas dataframe.
Use this if you want to use a custom dataset that is stored in a pandas
dataframe. See the :ref:`User Guide<load_from_df_example>` for an
example.
Args:
df(`Dataframe`): The dataframe containing the ratings. It must have
three columns, corresponding to the user (raw) ids, the item
(raw) ids, and the ratings, in this order.
reader(:obj:`Reader`): A reader to read the file. Only the
``rating_scale`` field needs to be specified.
"""
return DatasetAutoFolds(reader=reader, df=df)
def read_ratings(self, file_name):
"""Return a list of ratings (user, item, rating, timestamp) read from
file_name"""
with open(os.path.expanduser(file_name)) as f:
raw_ratings = [self.reader.parse_line(line) for line in
itertools.islice(f, self.reader.skip_lines, None)]
return raw_ratings
def folds(self):
"""Generator function to iterate over the folds of the Dataset.
See :ref:`User Guide <iterate_over_folds>` for usage.
Yields:
tuple: :class:`Trainset` and testset of current fold.
"""
for raw_trainset, raw_testset in self.raw_folds():
trainset = self.construct_trainset(raw_trainset)
testset = self.construct_testset(raw_testset)
yield trainset, testset
def construct_trainset(self, raw_trainset):
raw2inner_id_users = {}
raw2inner_id_items = {}
current_u_index = 0
current_i_index = 0
ur = defaultdict(list)
ir = defaultdict(list)
# user raw id, item raw id, translated rating, time stamp
for urid, irid, r, timestamp in raw_trainset:
try:
uid = raw2inner_id_users[urid]
except KeyError:
uid = current_u_index
raw2inner_id_users[urid] = current_u_index
current_u_index += 1
try:
iid = raw2inner_id_items[irid]
except KeyError:
iid = current_i_index
raw2inner_id_items[irid] = current_i_index
current_i_index += 1
ur[uid].append((iid, r))
ir[iid].append((uid, r))
n_users = len(ur) # number of users
n_items = len(ir) # number of items
n_ratings = len(raw_trainset)
trainset = Trainset(ur,
ir,
n_users,
n_items,
n_ratings,
self.reader.rating_scale,
self.reader.offset,
raw2inner_id_users,
raw2inner_id_items)
return trainset
def construct_testset(self, raw_testset):
return [(ruid, riid, r_ui_trans)
for (ruid, riid, r_ui_trans, _) in raw_testset]
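# Illustrative note (not part of the original source): load_from_folds expects
# an iterable of (train_file, test_file) pairs. For the movielens-100k style
# folds mentioned above, a hedged sketch would look like:
#     folds_files = [('u%d.base' % i, 'u%d.test' % i) for i in (1, 2, 3, 4, 5)]
#     data = Dataset.load_from_folds(folds_files, reader=Reader('ml-100k'))
# where the file paths are assumed to point at the extracted dataset files.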
class DatasetUserFolds(Dataset):
"""A derived class from :class:`Dataset` for which folds (for
cross-validation) are predefined."""
def __init__(self, folds_files=None, reader=None):
Dataset.__init__(self, reader)
self.folds_files = folds_files
# check that all files actually exist.
for train_test_files in self.folds_files:
for f in train_test_files:
if not os.path.isfile(os.path.expanduser(f)):
raise ValueError('File ' + str(f) + ' does not exist.')
def raw_folds(self):
for train_file, test_file in self.folds_files:
raw_train_ratings = self.read_ratings(train_file)
raw_test_ratings = self.read_ratings(test_file)
yield raw_train_ratings, raw_test_ratings
class DatasetAutoFolds(Dataset):
"""A derived class from :class:`Dataset` for which folds (for
cross-validation) are not predefined. (Or for when there are no folds at
all)."""
def __init__(self, ratings_file=None, reader=None, df=None):
Dataset.__init__(self, reader)
self.n_folds = 5
self.shuffle = True
if ratings_file is not None:
self.ratings_file = ratings_file
self.raw_ratings = self.read_ratings(self.ratings_file)
elif df is not None:
self.df = df
self.raw_ratings = [(uid, iid, r, None) for (uid, iid, r) in
self.df.itertuples(index=False)]
else:
raise ValueError('Must specify ratings file or dataframe.')
def build_full_trainset(self):
"""Do not split the dataset into folds and just return a trainset as
is, built from the whole dataset.
User can then query for predictions, as shown in the :ref:`User Guide
<train_on_whole_trainset>`.
Returns:
The :class:`Trainset`.
"""
return self.construct_trainset(self.raw_ratings)
def raw_folds(self):
if self.shuffle:
random.shuffle(self.raw_ratings)
self.shuffle = False # set to false for future calls to raw_folds
def k_folds(seq, n_folds):
"""Inspired from scikit learn KFold method."""
if n_folds > len(seq) or n_folds < 2:
raise ValueError('Incorrect value for n_folds.')
start, stop = 0, 0
for fold_i in range(n_folds):
start = stop
stop += len(seq) // n_folds
if fold_i < len(seq) % n_folds:
stop += 1
yield seq[:start] + seq[stop:], seq[start:stop]
return k_folds(self.raw_ratings, self.n_folds)
def split(self, n_folds=5, shuffle=True):
"""Split the dataset into folds for future cross-validation.
If you forget to call :meth:`split`, the dataset will be automatically
shuffled and split for 5-folds cross-validation.
You can obtain repeatable splits over all your experiments by
seeding the RNG: ::
import random
random.seed(my_seed) # call this before you call split!
Args:
n_folds(:obj:`int`): The number of folds.
shuffle(:obj:`bool`): Whether to shuffle ratings before splitting.
If ``False``, folds will always be the same each time the
experiment is run. Default is ``True``.
"""
self.n_folds = n_folds
self.shuffle = shuffle
class Reader():
"""The Reader class is used to parse a file containing ratings.
Such a file is assumed to specify only one rating per line, and each line
needs to respect the following structure: ::
user ; item ; rating ; [timestamp]
where the order of the fields and the separator (here ';') may be
arbitrarily defined (see below). Brackets indicate that the timestamp
field is optional.
Args:
name(:obj:`string`, optional): If specified, a Reader for one of the
built-in datasets is returned and any other parameter is ignored.
Accepted values are 'ml-100k', 'ml-1m', and 'jester'. Default
is ``None``.
line_format(:obj:`string`): The fields names, in the order at which
they are encountered on a line. Default is ``'user item rating'``.
sep(char): the separator between fields. Example: ``';'``.
rating_scale(:obj:`tuple`, optional): The rating scale used for every
rating. Default is ``(1, 5)``.
skip_lines(:obj:`int`, optional): Number of lines to skip at the
beginning of the file. Default is ``0``.
"""
def __init__(self, name=None, line_format='user item rating', sep=None,
rating_scale=(1, 5), skip_lines=0):
if name:
try:
self.__init__(**BUILTIN_DATASETS[name].reader_params)
except KeyError:
raise ValueError('unknown reader ' + name +
'. Accepted values are ' +
', '.join(BUILTIN_DATASETS.keys()) + '.')
else:
self.sep = sep
self.skip_lines = skip_lines
self.rating_scale = rating_scale
lower_bound, higher_bound = rating_scale
self.offset = -lower_bound + 1 if lower_bound <= 0 else 0
splitted_format = line_format.split()
entities = ['user', 'item', 'rating']
if 'timestamp' in splitted_format:
self.with_timestamp = True
entities.append('timestamp')
else:
self.with_timestamp = False
# check that all fields are correct
if any(field not in entities for field in splitted_format):
raise ValueError('line_format parameter is incorrect.')
self.indexes = [splitted_format.index(entity) for entity in
entities]
def parse_line(self, line):
'''Parse a line.
Ratings are translated so that they are all strictly positive.
Args:
line(str): The line to parse
Returns:
tuple: User id, item id, rating and timestamp. The timestamp is set
to ``None`` if it does not exist.
'''
line = line.split(self.sep)
try:
if self.with_timestamp:
uid, iid, r, timestamp = (line[i].strip()
for i in self.indexes)
else:
uid, iid, r = (line[i].strip()
for i in self.indexes)
timestamp = None
except IndexError:
raise ValueError(('Impossible to parse line.' +
' Check the line_format and sep parameters.'))
return uid, iid, float(r) + self.offset, timestamp
class Trainset:
"""A trainset contains all useful data that constitutes a training set.
It is used by the :meth:`train()
<surprise.prediction_algorithms.algo_base.AlgoBase.train>` method of every
prediction algorithm. You should not try to build such an object on your
own but rather use the :meth:`Dataset.folds` method or the
:meth:`DatasetAutoFolds.build_full_trainset` method.
Attributes:
ur(:obj:`defaultdict` of :obj:`list`): The users ratings. This is a
dictionary containing lists of tuples of the form ``(item_inner_id,
rating)``. The keys are user inner ids.
ir(:obj:`defaultdict` of :obj:`list`): The items ratings. This is a
dictionary containing lists of tuples of the form ``(user_inner_id,
rating)``. The keys are item inner ids.
n_users: Total number of users :math:`|U|`.
n_items: Total number of items :math:`|I|`.
n_ratings: Total number of ratings :math:`|R_{train}|`.
rating_scale(tuple): The minimum and maximum rating of the rating
scale.
global_mean: The mean of all ratings :math:`\\mu`.
"""
def __init__(self, ur, ir, n_users, n_items, n_ratings, rating_scale,
offset, raw2inner_id_users, raw2inner_id_items):
self.ur = ur
self.ir = ir
self.n_users = n_users
self.n_items = n_items
self.n_ratings = n_ratings
self.rating_scale = rating_scale
self.offset = offset
self._raw2inner_id_users = raw2inner_id_users
self._raw2inner_id_items = raw2inner_id_items
self._global_mean = None
# inner2raw dicts could be built right now (or even before) but they
# are not always useful so we wait until we need them.
self._inner2raw_id_users = None
self._inner2raw_id_items = None
def knows_user(self, uid):
"""Indicate if the user is part of the trainset.
A user is part of the trainset if the user has at least one rating.
Args:
uid(int): The (inner) user id. See :ref:`this
note<raw_inner_note>`.
Returns:
``True`` if user is part of the trainset, else ``False``.
"""
return uid in self.ur
def knows_item(self, iid):
"""Indicate if the item is part of the trainset.
An item is part of the trainset if the item was rated at least once.
Args:
iid(int): The (inner) item id. See :ref:`this
note<raw_inner_note>`.
Returns:
``True`` if item is part of the trainset, else ``False``.
"""
return iid in self.ir
def to_inner_uid(self, ruid):
"""Convert a **user** raw id to an inner id.
See :ref:`this note<raw_inner_note>`.
Args:
ruid(str): The user raw id.
Returns:
int: The user inner id.
Raises:
ValueError: When user is not part of the trainset.
"""
try:
return self._raw2inner_id_users[ruid]
except KeyError:
raise ValueError(('User ' + str(ruid) +
' is not part of the trainset.'))
def to_raw_uid(self, iuid):
"""Convert a **user** inner id to a raw id.
See :ref:`this note<raw_inner_note>`.
Args:
iuid(int): The user inner id.
Returns:
str: The user raw id.
Raises:
ValueError: When ``iuid`` is not an inner id.
"""
if self._inner2raw_id_users is None:
self._inner2raw_id_users = {inner: raw for (raw, inner) in
iteritems(self._raw2inner_id_users)}
try:
return self._inner2raw_id_users[iuid]
except KeyError:
raise ValueError((str(iuid) +
' is not a valid inner id.'))
def to_inner_iid(self, riid):
"""Convert an **item** raw id to an inner id.
See :ref:`this note<raw_inner_note>`.
Args:
riid(str): The item raw id.
Returns:
int: The item inner id.
Raises:
ValueError: When item is not part of the trainset.
"""
try:
return self._raw2inner_id_items[riid]
except KeyError:
raise ValueError(('Item ' + str(riid) +
' is not part of the trainset.'))
def to_raw_iid(self, iiid):
"""Convert an **item** inner id to a raw id.
See :ref:`this note<raw_inner_note>`.
Args:
iiid(int): The item inner id.
Returns:
str: The item raw id.
Raises:
ValueError: When ``iiid`` is not an inner id.
"""
if self._inner2raw_id_items is None:
self._inner2raw_id_items = {inner: raw for (raw, inner) in
iteritems(self._raw2inner_id_items)}
try:
return self._inner2raw_id_items[iiid]
except KeyError:
raise ValueError((str(iiid) +
' is not a valid inner id.'))
def all_ratings(self):
"""Generator function to iterate over all ratings.
Yields:
A tuple ``(uid, iid, rating)`` where ids are inner ids (see
:ref:`this note <raw_inner_note>`).
"""
for u, u_ratings in iteritems(self.ur):
for i, r in u_ratings:
yield u, i, r
def build_testset(self):
"""Return a list of ratings that can be used as a testset in the
:meth:`test() <surprise.prediction_algorithms.algo_base.AlgoBase.test>`
method.
The ratings are all the ratings that are in the trainset, i.e. all the
ratings returned by the :meth:`all_ratings()
<surprise.dataset.Trainset.all_ratings>` generator. This is useful in
cases where you want to test your algorithm on the trainset.
"""
return [(self.to_raw_uid(u), self.to_raw_iid(i), r)
for (u, i, r) in self.all_ratings()]
def build_anti_testset(self):
"""Return a list of ratings that can be used as a testset in the
:meth:`test() <surprise.prediction_algorithms.algo_base.AlgoBase.test>`
method.
The ratings are all the ratings that are **not** in the trainset, i.e.
all the ratings :math:`r_{ui}` where the user :math:`u` is known, the
item :math:`i` is known, but the rating :math:`r_{ui}` is not in the
trainset. As :math:`r_{ui}` is unknown, it is assumed to be equal to
the mean of all ratings :meth:`global_mean
<surprise.dataset.Trainset.global_mean>`.
"""
anti_testset = []
for u in self.all_users():
for i in self.all_items():
user_items = [j for (j, _) in self.ur[u]]
if i not in user_items:
r_ui = (self.to_raw_uid(u), self.to_raw_iid(i),
self.global_mean)
anti_testset.append(r_ui)
return anti_testset
def all_users(self):
"""Generator function to iterate over all users.
Yields:
Inner id of users.
"""
return range(self.n_users)
def all_items(self):
"""Generator function to iterate over all items.
Yields:
Inner id of items.
"""
return range(self.n_items)
@property
def global_mean(self):
"""Return the mean of all ratings.
It's only computed once."""
if self._global_mean is None:
self._global_mean = np.mean([r for (_, _, r) in
self.all_ratings()])
return self._global_mean
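# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal, hedged example of the Dataset / Reader API defined above. It
# assumes pandas is installed and relies on the module-level imports (random,
# numpy) already used above; the ratings frame and all variable names below
# are invented purely for the demo.
if __name__ == '__main__':
    import pandas as pd

    # Parsing a single 'user;item;rating;timestamp' line with a Reader.
    line_reader = Reader(line_format='user item rating timestamp', sep=';',
                         rating_scale=(1, 5))
    print(line_reader.parse_line('196;242;3;881250949'))
    # -> ('196', '242', 3.0, '881250949')

    # Loading ratings from a pandas dataframe and cross-validating.
    ratings_df = pd.DataFrame({
        'userID': ['u1', 'u1', 'u2', 'u3', 'u3', 'u4'],
        'itemID': ['i1', 'i2', 'i1', 'i2', 'i3', 'i3'],
        'rating': [4.0, 2.0, 5.0, 3.0, 1.0, 4.0],
    })
    data = Dataset.load_from_df(ratings_df[['userID', 'itemID', 'rating']],
                                Reader(rating_scale=(1, 5)))
    random.seed(0)  # seed before splitting for repeatable folds
    data.split(n_folds=3)
    for fold_i, (trainset, testset) in enumerate(data.folds()):
        print('fold', fold_i, ':', trainset.n_ratings, 'train ratings,',
              len(testset), 'test ratings')

    # Raw ids round-trip through the Trainset's inner ids.
    full_trainset = data.build_full_trainset()
    assert full_trainset.to_raw_uid(full_trainset.to_inner_uid('u1')) == 'u1'
    print('global mean rating:', full_trainset.global_mean)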
|
|
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os

import pandas as pd  # used below via pd.read_csv / pd.DataFrame

from abc import abstractmethod, ABC
from typing import Set
from pandas import DataFrame
from ludwig.datasets.base_dataset import BaseDataset, DEFAULT_CACHE_LOCATION
from ludwig.datasets.mixins.download import ZipDownloadMixin
from ludwig.datasets.mixins.load import CSVLoadMixin
from ludwig.datasets.mixins.process import *
class SST(ABC, ZipDownloadMixin, MultifileJoinProcessMixin, CSVLoadMixin,
BaseDataset):
"""The SST2 dataset.
This dataset is constructed using the Stanford Sentiment Treebank Dataset.
This dataset contains binary labels (positive or negative) for each sample.
The original dataset specified 5 labels:
very negative, negative, neutral, positive, very positive with
the following cutoffs:
[0, 0.2], (0.2, 0.4], (0.4, 0.6], (0.6, 0.8], (0.8, 1.0]
This class pulls in an array of mixins for different types of functionality
which belongs in the workflow for ingesting and transforming
training data into a destination dataframe that can be used by Ludwig.
"""
def __init__(self, dataset_name, cache_dir=DEFAULT_CACHE_LOCATION,
include_subtrees=False, discard_neutral=False,
convert_parentheses=True, remove_duplicates=False):
super().__init__(dataset_name=dataset_name, cache_dir=cache_dir)
self.include_subtrees = include_subtrees
self.discard_neutral = discard_neutral
self.convert_parentheses = convert_parentheses
self.remove_duplicates = remove_duplicates
@staticmethod
@abstractmethod
def get_sentiment_label(id2sent, phrase_id):
pass
def process_downloaded_dataset(self):
sentences_df = pd.read_csv(
os.path.join(self.raw_dataset_path,
'stanfordSentimentTreebank/datasetSentences.txt'),
sep="\t",
)
sentences_df['sentence'] = sentences_df['sentence'].apply(format_text)
datasplit_df = pd.read_csv(
os.path.join(self.raw_dataset_path,
'stanfordSentimentTreebank/datasetSplit.txt'),
sep=',')
phrase2id = {}
with open(os.path.join(self.raw_dataset_path,
'stanfordSentimentTreebank/dictionary.txt')) as f:
Lines = f.readlines()
for line in Lines:
if line:
split_line = line.split('|')
phrase = split_line[0]
phrase2id[phrase] = int(split_line[1])
id2sent = {}
with open(os.path.join(self.raw_dataset_path,
'stanfordSentimentTreebank/sentiment_labels.txt')) as f:
Lines = f.readlines()
for line in Lines:
if line:
split_line = line.split('|')
try:
id2sent[int(split_line[0])] = float(split_line[1])
except ValueError:
pass
trees_pointers = None
trees_phrases = None
if self.include_subtrees:
trees_pointers = []
with open(os.path.join(self.raw_dataset_path,
'stanfordSentimentTreebank/STree.txt')) as f:
Lines = f.readlines()
for line in Lines:
if line:
trees_pointers.append(
[int(s.strip()) for s in line.split('|')]
)
trees_phrases = []
with open(os.path.join(self.raw_dataset_path,
'stanfordSentimentTreebank/SOStr.txt')) as f:
Lines = f.readlines()
for line in Lines:
if line:
trees_phrases.append(
[s.strip() for s in line.split('|')]
)
splits = {
'train': 1,
'test': 2,
'dev': 3
}
for split_name, split_id in splits.items():
sentence_idcs = get_sentence_idcs_in_split(datasplit_df, split_id)
pairs = []
if split_name == 'train' and self.include_subtrees:
phrases = []
for sentence_idx in sentence_idcs:
# trees_pointers and trees_phrases are 0 indexed
# while sentence_idx starts from 1
# so we need to decrease sentence_idx value
sentence_idx -= 1
subtrees = sentence_subtrees(sentence_idx, trees_pointers,
trees_phrases)
sentence_idx += 1
sentence_phrase = list(sentences_df[
sentences_df['sentence_index'] == sentence_idx
]['sentence'])[0]
sentence_phrase = convert_parentheses(sentence_phrase)
label = self.get_sentiment_label(id2sent, phrase2id[sentence_phrase])
# filter @ sentence level
# For SST-2, check subtrees only if sentence is not neutral
if not self.discard_neutral or label != -1:
for phrase in subtrees:
label = self.get_sentiment_label(id2sent, phrase2id[phrase])
if not self.discard_neutral or label != -1:
if not self.convert_parentheses:
phrase = convert_parentheses_back(phrase)
phrase = phrase.replace('\xa0', ' ')
pairs.append([phrase, label])
else:
phrases = get_sentences_with_idcs(sentences_df, sentence_idcs)
for phrase in phrases:
phrase = convert_parentheses(phrase)
label = self.get_sentiment_label(id2sent, phrase2id[phrase])
if not self.discard_neutral or label != -1:
if not self.convert_parentheses:
phrase = convert_parentheses_back(phrase)
phrase = phrase.replace('\xa0', ' ')
pairs.append([phrase, label])
final_csv = pd.DataFrame(pairs)
final_csv.columns = ['sentence', 'label']
if self.remove_duplicates:
final_csv = final_csv.drop_duplicates(subset=['sentence'])
final_csv.to_csv(
os.path.join(self.raw_dataset_path, f'{split_name}.csv'),
index=False
)
super(SST, self).process_downloaded_dataset()
def format_text(text: str):
"""
Fixes mis-encoded tokens by re-encoding them as latin-1 and decoding as UTF-8
"""
return ' '.join(
[w.encode('latin1').decode('utf-8')
for w in text.strip().split(' ')]
)
def convert_parentheses(text: str):
"""
Replaces -LRB- and -RRB- tokens present in SST with ( and )
"""
return text.replace('-LRB-', '(').replace('-RRB-', ')')
def convert_parentheses_back(text: str):
"""
Replaces ( and ) tokens with -LRB- and -RRB-
"""
return text.replace('(', '-LRB-').replace(')', '-RRB-')
def get_sentence_idcs_in_split(datasplit: DataFrame, split_id: int):
"""
Given a dataset split id (1 for train, 2 for test, 3 for dev),
returns the set of corresponding sentence indices in sentences_df.
"""
return set(
datasplit[datasplit['splitset_label'] == split_id]['sentence_index']
)
def get_sentences_with_idcs(sentences: DataFrame, sentences_idcs: Set[int]):
"""
Given a set of sentence indices,
returns the corresponding sentence texts from the sentences dataframe
"""
criterion = sentences['sentence_index'].map(
lambda x: x in sentences_idcs
)
return sentences[criterion]['sentence'].tolist()
def sentence_subtrees(sentence_idx, trees_pointers, trees_phrases):
tree_pointers = trees_pointers[sentence_idx]
tree_phrases = trees_phrases[sentence_idx]
tree = SSTTree(tree_pointers, tree_phrases)
return tree.subtrees()
def visit_postorder(node, visit_list):
if node:
visit_postorder(node.left, visit_list)
visit_postorder(node.right, visit_list)
visit_list.append(node.val)
class SSTTree:
class Node:
def __init__(self, key, val=None):
self.left = None
self.right = None
self.key = key
self.val = val
def create_node(self, parent, i):
if self.nodes[i] is not None:
# already created
return
self.nodes[i] = self.Node(i)
if parent[i] == -1:
# is root
self.root = self.nodes[i]
return
if self.nodes[parent[i]] is None:
# parent not yet created
self.create_node(parent, parent[i])
# assign current node to parent
parent = self.nodes[parent[i]]
if parent.left is None:
parent.left = self.nodes[i]
else:
parent.right = self.nodes[i]
def create_tree(self, parents, tree_phrases):
n = len(parents)
self.nodes = [None for i in range(n)]
self.root = None
for i in range(n):
self.create_node(parents, i)
for i, phrase in enumerate(tree_phrases):
self.nodes[i].val = phrase
for node in self.nodes:
if node.val is None:
node.val = ' '.join((node.left.val, node.right.val))
def __init__(self, tree_pointers, tree_phrases):
self.create_tree(
[int(elem) - 1 for elem in tree_pointers],
tree_phrases
)
def subtrees(self):
visit_list = []
visit_postorder(self.root, visit_list)
return visit_list
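# --- Illustrative sketch (not part of the Ludwig source) ---------------------
# A minimal, hedged demo of the helpers above: it reconstructs every subtree
# phrase of a tiny hand-made parse tree with SSTTree, and shows the 5-way label
# cutoffs described in the SST docstring. The parent pointers use the 1-indexed
# STree convention (the root's parent is 0); all names are invented for the
# demo only.
if __name__ == '__main__':
    # "the cat sat": leaves 1-3, node 4 = (cat sat), node 5 = root.
    tree_pointers = [5, 4, 4, 5, 0]
    tree_phrases = ['the', 'cat', 'sat']
    print(SSTTree(tree_pointers, tree_phrases).subtrees())
    # -> ['the', 'cat', 'sat', 'cat sat', 'the cat sat']

    def five_way_label(score):
        """Map a sentiment score in [0, 1] to the 5-way label described in
        the SST docstring (illustrative helper only)."""
        cutoffs = [0.2, 0.4, 0.6, 0.8, 1.0]
        names = ['very negative', 'negative', 'neutral',
                 'positive', 'very positive']
        for cutoff, name in zip(cutoffs, names):
            if score <= cutoff:
                return name

    print(five_way_label(0.1), five_way_label(0.5), five_way_label(0.9))
    # -> very negative neutral very positive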
|
|
#!/usr/bin/env python3
import unittest
import random
import numpy as np
from mips_sim import IanMIPS, Instr, IllegalInstructionError,\
CMDParse, MIPSProcessor, IntegerOverflow, AddressError, SoftwareInterrupt
well_formed = [
"add $s0, $t0, $t1",
"addi $s0, $t0, 0xfffb", # -5 = 0xfffb
"addi $s1, $t1, -5",
"addi $s2, $t2, 26",
"addiu $s0, $t0, 42",
"addiu $s1, $t1, -55",
"addiu $s2, $t2, 0x0bba",
"addu $s0, $t0, $t1",
"and $s0, $t0, $t1",
"andi $s0, $t0, 63",
"andi $s0, $t0, 0xaaaa",
"beq $s0, $t0, 2000",
"bgez $s0, 1000",
"bgezal $s0, 50",
"blez $s0, 100",
"bltz $s0, 1001",
"bltzal $s0, 500",
"bne $s0, $t0, 2001",
"div $s0, $t0",
"divu $s0, $t0",
"j 1000200",
"jal 1000201",
"jr $s4",
"lb $s1, 50($t0)",
"lui $s0, 5321",
"lw $s1, 65($t0)",
"mfhi $s0",
"mflo $s1",
"mult $t1, $t2",
"multu $t1, $t2",
"noop",
"or $s0, $t1, $t2",
"ori $s0, $t1, 500",
"sb $s0, 22($s1)",
"sll $s0, $t6, 5",
"sllv $t0, $t6, $t3",
"slt $s0, $t5, $t4",
"slti $s0, $t3, -100",
"sltiu $s0, $t3, 1000",
"sltu $s0, $t3, $t7",
"sra $s0, $t5, 6",
"srl $s0, $s5, 2",
"srlv $s0, $s1, $s2",
"sub $s3, $s0, $s2",
"subu $s2, $s3, $s5",
"sw $t0, 25($s3)",
"syscall",
"xor $s3, $t3, $s1",
"xori $s4, $t2, 0xFFFF"
]
class TestOpcodes(unittest.TestCase):
def test_well_formed(self):
for s in well_formed:
v = CMDParse.parse_cmd(s)
#self.assertEqual(s, v.__str__())
def test_encode_complete(self):
for s in well_formed:
iform = CMDParse.parse_cmd(s)
try:
bform = iform.encode()
except NotImplementedError:
self.assertTrue(False, "encode {} is not implemented.".format(iform.op))
except Exception as e:
self.assertTrue(False, "Unexpected exception encountered encoding `{}`\n{}".format(s, e))
self.assertEqual(iform.bin, bform)
try:
iform2 = Instr.decode(bform)
except NotImplementedError:
self.assertTrue(False, "decode {} is not implemented.".format(iform.op))
except Exception as e:
self.assertTrue(False, "Unexpected exception encountered decoding `{}`\n{}".format(s, e))
self.assertEqual(iform2.bin, bform)
self.assertEqual(iform, iform2, "error encoding and decoding {}.".format(s))
def test_encode_jr(self):
o = CMDParse.parse_cmd("jr $s0")
o_bin = o.encode()
op = Instr.extr_op(o_bin)
rs = Instr.extr_rs(o_bin)
rt = Instr.extr_rt(o_bin)
rd = Instr.extr_rd(o_bin)
shamt = Instr.extr_shamt(o_bin)
funct = Instr.extr_funct(o_bin)
self.assertEqual(op, 0)
self.assertEqual(rs, IanMIPS.reg_dict["s0"])
self.assertEqual(rt, 0)
self.assertEqual(rd, 0)
self.assertEqual(shamt, 0)
self.assertEqual(funct, IanMIPS.funct_dict["jr"])
def test_encode_bgez(self):
o = CMDParse.parse_cmd("bgez $s0, 1000")
o_bin = o.encode()
op = Instr.extr_op(o_bin)
rs = Instr.extr_rs(o_bin)
rt = Instr.extr_rt(o_bin)
imm = Instr.extr_imm(o_bin)
self.assertEqual(op, 1)
self.assertEqual(imm, 1000)
self.assertEqual(rs, IanMIPS.reg_dict["s0"])
self.assertEqual(rt, IanMIPS.b_instr[o.op])
def test_encode_add(self):
o = CMDParse.parse_cmd("add $s0, $s1, $s2")
o_bin = o.encode()
op = Instr.extr_op(o_bin)
rs = Instr.extr_rs(o_bin)
rt = Instr.extr_rt(o_bin)
rd = Instr.extr_rd(o_bin)
funct = Instr.extr_funct(o_bin)
self.assertEqual(op, 0)
self.assertEqual(funct, IanMIPS.funct_dict["add"])
self.assertEqual(rd, IanMIPS.reg_dict["s0"])
self.assertEqual(rs, IanMIPS.reg_dict["s1"])
self.assertEqual(rt, IanMIPS.reg_dict["s2"])
def test_encode_addi(self):
o = CMDParse.parse_cmd("addi $s0, $t0, 22")
o_bin = o.encode()
op = Instr.extr_op(o_bin)
rs = Instr.extr_rs(o_bin)
rt = Instr.extr_rt(o_bin)
imm = Instr.extr_imm(o_bin)
self.assertEqual(op, IanMIPS.op_dict["addi"])
self.assertEqual(rt, IanMIPS.reg_dict["s0"])
self.assertEqual(rs, IanMIPS.reg_dict["t0"])
self.assertEqual(imm, 22)
def test_encode_addi_negimm(self):
o = CMDParse.parse_cmd("addi $s0, $t0, -5")
o_bin = o.encode()
op = Instr.extr_op(o_bin)
rs = Instr.extr_rs(o_bin)
rt = Instr.extr_rt(o_bin)
imm = np.int16(Instr.extr_imm(o_bin))
self.assertEqual(op, IanMIPS.op_dict["addi"])
self.assertEqual(rt, IanMIPS.reg_dict["s0"])
self.assertEqual(rs, IanMIPS.reg_dict["t0"])
self.assertEqual(imm, -5)
def test_encode_addi_heximm(self):
o = CMDParse.parse_cmd("addi $s0, $t0, 0xa")
o_bin = o.encode()
op = Instr.extr_op(o_bin)
rs = Instr.extr_rs(o_bin)
rt = Instr.extr_rt(o_bin)
imm = Instr.extr_imm(o_bin)
self.assertEqual(op, IanMIPS.op_dict["addi"])
self.assertEqual(rt, IanMIPS.reg_dict["s0"])
self.assertEqual(rs, IanMIPS.reg_dict["t0"])
self.assertEqual(imm, 10)
def test_add(self):
p = MIPSProcessor()
p.reg[10] = 11
p.reg[11] = 22
p.reg[12] = 3
p._add(10, 11, 12)
self.assertEqual(p.reg[10], 25)
try:
p.reg[11] = 2 ** 31 - 1 # INT_MAX + 1 = overflow
p.reg[12] = 1
p._add(10, 11, 12)
self.assertTrue(False)
except IntegerOverflow:
pass
try:
p.reg[11] = -2 ** 31 # INT_MIN - 2 = overflow
p.reg[12] = -2
p._add(10, 11, 12)
self.assertTrue(False)
except IntegerOverflow:
pass
inst = CMDParse.parse_cmd("add $s3, $s4, $s5")
p.reg[19] = 2
p.reg[20] = 11
p.reg[21] = 22
p.do_instr(inst)
self.assertEqual(p.reg[19], 33)
def test_addi(self):
p = MIPSProcessor()
p.reg[10] = 5
p._addi(11, 10, 0x5)
self.assertEqual(p.reg[11], 10)
try:
p.reg[10] = 2**31 - 1
p._addi(11, 10, 2)
self.assertTrue(False)
except IntegerOverflow:
pass
try:
p.reg[10] = -2 ** 31
p._addi(11, 10, -2)
self.assertTrue(False)
except IntegerOverflow:
pass
inst = CMDParse.parse_cmd("addi $s3, $s4, 0xa")
p.reg[19] = 2
p.reg[20] = 11
p.do_instr(inst)
self.assertEqual(p.reg[19], 21)
def test_addiu(self):
p = MIPSProcessor()
p.reg[10] = 5
p._addiu(11, 10, 2)
self.assertEqual(p.reg[11], 7)
p.reg[10] = 2**32 - 1
p._addiu(11, 10, 2)
self.assertEqual(p.reg[11], 1)
p.reg[10] = 1
p._addiu(11, 10, -2)
self.assertEqual(p.reg[11], 2 ** 32 - 1)
def test_addu(self):
""" Test addu $rd, $rs, $rt """
p = MIPSProcessor()
p.reg[10] = 11
p.reg[11] = 22
p.reg[12] = 3
p._addu(10, 11, 12)
self.assertEqual(p.reg[10], 25)
p.reg[11] = 2 ** 32 - 1
p.reg[12] = 2
p._addu(10, 11, 12)
self.assertEqual(p.reg[10], 1)
p.reg[11] = 0
p.reg[12] = -1
p._addu(10, 11, 12)
self.assertEqual(p.reg[10], 2 ** 32 - 1)
def test_and(self):
""" Test and $rd, $rs, $rt """
p = MIPSProcessor()
for i in range(100):
a = np.uint32(random.getrandbits(32))
b = np.uint32(random.getrandbits(32))
p.reg[11] = a
p.reg[12] = b
c = np.bitwise_and(a, b)
p._and(10, 11, 12)
self.assertEqual(p.reg[10], c)
def test_andi(self):
""" Test addi $rt, $rs, imm """
p = MIPSProcessor()
for i in range(100):
imm = np.uint32(random.getrandbits(32))
rt = random.randint(8, 23)
rs = random.randint(8, 23)
rsval = np.uint32(random.getrandbits(32))
p.reg[rs] = rsval
res = np.bitwise_and(rsval, imm)
p._andi(rt, rs, imm)
self.assertEqual(p.reg[rt], res)
def test_beq(self):
p = MIPSProcessor()
beq_cmd = CMDParse.parse_cmd("beq $t0, $s0, 0x3")
p.pc = 10
p.reg[8] = 10
p.reg[16] = 10
p.do_instr(beq_cmd)
self.assertEqual(p.pc, 26)
def test_bgez(self):
p = MIPSProcessor()
bgez_cmd = CMDParse.parse_cmd("bgez $s0, 0xa")
p.pc = 10
p.reg[16] = np.uint32(-5)
p.do_instr(bgez_cmd)
self.assertEqual(p.pc, 14)
p.pc = 10
p.reg[16] = 0
p.do_instr(bgez_cmd)
self.assertEqual(p.pc, 54)
p.pc = 10
p.reg[16] = 22
p.do_instr(bgez_cmd)
self.assertEqual(p.pc, 54)
def test_bgezal(self):
p = MIPSProcessor()
bgez_cmd = CMDParse.parse_cmd("bgezal $s0, 0xa")
p.pc = 10
p.reg[16] = np.uint32(-5)
p.reg[31] = 0
p.do_instr(bgez_cmd)
self.assertEqual(p.pc, 14)
self.assertEqual(p.reg[31], 0)
p.pc = 10
p.reg[16] = 0
p.reg[31] = 0
p.do_instr(bgez_cmd)
self.assertEqual(p.pc, 54)
self.assertEqual(p.reg[31], 18)
p.pc = 10
p.reg[16] = 22
p.reg[31] = 0
p.do_instr(bgez_cmd)
self.assertEqual(p.pc, 54)
self.assertEqual(p.reg[31], 18)
def test_bgtz(self):
p = MIPSProcessor()
bgez_cmd = CMDParse.parse_cmd("bgtz $s0, 0xa")
p.pc = 10
p.reg[16] = np.uint32(-5)
p.do_instr(bgez_cmd)
self.assertEqual(p.pc, 14)
p.pc = 10
p.reg[16] = 0
p.do_instr(bgez_cmd)
self.assertEqual(p.pc, 14)
p.pc = 10
p.reg[16] = 22
p.do_instr(bgez_cmd)
self.assertEqual(p.pc, 54)
def test_blez(self):
p = MIPSProcessor()
bgez_cmd = CMDParse.parse_cmd("blez $s0, 0xa")
p.pc = 10
p.reg[16] = np.uint32(-5)
p.do_instr(bgez_cmd)
self.assertEqual(p.pc, 54)
p.pc = 10
p.reg[16] = 0
p.do_instr(bgez_cmd)
self.assertEqual(p.pc, 54)
p.pc = 10
p.reg[16] = 22
p.do_instr(bgez_cmd)
self.assertEqual(p.pc, 14)
def test_bltz(self):
p = MIPSProcessor()
bgez_cmd = CMDParse.parse_cmd("bltz $s0, 0xa")
p.pc = 10
p.reg[16] = np.uint32(-5)
p.do_instr(bgez_cmd)
self.assertEqual(p.pc, 54)
p.pc = 10
p.reg[16] = 0
p.do_instr(bgez_cmd)
self.assertEqual(p.pc, 14)
p.pc = 10
p.reg[16] = 22
p.do_instr(bgez_cmd)
self.assertEqual(p.pc, 14)
def test_bltzal(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("bltzal $s0, 0xa")
p.pc = 10
p.reg[16] = np.uint32(-5)
p.reg[31] = 0
p.do_instr(the_cmd)
self.assertEqual(p.pc, 54)
self.assertEqual(p.reg[31], 18)
p.pc = 10
p.reg[16] = 0
p.reg[31] = 0
p.do_instr(the_cmd)
self.assertEqual(p.pc, 14)
self.assertEqual(p.reg[31], 0)
p.pc = 10
p.reg[16] = 22
p.reg[31] = 0
p.do_instr(the_cmd)
self.assertEqual(p.pc, 14)
self.assertEqual(p.reg[31], 0)
def test_bne(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("bne $t0, $s0, 0xa")
p.pc = 10
p.reg[8] = 10
p.reg[16] = 10
p.do_instr(the_cmd)
self.assertEqual(p.pc, 14)
p.pc = 10
p.reg[8] = 10
p.reg[16] = 9
p.do_instr(the_cmd)
self.assertEqual(p.pc, 54)
def test_div(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("div $s0, $s1")
p.reg[16] = 14
p.reg[17] = 4
p.do_instr(the_cmd)
self.assertEqual(p.hi, 2)
self.assertEqual(p.lo, 3)
p.reg[16] = np.uint32(-14)
p.reg[17] = 4
p.do_instr(the_cmd)
self.assertEqual(p.hi, 2)
self.assertEqual(np.int32(p.lo), -3)
def test_divu(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("divu $s0, $s1")
p.reg[16] = 14
p.reg[17] = 4
p.do_instr(the_cmd)
self.assertEqual(p.hi, 2)
self.assertEqual(p.lo, 3)
p.reg[16] = np.uint32(-14)
p.reg[17] = 4
p.do_instr(the_cmd)
self.assertEqual(p.hi, 2)
self.assertEqual(p.lo, 1073741820)
def test_j(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("j 0xf")
p.pc = np.uint32(0xa)
p.do_instr(the_cmd)
self.assertEqual(p.pc, 0xf * 4)
p.pc = np.uint32(0xa00000ba)
p.do_instr(the_cmd)
self.assertEqual(p.pc, np.bitwise_or(0xa0000000, 0xf * 4))
def test_jal(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("jal 0xf")
p.pc = np.uint32(0xa)
p.reg[31] = 0x0
p.do_instr(the_cmd)
self.assertEqual(p.pc, 0xf * 4)
self.assertEqual(p.reg[31], 0xa + 8)
p.pc = np.uint32(0xa00000ba)
p.reg[31] = 0x0
p.do_instr(the_cmd)
self.assertEqual(p.pc, np.bitwise_or(0xa0000000, 0xf * 4))
self.assertEqual(p.reg[31], 0xa00000ba + 8)
def test_jr(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("jr $s0")
p.pc = np.uint32(0xa)
p.reg[16] = 0xf * 4
p.do_instr(the_cmd)
self.assertEqual(p.pc, 0xf * 4)
p.pc = np.uint32(0xa)
p.reg[16] = 0xf
try:
p.do_instr(the_cmd)
self.assertTrue(False, "Branching to a non 4-byte aligned address isn't allowed.")
except AddressError:
pass
def test_lb(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("lb $s0, 4($s1)")
p.mem[0x2f8] = 77
p.reg[16] = 0x0
p.reg[17] = 0x2f8 - 4
p.do_instr(the_cmd)
self.assertEqual(p.reg[16], 77)
p.mem[0x2f8] = np.uint8(-96)
p.reg[16] = 0x0
p.reg[17] = 0x2f8 - 4
p.do_instr(the_cmd)
self.assertEqual(np.int32(p.reg[16]), -96)
def test_lui(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("lui $s0, 0xabba")
p.reg[16] = 0x0
p.do_instr(the_cmd)
self.assertEqual(p.reg[16], 0xabba0000)
the_cmd = CMDParse.parse_cmd("lui $s0, 0xdead")
p.reg[16] = 0xbeef
p.do_instr(the_cmd)
self.assertEqual(p.reg[16], 0xdead0000)
def test_lw(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("lw $s0, 4($s1)")
p.mem[0x2f8: 0x2f8 + 4] = np.uint32([0xdeadbeef]).view('uint8')
p.reg[16] = 0x0
p.reg[17] = 0x2f8 - 4
p.do_instr(the_cmd)
self.assertEqual(p.reg[16], 0xdeadbeef)
def test_mfhi(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("mfhi $s0")
p.hi = 55
p.do_instr(the_cmd)
self.assertEqual(p.reg[16], 55)
p.hi = -55
p.do_instr(the_cmd)
self.assertEqual(np.int32(p.reg[16]), -55)
def test_mflo(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("mflo $s0")
p.lo = 55
p.do_instr(the_cmd)
self.assertEqual(p.reg[16], 55)
p.lo = -55
p.do_instr(the_cmd)
self.assertEqual(np.int32(p.reg[16]), -55)
def test_mult(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("mult $s0, $s1")
p.reg[16] = 0xabbaabba
p.reg[17] = 0x9ba461595
p.do_instr(the_cmd)
res = np.int64(np.uint32([p.lo, p.hi]).view('int64')[0])
self.assertEqual(res, np.int64(np.int32(0xabbaabba)) * np.int64(np.int32(0x9ba461595)))
def test_multu(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("multu $s0, $s1")
p.reg[16] = 0xabbaabba
p.reg[17] = 0x9ba461595
p.do_instr(the_cmd)
res = np.uint64(np.uint32([p.lo, p.hi]).view('uint64')[0])
self.assertEqual(res, np.uint64(np.uint32(0xabbaabba)) * np.uint64(np.uint32(0x9ba461595)))
def test_noop(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("noop")
p.pc = 12
p.do_instr(the_cmd)
self.assertEqual(p.pc, 16)
def test_or(self):
p = MIPSProcessor()
for i in range(100):
a = np.uint32(random.getrandbits(32))
b = np.uint32(random.getrandbits(32))
p.reg[11] = a
p.reg[12] = b
c = np.bitwise_or(a, b)
p._or(10, 11, 12)
self.assertEqual(p.reg[10], c)
def test_ori(self):
p = MIPSProcessor()
for i in range(100):
imm = np.uint32(random.getrandbits(32))
rt = random.randint(8, 23)
rs = random.randint(8, 23)
rsval = np.uint32(random.getrandbits(32))
p.reg[rs] = rsval
res = np.bitwise_or(rsval, imm)
p._ori(rt, rs, imm)
self.assertEqual(p.reg[rt], res)
def test_sb(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("sb $s0, 4($s1)")
p.reg[16] = 0xabba
p.reg[17] = 0x2f8 - 4
p.do_instr(the_cmd)
self.assertEqual(p.mem[0x2f8], 0xba)
def test_sll(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("sll $s0, $s1, 10")
p.reg[16] = 0x0
p.reg[17] = 0xa
p.do_instr(the_cmd)
self.assertEqual(p.reg[16], np.left_shift(0xa, 10))
def test_sllv(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("sllv $s0, $s1, $s2")
p.reg[16] = 0x0
p.reg[17] = 0xa
p.reg[18] = 0xa
p.do_instr(the_cmd)
self.assertEqual(p.reg[16], np.left_shift(0xa, 10))
def test_slt(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("slt $s0, $s1, $s2")
p.reg[16] = 0x0
p.reg[17] = np.uint32(-10)
p.reg[18] = np.uint32(0)
p.do_instr(the_cmd)
self.assertEqual(p.reg[16], 1)
def test_slti(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("slti $s0, $s1, 0x5")
p.reg[16] = 0x0
p.reg[17] = np.uint32(-10)
p.do_instr(the_cmd)
self.assertEqual(p.reg[16], 1)
def test_sltiu(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("sltiu $s0, $s1, 0x5")
p.reg[16] = 0x0
p.reg[17] = np.uint32(-10)
p.do_instr(the_cmd)
self.assertEqual(p.reg[16], 0)
def test_sltu(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("sltu $s0, $s1, $s2")
p.reg[16] = 0x0
p.reg[17] = np.uint32(-10)
p.reg[18] = np.uint32(0)
p.do_instr(the_cmd)
self.assertEqual(p.reg[16], 0)
def test_sra(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("sra $s0, $s1, 2")
p.reg[16] = 0x0
p.reg[17] = np.uint32(-200)
p.do_instr(the_cmd)
self.assertEqual(np.int32(p.reg[16]), -50)
def test_srl(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("srl $s0, $s1, 2")
p.reg[16] = 0x0
p.reg[17] = np.uint32(-200)
p.do_instr(the_cmd)
self.assertEqual(p.reg[16], 1073741774)
def test_srlv(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("srlv $s0, $s1, $s2")
p.reg[16] = 0x0
p.reg[17] = 0xabbaabba
p.reg[18] = 0xa
p.do_instr(the_cmd)
self.assertEqual(p.reg[16], np.right_shift(0xabbaabba, 0xa))
def test_sub(self):
p = MIPSProcessor()
p.reg[10] = 11
p.reg[11] = 22
p.reg[12] = 3
p._sub(10, 11, 12)
self.assertEqual(p.reg[10], 19)
try:
p.reg[11] = 2 ** 31 - 1 # INT_MAX - (-1) = overflow
p.reg[12] = -1
p._sub(10, 11, 12)
self.assertTrue(False)
except IntegerOverflow:
pass
try:
p.reg[11] = -2 ** 31 # INT_MIN - 1 = overflow
p.reg[12] = 1
p._sub(10, 11, 12)
self.assertTrue(False)
except IntegerOverflow:
pass
inst = CMDParse.parse_cmd("sub $s3, $s4, $s5")
p.reg[19] = 2
p.reg[20] = 22
p.reg[21] = 11
p.do_instr(inst)
self.assertEqual(p.reg[19], 11)
def test_subu(self):
""" Test subu $rd, $rs, $rt """
p = MIPSProcessor()
p.reg[10] = 11
p.reg[11] = 22
p.reg[12] = 3
p._subu(10, 11, 12)
self.assertEqual(p.reg[10], 19)
p.reg[11] = 2 ** 32 - 1
p.reg[12] = -1
p._subu(10, 11, 12)
self.assertEqual(p.reg[10], 0)
p.reg[11] = 0
p.reg[12] = 1
p._subu(10, 11, 12)
self.assertEqual(p.reg[10], 2 ** 32 - 1)
def test_sw(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("sw $s0, 4($s1)")
p.reg[16] = 0xdeadbeef
p.reg[17] = 0x2f8 - 4
p.do_instr(the_cmd)
self.assertListEqual(list(p.mem[0x2f8:0x2f8 + 4]), [0xef, 0xbe, 0xad, 0xde])
the_cmd = CMDParse.parse_cmd("sw $s0, 0($s1)")
p.reg[16] = 0xdeadbeef
p.reg[17] = 1
try:
p.do_instr(the_cmd)
self.assertTrue(False, "Cannot store to not naturally aligned memory addresses.")
except AddressError:
pass
def test_syscall(self):
p = MIPSProcessor()
the_cmd = CMDParse.parse_cmd("syscall")
try:
p.do_instr(the_cmd)
self.assertTrue(False, "Software interrupt not thrown on syscall.")
except SoftwareInterrupt:
pass
def test_xor(self):
p = MIPSProcessor()
for i in range(100):
a = np.uint32(random.getrandbits(32))
b = np.uint32(random.getrandbits(32))
p.reg[11] = a
p.reg[12] = b
c = np.bitwise_xor(a, b)
p._xor(10, 11, 12)
self.assertEqual(p.reg[10], c)
def test_xori(self):
p = MIPSProcessor()
for i in range(100):
imm = np.uint32(random.getrandbits(32))
rt = random.randint(8, 23)
rs = random.randint(8, 23)
rsval = np.uint32(random.getrandbits(32))
p.reg[rs] = rsval
res = np.bitwise_xor(rsval, imm)
p._xori(rt, rs, imm)
self.assertEqual(p.reg[rt], res)
if __name__ == "__main__":
random.seed()
unittest.main()
|
|
#!/usr/bin/env python
__author__ = 'Stephen P. Henrie'
import unittest
from mock import Mock, patch
from pyon.util.unit_test import PyonTestCase
from pyon.util.int_test import IonIntegrationTestCase
from nose.plugins.attrib import attr
from pyon.core.exception import BadRequest, Conflict, Inconsistent, NotFound
from pyon.public import PRED, RT, IonObject
from ion.services.coi.org_management_service import OrgManagementService
from interface.services.coi.iorg_management_service import OrgManagementServiceClient
from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
from pyon.core.governance import ORG_MANAGER_ROLE
@attr('UNIT', group='coi')
class TestOrgManagementService(PyonTestCase):
def setUp(self):
mock_clients = self._create_service_mock('org_management')
self.org_management_service = OrgManagementService()
self.org_management_service.clients = mock_clients
# Rename to save some typing
self.mock_create = mock_clients.resource_registry.create
self.mock_read = mock_clients.resource_registry.read
self.mock_update = mock_clients.resource_registry.update
self.mock_delete = mock_clients.resource_registry.delete
self.mock_create_association = mock_clients.resource_registry.create_association
self.mock_delete_association = mock_clients.resource_registry.delete_association
self.mock_find_objects = mock_clients.resource_registry.find_objects
self.mock_find_resources = mock_clients.resource_registry.find_resources
self.mock_find_subjects = mock_clients.resource_registry.find_subjects
# Org
self.org = Mock()
self.org.name = "Foo"
self.org.org_governance_name = ''
self.user_role = Mock()
self.user_role2 = Mock()
@patch('pyon.ion.directory.Directory.__init__', Mock(return_value=None))
def test_create_org(self):
self.mock_find_objects.return_value = ([self.user_role], [self.user_role2])
self.mock_create.return_value = ['111', 1]
org_id = self.org_management_service.create_org(self.org)
assert org_id == '111'
self.mock_create.assert_called_once_with(self.org)
def test_read_and_update_org(self):
self.mock_read.return_value = self.org
org = self.org_management_service.read_org('111')
assert org is self.mock_read.return_value
self.mock_read.assert_called_once_with('111', '')
org.name = 'Bar'
self.mock_update.return_value = ['111', 2]
self.org_management_service.update_org(org)
self.mock_update.assert_called_once_with(org)
def test_delete_org(self):
self.org_management_service.delete_org('111')
self.mock_delete.assert_called_once_with('111')
def test_read_org_not_found(self):
self.mock_read.side_effect = NotFound('Org bad does not exist')
# TEST: Execute the service operation call
with self.assertRaises(NotFound) as cm:
self.org_management_service.read_org('bad')
ex = cm.exception
self.assertEqual(ex.message, 'Org bad does not exist')
self.mock_read.assert_called_once_with('bad', '')
def test_delete_org_not_found(self):
self.mock_delete.side_effect = NotFound('Org bad does not exist')
# TEST: Execute the service operation call
with self.assertRaises(NotFound) as cm:
self.org_management_service.delete_org('bad')
ex = cm.exception
self.assertEqual(ex.message, 'Org bad does not exist')
self.mock_delete.assert_called_once_with('bad')
@attr('INT', group='coi')
class TestOrgManagementServiceInt(IonIntegrationTestCase):
def setUp(self):
# Start container
self._start_container()
self.container.start_rel_from_url('res/deploy/r2coi.yml')
self.resource_registry = ResourceRegistryServiceClient(node=self.container.node)
self.org_management_service = OrgManagementServiceClient(node=self.container.node)
def test_org_crud(self):
with self.assertRaises(BadRequest) as br:
self.org_management_service.create_org(IonObject("Org", {"name": "Test Facility", "org_governance_name": "Test Facility" }))
self.assertTrue("can only contain alphanumeric and underscore characters" in br.exception.message)
with self.assertRaises(BadRequest):
self.org_management_service.create_org()
org_obj = IonObject("Org", {"name": "Test Facility"})
org_id = self.org_management_service.create_org(org_obj)
self.assertNotEqual(org_id, None)
org = None
org = self.org_management_service.read_org(org_id)
self.assertNotEqual(org, None)
self.assertEqual(org.org_governance_name, 'Test_Facility')
# Check that the roles got associated with the new Org
role_list = self.org_management_service.find_org_roles(org_id)
self.assertEqual(len(role_list),2 )
with self.assertRaises(BadRequest):
self.org_management_service.update_org()
org.name = 'Updated Test Facility'
self.org_management_service.update_org(org)
org = None
org = self.org_management_service.read_org(org_id)
self.assertNotEqual(org, None)
self.assertEqual(org.name, 'Updated Test Facility')
self.assertEqual(org.org_governance_name, 'Test_Facility')
user_role = self.org_management_service.find_org_role_by_name(org_id, ORG_MANAGER_ROLE)
self.assertNotEqual(user_role, None)
self.org_management_service.remove_user_role(org_id, ORG_MANAGER_ROLE)
with self.assertRaises(BadRequest) as cm:
user_role = self.org_management_service.find_org_role_by_name(org_id, ORG_MANAGER_ROLE)
self.assertIn("The User Role 'ORG_MANAGER' does not exist for this Org", cm.exception.message)
with self.assertRaises(BadRequest):
self.org_management_service.delete_org()
self.org_management_service.delete_org(org_id)
with self.assertRaises(NotFound) as cm:
self.org_management_service.read_org(org_id)
self.assertIn("does not exist", cm.exception.message)
with self.assertRaises(NotFound) as cm:
self.org_management_service.delete_org(org_id)
self.assertIn("does not exist", cm.exception.message)
def test_org_affiliation(self):
root_org = None
root_org = self.org_management_service.find_org()
self.assertNotEqual(root_org, None)
org_obj = IonObject("Org", {"name": "TestFacility"})
org_id = self.org_management_service.create_org(org_obj)
self.assertNotEqual(org_id, None)
ret = self.org_management_service.affiliate_org(root_org._id, org_id)
self.assertTrue(ret)
ret = self.org_management_service.unaffiliate_org(root_org._id, org_id)
self.assertTrue(ret)
def test_find_org_containers(self):
root_org = None
root_org = self.org_management_service.find_org()
self.assertNotEqual(root_org, None)
containers = self.org_management_service.find_org_containers(root_org._id)
all_containers,_ = self.resource_registry.find_resources(restype=RT.CapabilityContainer, id_only=True)
self.assertEqual(len(containers),len(all_containers))
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
subscription_id: str,
resource_group_name: str,
registry_name: str,
pipeline_run_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
"pipelineRunName": _SERIALIZER.url("pipeline_run_name", pipeline_run_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_request_initial(
subscription_id: str,
resource_group_name: str,
registry_name: str,
pipeline_run_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
"pipelineRunName": _SERIALIZER.url("pipeline_run_name", pipeline_run_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
registry_name: str,
pipeline_run_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-11-01-preview"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
"pipelineRunName": _SERIALIZER.url("pipeline_run_name", pipeline_run_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
def build_list_request(
subscription_id: str,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class PipelineRunsOperations(object):
"""PipelineRunsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2020_11_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
registry_name: str,
pipeline_run_name: str,
**kwargs: Any
) -> "_models.PipelineRun":
"""Gets the detailed information for a given pipeline run.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param pipeline_run_name: The name of the pipeline run.
:type pipeline_run_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PipelineRun, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2020_11_01_preview.models.PipelineRun
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PipelineRun"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
pipeline_run_name=pipeline_run_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PipelineRun', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}'} # type: ignore
def _create_initial(
self,
resource_group_name: str,
registry_name: str,
pipeline_run_name: str,
pipeline_run_create_parameters: "_models.PipelineRun",
**kwargs: Any
) -> "_models.PipelineRun":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PipelineRun"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(pipeline_run_create_parameters, 'PipelineRun')
request = build_create_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
pipeline_run_name=pipeline_run_name,
content_type=content_type,
json=_json,
template_url=self._create_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PipelineRun', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PipelineRun', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}'} # type: ignore
@distributed_trace
def begin_create(
self,
resource_group_name: str,
registry_name: str,
pipeline_run_name: str,
pipeline_run_create_parameters: "_models.PipelineRun",
**kwargs: Any
) -> LROPoller["_models.PipelineRun"]:
"""Creates a pipeline run for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param pipeline_run_name: The name of the pipeline run.
:type pipeline_run_name: str
:param pipeline_run_create_parameters: The parameters for creating a pipeline run.
:type pipeline_run_create_parameters:
~azure.mgmt.containerregistry.v2020_11_01_preview.models.PipelineRun
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either PipelineRun or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.containerregistry.v2020_11_01_preview.models.PipelineRun]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PipelineRun"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
pipeline_run_name=pipeline_run_name,
pipeline_run_create_parameters=pipeline_run_create_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('PipelineRun', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
registry_name: str,
pipeline_run_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
pipeline_run_name=pipeline_run_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
registry_name: str,
pipeline_run_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes a pipeline run from a container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param pipeline_run_name: The name of the pipeline run.
:type pipeline_run_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
pipeline_run_name=pipeline_run_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> Iterable["_models.PipelineRunListResult"]:
"""Lists all the pipeline runs for the specified container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PipelineRunListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerregistry.v2020_11_01_preview.models.PipelineRunListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PipelineRunListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PipelineRunListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns'} # type: ignore
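# Hedged usage sketch (not part of the generated SDK): the begin_create /
# begin_delete operations above return an LROPoller, so a caller typically
# blocks on .result(). The client wiring and all literal values below
# (resource group, registry and run names) are illustrative assumptions,
# not values taken from this file.
def _example_wait_for_pipeline_run_delete(client):
    # `client` is assumed to be a ContainerRegistryManagementClient whose
    # .pipeline_runs attribute is the operations class defined above.
    poller = client.pipeline_runs.begin_delete(
        resource_group_name="my-resource-group",
        registry_name="myregistry",
        pipeline_run_name="my-pipeline-run",
    )
    poller.result()  # blocks until the long-running delete completes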
|
|
#
# -*- coding: utf-8 -*-
"""Machinery for running and validating transcripts.
If the user wants to run a transcript (see docs/transcript.rst),
we need a mechanism to run each command in the transcript as
a unit test, comparing the expected output to the actual output.
This file contains the class necessary to make that work. This
class is used in cmd2.py::run_transcript_tests()
"""
import re
import unittest
from typing import (
TYPE_CHECKING,
Iterator,
List,
Optional,
TextIO,
Tuple,
cast,
)
from . import (
ansi,
utils,
)
if TYPE_CHECKING: # pragma: no cover
from cmd2 import (
Cmd,
)
class Cmd2TestCase(unittest.TestCase):
"""A unittest class used for transcript testing.
Subclass this, setting ``cmdapp``, to make a unittest.TestCase class
that will execute the commands in a transcript file and expect the
results shown.
See example.py
"""
cmdapp: Optional['Cmd'] = None
def setUp(self) -> None:
if self.cmdapp:
self._fetchTranscripts()
# Trap stdout
self._orig_stdout = self.cmdapp.stdout
self.cmdapp.stdout = cast(TextIO, utils.StdSim(cast(TextIO, self.cmdapp.stdout)))
def tearDown(self) -> None:
if self.cmdapp:
# Restore stdout
self.cmdapp.stdout = self._orig_stdout
def runTest(self) -> None: # was testall
if self.cmdapp:
its = sorted(self.transcripts.items())
for (fname, transcript) in its:
self._test_transcript(fname, transcript)
def _fetchTranscripts(self) -> None:
self.transcripts = {}
testfiles = cast(List[str], getattr(self.cmdapp, 'testfiles', []))
for fname in testfiles:
tfile = open(fname)
self.transcripts[fname] = iter(tfile.readlines())
tfile.close()
def _test_transcript(self, fname: str, transcript: Iterator[str]) -> None:
if self.cmdapp is None:
return
line_num = 0
finished = False
line = ansi.strip_style(next(transcript))
line_num += 1
while not finished:
# Scroll forward to where actual commands begin
while not line.startswith(self.cmdapp.visible_prompt):
try:
line = ansi.strip_style(next(transcript))
except StopIteration:
finished = True
break
line_num += 1
command_parts = [line[len(self.cmdapp.visible_prompt) :]]
try:
line = next(transcript)
except StopIteration:
line = ''
line_num += 1
# Read the entirety of a multi-line command
while line.startswith(self.cmdapp.continuation_prompt):
command_parts.append(line[len(self.cmdapp.continuation_prompt) :])
try:
line = next(transcript)
except StopIteration as exc:
msg = f'Transcript broke off while reading command beginning at line {line_num} with\n{command_parts[0]}'
raise StopIteration(msg) from exc
line_num += 1
command = ''.join(command_parts)
# Send the command into the application and capture the resulting output
stop = self.cmdapp.onecmd_plus_hooks(command)
result = self.cmdapp.stdout.read()
stop_msg = 'Command indicated application should quit, but more commands in transcript'
# Read the expected result from transcript
if ansi.strip_style(line).startswith(self.cmdapp.visible_prompt):
message = f'\nFile {fname}, line {line_num}\nCommand was:\n{command}\nExpected: (nothing)\nGot:\n{result}\n'
self.assertTrue(not (result.strip()), message)
# If the command signaled the application to quit there should be no more commands
self.assertFalse(stop, stop_msg)
continue
expected_parts = []
while not ansi.strip_style(line).startswith(self.cmdapp.visible_prompt):
expected_parts.append(line)
try:
line = next(transcript)
except StopIteration:
finished = True
break
line_num += 1
if stop:
# This should only be hit if the command that set stop to True had output text
self.assertTrue(finished, stop_msg)
# transform the expected text into a valid regular expression
expected = ''.join(expected_parts)
expected = self._transform_transcript_expected(expected)
message = f'\nFile {fname}, line {line_num}\nCommand was:\n{command}\nExpected:\n{expected}\nGot:\n{result}\n'
self.assertTrue(re.match(expected, result, re.MULTILINE | re.DOTALL), message)
def _transform_transcript_expected(self, s: str) -> str:
r"""Parse the string with slashed regexes into a valid regex.
Given a string like:
Match a 10 digit phone number: /\d{3}-\d{3}-\d{4}/
Turn it into a valid regular expression which matches the literal text
of the string and the regular expression. We have to remove the slashes
because they differentiate between plain text and a regular expression.
If the slashes are escaped they are interpreted as plain text, and a
single unmatched slash is also treated as plain text.
Check the tests in tests/test_transcript.py to see all the edge
cases.
"""
regex = ''
start = 0
while True:
(regex, first_slash_pos, start) = self._escaped_find(regex, s, start, False)
if first_slash_pos == -1:
# no more slashes, add the rest of the string and bail
regex += re.escape(s[start:])
break
else:
# there is a slash, add everything we have found so far
# add stuff before the first slash as plain text
regex += re.escape(s[start:first_slash_pos])
start = first_slash_pos + 1
# and go find the next one
(regex, second_slash_pos, start) = self._escaped_find(regex, s, start, True)
if second_slash_pos > 0:
# add everything between the slashes (but not the slashes)
# as a regular expression
regex += s[start:second_slash_pos]
# and change where we start looking for slashes on the next
# turn through the loop
start = second_slash_pos + 1
else:
# No closing slash, we have to add the first slash,
# and the rest of the text
regex += re.escape(s[start - 1 :])
break
return regex
@staticmethod
def _escaped_find(regex: str, s: str, start: int, in_regex: bool) -> Tuple[str, int, int]:
"""Find the next slash in {s} after {start} that is not preceded by a backslash.
If we find an escaped slash, add everything up to and including it to regex,
updating {start}. {start} therefore serves two purposes: it tells us where to
start looking for the next slash, and it marks how much of {s} has already
been added to {regex}.
{in_regex} specifies whether we are currently inside a regex; escaped slashes
are handled differently in that case.
"""
while True:
pos = s.find('/', start)
if pos == -1:
# no match, return to caller
break
elif pos == 0:
# slash at the beginning of the string, so it can't be
# escaped. We found it.
break
else:
# check if the slash is preceded by a backslash
if s[pos - 1 : pos] == '\\':
# it is.
if in_regex:
# add everything up to the backslash as a
# regular expression
regex += s[start : pos - 1]
# skip the backslash, and add the slash
regex += s[pos]
else:
# add everything up to the backslash as escaped
# plain text
regex += re.escape(s[start : pos - 1])
# and then add the slash as escaped
# plain text
regex += re.escape(s[pos])
# update start to show we have handled everything
# before it
start = pos + 1
# and continue to look
else:
# slash is not escaped, this is what we are looking for
break
return regex, pos, start
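# Hedged usage sketch (not part of cmd2 itself): how Cmd2TestCase is typically
# wired to an application, mirroring what cmd2.py::run_transcript_tests() does.
# The application instance and transcript file names are assumptions supplied
# by the caller.
def _example_build_transcript_suite(app: 'Cmd', transcript_files: List[str]) -> unittest.TestSuite:
    """Build a unittest suite that replays ``transcript_files`` against ``app``.

    Each transcript line starting with the visible prompt is re-run as a
    command and its output is compared against the transcript (slash-delimited
    spans are treated as regexes, as _transform_transcript_expected documents).
    """
    app.testfiles = transcript_files      # read later by _fetchTranscripts()
    testcase = Cmd2TestCase()             # default methodName is 'runTest'
    testcase.cmdapp = app                 # instance attribute used by setUp/runTest
    return unittest.TestSuite([testcase])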
|
|
# coding: utf-8
import random
import os
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from models import XpMsgException, CoreParam, Application, Action
from models import Menu, MenuParam, View, Workflow, Param, WFParamValue, WorkflowData, ServiceMenu
from models import WorkflowView, ViewMenu, SearchIndex, SearchIndexParam, Word, SearchIndexWord, XpTemplate, ViewTmpl, ServiceMenuCondition
from models import ViewMenuCondition, ApplicationMedia
from models import Context, JsResultDict
from ximpia.xpsite.models import Setting
# Settings
from ximpia.xpcore.util import get_class
settings = get_class(os.getenv("DJANGO_SETTINGS_MODULE"))
# Logging
import logging.config
logging.config.dictConfig(settings.LOGGING)
logger = logging.getLogger(__name__)
class CommonDAO(object):
_numberMatches = 0
_ctx = None
model = None
_relatedFields = ()
_relatedDepth = None
def __init__(self, ctx, related_fields=(), related_depth=None, number_matches=100):
"""@param ctx: Context:
@param related_fields: tuple containing the fields to fetch data in the same query
@param related_depth: Number of depth relationships to follow. The higher the number, the bigger the query
@param number_matches: Number of rows for queries that support paging
"""
self._ctx = ctx
self._relatedFields = related_fields
self._relatedDepth = related_depth
self._numberMatches = number_matches
if related_depth != None and len(related_fields) != 0:
raise XpMsgException(None, _('related_fields and related_depth cannot be combined. Provide only one of them.'))
def _processRelated(self):
"""Process related objects using fields and depth, class attributes _relatedFields and _relatedDepth"""
if len(self._relatedFields) != 0 or self._relatedDepth != None:
if len(self._relatedFields) != 0:
dbObj = self.model.objects.select_related(self._relatedFields)
elif self._relatedDepth != None:
dbObj = self.model.objects.select_related(depth=self._relatedDepth)
else:
dbObj = self.model.objects
return dbObj
def _cleanDict(self, dd):
"""Clean dict removing xpXXX fields.
@param dd: Dictionary
@return: dictNew : New dictionary without xpXXX fields"""
fields = dd.keys()
dictNew = {}
for sKey in fields:
if sKey.find('xp') == 0:
pass
else:
dictNew[sKey] = dd[sKey]
return dictNew
def _getPagingStartEnd(self, page, numberMatches):
"""Get tuple (iStart, iEnd)"""
iStart = (page-1)*numberMatches
iEnd = iStart+numberMatches
values = (iStart, iEnd)
return values
def _getCtx(self):
"""Get context"""
return self._ctx
def _doManyById(self, model, idList, field):
"""Does request for map for list of ids (one query). Then processes map and adds to object obtained objects.
@param idList: List
@param object: Model"""
xpDict = self.getMap(idList, userModel=model)
for idTarget in xpDict.keys():
addModel = xpDict[idTarget]
field.add(addModel)
def _doManyByName(self, model, nameList, field):
"""Does request for map for list of ids (one query). Then processes map and adds to object obtained objects.
@param idList: List
@param object: Model"""
for value in nameList:
fields = model.objects.get_or_create(name=value)
nameModel = fields[0]
field.add(nameModel)
def _resolveDbName(self):
"""Resolves the db name to use. Supports multiple masters and multiple slaves. Views use slaves. Actions use masters.
Data about view or action is obtained from the context."""
#TODO: Include application mapping with settings variable XIMPIA_DATABASE_APPS = {}
# Build master and slave lists
if self._ctx.dbName == None:
dbList = settings.DATABASES.keys()
dbListMaster = []
dbListSlave = []
for dbNameI in dbList:
if dbNameI == 'default' or dbNameI.find('master') == 0:
dbListMaster.append(dbNameI)
elif dbNameI.find('slave') == 0:
dbListSlave.append(dbNameI)
dbName = ''
if self._ctx.isView == True:
if len(dbListSlave) != 0:
dbName = random.choice(dbListSlave)
else:
dbName = 'default'
elif self._ctx.isAction == True:
dbName = random.choice(dbListMaster)
else:
dbName = self._ctx.dbName
logger.debug('CommonDAO :: dbName: %s view: %s' % (dbName, self._ctx.viewNameSource) )
return dbName
def _getSetting(self, settingName):
"""
Get setting model instance.
** Attributes **
* ``settingName``:String : Setting name
** Returns **
models.site.Setting model instance
"""
setting = Setting.objects.get(name__name=settingName).value
return setting
def get_map(self, id_list):
"""Get object map for a list of ids
@param id_list: List of ids to fetch
@return: Dict[id]: object"""
dd = {}
if len(id_list) != 0:
dbObj = self._processRelated()
fields = dbObj.using(self._resolveDbName()).filter(id__in=id_list)
for obj in fields:
dd[obj.id] = obj
return dd
def get_by_id(self, field_id):
"""Get model object by id
@param field_id: Object id
@return: Model object"""
try:
dbObj = self._processRelated()
obj = dbObj.using(self._resolveDbName()).get(id=field_id)
except Exception as e:
raise XpMsgException(e, _('Error in get object by id ') + str(field_id) + _(' in model ') + str(self.model),
origin='data')
return obj
def check(self, **qs_args):
"""Checks if object exists
@param qs_args: query arguments
@return: Boolean"""
try:
dbObj = self.model.objects
exists = dbObj.using(self._resolveDbName()).filter(**qs_args).exists()
except Exception as e:
raise XpMsgException(e, _('Error in check object. Args: ') + str(qs_args) + _(' in model ') + str(self.model),
origin='data')
return exists
def get(self, **qs_args):
"""Get object
@param qs_args: query arguments
@return: Model Object"""
try:
logger.debug('dbName:' + self._resolveDbName())
dbObj = self._processRelated()
data = dbObj.using(self._resolveDbName()).get(**qs_args)
except Exception as e:
raise XpMsgException(e, _('Error in get object. Args: ') + str(qs_args) + _(' in model ') + str(self.model),
origin='data')
return data
def save(self):
"""Save database object, either insert or update"""
try:
self.model.save(using=self._resolveDbName())
except Exception as e:
raise XpMsgException(e, _('Error in save model ') + str(self.model),
origin='data')
return self.model
def search(self, *qs_tuple, **qs_args):
"""Search model using filter. Support for related objects as FK to model"""
#try:
dbObj = self._processRelated()
filterList = dbObj.using(self._resolveDbName()).filter(*qs_tuple, **qs_args)
"""except Exception as e:
raise XpMsgException(e, _('Error in search operation. qs_tuple: ') + str(qs_tuple) + ' . Args: ' + str(qs_args) + _(' in model ') + str(self.model),
origin='data')"""
return filterList
def create(self, **qs_args):
"""Create object
@param qs_args: Query arguments
@return: Data Object"""
try:
dbObj = self.model.objects
data = dbObj.using(self._resolveDbName()).create(**qs_args)
except Exception as e:
raise XpMsgException(e, _('Error in create object. Args: ') + str(qs_args) + _(' in model ') + str(self.model),
origin='data')
return data
def get_create(self, **qs_args):
"""Get or create object. If exists, gets the current value. If does not exist, creates data.
@param qs_args: Query arguments
@return: tuple (Data Object, bCreated)"""
try:
dbObj = self.model.objects
xpTuple = dbObj.using(self._resolveDbName()).get_or_create(**qs_args)
except Exception as e:
raise XpMsgException(e, _('Error in get or create object. Args: ') + str(qs_args) + _(' in model ') + str(self.model),
origin='data')
return xpTuple
def delete_by_id(self, pk, is_real=False):
"""Delete model object by id
@param pk: Object id
@param is_real: If False (default), mark the row as deleted; if True, physically delete it
@return: Model object"""
try:
if is_real == False:
xpObject = self.model.objects.using(self._resolveDbName()).get(id=pk)
xpObject.isDeleted = True
xpObject.save(using=self._resolveDbName())
else:
xpObject = self.model.objects_del.using(self._resolveDbName()).get(id=pk)
xpObject.delete()
except Exception as e:
raise XpMsgException(e, _('Error delete object by id ') + str(pk),
origin='data')
return xpObject
def delete_if_exists(self, is_real=False, **qs_args):
"""Delete row in case item exists.If does not exist, catches a DoesNotExist exception
@param qs_args: query arguments"""
try:
dbObj = self.model.objects
try:
if is_real == False:
dbObj = self.model.objects.using(self._resolveDbName()).get(**qs_args)
dbObj.isDeleted = True
dbObj.save(using=self._resolveDbName())
else:
dbObj = self.model.objects_del.using(self._resolveDbName()).get(**qs_args)
dbObj.delete()
except self.model.DoesNotExist:
pass
except Exception as e:
raise XpMsgException(e, _('Error delete object. Args ') + str(qs_args) + _(' in model ') + str(self.model),
origin='data')
def delete(self, is_real=False, **qs_args):
"""Delete row. In case does not exist, throws model.DoesNotExist
@param qs_args: query arguments"""
try:
if is_real == False:
dbObj = self.model.objects.using(self._resolveDbName()).get(**qs_args)
dbObj.isDeleted = True
dbObj.save(using=self._resolveDbName())
else:
dbObj = self.model.objects_del.using(self._resolveDbName()).get(**qs_args)
dbObj.delete()
#dbObj.using(self._resolveDbName()).get(**qs_args).delete()
except Exception as e:
raise XpMsgException(e, _('Error delete object. Args ') + str(qs_args) + _(' in model ') + str(self.model),
origin='data')
def filter_data(self, **args_dict):
"""Search a model table with ordering support and paging
@param xpNumberMatches: Number of matches
@param xpPage: Page
@param xpOrderBy: Tuple of fields to order by
@return: list : List of model objects"""
try:
iNumberMatches = self._numberMatches
if args_dict.has_key('xpNumberMatches'):
iNumberMatches = args_dict['xpNumberMatches']
page = 1
if args_dict.has_key('xpPage'):
page = int(args_dict['xpPage'])
iStart, iEnd = self._getPagingStartEnd(page, iNumberMatches)
orderByTuple = ()
if args_dict.has_key('xpOrderBy'):
orderByTuple = args_dict['xpOrderBy']
ArgsDict = self._cleanDict(args_dict)
dbObj = self._processRelated()
if len(orderByTuple) != 0:
dbObj = self.model.objects.order_by(*orderByTuple)
logger.debug( self._resolveDbName() )
xpList = dbObj.using(self._resolveDbName()).filter(**ArgsDict)[iStart:iEnd]
except Exception as e:
raise XpMsgException(e, _('Error in search table model ') + str(self.model),
origin='data')
return xpList
def get_all(self):
"""Get all rows from table
@param bFull: boolean : Follows all foreign keys
@return: list"""
try:
dbObj = self._processRelated()
xpList = dbObj.using(self._resolveDbName()).all()
except Exception as e:
raise XpMsgException(e, _('Error in getting all fields from ') + str(self.model),
origin='data')
return xpList
def search_fields(self, fields, page_start=1, page_end=None, number_results=None, order_by=[], **args):
"""
Search the table for a set of fields, with paging and ordering support.
** Attributes **
* ``fields``:tuple<str>
* ``page_start``:int [optional] [default:1]
* ``page_end``:int [optional]
* ``number_results``:int [optional] [default:from settings]
* ``order_by``:tuple<str> [optional] [default:[]]
** Returns **
Returns the query set with values(*fields).
xpList:ValuesQueryset
"""
try:
logger.debug('CommonDAO.searchFields :: pageStart: %s pageEnd: %s' % (page_start, page_end) )
logger.debug('CommonDAO.searchFields :: numberResults: %s disablePaging: %s' % (number_results, args.get('disable_paging')) )
if not args.get('disable_paging', False):
iStart = (page_start-1)*number_results
if page_end is None:
iEnd = iStart+number_results
else:
iEnd = iStart + number_results*(page_end-page_start+1)
logger.debug('CommonDAO.searchFields :: iStart: %s iEnd: %s' % (iStart, iEnd) )
dbObj = self._processRelated()
"""if len(orderBy) != 0:
dbObj = self.model.objects.order_by(*orderBy)"""
logger.debug( self._resolveDbName() )
logger.debug('CommonDAO.searchFields :: args: %s' % (args) )
if not args.get('disable_paging', False):
logger.debug('CommonDAO.searchField:: iStart: %s iEnd: %s' % (iStart, iEnd) )
if args.has_key('disable_paging'):
del args['disable_paging']
if len(order_by) == 0:
xpList = dbObj.using(self._resolveDbName()).filter(**args)[iStart:iEnd].values_list(*fields)
else:
xpList = dbObj.using(self._resolveDbName()).filter(**args).order_by(*order_by)[iStart:iEnd].values_list(*fields)
else:
logger.debug('CommonDAO.searchField:: Have no paging, we get all the data...')
if args.has_key('disable_paging'):
del args['disable_paging']
if len(order_by) == 0:
xpList = dbObj.using(self._resolveDbName()).filter(**args).values_list(*fields)
else:
xpList = dbObj.using(self._resolveDbName()).filter(**args).order_by(*order_by).values_list(*fields)
"""if len(orderBy) != 0:
xpList.orderBy(*orderBy)"""
return xpList
except Exception as e:
raise XpMsgException(e, _('Error in searching fields in model ') + str(self.model), origin='data')
ctx = property(_getCtx, None)
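# Hedged sketch (assumption, not taken from this project's settings): the
# settings.DATABASES naming convention _resolveDbName() relies on above --
# 'default' and names starting with 'master' serve actions (writes), while
# names starting with 'slave' serve views (reads).
_EXAMPLE_DATABASES_LAYOUT = {
    'default': {},      # always treated as a master
    'master_01': {},    # extra masters: name begins with 'master'
    'slave_01': {},     # read replicas: name begins with 'slave'
    'slave_02': {},
}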
class CoreParameterDAO(CommonDAO):
model = CoreParam
class ApplicationDAO(CommonDAO):
model = Application
class ApplicationMediaDAO(CommonDAO):
model = ApplicationMedia
class ActionDAO(CommonDAO):
model = Action
class MenuDAO(CommonDAO):
model = Menu
class ViewMenuDAO(CommonDAO):
model = ViewMenu
class ServiceMenuDAO(CommonDAO):
model = ServiceMenu
class MenuParamDAO(CommonDAO):
model = MenuParam
class ViewDAO(CommonDAO):
model = View
class WorkflowDAO(CommonDAO):
model = Workflow
class WorkflowDataDAO(CommonDAO):
model = WorkflowData
class ParamDAO(CommonDAO):
model = Param
class WFParamValueDAO(CommonDAO):
model = WFParamValue
class WorkflowViewDAO(CommonDAO):
model = WorkflowView
class SearchIndexDAO(CommonDAO):
model = SearchIndex
class SearchIndexParamDAO(CommonDAO):
model = SearchIndexParam
class WordDAO(CommonDAO):
model = Word
class SearchIndexWordDAO(CommonDAO):
model = SearchIndexWord
class TemplateDAO(CommonDAO):
model = XpTemplate
class ViewTmplDAO(CommonDAO):
model = ViewTmpl
class ServiceMenuConditionDAO(CommonDAO):
model = ServiceMenuCondition
class ViewMenuConditionDAO(CommonDAO):
model = ViewMenuCondition
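# Hedged usage sketch (not part of ximpia): a typical paged read through one
# of the DAO subclasses above. The model field names passed to filter_data()
# are illustrative assumptions; the xp* keys are the paging/ordering hooks
# that filter_data() strips out before querying.
def _example_list_views(ctx):
    dao = ViewDAO(ctx, number_matches=25)
    return dao.filter_data(xpPage=1,
                           xpNumberMatches=25,
                           xpOrderBy=('name',),   # assumed model field
                           isDeleted=False)       # assumed model field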
|
|
# -*- coding: utf-8 -*-
"""
werkzeug.exceptions
~~~~~~~~~~~~~~~~~~~
This module implements a number of Python exceptions you can raise from
within your views to trigger a standard non-200 response.
Usage Example
-------------
::
from werkzeug import BaseRequest, responder
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except HTTPException, e:
return e
As you can see from this example, those exceptions are callable WSGI
applications. For Python 2.4 compatibility they do not extend the response
objects but only the Python exception class, so they are not Werkzeug
response objects. You can, however, get a response object by calling
``get_response()`` on an HTTP exception.
Keep in mind that you have to pass an environment to ``get_response()``
because some errors fetch additional information from the WSGI
environment.
If you want to hook in a different exception page for, say, a 404 status
code, you can add a second except clause for that specific subclass::
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except NotFound, e:
return not_found(request)
except HTTPException, e:
return e
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
from werkzeug._internal import HTTP_STATUS_CODES, _get_environ
class HTTPException(Exception):
"""
Baseclass for all HTTP exceptions. This exception can be called as WSGI
application to render a default error page or you can catch the subclasses
of it independently and render nicer error messages.
"""
code = None
description = None
def __init__(self, description=None):
Exception.__init__(self, '%d %s' % (self.code, self.name))
if description is not None:
self.description = description
@classmethod
def wrap(cls, exception, name=None):
"""This method returns a new subclass of the exception provided that
also is a subclass of `BadRequest`.
"""
class newcls(cls, exception):
def __init__(self, arg=None, description=None):
cls.__init__(self, description)
exception.__init__(self, arg)
newcls.__module__ = sys._getframe(1).f_globals.get('__name__')
newcls.__name__ = name or cls.__name__ + exception.__name__
return newcls
@property
def name(self):
"""The status name."""
return HTTP_STATUS_CODES[self.code]
def get_description(self, environ):
"""Get the description."""
environ = _get_environ(environ)
return self.description
def get_body(self, environ):
"""Get the HTML body."""
return (
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
'<title>%(code)s %(name)s</title>\n'
'<h1>%(name)s</h1>\n'
'%(description)s\n'
) % {
'code': self.code,
'name': escape(self.name),
'description': self.get_description(environ)
}
def get_headers(self, environ):
"""Get a list of headers."""
return [('Content-Type', 'text/html')]
def get_response(self, environ):
"""Get a response object.
:param environ: the environ for the request.
:return: a :class:`BaseResponse` object or a subclass thereof.
"""
# lazily imported for various reasons. For one, we can use the exceptions
# with custom responses (testing exception instances against types) and
# so we don't ever have to import the wrappers, but also because there
# are circular dependencies when bootstrapping the module.
environ = _get_environ(environ)
from werkzeug.wrappers import BaseResponse
headers = self.get_headers(environ)
return BaseResponse(self.get_body(environ), self.code, headers)
def __call__(self, environ, start_response):
"""Call the exception as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
"""
response = self.get_response(environ)
return response(environ, start_response)
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
if 'description' in self.__dict__:
txt = self.description
else:
txt = self.name
return '%d: %s' % (self.code, txt)
def __repr__(self):
return '<%s \'%s\'>' % (self.__class__.__name__, self)
@property
def status_code(self):
# This property is added for more robust consistency between
# BaseResponse and HTTPException.
# DEPRECATED 1.1
# This code is deprecated and will be removed in
# versions >= 1.5 or 2.0
from kay.conf import settings
if settings.DEBUG:
import logging
logging.debug("Deprecation warning. The status_code property on "
"werkzeug HTTPException objects is not part of the "
"standard werkzeug distribution and may be removed in a "
"future version of kay.")
return self.code
class _ProxyException(HTTPException):
"""An HTTP exception that expands renders a WSGI application on error."""
def __init__(self, response):
Exception.__init__(self, 'proxy exception for %r' % response)
self.response = response
def get_response(self, environ):
return self.response
class BadRequest(HTTPException):
"""*400* `Bad Request`
Raise if the browser sends something to the application the application
or server cannot handle.
"""
code = 400
description = (
'<p>The browser (or proxy) sent a request that this server could '
'not understand.</p>'
)
class Unauthorized(HTTPException):
"""*401* `Unauthorized`
Raise if the user is not authorized. Also used if you want to use HTTP
basic auth.
"""
code = 401
description = (
'<p>The server could not verify that you are authorized to access '
'the URL requested. You either supplied the wrong credentials (e.g. '
'a bad password), or your browser doesn\'t understand how to supply '
'the credentials required.</p><p>In case you are allowed to request '
'the document, please check your user-id and password and try '
'again.</p>'
)
class Forbidden(HTTPException):
"""*403* `Forbidden`
Raise if the user doesn't have the permission for the requested resource
but was authenticated.
"""
code = 403
description = (
'<p>You don\'t have the permission to access the requested resource. '
'It is either read-protected or not readable by the server.</p>'
)
class NotFound(HTTPException):
"""*404* `Not Found`
Raise if a resource does not exist and never existed.
"""
code = 404
description = (
'<p>The requested URL was not found on the server.</p>'
'<p>If you entered the URL manually please check your spelling and '
'try again.</p>'
)
class MethodNotAllowed(HTTPException):
"""*405* `Method Not Allowed`
Raise if the server used a method the resource does not handle. For
example `POST` if the resource is view only. Especially useful for REST.
The first argument for this exception should be a list of allowed methods.
Strictly speaking the response would be invalid without an ``Allow`` header
listing the valid methods, which get_headers() builds from that list.
"""
code = 405
def __init__(self, valid_methods=None, description=None):
"""Takes an optional list of valid http methods
starting with werkzeug 0.3 the list will be mandatory."""
HTTPException.__init__(self, description)
self.valid_methods = valid_methods
def get_headers(self, environ):
headers = HTTPException.get_headers(self, environ)
if self.valid_methods:
headers.append(('Allow', ', '.join(self.valid_methods)))
return headers
def get_description(self, environ):
m = escape(environ.get('REQUEST_METHOD', 'GET'))
return '<p>The method %s is not allowed for the requested URL.</p>' % m
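# Hedged usage note (not from the original source): MethodNotAllowed expects
# the permitted methods as its first argument so get_headers() above can emit
# a valid ``Allow`` header, e.g.::
#
#     raise MethodNotAllowed(['GET', 'HEAD'])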
class NotAcceptable(HTTPException):
"""*406* `Not Acceptable`
Raise if the server can't return any content conforming to the
`Accept` headers of the client.
"""
code = 406
description = (
'<p>The resource identified by the request is only capable of '
'generating response entities which have content characteristics '
'not acceptable according to the accept headers sent in the '
'request.</p>'
)
class RequestTimeout(HTTPException):
"""*408* `Request Timeout`
Raise to signal a timeout.
"""
code = 408
description = (
'<p>The server closed the network connection because the browser '
'didn\'t finish the request within the specified time.</p>'
)
class Gone(HTTPException):
"""*410* `Gone`
Raise if a resource existed previously and went away without a new location.
"""
code = 410
description = (
'<p>The requested URL is no longer available on this server and '
'there is no forwarding address.</p><p>If you followed a link '
'from a foreign page, please contact the author of this page.</p>'
)
class LengthRequired(HTTPException):
"""*411* `Length Required`
Raise if the browser submitted data but no ``Content-Length`` header which
is required for the kind of processing the server does.
"""
code = 411
description = (
'<p>A request with this method requires a valid <code>Content-'
'Length</code> header.</p>'
)
class PreconditionFailed(HTTPException):
"""*412* `Precondition Failed`
Status code used in combination with ``If-Match``, ``If-None-Match``, or
``If-Unmodified-Since``.
"""
code = 412
description = (
'<p>The precondition on the request for the URL failed positive '
'evaluation.</p>'
)
class RequestEntityTooLarge(HTTPException):
"""*413* `Request Entity Too Large`
The status code one should return if the data submitted exceeded a given
limit.
"""
code = 413
description = (
'<p>The data value transmitted exceeds the capacity limit.</p>'
)
class RequestURITooLarge(HTTPException):
"""*414* `Request URI Too Large`
Like *413* but for URLs that are too long.
"""
code = 414
description = (
'<p>The length of the requested URL exceeds the capacity limit '
'for this server. The request cannot be processed.</p>'
)
class UnsupportedMediaType(HTTPException):
"""*415* `Unsupported Media Type`
The status code returned if the server is unable to handle the media type
the client transmitted.
"""
code = 415
description = (
'<p>The server does not support the media type transmitted in '
'the request.</p>'
)
class InternalServerError(HTTPException):
"""*500* `Internal Server Error`
Raise if an internal server error occurred. This is a good fallback if an
unknown error occurred in the dispatcher.
"""
code = 500
description = (
'<p>The server encountered an internal error and was unable to '
'complete your request. Either the server is overloaded or there '
'is an error in the application.</p>'
)
class NotImplemented(HTTPException):
"""*501* `Not Implemented`
Raise if the application does not support the action requested by the
browser.
"""
code = 501
description = (
'<p>The server does not support the action requested by the '
'browser.</p>'
)
class BadGateway(HTTPException):
"""*502* `Bad Gateway`
If you do proxying in your application you should return this status code
if you received an invalid response from the upstream server it accessed
in attempting to fulfill the request.
"""
code = 502
description = (
'<p>The proxy server received an invalid response from an upstream '
'server.</p>'
)
class ServiceUnavailable(HTTPException):
"""*503* `Service Unavailable`
Status code you should return if a service is temporarily unavailable.
"""
code = 503
description = (
'<p>The server is temporarily unable to service your request due to '
'maintenance downtime or capacity problems. Please try again '
'later.</p>'
)
default_exceptions = {}
__all__ = ['HTTPException']
def _find_exceptions():
for name, obj in globals().iteritems():
try:
if getattr(obj, 'code', None) is not None:
default_exceptions[obj.code] = obj
__all__.append(obj.__name__)
except TypeError: # pragma: no cover
continue
_find_exceptions()
del _find_exceptions
#: raised by the request functions if they were unable to decode the
#: incoming data properly.
HTTPUnicodeError = BadRequest.wrap(UnicodeError, 'HTTPUnicodeError')
class Aborter(object):
"""
When passed a dict of code -> exception items it can be used as a
callable that raises exceptions. If the first argument to the
callable is an integer it will be looked up in the mapping; if it's
a WSGI application it will be raised wrapped in a proxy exception.
The rest of the arguments are forwarded to the exception constructor.
"""
def __init__(self, mapping=None, extra=None):
if mapping is None:
mapping = default_exceptions
self.mapping = dict(mapping)
if extra is not None:
self.mapping.update(extra)
def __call__(self, code, *args, **kwargs):
if not args and not kwargs and not isinstance(code, (int, long)):
raise _ProxyException(code)
if code not in self.mapping:
raise LookupError('no exception for %r' % code)
raise self.mapping[code](*args, **kwargs)
abort = Aborter()
# imported here because of circular dependencies of werkzeug.utils
from werkzeug.utils import escape
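# Hedged usage sketch (not part of werkzeug): the module-level ``abort``
# callable defined above maps an integer status code to the matching
# HTTPException subclass and raises it.
def _example_abort_to_response():
    try:
        abort(404)                   # looks up NotFound in default_exceptions
    except NotFound, e:
        return e.get_response({})    # an empty environ suffices for NotFound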
|
|
import numpy as np
import pandas as pd
import pytest
from threading import Lock
from multiprocessing.pool import ThreadPool
import dask.array as da
import dask.dataframe as dd
from dask.dataframe._compat import tm
from dask.dataframe.io.io import _meta_from_array
from dask.delayed import Delayed, delayed
from dask.utils import tmpfile
from dask.dataframe.utils import assert_eq, is_categorical_dtype
####################
# Arrays and BColz #
####################
def test_meta_from_array():
x = np.array([[1, 2], [3, 4]], dtype=np.int64)
res = _meta_from_array(x)
assert isinstance(res, pd.DataFrame)
assert res[0].dtype == np.int64
assert res[1].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index([0, 1]))
x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float64)
res = _meta_from_array(x, columns=["a", "b"])
assert isinstance(res, pd.DataFrame)
assert res["a"].dtype == np.float64
assert res["b"].dtype == np.float64
tm.assert_index_equal(res.columns, pd.Index(["a", "b"]))
with pytest.raises(ValueError):
_meta_from_array(x, columns=["a", "b", "c"])
np.random.seed(42)
x = np.random.rand(201, 2)
x = dd.from_array(x, chunksize=50, columns=["a", "b"])
assert len(x.divisions) == 6  # 5 partitions plus the final division boundary
def test_meta_from_1darray():
x = np.array([1.0, 2.0, 3.0], dtype=np.float64)
res = _meta_from_array(x)
assert isinstance(res, pd.Series)
assert res.dtype == np.float64
x = np.array([1, 2, 3], dtype=np.object_)
res = _meta_from_array(x, columns="x")
assert isinstance(res, pd.Series)
assert res.name == "x"
assert res.dtype == np.object_
x = np.array([1, 2, 3], dtype=np.object_)
res = _meta_from_array(x, columns=["x"])
assert isinstance(res, pd.DataFrame)
assert res["x"].dtype == np.object_
tm.assert_index_equal(res.columns, pd.Index(["x"]))
with pytest.raises(ValueError):
_meta_from_array(x, columns=["a", "b"])
def test_meta_from_recarray():
x = np.array(
[(i, i * 10) for i in range(10)], dtype=[("a", np.float64), ("b", np.int64)]
)
res = _meta_from_array(x)
assert isinstance(res, pd.DataFrame)
assert res["a"].dtype == np.float64
assert res["b"].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index(["a", "b"]))
res = _meta_from_array(x, columns=["b", "a"])
assert isinstance(res, pd.DataFrame)
assert res["a"].dtype == np.float64
assert res["b"].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index(["b", "a"]))
with pytest.raises(ValueError):
_meta_from_array(x, columns=["a", "b", "c"])
def test_from_array():
x = np.arange(10 * 3).reshape(10, 3)
d = dd.from_array(x, chunksize=4)
assert isinstance(d, dd.DataFrame)
tm.assert_index_equal(d.columns, pd.Index([0, 1, 2]))
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
d = dd.from_array(x, chunksize=4, columns=list("abc"))
assert isinstance(d, dd.DataFrame)
tm.assert_index_equal(d.columns, pd.Index(["a", "b", "c"]))
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
with pytest.raises(ValueError):
dd.from_array(np.ones(shape=(10, 10, 10)))
def test_from_array_with_record_dtype():
x = np.array([(i, i * 10) for i in range(10)], dtype=[("a", "i4"), ("b", "i4")])
d = dd.from_array(x, chunksize=4)
assert isinstance(d, dd.DataFrame)
assert list(d.columns) == ["a", "b"]
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().to_records(index=False) == x).all()
def test_from_bcolz_multiple_threads():
bcolz = pytest.importorskip("bcolz")
pool = ThreadPool(processes=5)
def check(i):
t = bcolz.ctable(
[[1, 2, 3], [1.0, 2.0, 3.0], ["a", "b", "a"]], names=["x", "y", "a"]
)
d = dd.from_bcolz(t, chunksize=2)
assert d.npartitions == 2
assert is_categorical_dtype(d.dtypes["a"])
assert list(d.x.compute(scheduler="sync")) == [1, 2, 3]
assert list(d.a.compute(scheduler="sync")) == ["a", "b", "a"]
d = dd.from_bcolz(t, chunksize=2, index="x")
L = list(d.index.compute(scheduler="sync"))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert sorted(dd.from_bcolz(t, chunksize=2).dask) == sorted(
dd.from_bcolz(t, chunksize=2).dask
)
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(
dd.from_bcolz(t, chunksize=3).dask
)
pool.map(check, range(5))
def test_from_bcolz():
bcolz = pytest.importorskip("bcolz")
t = bcolz.ctable(
[[1, 2, 3], [1.0, 2.0, 3.0], ["a", "b", "a"]], names=["x", "y", "a"]
)
d = dd.from_bcolz(t, chunksize=2)
assert d.npartitions == 2
assert is_categorical_dtype(d.dtypes["a"])
assert list(d.x.compute(scheduler="sync")) == [1, 2, 3]
assert list(d.a.compute(scheduler="sync")) == ["a", "b", "a"]
L = list(d.index.compute(scheduler="sync"))
assert L == [0, 1, 2]
d = dd.from_bcolz(t, chunksize=2, index="x")
L = list(d.index.compute(scheduler="sync"))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert sorted(dd.from_bcolz(t, chunksize=2).dask) == sorted(
dd.from_bcolz(t, chunksize=2).dask
)
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(
dd.from_bcolz(t, chunksize=3).dask
)
dsk = dd.from_bcolz(t, chunksize=3).dask
t.append((4, 4.0, "b"))
t.flush()
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(dsk)
def test_from_bcolz_no_lock():
bcolz = pytest.importorskip("bcolz")
locktype = type(Lock())
t = bcolz.ctable(
[[1, 2, 3], [1.0, 2.0, 3.0], ["a", "b", "a"]], names=["x", "y", "a"], chunklen=2
)
a = dd.from_bcolz(t, chunksize=2)
b = dd.from_bcolz(t, chunksize=2, lock=True)
c = dd.from_bcolz(t, chunksize=2, lock=False)
assert_eq(a, b)
assert_eq(a, c)
assert not any(isinstance(item, locktype) for v in c.dask.values() for item in v)
def test_from_bcolz_filename():
bcolz = pytest.importorskip("bcolz")
with tmpfile(".bcolz") as fn:
t = bcolz.ctable(
[[1, 2, 3], [1.0, 2.0, 3.0], ["a", "b", "a"]],
names=["x", "y", "a"],
rootdir=fn,
)
t.flush()
d = dd.from_bcolz(fn, chunksize=2)
assert list(d.x.compute()) == [1, 2, 3]
def test_from_bcolz_column_order():
bcolz = pytest.importorskip("bcolz")
t = bcolz.ctable(
[[1, 2, 3], [1.0, 2.0, 3.0], ["a", "b", "a"]], names=["x", "y", "a"]
)
df = dd.from_bcolz(t, chunksize=2)
assert list(df.loc[0].compute().columns) == ["x", "y", "a"]
def test_from_pandas_dataframe():
a = list("aaaaaaabbbbbbbbccccccc")
df = pd.DataFrame(
dict(a=a, b=np.random.randn(len(a))),
index=pd.date_range(start="20120101", periods=len(a)),
)
ddf = dd.from_pandas(df, 3)
assert len(ddf.dask) == 3
assert len(ddf.divisions) == len(ddf.dask) + 1
assert isinstance(ddf.divisions[0], type(df.index[0]))
tm.assert_frame_equal(df, ddf.compute())
ddf = dd.from_pandas(df, chunksize=8)
msg = "Exactly one of npartitions and chunksize must be specified."
with pytest.raises(ValueError) as err:
dd.from_pandas(df, npartitions=2, chunksize=2)
assert msg in str(err.value)
with pytest.raises((ValueError, AssertionError)) as err:
dd.from_pandas(df)
assert msg in str(err.value)
assert len(ddf.dask) == 3
assert len(ddf.divisions) == len(ddf.dask) + 1
assert isinstance(ddf.divisions[0], type(df.index[0]))
tm.assert_frame_equal(df, ddf.compute())
def test_from_pandas_small():
df = pd.DataFrame({"x": [1, 2, 3]})
for i in [1, 2, 30]:
a = dd.from_pandas(df, i)
assert len(a.compute()) == 3
assert a.divisions[0] == 0
assert a.divisions[-1] == 2
a = dd.from_pandas(df, chunksize=i)
assert len(a.compute()) == 3
assert a.divisions[0] == 0
assert a.divisions[-1] == 2
for sort in [True, False]:
for i in [0, 2]:
df = pd.DataFrame({"x": [0] * i})
ddf = dd.from_pandas(df, npartitions=5, sort=sort)
assert_eq(df, ddf)
s = pd.Series([0] * i, name="x", dtype=int)
ds = dd.from_pandas(s, npartitions=5, sort=sort)
assert_eq(s, ds)
@pytest.mark.parametrize("n", [1, 2, 4, 5])
def test_from_pandas_npartitions_is_accurate(n):
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5, 6], "y": list("abdabd")}, index=[10, 20, 30, 40, 50, 60]
)
assert dd.from_pandas(df, npartitions=n).npartitions <= n
def test_from_pandas_series():
n = 20
s = pd.Series(np.random.randn(n), index=pd.date_range(start="20120101", periods=n))
ds = dd.from_pandas(s, 3)
assert len(ds.dask) == 3
assert len(ds.divisions) == len(ds.dask) + 1
assert isinstance(ds.divisions[0], type(s.index[0]))
tm.assert_series_equal(s, ds.compute())
ds = dd.from_pandas(s, chunksize=8)
assert len(ds.dask) == 3
assert len(ds.divisions) == len(ds.dask) + 1
assert isinstance(ds.divisions[0], type(s.index[0]))
tm.assert_series_equal(s, ds.compute())
def test_from_pandas_non_sorted():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
ddf = dd.from_pandas(df, npartitions=2, sort=False)
assert not ddf.known_divisions
assert_eq(df, ddf)
ddf = dd.from_pandas(df, chunksize=2, sort=False)
assert not ddf.known_divisions
assert_eq(df, ddf)
def test_from_pandas_single_row():
df = pd.DataFrame({"x": [1]}, index=[1])
ddf = dd.from_pandas(df, npartitions=1)
assert ddf.divisions == (1, 1)
assert_eq(ddf, df)
def test_from_pandas_with_datetime_index():
df = pd.DataFrame(
{
"Date": [
"2015-08-28",
"2015-08-27",
"2015-08-26",
"2015-08-25",
"2015-08-24",
"2015-08-21",
"2015-08-20",
"2015-08-19",
"2015-08-18",
],
"Val": list(range(9)),
}
)
df.Date = df.Date.astype("datetime64[ns]")
ddf = dd.from_pandas(df, 2)
assert_eq(df, ddf)
ddf = dd.from_pandas(df, chunksize=2)
assert_eq(df, ddf)
def test_DataFrame_from_dask_array():
x = da.ones((10, 3), chunks=(4, 2))
df = dd.from_dask_array(x, ["a", "b", "c"])
assert isinstance(df, dd.DataFrame)
tm.assert_index_equal(df.columns, pd.Index(["a", "b", "c"]))
assert list(df.divisions) == [0, 4, 8, 9]
assert (df.compute(scheduler="sync").values == x.compute(scheduler="sync")).all()
# dd.from_array should re-route to from_dask_array
df2 = dd.from_array(x, columns=["a", "b", "c"])
assert isinstance(df, dd.DataFrame)
tm.assert_index_equal(df2.columns, df.columns)
assert df2.divisions == df.divisions
def test_Series_from_dask_array():
x = da.ones(10, chunks=4)
ser = dd.from_dask_array(x, "a")
assert isinstance(ser, dd.Series)
assert ser.name == "a"
assert list(ser.divisions) == [0, 4, 8, 9]
assert (ser.compute(scheduler="sync").values == x.compute(scheduler="sync")).all()
ser = dd.from_dask_array(x)
assert isinstance(ser, dd.Series)
assert ser.name is None
# dd.from_array should re-route to from_dask_array
ser2 = dd.from_array(x)
assert isinstance(ser2, dd.Series)
assert_eq(ser, ser2)
@pytest.mark.parametrize("as_frame", [True, False])
def test_from_dask_array_index(as_frame):
s = dd.from_pandas(pd.Series(range(10), index=list("abcdefghij")), npartitions=3)
if as_frame:
s = s.to_frame()
result = dd.from_dask_array(s.values, index=s.index)
assert_eq(s, result)
def test_from_dask_array_index_raises():
x = da.random.uniform(size=(10,), chunks=(5,))
with pytest.raises(ValueError) as m:
dd.from_dask_array(x, index=pd.Index(np.arange(10)))
assert m.match("must be an instance")
a = dd.from_pandas(pd.Series(range(12)), npartitions=2)
b = dd.from_pandas(pd.Series(range(12)), npartitions=4)
with pytest.raises(ValueError) as m:
dd.from_dask_array(a.values, index=b.index)
assert m.match("index")
assert m.match("number")
assert m.match("blocks")
assert m.match("4 != 2")
def test_from_dask_array_compat_numpy_array():
x = da.ones((3, 3, 3), chunks=2)
with pytest.raises(ValueError):
dd.from_dask_array(x) # dask
with pytest.raises(ValueError):
dd.from_array(x.compute()) # numpy
x = da.ones((10, 3), chunks=(3, 3))
d1 = dd.from_dask_array(x) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index([0, 1, 2]))
d2 = dd.from_array(x.compute()) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index([0, 1, 2]))
with pytest.raises(ValueError):
dd.from_dask_array(x, columns=["a"]) # dask
with pytest.raises(ValueError):
dd.from_array(x.compute(), columns=["a"]) # numpy
d1 = dd.from_dask_array(x, columns=["a", "b", "c"]) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index(["a", "b", "c"]))
d2 = dd.from_array(x.compute(), columns=["a", "b", "c"]) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index(["a", "b", "c"]))
def test_from_dask_array_compat_numpy_array_1d():
x = da.ones(10, chunks=3)
d1 = dd.from_dask_array(x) # dask
assert isinstance(d1, dd.Series)
assert (d1.compute().values == x.compute()).all()
assert d1.name is None
d2 = dd.from_array(x.compute()) # numpy
assert isinstance(d1, dd.Series)
assert (d2.compute().values == x.compute()).all()
assert d2.name is None
d1 = dd.from_dask_array(x, columns="name") # dask
assert isinstance(d1, dd.Series)
assert (d1.compute().values == x.compute()).all()
assert d1.name == "name"
d2 = dd.from_array(x.compute(), columns="name") # numpy
assert isinstance(d1, dd.Series)
assert (d2.compute().values == x.compute()).all()
assert d2.name == "name"
# passing list via columns results in DataFrame
d1 = dd.from_dask_array(x, columns=["name"]) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index(["name"]))
d2 = dd.from_array(x.compute(), columns=["name"]) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index(["name"]))
def test_from_dask_array_struct_dtype():
x = np.array([(1, "a"), (2, "b")], dtype=[("a", "i4"), ("b", "object")])
y = da.from_array(x, chunks=(1,))
df = dd.from_dask_array(y)
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
assert_eq(df, pd.DataFrame(x))
assert_eq(
dd.from_dask_array(y, columns=["b", "a"]), pd.DataFrame(x, columns=["b", "a"])
)
def test_from_dask_array_unknown_chunks():
# Series
dx = da.Array(
{("x", 0): np.arange(5), ("x", 1): np.arange(5, 11)},
"x",
((np.nan, np.nan),),
np.arange(1).dtype,
)
df = dd.from_dask_array(dx)
assert isinstance(df, dd.Series)
assert not df.known_divisions
assert_eq(df, pd.Series(np.arange(11)), check_index=False)
# DataFrame
dsk = {("x", 0, 0): np.random.random((2, 3)), ("x", 1, 0): np.random.random((5, 3))}
dx = da.Array(dsk, "x", ((np.nan, np.nan), (3,)), np.float64)
df = dd.from_dask_array(dx)
assert isinstance(df, dd.DataFrame)
assert not df.known_divisions
assert_eq(df, pd.DataFrame(dx.compute()), check_index=False)
# Unknown width
dx = da.Array(dsk, "x", ((np.nan, np.nan), (np.nan,)), np.float64)
with pytest.raises(ValueError):
df = dd.from_dask_array(dx)
def test_to_bag():
pytest.importorskip("dask.bag")
a = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
)
ddf = dd.from_pandas(a, 2)
assert ddf.to_bag().compute() == list(a.itertuples(False))
assert ddf.to_bag(True).compute() == list(a.itertuples(True))
assert ddf.x.to_bag(True).compute() == list(a.x.iteritems())
assert ddf.x.to_bag().compute() == list(a.x)
def test_to_records():
pytest.importorskip("dask.array")
from dask.array.utils import assert_eq
df = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
)
ddf = dd.from_pandas(df, 2)
assert_eq(df.to_records(), ddf.to_records())
@pytest.mark.parametrize("lengths", [[2, 2], True])
def test_to_records_with_lengths(lengths):
pytest.importorskip("dask.array")
from dask.array.utils import assert_eq
df = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
)
ddf = dd.from_pandas(df, 2)
result = ddf.to_records(lengths=lengths)
assert_eq(df.to_records(), result)
assert isinstance(result, da.Array)
expected_chunks = ((2, 2),)
assert result.chunks == expected_chunks
def test_to_records_raises():
pytest.importorskip("dask.array")
df = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
)
ddf = dd.from_pandas(df, 2)
with pytest.raises(ValueError):
ddf.to_records(lengths=[2, 2, 2])
pytest.fail("3 != 2")
with pytest.raises(ValueError):
ddf.to_records(lengths=5)
pytest.fail("Unexpected value")
def test_from_delayed():
df = pd.DataFrame(data=np.random.normal(size=(10, 4)), columns=list("abcd"))
parts = [df.iloc[:1], df.iloc[1:3], df.iloc[3:6], df.iloc[6:10]]
dfs = [delayed(parts.__getitem__)(i) for i in range(4)]
meta = dfs[0].compute()
my_len = lambda x: pd.Series([len(x)])
for divisions in [None, [0, 1, 3, 6, 10]]:
ddf = dd.from_delayed(dfs, meta=meta, divisions=divisions)
assert_eq(ddf, df)
assert list(ddf.map_partitions(my_len).compute()) == [1, 2, 3, 4]
assert ddf.known_divisions == (divisions is not None)
s = dd.from_delayed([d.a for d in dfs], meta=meta.a, divisions=divisions)
assert_eq(s, df.a)
assert list(s.map_partitions(my_len).compute()) == [1, 2, 3, 4]
assert ddf.known_divisions == (divisions is not None)
meta2 = [(c, "f8") for c in df.columns]
assert_eq(dd.from_delayed(dfs, meta=meta2), df)
assert_eq(dd.from_delayed([d.a for d in dfs], meta=("a", "f8")), df.a)
with pytest.raises(ValueError):
dd.from_delayed(dfs, meta=meta, divisions=[0, 1, 3, 6])
with pytest.raises(ValueError) as e:
dd.from_delayed(dfs, meta=meta.a).compute()
assert str(e.value).startswith("Metadata mismatch found in `from_delayed`")
def test_from_delayed_misordered_meta():
df = pd.DataFrame(
columns=["(1)", "(2)", "date", "ent", "val"],
data=[range(i * 5, i * 5 + 5) for i in range(3)],
index=range(3),
)
# meta with different order for columns
misordered_meta = pd.DataFrame(
columns=["date", "ent", "val", "(1)", "(2)"], data=[range(5)]
)
ddf = dd.from_delayed([delayed(lambda: df)()], meta=misordered_meta)
with pytest.raises(ValueError) as info:
# produces dataframe which does not match meta
ddf.reset_index().compute(scheduler="sync")
msg = (
"The columns in the computed data do not match the columns in the"
" provided metadata"
)
assert msg in str(info.value)
def test_from_delayed_sorted():
a = pd.DataFrame({"x": [1, 2]}, index=[1, 10])
b = pd.DataFrame({"x": [4, 1]}, index=[100, 200])
A = dd.from_delayed([delayed(a), delayed(b)], divisions="sorted")
assert A.known_divisions
assert A.divisions == (1, 100, 200)
def test_to_delayed():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
# Frame
a, b = ddf.to_delayed()
assert isinstance(a, Delayed)
assert isinstance(b, Delayed)
assert_eq(a.compute(), df.iloc[:2])
# Scalar
x = ddf.x.sum()
dx = x.to_delayed()
assert isinstance(dx, Delayed)
assert_eq(dx.compute(), x)
def test_to_delayed_optimize_graph():
df = pd.DataFrame({"x": list(range(20))})
ddf = dd.from_pandas(df, npartitions=20)
ddf2 = (ddf + 1).loc[:2]
# Frame
d = ddf2.to_delayed()[0]
assert len(d.dask) < 20
d2 = ddf2.to_delayed(optimize_graph=False)[0]
assert sorted(d2.dask) == sorted(ddf2.dask)
assert_eq(ddf2.get_partition(0), d.compute())
assert_eq(ddf2.get_partition(0), d2.compute())
# Scalar
x = ddf2.x.sum()
dx = x.to_delayed()
dx2 = x.to_delayed(optimize_graph=False)
assert len(dx.dask) < len(dx2.dask)
assert_eq(dx.compute(), dx2.compute())
def test_from_dask_array_index_dtype():
x = da.ones((10,), chunks=(5,))
df = pd.DataFrame(
{
"date": pd.date_range("2019-01-01", periods=10, freq="1T"),
"val1": list(range(10)),
}
)
ddf = dd.from_pandas(df, npartitions=2).set_index("date")
ddf2 = dd.from_dask_array(x, index=ddf.index, columns="val2")
assert ddf.index.dtype == ddf2.index.dtype
assert ddf.index.name == ddf2.index.name
df = pd.DataFrame({"idx": np.arange(0, 1, 0.1), "val1": list(range(10))})
ddf = dd.from_pandas(df, npartitions=2).set_index("idx")
ddf2 = dd.from_dask_array(x, index=ddf.index, columns="val2")
assert ddf.index.dtype == ddf2.index.dtype
assert ddf.index.name == ddf2.index.name
|
|
#
# Copyright (c) 2009-2010 Gintautas Miliauskas
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Pickle field implementation for Django.
Modified for Evennia by Griatch.
"""
from ast import literal_eval
from copy import deepcopy
from base64 import b64encode, b64decode
from zlib import compress, decompress
# import six  # six is actually a PyPI package, not part of the default stdlib
import django
from django.core.exceptions import ValidationError
from django.db import models
# django 1.5 introduces force_text instead of force_unicode
from django.forms import CharField, Textarea
from django.forms.util import flatatt
from django.utils.html import format_html
from src.utils.dbserialize import from_pickle, to_pickle
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
# python 3.x does not have cPickle module
try:
from cPickle import loads, dumps # cpython 2.x
except ImportError:
from pickle import loads, dumps # cpython 3.x, other interpreters
DEFAULT_PROTOCOL = 2
class PickledObject(str):
"""
A subclass of string so it can be told whether a string is a pickled
object or not (if the object is an instance of this class then it must
[well, should] be a pickled one).
Only really useful for passing pre-encoded values to ``default``
with ``dbsafe_encode``, not that doing so is necessary. If you
remove PickledObject and its references, you won't be able to pass
in pre-encoded values anymore, but you can always just pass in the
python objects themselves.
"""
class _ObjectWrapper(object):
"""
    A class used to wrap objects that have properties that may clash with the
    ORM internals.
    For example, objects with the `prepare_database_save` attribute, such as
    `django.db.Model` subclasses, won't work under certain conditions, and the
    same applies to retrieving any `callable` object.
"""
__slots__ = ('_obj',)
def __init__(self, obj):
self._obj = obj
def wrap_conflictual_object(obj):
if hasattr(obj, 'prepare_database_save') or callable(obj):
obj = _ObjectWrapper(obj)
return obj
def dbsafe_encode(value, compress_object=False, pickle_protocol=DEFAULT_PROTOCOL):
# We use deepcopy() here to avoid a problem with cPickle, where dumps
# can generate different character streams for same lookup value if
# they are referenced differently.
# The reason this is important is because we do all of our lookups as
# simple string matches, thus the character streams must be the same
# for the lookups to work properly. See tests.py for more information.
value = dumps(deepcopy(value), protocol=pickle_protocol)
if compress_object:
value = compress(value)
value = b64encode(value).decode() # decode bytes to str
return PickledObject(value)
def dbsafe_decode(value, compress_object=False):
value = value.encode() # encode str to bytes
value = b64decode(value)
if compress_object:
value = decompress(value)
return loads(value)
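# Illustrative sketch (not part of the original module): a round-trip through
# dbsafe_encode/dbsafe_decode, the same path PickledObjectField uses when
# saving and loading values. The helper name below is hypothetical.
def _dbsafe_roundtrip_example():
    original = {"answer": 42, "items": [1, 2, 3]}
    encoded = dbsafe_encode(original, compress_object=True)
    assert isinstance(encoded, PickledObject)  # encoded form is a str subclass
    decoded = dbsafe_decode(encoded, compress_object=True)
    assert decoded == original
    return decoded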
def _get_subfield_superclass():
# hardcore trick to support django < 1.3 - there was something wrong with
# inheritance and SubfieldBase before django 1.3
# see https://github.com/django/django/commit/222c73261650201f5ce99e8dd4b1ce0d30a69eb4
if django.VERSION < (1,3):
return models.Field
# mimic six.with_metaclass
meta = models.SubfieldBase
base = models.Field
return meta("NewBase", (base,), {})
#return six.with_metaclass(models.SubfieldBase, models.Field)
class PickledWidget(Textarea):
def render(self, name, value, attrs=None):
value = repr(value)
try:
literal_eval(value)
except ValueError:
return value
final_attrs = self.build_attrs(attrs, name=name)
return format_html('<textarea{0}>\r\n{1}</textarea>',
flatatt(final_attrs),
force_text(value))
class PickledFormField(CharField):
widget = PickledWidget
default_error_messages = dict(CharField.default_error_messages)
default_error_messages['invalid'] = (
"This is not a Python Literal. You can store things like strings, "
"integers, or floats, but you must do it by typing them as you would "
"type them in the Python Interpreter. For instance, strings must be "
"surrounded by quote marks. We have converted it to a string for your "
"convenience. If it is acceptable, please hit save again.")
def __init__(self, *args, **kwargs):
# This needs to fall through to literal_eval.
kwargs['required'] = False
super(PickledFormField, self).__init__(*args, **kwargs)
def clean(self, value):
if value == '':
# Field was left blank. Make this None.
value = 'None'
try:
return literal_eval(value)
except (ValueError, SyntaxError):
raise ValidationError(self.error_messages['invalid'])
class PickledObjectField(_get_subfield_superclass()):
"""
A field that will accept *any* python object and store it in the
database. PickledObjectField will optionally compress its values if
declared with the keyword argument ``compress=True``.
Does not actually encode and compress ``None`` objects (although you
can still do lookups using None). This way, it is still possible to
use the ``isnull`` lookup type correctly.
"""
__metaclass__ = models.SubfieldBase # for django < 1.3
def __init__(self, *args, **kwargs):
self.compress = kwargs.pop('compress', False)
self.protocol = kwargs.pop('protocol', DEFAULT_PROTOCOL)
super(PickledObjectField, self).__init__(*args, **kwargs)
def get_default(self):
"""
Returns the default value for this field.
The default implementation on models.Field calls force_unicode
on the default, which means you can't set arbitrary Python
objects as the default. To fix this, we just return the value
without calling force_unicode on it. Note that if you set a
callable as a default, the field will still call it. It will
*not* try to pickle and encode it.
"""
if self.has_default():
if callable(self.default):
return self.default()
return self.default
# If the field doesn't have a default, then we punt to models.Field.
return super(PickledObjectField, self).get_default()
def to_python(self, value):
"""
B64decode and unpickle the object, optionally decompressing it.
If an error is raised in de-pickling and we're sure the value is
a definite pickle, the error is allowed to propagate. If we
aren't sure if the value is a pickle or not, then we catch the
error and return the original value instead.
"""
if value is not None:
try:
value = dbsafe_decode(value, self.compress)
except:
                # If the value is a definite pickle and an error is raised in
                # de-pickling, it should be allowed to propagate.
if isinstance(value, PickledObject):
raise
else:
if isinstance(value, _ObjectWrapper):
return value._obj
return value
def formfield(self, **kwargs):
return PickledFormField(**kwargs)
def pre_save(self, model_instance, add):
value = super(PickledObjectField, self).pre_save(model_instance, add)
return wrap_conflictual_object(value)
def get_db_prep_value(self, value, connection=None, prepared=False):
"""
Pickle and b64encode the object, optionally compressing it.
The pickling protocol is specified explicitly (by default 2),
rather than as -1 or HIGHEST_PROTOCOL, because we don't want the
protocol to change over time. If it did, ``exact`` and ``in``
lookups would likely fail, since pickle would now be generating
a different string.
"""
if value is not None and not isinstance(value, PickledObject):
# We call force_text here explicitly, so that the encoded string
# isn't rejected by the postgresql_psycopg2 backend. Alternatively,
# we could have just registered PickledObject with the psycopg
# marshaller (telling it to store it like it would a string), but
# since both of these methods result in the same value being stored,
# doing things this way is much easier.
value = force_text(dbsafe_encode(value, self.compress, self.protocol))
return value
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_db_prep_value(value)
def get_internal_type(self):
return 'TextField'
def get_db_prep_lookup(self, lookup_type, value, connection=None, prepared=False):
if lookup_type not in ['exact', 'in', 'isnull']:
raise TypeError('Lookup type %s is not supported.' % lookup_type)
# The Field model already calls get_db_prep_value before doing the
# actual lookup, so all we need to do is limit the lookup types.
return super(PickledObjectField, self).get_db_prep_lookup(
lookup_type, value, connection=connection, prepared=prepared)
# South support; see http://south.aeracode.org/docs/tutorial/part4.html#simple-inheritance
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], [r"^src\.utils\.picklefield\.PickledObjectField"])
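# Illustrative sketch (not part of this module): how a model might declare a
# compressed pickle column using the field above. Assumes a configured Django
# project; the model name and app label below are hypothetical.
class ExampleSettings(models.Model):
    owner = models.CharField(max_length=80)
    preferences = PickledObjectField(compress=True, null=True)
    class Meta:
        app_label = "example"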
|
|
"""
This module provides two classes for managing LayerArtists with Qt.
The LayerArtistModel implements a QtModel to interface with a list of
LayerManagers.
The LayerArtistView is a list widget that displays
these layers, and provides GUI access to the model
"""
# pylint: disable=I0011, W0613, R0913, R0904, W0611
from __future__ import absolute_import, division, print_function
from ..external.qt.QtGui import (QColor,
QListView, QAbstractItemView, QAction,
QPalette, QKeySequence)
from ..external.qt.QtCore import (Qt, QAbstractListModel, QModelIndex,
QSize, QTimer)
from .qtutil import (layer_artist_icon, nonpartial, PythonListModel)
from .mime import PyMimeData, LAYERS_MIME_TYPE
from ..clients.layer_artist import LayerArtistBase, LayerArtistContainer
from .widgets.style_dialog import StyleDialog
class LayerArtistModel(PythonListModel):
"""A Qt model to manage a list of LayerArtists. Multiple
views into this model should stay in sync, thanks to Qt.
To properly maintain sync, any client that uses
this list of LayerArtists should always edit the
list in-place (so that the list managed by this model
and the client are the same object)
"""
def __init__(self, artists, parent=None):
super(LayerArtistModel, self).__init__(artists, parent)
self.artists = artists
def data(self, index, role):
"""Retrieve data at each index"""
if not index.isValid():
return None
if role == Qt.DecorationRole:
art = self.artists[index.row()]
result = layer_artist_icon(art)
return result
if role == Qt.CheckStateRole:
art = self.artists[index.row()]
result = Qt.Checked if art.visible else Qt.Unchecked
return result
if role == Qt.ToolTipRole:
art = self.artists[index.row()]
if not art.enabled:
return art.disabled_message
return super(LayerArtistModel, self).data(index, role)
def flags(self, index):
result = super(LayerArtistModel, self).flags(index)
if index.isValid():
result = (result | Qt.ItemIsEditable | Qt.ItemIsDragEnabled |
Qt.ItemIsUserCheckable)
else: # only drop between rows, where index isn't valid
result = (result | Qt.ItemIsDropEnabled)
return result
def setData(self, index, value, role):
if not index.isValid():
return False
if role == Qt.EditRole:
self.change_label(index.row(), str(value))
if role == Qt.CheckStateRole:
vis = value == Qt.Checked
self.artists[index.row()].visible = vis
self.artists[index.row()].redraw()
self.dataChanged.emit(index, index)
return True
def _remove_row(self, row):
art = self.artists.pop(row)
art.clear()
art.redraw()
def mimeTypes(self):
return [PyMimeData.MIME_TYPE, LAYERS_MIME_TYPE]
def mimeData(self, indexes):
arts = [self.artists[index.row()] for index in indexes]
layers = [a.layer for a in arts]
if len(indexes) == 0:
return 0
return PyMimeData(arts, **{LAYERS_MIME_TYPE: layers})
def supportedDropActions(self):
return Qt.MoveAction
def dropMimeData(self, data, action, row, column, index):
data = data.data(PyMimeData.MIME_TYPE)
# list of a single artist. Move
if isinstance(data, list) and len(data) == 1 and \
isinstance(data[0], LayerArtistBase) and \
data[0] in self.artists:
self.move_artist(data[0], row)
return True
return False
def move_artist(self, artist, row):
"""Move an artist before the entry in row
Row could be the end of the list (-> put it at the end)
"""
        if len(self.artists) < 2:  # can't rearrange length 0 or 1 list
return
try:
loc = self.artists.index(artist)
except ValueError:
return
dest = row
if not self.beginMoveRows(QModelIndex(), loc, loc,
QModelIndex(), dest):
return
if dest >= loc:
row -= 1
self.artists.pop(loc)
self.artists.insert(row, artist)
self._update_zorder()
self.endMoveRows()
def _update_zorder(self):
"""Redistribute zorders to match location in the list"""
zs = [m.zorder for m in self.artists]
zs = reversed(sorted(zs))
for z, m in zip(zs, self.artists):
m.zorder = z
if len(self.artists) > 0:
self.artists[0].redraw()
def row_label(self, row):
""" The textual label for the row"""
layer = self.artists[row].layer
if hasattr(layer, 'verbose_label'):
return layer.verbose_label
return layer.label
def change_label(self, row, label):
""" Reassign the labeel for whatever layer the artist manages"""
try:
art = self.artists[row]
art.layer.label = label
except IndexError:
pass
def add_artist(self, row, artist):
"""Add a new artist"""
self.beginInsertRows(QModelIndex(), row, row)
self.artists.insert(row, artist)
self.endInsertRows()
self.rowsInserted.emit(self.index(row), row, row)
def row_artist(self, row):
return self.artists[row]
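# Illustrative sketch (not part of glue): clients must mutate the shared
# `artists` list in place (e.g. via slice assignment) rather than rebind it,
# so the LayerArtistModel keeps seeing the same list object. The function
# name and arguments below are hypothetical.
def _reorder_artists_in_place(artists, new_order):
    assert sorted(map(id, new_order)) == sorted(map(id, artists))
    artists[:] = new_order  # in-place edit keeps the model's reference valid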
class LayerArtistView(QListView):
"""A list view into an artist model. The zorder
of each artist can be shuffled by dragging and dropping
items. Right-clicking brings up a menu to edit style or delete"""
def __init__(self, parent=None):
super(LayerArtistView, self).__init__(parent)
self.setDragEnabled(True)
self.setAcceptDrops(True)
self.setDragDropMode(QAbstractItemView.InternalMove)
self.setIconSize(QSize(15, 15))
self.setSelectionMode(QAbstractItemView.SingleSelection)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setContextMenuPolicy(Qt.ActionsContextMenu)
self.setEditTriggers(self.NoEditTriggers)
self._set_palette()
self._actions = {}
self._create_actions()
self._timer = QTimer(self)
self._timer.timeout.connect(self.viewport().update)
self._timer.start(1000)
def selectionChanged(self, selected, deselected):
super(LayerArtistView, self).selectionChanged(selected, deselected)
self._update_actions()
def current_artist(self):
model = self.selectionModel()
if model is None:
return
rows = model.selectedRows()
if len(rows) != 1:
return
return self.model().row_artist(rows[0].row())
def single_selection(self):
return self.current_artist() is not None
def current_row(self):
model = self.selectionModel()
if model is None:
return
rows = model.selectedRows()
if len(rows) != 1:
return
return rows[0].row()
def _set_palette(self):
p = self.palette()
c = QColor(240, 240, 240)
p.setColor(QPalette.Highlight, c)
p.setColor(QPalette.HighlightedText, QColor(Qt.black))
self.setPalette(p)
def _update_actions(self):
pass
def _bottom_left_of_current_index(self):
idx = self.currentIndex()
if not idx.isValid():
return
rect = self.visualRect(idx)
pos = self.mapToGlobal(rect.bottomLeft())
pos.setY(pos.y() + 1)
return pos
def _edit_style(self):
pos = self._bottom_left_of_current_index()
if pos is None:
return
item = self.current_artist().layer
StyleDialog.dropdown_editor(item, pos, edit_label=False)
def _create_actions(self):
act = QAction('Edit style', self)
act.triggered.connect(nonpartial(self._edit_style))
self.addAction(act)
act = QAction('Remove', self)
act.setShortcut(QKeySequence(Qt.Key_Backspace))
act.setShortcutContext(Qt.WidgetShortcut)
act.triggered.connect(
lambda *args: self.model().removeRow(self.current_row()))
self.addAction(act)
class QtLayerArtistContainer(LayerArtistContainer):
"""A subclass of LayerArtistContainer that dispatches to a
LayerArtistModel"""
def __init__(self):
super(QtLayerArtistContainer, self).__init__()
self.model = LayerArtistModel(self.artists)
self.model.rowsInserted.connect(self._notify)
self.model.rowsRemoved.connect(self._notify)
self.model.modelReset.connect(self._notify)
def append(self, artist):
self._check_duplicate(artist)
self.model.add_artist(0, artist)
artist.zorder = max(a.zorder for a in self.artists) + 1
assert self.artists[0] is artist
self._notify()
def remove(self, artist):
try:
index = self.artists.index(artist)
except ValueError:
return
self.model.removeRow(index)
assert artist not in self.artists
self._notify()
def __nonzero__(self):
return True
__bool__ = __nonzero__
|
|
import datetime
import warnings
from django.conf import settings
import haystack
from haystack.backends import BaseEngine
from haystack.constants import DEFAULT_OPERATOR, DJANGO_CT
from haystack.exceptions import MissingDependency
from haystack.utils import get_identifier, get_model_ct
# Backport support
from .constants import ALL_FIELD, FUZZINESS
from .elasticsearch_backend import (ElasticsearchSearchBackend,
ElasticsearchSearchQuery)
try:
import elasticsearch
if not ((5, 0, 0) <= elasticsearch.__version__ < (6, 0, 0)):
raise ImportError
from elasticsearch.helpers import bulk, scan
except ImportError:
raise MissingDependency(
"The 'elasticsearch5' backend requires the \
installation of 'elasticsearch>=5.0.0,<6.0.0'. \
Please refer to the documentation."
)
class Elasticsearch5SearchBackend(ElasticsearchSearchBackend):
def __init__(self, connection_alias, **connection_options):
super(Elasticsearch5SearchBackend, self).__init__(
connection_alias, **connection_options)
self.content_field_name = None
def clear(self, models=None, commit=True):
"""
Clears the backend of all documents/objects for a collection of models.
:param models: List or tuple of models to clear.
:param commit: Not used.
"""
if models is not None:
assert isinstance(models, (list, tuple))
try:
if models is None:
self.conn.indices.delete(index=self.index_name, ignore=404)
self.setup_complete = False
self.existing_mapping = {}
self.content_field_name = None
else:
models_to_delete = []
for model in models:
models_to_delete.append("%s:%s" % (DJANGO_CT, get_model_ct(model)))
# Delete using scroll API
query = {
"query": {"query_string": {"query": " OR ".join(models_to_delete)}}
}
generator = scan(
self.conn,
query=query,
index=self.index_name,
**self._get_doc_type_option()
)
actions = (
{"_op_type": "delete", "_id": doc["_id"]} for doc in generator
)
bulk(
self.conn,
actions=actions,
index=self.index_name,
**self._get_doc_type_option()
)
self.conn.indices.refresh(index=self.index_name)
except elasticsearch.TransportError as e:
if not self.silently_fail:
raise
if models is not None:
self.log.error(
"Failed to clear Elasticsearch index of models '%s': %s",
",".join(models_to_delete),
e,
exc_info=True,
)
else:
self.log.error(
"Failed to clear Elasticsearch index: %s", e, exc_info=True
)
def build_search_kwargs(
self,
query_string,
sort_by=None,
start_offset=0,
end_offset=None,
fields="",
highlight=False,
facets=None,
date_facets=None,
query_facets=None,
narrow_queries=None,
spelling_query=None,
within=None,
dwithin=None,
distance_point=None,
models=None,
limit_to_registered_models=None,
result_class=None,
**extra_kwargs
):
index = haystack.connections[self.connection_alias].get_unified_index()
content_field = index.document_field
if query_string == "*:*":
kwargs = {"query": {"match_all": {}}}
else:
kwargs = {
"query": {
"query_string": {
"default_field": content_field,
"default_operator": DEFAULT_OPERATOR,
"query": query_string,
"analyze_wildcard": True,
"auto_generate_phrase_queries": True,
"fuzziness": FUZZINESS,
}
}
}
filters = []
if fields:
if isinstance(fields, (list, set)):
fields = " ".join(fields)
kwargs["stored_fields"] = fields
if sort_by is not None:
order_list = []
for field, direction in sort_by:
if field == "distance" and distance_point:
# Do the geo-enabled sort.
lng, lat = distance_point["point"].coords
sort_kwargs = {
"_geo_distance": {
distance_point["field"]: [lng, lat],
"order": direction,
"unit": "km",
}
}
else:
if field == "distance":
warnings.warn(
"In order to sort by distance, you must call the '.distance(...)' method."
)
# Regular sorting.
sort_kwargs = {field: {"order": direction}}
order_list.append(sort_kwargs)
kwargs["sort"] = order_list
# From/size offsets don't seem to work right in Elasticsearch's DSL. :/
# if start_offset is not None:
# kwargs['from'] = start_offset
# if end_offset is not None:
# kwargs['size'] = end_offset - start_offset
if highlight:
# `highlight` can either be True or a dictionary containing custom parameters
# which will be passed to the backend and may override our default settings:
kwargs["highlight"] = {"fields": {content_field: {}}}
if isinstance(highlight, dict):
kwargs["highlight"].update(highlight)
if self.include_spelling:
kwargs["suggest"] = {
"suggest": {
"text": spelling_query or query_string,
"term": {
# Using content_field here will result in suggestions of stemmed words.
"field": ALL_FIELD,
},
}
}
if narrow_queries is None:
narrow_queries = set()
if facets is not None:
kwargs.setdefault("aggs", {})
for facet_fieldname, extra_options in facets.items():
facet_options = {
"meta": {"_type": "terms"},
"terms": {"field": index.get_facet_fieldname(facet_fieldname)},
}
if "order" in extra_options:
facet_options["meta"]["order"] = extra_options.pop("order")
# Special cases for options applied at the facet level (not the terms level).
if extra_options.pop("global_scope", False):
# Renamed "global_scope" since "global" is a python keyword.
facet_options["global"] = True
if "facet_filter" in extra_options:
facet_options["facet_filter"] = extra_options.pop("facet_filter")
facet_options["terms"].update(extra_options)
kwargs["aggs"][facet_fieldname] = facet_options
if date_facets is not None:
kwargs.setdefault("aggs", {})
for facet_fieldname, value in date_facets.items():
# Need to detect on gap_by & only add amount if it's more than one.
interval = value.get("gap_by").lower()
# Need to detect on amount (can't be applied on months or years).
if value.get("gap_amount", 1) != 1 and interval not in (
"month",
"year",
):
# Just the first character is valid for use.
interval = "%s%s" % (value["gap_amount"], interval[:1])
kwargs["aggs"][facet_fieldname] = {
"meta": {"_type": "date_histogram"},
"date_histogram": {"field": facet_fieldname, "interval": interval},
"aggs": {
facet_fieldname: {
"date_range": {
"field": facet_fieldname,
"ranges": [
{
"from": self._from_python(
value.get("start_date")
),
"to": self._from_python(value.get("end_date")),
}
],
}
}
},
}
if query_facets is not None:
kwargs.setdefault("aggs", {})
for facet_fieldname, value in query_facets:
kwargs["aggs"][facet_fieldname] = {
"meta": {"_type": "query"},
"filter": {"query_string": {"query": value}},
}
if limit_to_registered_models is None:
limit_to_registered_models = getattr(
settings, "HAYSTACK_LIMIT_TO_REGISTERED_MODELS", True
)
if models and len(models):
model_choices = sorted(get_model_ct(model) for model in models)
elif limit_to_registered_models:
# Using narrow queries, limit the results to only models handled
# with the current routers.
model_choices = self.build_models_list()
else:
model_choices = []
if len(model_choices) > 0:
filters.append({"terms": {DJANGO_CT: model_choices}})
for q in narrow_queries:
filters.append({"query_string": {"query": q}})
if within is not None:
filters.append(self._build_search_query_within(within))
if dwithin is not None:
filters.append(self._build_search_query_dwithin(dwithin))
# if we want to filter, change the query type to bool
if filters:
kwargs["query"] = {"bool": {"must": kwargs.pop("query")}}
if len(filters) == 1:
kwargs["query"]["bool"]["filter"] = filters[0]
else:
kwargs["query"]["bool"]["filter"] = {"bool": {"must": filters}}
if extra_kwargs:
kwargs.update(extra_kwargs)
return kwargs
def _build_search_query_dwithin(self, dwithin):
lng, lat = dwithin["point"].coords
distance = "%(dist).6f%(unit)s" % {"dist": dwithin["distance"].km, "unit": "km"}
return {
"geo_distance": {
"distance": distance,
dwithin["field"]: {"lat": lat, "lon": lng},
}
}
def _build_search_query_within(self, within):
from haystack.utils.geo import generate_bounding_box
((south, west), (north, east)) = generate_bounding_box(
within["point_1"], within["point_2"]
)
return {
"geo_bounding_box": {
within["field"]: {
"top_left": {"lat": north, "lon": west},
"bottom_right": {"lat": south, "lon": east},
}
}
}
def more_like_this(
self,
model_instance,
additional_query_string=None,
start_offset=0,
end_offset=None,
models=None,
limit_to_registered_models=None,
result_class=None,
**kwargs
):
from haystack import connections
if not self.setup_complete:
self.setup()
# Deferred models will have a different class ("RealClass_Deferred_fieldname")
# which won't be in our registry:
model_klass = model_instance._meta.concrete_model
index = (
connections[self.connection_alias]
.get_unified_index()
.get_index(model_klass)
)
field_name = index.get_content_field()
params = {}
if start_offset is not None:
params["from_"] = start_offset
if end_offset is not None:
params["size"] = end_offset - start_offset
doc_id = get_identifier(model_instance)
try:
# More like this Query
# https://www.elastic.co/guide/en/elasticsearch/reference/2.2/query-dsl-mlt-query.html
mlt_query = {
"query": {
"more_like_this": {
"fields": [field_name],
"like": [{"_id": doc_id}],
}
}
}
narrow_queries = []
if additional_query_string and additional_query_string != "*:*":
additional_filter = {"query_string": {"query": additional_query_string}}
narrow_queries.append(additional_filter)
if limit_to_registered_models is None:
limit_to_registered_models = getattr(
settings, "HAYSTACK_LIMIT_TO_REGISTERED_MODELS", True
)
if models and len(models):
model_choices = sorted(get_model_ct(model) for model in models)
elif limit_to_registered_models:
# Using narrow queries, limit the results to only models handled
# with the current routers.
model_choices = self.build_models_list()
else:
model_choices = []
if len(model_choices) > 0:
model_filter = {"terms": {DJANGO_CT: model_choices}}
narrow_queries.append(model_filter)
if len(narrow_queries) > 0:
mlt_query = {
"query": {
"bool": {
"must": mlt_query["query"],
"filter": {"bool": {"must": list(narrow_queries)}},
}
}
}
search_kwargs = dict(self._get_doc_type_option())
search_kwargs.update(params)
raw_results = self.conn.search(
body=mlt_query,
index=self.index_name,
_source=True,
**search_kwargs
)
except elasticsearch.TransportError as e:
if not self.silently_fail:
raise
self.log.error(
"Failed to fetch More Like This from Elasticsearch for document '%s': %s",
doc_id,
e,
exc_info=True,
)
raw_results = {}
return self._process_results(raw_results, result_class=result_class)
def _process_results(
self,
raw_results,
highlight=False,
result_class=None,
distance_point=None,
geo_sort=False,
):
results = super(Elasticsearch5SearchBackend, self)._process_results(
raw_results, highlight, result_class, distance_point, geo_sort
)
facets = {}
if "aggregations" in raw_results:
facets = {"fields": {}, "dates": {}, "queries": {}}
for facet_fieldname, facet_info in raw_results["aggregations"].items():
facet_type = facet_info["meta"]["_type"]
if facet_type == "terms":
facets["fields"][facet_fieldname] = [
(individual["key"], individual["doc_count"])
for individual in facet_info["buckets"]
]
if "order" in facet_info["meta"]:
if facet_info["meta"]["order"] == "reverse_count":
srt = sorted(
facets["fields"][facet_fieldname], key=lambda x: x[1]
)
facets["fields"][facet_fieldname] = srt
elif facet_type == "date_histogram":
# Elasticsearch provides UTC timestamps with an extra three
# decimals of precision, which datetime barfs on.
facets["dates"][facet_fieldname] = [
(
datetime.datetime.utcfromtimestamp(
individual["key"] / 1000
),
individual["doc_count"],
)
for individual in facet_info["buckets"]
]
elif facet_type == "query":
facets["queries"][facet_fieldname] = facet_info["doc_count"]
results["facets"] = facets
return results
class Elasticsearch5SearchQuery(ElasticsearchSearchQuery):
def add_field_facet(self, field, **options):
"""Adds a regular facet on a field."""
# to be renamed to the facet fieldname by build_search_kwargs later
self.facets[field] = options.copy()
class Elasticsearch5SearchEngine(BaseEngine):
backend = Elasticsearch5SearchBackend
query = Elasticsearch5SearchQuery
|
|
from django import forms
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.forms import SetPasswordForm, AuthenticationForm, \
PasswordResetForm
from django.core.exceptions import ValidationError
from django.urls import reverse
from django.core.validators import validate_email
from django.db.models.query import QuerySet
from django.utils.translation import ugettext as _
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.http import HttpRequest
from jinja2 import Markup as mark_safe
from zerver.lib.actions import do_change_password, email_not_system_bot, \
validate_email_for_realm
from zerver.lib.name_restrictions import is_reserved_subdomain, is_disposable_domain
from zerver.lib.request import JsonableError
from zerver.lib.send_email import send_email, FromAddress
from zerver.lib.subdomains import get_subdomain, user_matches_subdomain, is_root_domain_available
from zerver.lib.users import check_full_name
from zerver.models import Realm, get_user_by_delivery_email, UserProfile, get_realm, \
email_to_domain, \
email_allowed_for_realm, DisposableEmailError, DomainNotAllowedForRealmError, \
EmailContainsPlusError
from zproject.backends import email_auth_enabled, email_belongs_to_ldap
import logging
import re
import DNS
from typing import Any, Callable, List, Optional, Dict
from two_factor.forms import AuthenticationTokenForm as TwoFactorAuthenticationTokenForm
from two_factor.utils import totp_digits
MIT_VALIDATION_ERROR = u'That user does not exist at MIT or is a ' + \
u'<a href="https://ist.mit.edu/email-lists">mailing list</a>. ' + \
u'If you want to sign up an alias for Zulip, ' + \
u'<a href="mailto:[email protected]">contact us</a>.'
WRONG_SUBDOMAIN_ERROR = "Your Zulip account is not a member of the " + \
"organization associated with this subdomain. " + \
"Please contact %s with any questions!" % (FromAddress.SUPPORT,)
def email_is_not_mit_mailing_list(email: str) -> None:
"""Prevent MIT mailing lists from signing up for Zulip"""
if "@mit.edu" in email:
username = email.rsplit("@", 1)[0]
# Check whether the user exists and can get mail.
try:
DNS.dnslookup("%s.pobox.ns.athena.mit.edu" % username, DNS.Type.TXT)
except DNS.Base.ServerError as e:
if e.rcode == DNS.Status.NXDOMAIN:
raise ValidationError(mark_safe(MIT_VALIDATION_ERROR))
else:
raise AssertionError("Unexpected DNS error")
def check_subdomain_available(subdomain: str, from_management_command: bool=False) -> None:
error_strings = {
'too short': _("Subdomain needs to have length 3 or greater."),
'extremal dash': _("Subdomain cannot start or end with a '-'."),
'bad character': _("Subdomain can only have lowercase letters, numbers, and '-'s."),
'unavailable': _("Subdomain unavailable. Please choose a different one.")}
if subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
if is_root_domain_available():
return
raise ValidationError(error_strings['unavailable'])
if subdomain[0] == '-' or subdomain[-1] == '-':
raise ValidationError(error_strings['extremal dash'])
if not re.match('^[a-z0-9-]*$', subdomain):
raise ValidationError(error_strings['bad character'])
if from_management_command:
return
if len(subdomain) < 3:
raise ValidationError(error_strings['too short'])
if is_reserved_subdomain(subdomain) or \
get_realm(subdomain) is not None:
raise ValidationError(error_strings['unavailable'])
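# Illustrative sketch (not part of Zulip): how the validator above is
# typically consumed. Assumes a configured Django/Zulip environment; the
# helper name `_subdomain_is_available` is hypothetical.
def _subdomain_is_available(candidate: str) -> bool:
    try:
        check_subdomain_available(candidate)
    except ValidationError:
        return False
    return True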
class RegistrationForm(forms.Form):
MAX_PASSWORD_LENGTH = 100
full_name = forms.CharField(max_length=UserProfile.MAX_NAME_LENGTH)
# The required-ness of the password field gets overridden if it isn't
# actually required for a realm
password = forms.CharField(widget=forms.PasswordInput, max_length=MAX_PASSWORD_LENGTH)
realm_subdomain = forms.CharField(max_length=Realm.MAX_REALM_SUBDOMAIN_LENGTH, required=False)
def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Since the superclass doesn't accept arbitrary extra kwargs, we
        # remove them from the kwargs dict before initializing.
self.realm_creation = kwargs['realm_creation']
del kwargs['realm_creation']
super().__init__(*args, **kwargs)
if settings.TERMS_OF_SERVICE:
self.fields['terms'] = forms.BooleanField(required=True)
self.fields['realm_name'] = forms.CharField(
max_length=Realm.MAX_REALM_NAME_LENGTH,
required=self.realm_creation)
def clean_full_name(self) -> str:
try:
return check_full_name(self.cleaned_data['full_name'])
except JsonableError as e:
raise ValidationError(e.msg)
def clean_realm_subdomain(self) -> str:
if not self.realm_creation:
            # This field is only used if realm_creation is set.
return ""
subdomain = self.cleaned_data['realm_subdomain']
if 'realm_in_root_domain' in self.data:
subdomain = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
check_subdomain_available(subdomain)
return subdomain
class ToSForm(forms.Form):
terms = forms.BooleanField(required=True)
class HomepageForm(forms.Form):
email = forms.EmailField()
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.realm = kwargs.pop('realm', None)
self.from_multiuse_invite = kwargs.pop('from_multiuse_invite', False)
super().__init__(*args, **kwargs)
def clean_email(self) -> str:
"""Returns the email if and only if the user's email address is
allowed to join the realm they are trying to join."""
email = self.cleaned_data['email']
        # The user is trying to join a specific realm.
realm = self.realm
from_multiuse_invite = self.from_multiuse_invite
if realm is None:
raise ValidationError(_("The organization you are trying to "
"join using {email} does not "
"exist.").format(email=email))
if not from_multiuse_invite and realm.invite_required:
raise ValidationError(_("Please request an invite for {email} "
"from the organization "
"administrator.").format(email=email))
try:
email_allowed_for_realm(email, realm)
except DomainNotAllowedForRealmError:
raise ValidationError(
_("Your email address, {email}, is not in one of the domains "
"that are allowed to register for accounts in this organization.").format(
string_id=realm.string_id, email=email))
except DisposableEmailError:
raise ValidationError(_("Please use your real email address."))
except EmailContainsPlusError:
raise ValidationError(_("Email addresses containing + are not allowed in this organization."))
validate_email_for_realm(realm, email)
if realm.is_zephyr_mirror_realm:
email_is_not_mit_mailing_list(email)
return email
def email_is_not_disposable(email: str) -> None:
if is_disposable_domain(email_to_domain(email)):
raise ValidationError(_("Please use your real email address."))
class RealmCreationForm(forms.Form):
# This form determines whether users can create a new realm.
email = forms.EmailField(validators=[email_not_system_bot,
email_is_not_disposable])
class LoggingSetPasswordForm(SetPasswordForm):
def save(self, commit: bool=True) -> UserProfile:
do_change_password(self.user, self.cleaned_data['new_password1'],
commit=commit)
return self.user
def generate_password_reset_url(user_profile: UserProfile,
token_generator: PasswordResetTokenGenerator) -> str:
token = token_generator.make_token(user_profile)
uid = urlsafe_base64_encode(force_bytes(user_profile.id)).decode('ascii')
endpoint = reverse('django.contrib.auth.views.password_reset_confirm',
kwargs=dict(uidb64=uid, token=token))
return "{}{}".format(user_profile.realm.uri, endpoint)
class ZulipPasswordResetForm(PasswordResetForm):
def save(self,
domain_override: Optional[bool]=None,
subject_template_name: str='registration/password_reset_subject.txt',
email_template_name: str='registration/password_reset_email.html',
use_https: bool=False,
token_generator: PasswordResetTokenGenerator=default_token_generator,
from_email: Optional[str]=None,
request: HttpRequest=None,
html_email_template_name: Optional[str]=None,
extra_email_context: Optional[Dict[str, Any]]=None
) -> None:
"""
        If the email address has an account in the target realm, this
        generates a one-use-only link for resetting the password and sends
        it to the user.
We send a different email if an associated account does not exist in the
database, or an account does exist, but not in the realm.
Note: We ignore protocol and the various email template arguments (those
are an artifact of using Django's password reset framework).
"""
email = self.cleaned_data["email"]
realm = get_realm(get_subdomain(request))
if not email_auth_enabled(realm):
logging.info("Password reset attempted for %s even though password auth is disabled." % (email,))
return
if email_belongs_to_ldap(realm, email):
# TODO: Ideally, we'd provide a user-facing error here
# about the fact that they aren't allowed to have a
# password in the Zulip server and should change it in LDAP.
logging.info("Password reset not allowed for user in LDAP domain")
return
if realm.deactivated:
logging.info("Realm is deactivated")
return
user = None # type: Optional[UserProfile]
try:
user = get_user_by_delivery_email(email, realm)
except UserProfile.DoesNotExist:
pass
context = {
'email': email,
'realm_uri': realm.uri,
}
if user is not None and not user.is_active:
context['user_deactivated'] = True
user = None
if user is not None:
context['active_account_in_realm'] = True
context['reset_url'] = generate_password_reset_url(user, token_generator)
send_email('zerver/emails/password_reset', to_user_ids=[user.id],
from_name="Zulip Account Security",
from_address=FromAddress.tokenized_no_reply_address(),
context=context)
else:
context['active_account_in_realm'] = False
active_accounts_in_other_realms = UserProfile.objects.filter(
delivery_email__iexact=email, is_active=True)
if active_accounts_in_other_realms:
context['active_accounts_in_other_realms'] = active_accounts_in_other_realms
send_email('zerver/emails/password_reset', to_emails=[email],
from_name="Zulip Account Security",
from_address=FromAddress.tokenized_no_reply_address(),
context=context)
class CreateUserForm(forms.Form):
full_name = forms.CharField(max_length=100)
email = forms.EmailField()
class OurAuthenticationForm(AuthenticationForm):
def clean(self) -> Dict[str, Any]:
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username is not None and password:
subdomain = get_subdomain(self.request)
realm = get_realm(subdomain)
return_data = {} # type: Dict[str, Any]
self.user_cache = authenticate(self.request, username=username, password=password,
realm=realm, return_data=return_data)
if return_data.get("inactive_realm"):
raise AssertionError("Programming error: inactive realm in authentication form")
if return_data.get("inactive_user") and not return_data.get("is_mirror_dummy"):
# We exclude mirror dummy accounts here. They should be treated as the
# user never having had an account, so we let them fall through to the
# normal invalid_login case below.
error_msg = (
u"Your account is no longer active. "
u"Please contact your organization administrator to reactivate it.")
raise ValidationError(mark_safe(error_msg))
if return_data.get("invalid_subdomain"):
logging.warning("User %s attempted to password login to wrong subdomain %s" %
(username, subdomain))
raise ValidationError(mark_safe(WRONG_SUBDOMAIN_ERROR))
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
self.confirm_login_allowed(self.user_cache)
return self.cleaned_data
def add_prefix(self, field_name: str) -> str:
"""Disable prefix, since Zulip doesn't use this Django forms feature
(and django-two-factor does use it), and we'd like both to be
happy with this form.
"""
return field_name
class AuthenticationTokenForm(TwoFactorAuthenticationTokenForm):
"""
We add this form to update the widget of otp_token. The default
widget is an input element whose type is a number, which doesn't
stylistically match our theme.
"""
otp_token = forms.IntegerField(label=_("Token"), min_value=1,
max_value=int('9' * totp_digits()),
widget=forms.TextInput)
class MultiEmailField(forms.Field):
def to_python(self, emails: str) -> List[str]:
"""Normalize data to a list of strings."""
if not emails:
return []
return [email.strip() for email in emails.split(',')]
def validate(self, emails: List[str]) -> None:
"""Check if value consists only of valid emails."""
super().validate(emails)
for email in emails:
validate_email(email)
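# Illustrative sketch (not part of Zulip): MultiEmailField normalizes a
# comma-separated string into a list before validating each address. Assumes
# a configured Django environment; the helper name is hypothetical.
def _multi_email_example() -> List[str]:
    field = MultiEmailField()
    emails = field.to_python(" [email protected], [email protected] ")
    field.validate(emails)  # raises ValidationError on malformed addresses
    return emails  # ['[email protected]', '[email protected]']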
class FindMyTeamForm(forms.Form):
emails = MultiEmailField(
help_text=_("Add up to 10 comma-separated email addresses."))
def clean_emails(self) -> List[str]:
emails = self.cleaned_data['emails']
if len(emails) > 10:
raise forms.ValidationError(_("Please enter at most 10 emails."))
return emails
class RealmRedirectForm(forms.Form):
subdomain = forms.CharField(max_length=Realm.MAX_REALM_SUBDOMAIN_LENGTH, required=True)
def clean_subdomain(self) -> str:
subdomain = self.cleaned_data['subdomain']
if get_realm(subdomain) is None:
raise ValidationError(_("We couldn't find that Zulip organization."))
return subdomain
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batching dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import get_single_element
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import convert
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.util.tf_export import tf_export
def batch_window(dataset):
"""Batches a window of tensors.
Args:
dataset: the input dataset.
Returns:
A `Tensor` representing the batch of the entire input dataset.
"""
if isinstance(dataset.output_classes, tuple):
raise TypeError("Input dataset expected to have a single component")
if dataset.output_classes is ops.Tensor:
return _batch_dense_window(dataset)
elif dataset.output_classes is sparse_tensor.SparseTensor:
return _batch_sparse_window(dataset)
else:
raise TypeError("Unsupported dataset type: %s" % dataset.output_classes)
def _batch_dense_window(dataset):
"""Batches a window of dense tensors."""
def key_fn(_):
return np.int64(0)
def shape_init_fn(_):
return array_ops.shape(first_element)
def shape_reduce_fn(state, value):
check_ops.assert_equal(state, array_ops.shape(value))
return state
def finalize_fn(state):
return state
if dataset.output_shapes.is_fully_defined():
shape = dataset.output_shapes
else:
first_element = get_single_element.get_single_element(dataset.take(1))
shape_reducer = grouping.Reducer(shape_init_fn, shape_reduce_fn,
finalize_fn)
shape = get_single_element.get_single_element(
dataset.apply(grouping.group_by_reducer(key_fn, shape_reducer)))
def batch_init_fn(_):
batch_shape = array_ops.concat([[0], shape], 0)
return gen_array_ops.empty(batch_shape, dtype=dataset.output_types)
def batch_reduce_fn(state, value):
return array_ops.concat([state, [value]], 0)
batch_reducer = grouping.Reducer(batch_init_fn, batch_reduce_fn, finalize_fn)
return get_single_element.get_single_element(
dataset.apply(grouping.group_by_reducer(key_fn, batch_reducer)))
def _batch_sparse_window(dataset):
"""Batches a window of sparse tensors."""
def key_fn(_):
return np.int64(0)
def shape_init_fn(_):
return first_element.dense_shape
def shape_reduce_fn(state, value):
check_ops.assert_equal(state, value.dense_shape)
return state
def finalize_fn(state):
return state
if dataset.output_shapes.is_fully_defined():
shape = dataset.output_shapes
else:
first_element = get_single_element.get_single_element(dataset.take(1))
shape_reducer = grouping.Reducer(shape_init_fn, shape_reduce_fn,
finalize_fn)
shape = get_single_element.get_single_element(
dataset.apply(grouping.group_by_reducer(key_fn, shape_reducer)))
def batch_init_fn(_):
indices_shape = array_ops.concat([[0], [array_ops.size(shape) + 1]], 0)
return sparse_tensor.SparseTensor(
indices=gen_array_ops.empty(indices_shape, dtype=dtypes.int64),
values=constant_op.constant([], shape=[0], dtype=dataset.output_types),
dense_shape=array_ops.concat(
[np.array([0], dtype=np.int64),
math_ops.cast(shape, dtypes.int64)], 0))
def batch_reduce_fn(state, value):
return sparse_ops.sparse_concat(0, [state, value])
def reshape_fn(value):
return sparse_ops.sparse_reshape(
value,
array_ops.concat([np.array([1], dtype=np.int64), value.dense_shape], 0))
batch_reducer = grouping.Reducer(batch_init_fn, batch_reduce_fn, finalize_fn)
return get_single_element.get_single_element(
dataset.map(reshape_fn).apply(
grouping.group_by_reducer(key_fn, batch_reducer)))
@tf_export("data.experimental.dense_to_sparse_batch")
def dense_to_sparse_batch(batch_size, row_shape):
"""A transformation that batches ragged elements into `tf.SparseTensor`s.
Like `Dataset.padded_batch()`, this transformation combines multiple
consecutive elements of the dataset, which might have different
shapes, into a single element. The resulting element has three
components (`indices`, `values`, and `dense_shape`), which
comprise a `tf.SparseTensor` that represents the same data. The
`row_shape` represents the dense shape of each row in the
resulting `tf.SparseTensor`, to which the effective batch size is
prepended. For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
a.apply(tf.data.experimental.dense_to_sparse_batch(
batch_size=2, row_shape=[6])) ==
{
([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]], # indices
['a', 'b', 'c', 'a', 'b'], # values
[2, 6]), # dense_shape
([[0, 0], [0, 1], [0, 2], [0, 3]],
['a', 'b', 'c', 'd'],
[1, 6])
}
```
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the
number of consecutive elements of this dataset to combine in a
single batch.
row_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like
object representing the equivalent dense shape of a row in the
resulting `tf.SparseTensor`. Each element of this dataset must
have the same rank as `row_shape`, and must have size less
than or equal to `row_shape` in each dimension.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _DenseToSparseBatchDataset(dataset, batch_size, row_shape)
return _apply_fn
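# Illustrative sketch (not part of this module): feeding ragged rows produced
# by `fill` through `dense_to_sparse_batch`. Assumes TF 1.x graph-mode
# semantics; the helper name below is hypothetical.
def _dense_to_sparse_batch_example():
  ds = dataset_ops.Dataset.range(1, 4)  # elements: 1, 2, 3
  ds = ds.map(
      lambda x: array_ops.fill([math_ops.cast(x, dtypes.int32)], x))
  # Each output element is a tf.SparseTensor with dense_shape [batch, 6].
  return ds.apply(dense_to_sparse_batch(batch_size=2, row_shape=[6]))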
def padded_batch_window(dataset, padded_shape, padding_value=None):
"""Batches a window of tensors with padding.
Args:
dataset: the input dataset.
    padded_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like
object representing the shape to which the input elements should be padded
prior to batching. Any unknown dimensions (e.g. `tf.Dimension(None)` in a
`tf.TensorShape` or `-1` in a tensor-like object) will be padded to the
maximum size of that dimension in each batch.
padding_value: (Optional.) A scalar-shaped `tf.Tensor`, representing the
padding value to use. Defaults are `0` for numeric types and the empty
string for string types. If `dataset` contains `tf.SparseTensor`, this
value is ignored.
Returns:
A `Tensor` representing the batch of the entire input dataset.
Raises:
ValueError: if invalid arguments are provided.
"""
if not issubclass(dataset.output_classes,
(ops.Tensor, sparse_tensor.SparseTensor)):
raise TypeError("Input dataset expected to have a single tensor component")
if issubclass(dataset.output_classes, (ops.Tensor)):
return _padded_batch_dense_window(dataset, padded_shape, padding_value)
elif issubclass(dataset.output_classes, (sparse_tensor.SparseTensor)):
if padding_value is not None:
raise ValueError("Padding value not allowed for sparse tensors")
return _padded_batch_sparse_window(dataset, padded_shape)
else:
raise TypeError("Unsupported dataset type: %s" % dataset.output_classes)
def _padded_batch_dense_window(dataset, padded_shape, padding_value=None):
"""Batches a window of dense tensors with padding."""
padded_shape = math_ops.cast(
convert.partial_shape_to_tensor(padded_shape), dtypes.int32)
def key_fn(_):
return np.int64(0)
def max_init_fn(_):
return padded_shape
def max_reduce_fn(state, value):
"""Computes the maximum shape to pad to."""
condition = math_ops.reduce_all(
math_ops.logical_or(
math_ops.less_equal(array_ops.shape(value), padded_shape),
math_ops.equal(padded_shape, -1)))
assert_op = control_flow_ops.Assert(condition, [
"Actual shape greater than padded shape: ",
array_ops.shape(value), padded_shape
])
with ops.control_dependencies([assert_op]):
return math_ops.maximum(state, array_ops.shape(value))
def finalize_fn(state):
return state
# Compute the padded shape.
max_reducer = grouping.Reducer(max_init_fn, max_reduce_fn, finalize_fn)
padded_shape = get_single_element.get_single_element(
dataset.apply(grouping.group_by_reducer(key_fn, max_reducer)))
if padding_value is None:
if dataset.output_types == dtypes.string:
padding_value = ""
elif dataset.output_types == dtypes.bool:
padding_value = False
elif dataset.output_types == dtypes.variant:
raise TypeError("Unable to create padding for field of type 'variant'")
else:
padding_value = 0
def batch_init_fn(_):
batch_shape = array_ops.concat(
[np.array([0], dtype=np.int32), padded_shape], 0)
return gen_array_ops.empty(batch_shape, dtype=dataset.output_types)
def batch_reduce_fn(state, value):
return array_ops.concat([state, [value]], 0)
def pad_fn(value):
shape = array_ops.shape(value)
left = array_ops.zeros_like(shape)
right = padded_shape - shape
return array_ops.pad(
value, array_ops.stack([left, right], 1), constant_values=padding_value)
batch_reducer = grouping.Reducer(batch_init_fn, batch_reduce_fn, finalize_fn)
return get_single_element.get_single_element(
dataset.map(pad_fn).apply(
grouping.group_by_reducer(key_fn, batch_reducer)))
def _padded_batch_sparse_window(dataset, padded_shape):
"""Batches a window of sparse tensors with padding."""
def key_fn(_):
return np.int64(0)
def max_init_fn(_):
return convert.partial_shape_to_tensor(padded_shape)
def max_reduce_fn(state, value):
"""Computes the maximum shape to pad to."""
condition = math_ops.reduce_all(
math_ops.logical_or(
math_ops.less_equal(value.dense_shape, padded_shape),
math_ops.equal(padded_shape, -1)))
assert_op = control_flow_ops.Assert(condition, [
"Actual shape greater than padded shape: ", value.dense_shape,
padded_shape
])
with ops.control_dependencies([assert_op]):
return math_ops.maximum(state, value.dense_shape)
def finalize_fn(state):
return state
# Compute the padded shape.
max_reducer = grouping.Reducer(max_init_fn, max_reduce_fn, finalize_fn)
padded_shape = get_single_element.get_single_element(
dataset.apply(grouping.group_by_reducer(key_fn, max_reducer)))
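  # Second pass: give every sparse element the common `padded_shape`, add a
  # leading batch dimension of size 1, and concatenate along that dimension.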
def batch_init_fn(_):
indices_shape = array_ops.concat([[0], [array_ops.size(padded_shape) + 1]],
0)
return sparse_tensor.SparseTensor(
indices=gen_array_ops.empty(indices_shape, dtype=dtypes.int64),
values=constant_op.constant([], shape=[0], dtype=dataset.output_types),
dense_shape=array_ops.concat(
[np.array([0], dtype=np.int64), padded_shape], 0))
def batch_reduce_fn(state, value):
padded_value = sparse_tensor.SparseTensor(
indices=value.indices, values=value.values, dense_shape=padded_shape)
reshaped_value = sparse_ops.sparse_reshape(
padded_value,
array_ops.concat(
[np.array([1], dtype=np.int64), padded_value.dense_shape], 0))
return sparse_ops.sparse_concat(0, [state, reshaped_value])
reducer = grouping.Reducer(batch_init_fn, batch_reduce_fn, finalize_fn)
return get_single_element.get_single_element(
dataset.apply(grouping.group_by_reducer(key_fn, reducer)))
class _UnbatchDataset(dataset_ops.UnaryDataset):
"""A dataset that splits the elements of its input into multiple elements."""
def __init__(self, input_dataset):
"""See `unbatch()` for more details."""
super(_UnbatchDataset, self).__init__(input_dataset)
flat_shapes = nest.flatten(input_dataset.output_shapes)
if any(s.ndims == 0 for s in flat_shapes):
raise ValueError("Cannot unbatch an input with scalar components.")
known_batch_dim = tensor_shape.Dimension(None)
for s in flat_shapes:
try:
known_batch_dim = known_batch_dim.merge_with(s[0])
except ValueError:
raise ValueError("Cannot unbatch an input whose components have "
"different batch sizes.")
self._input_dataset = input_dataset
def _as_variant_tensor(self):
return gen_dataset_ops.unbatch_dataset(
self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
**dataset_ops.flat_structure(self))
@property
def output_classes(self):
return self._input_dataset.output_classes
@property
def output_shapes(self):
return nest.map_structure(lambda s: s[1:],
self._input_dataset.output_shapes)
@property
def output_types(self):
return self._input_dataset.output_types
@tf_export("data.experimental.unbatch")
def unbatch():
"""Splits elements of a dataset into multiple elements on the batch dimension.
For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,
where `B` may vary for each input element, then for each element in the
dataset, the unbatched dataset will contain `B` consecutive elements
of shape `[a0, a1, ...]`.
```python
# NOTE: The following example uses `{ ... }` to represent the contents
# of a dataset.
a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
a.apply(tf.data.experimental.unbatch()) == {
'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'}
```
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
if not sparse.any_sparse(dataset.output_classes):
return _UnbatchDataset(dataset)
# NOTE(mrry): We must ensure that any SparseTensors in `dataset`
# are normalized to the rank-1 dense representation, so that the
# sparse-oblivious unbatching logic will slice them
# appropriately. This leads to a somewhat inefficient re-encoding step
# for all SparseTensor components.
# TODO(mrry): Consider optimizing this in future
# if it turns out to be a bottleneck.
def normalize(arg, *rest):
if rest:
return sparse.serialize_many_sparse_tensors((arg,) + rest)
else:
return sparse.serialize_many_sparse_tensors(arg)
normalized_dataset = dataset.map(normalize)
# NOTE(mrry): Our `map()` has lost information about the sparseness
# of any SparseTensor components, so re-apply the structure of the
# original dataset.
restructured_dataset = _RestructuredDataset(
normalized_dataset,
dataset.output_types,
dataset.output_shapes,
dataset.output_classes,
allow_unsafe_cast=True)
return _UnbatchDataset(restructured_dataset)
return _apply_fn
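# The example below is an illustrative, hedged sketch of `unbatch` applied to
# a dense dataset; `_unbatch_example` is an assumed name for documentation
# purposes only and is never called here.
def _unbatch_example():
  """Turns a dataset of shape-[4] batches back into 12 scalar elements."""
  batched = dataset_ops.Dataset.range(12).batch(4)
  return batched.apply(unbatch())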
class _DenseToSparseBatchDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that batches ragged dense elements into `tf.SparseTensor`s."""
def __init__(self, input_dataset, batch_size, row_shape):
"""See `Dataset.dense_to_sparse_batch()` for more details."""
super(_DenseToSparseBatchDataset, self).__init__(input_dataset)
if not isinstance(input_dataset.output_types, dtypes.DType):
raise TypeError("DenseToSparseDataset requires an input whose elements "
"have a single component, whereas the input has %r." %
input_dataset.output_types)
self._input_dataset = input_dataset
self._batch_size = batch_size
self._row_shape = row_shape
def _as_variant_tensor(self):
return gen_dataset_ops.dense_to_sparse_batch_dataset(
self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
self._batch_size,
row_shape=convert.partial_shape_to_tensor(self._row_shape),
**dataset_ops.flat_structure(self))
@property
def output_classes(self):
return sparse_tensor.SparseTensor
@property
def output_shapes(self):
return tensor_shape.vector(None).concatenate(self._row_shape)
@property
def output_types(self):
return self._input_dataset.output_types
class _RestructuredDataset(dataset_ops.UnaryDataset):
"""An internal helper for changing the structure and shape of a dataset."""
def __init__(self,
dataset,
output_types,
output_shapes=None,
output_classes=None,
allow_unsafe_cast=False):
"""Creates a new dataset with the given output types and shapes.
The given `dataset` must have a structure that is convertible:
    * `dataset.output_types` must be the same as `output_types`, modulo nesting.
* Each shape in `dataset.output_shapes` must be compatible with each shape
in `output_shapes` (if given).
Note: This helper permits "unsafe casts" for shapes, equivalent to using
`tf.Tensor.set_shape()` where domain-specific knowledge is available.
Args:
dataset: A `Dataset` object.
output_types: A nested structure of `tf.DType` objects.
output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects.
If omitted, the shapes will be inherited from `dataset`.
output_classes: (Optional.) A nested structure of class types.
If omitted, the class types will be inherited from `dataset`.
allow_unsafe_cast: (Optional.) If `True`, the caller may switch the
reported output types and shapes of the restructured dataset, e.g. to
switch a sparse tensor represented as `tf.variant` to its user-visible
type and shape.
Raises:
ValueError: If either `output_types` or `output_shapes` is not compatible
with the structure of `dataset`.
"""
super(_RestructuredDataset, self).__init__(dataset)
self._input_dataset = dataset
if not allow_unsafe_cast:
# Validate that the types are compatible.
output_types = nest.map_structure(dtypes.as_dtype, output_types)
flat_original_types = nest.flatten(dataset.output_types)
flat_new_types = nest.flatten(output_types)
if flat_original_types != flat_new_types:
raise ValueError(
"Dataset with output types %r cannot be restructured to have "
"output types %r" % (dataset.output_types, output_types))
self._output_types = output_types
if output_shapes is None:
# Inherit shapes from the original `dataset`.
self._output_shapes = nest.pack_sequence_as(output_types,
nest.flatten(
dataset.output_shapes))
else:
if not allow_unsafe_cast:
# Validate that the shapes are compatible.
nest.assert_same_structure(output_types, output_shapes)
flat_original_shapes = nest.flatten(dataset.output_shapes)
flat_new_shapes = nest.flatten_up_to(output_types, output_shapes)
for original_shape, new_shape in zip(flat_original_shapes,
flat_new_shapes):
if not original_shape.is_compatible_with(new_shape):
raise ValueError(
"Dataset with output shapes %r cannot be restructured to have "
"incompatible output shapes %r" % (dataset.output_shapes,
output_shapes))
self._output_shapes = nest.map_structure_up_to(
output_types, tensor_shape.as_shape, output_shapes)
if output_classes is None:
# Inherit class types from the original `dataset`.
self._output_classes = nest.pack_sequence_as(output_types,
nest.flatten(
dataset.output_classes))
else:
self._output_classes = output_classes
def _as_variant_tensor(self):
return self._input_dataset._as_variant_tensor() # pylint: disable=protected-access
@property
def output_classes(self):
return self._output_classes
@property
def output_types(self):
return self._output_types
@property
def output_shapes(self):
return self._output_shapes
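# The example below is an illustrative, hedged sketch of the "unsafe cast"
# behaviour described in the class docstring; `_restructured_dataset_example`
# is an assumed name for documentation purposes only and is never called here.
def _restructured_dataset_example():
  """Asserts a stronger static shape, much like `tf.Tensor.set_shape()`."""
  # `batch(2)` reports an unknown leading dimension; if the caller knows every
  # batch is full, it can re-label the element shape as [2].
  batched = dataset_ops.Dataset.range(4).batch(2)
  return _RestructuredDataset(
      batched,
      output_types=batched.output_types,
      output_shapes=tensor_shape.TensorShape([2]),
      allow_unsafe_cast=True)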
class _MapAndBatchDataset(dataset_ops.MapDataset):
"""A `Dataset` that maps a function over a batch of elements."""
def __init__(self, input_dataset, map_func, batch_size, num_parallel_calls,
drop_remainder):
"""See `Dataset.map()` for details."""
super(_MapAndBatchDataset, self).__init__(input_dataset, map_func)
self._batch_size_t = ops.convert_to_tensor(
batch_size, dtype=dtypes.int64, name="batch_size")
self._num_parallel_calls_t = ops.convert_to_tensor(
num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls")
self._drop_remainder_t = ops.convert_to_tensor(
drop_remainder, dtype=dtypes.bool, name="drop_remainder")
self._batch_size = batch_size
self._drop_remainder = drop_remainder
def _as_variant_tensor(self):
# pylint: disable=protected-access
input_resource = self._input_dataset._as_variant_tensor()
return gen_dataset_ops.map_and_batch_dataset_v2(
input_resource,
self._map_func.captured_inputs,
f=self._map_func,
batch_size=self._batch_size_t,
num_parallel_calls=self._num_parallel_calls_t,
drop_remainder=self._drop_remainder_t,
**dataset_ops.flat_structure(self))
# pylint: enable=protected-access
@property
def output_shapes(self):
dim = self._batch_size if self._drop_remainder else None
return nest.pack_sequence_as(self._output_shapes, [
tensor_shape.vector(dim).concatenate(s)
for s in nest.flatten(self._output_shapes)
])
@property
def output_types(self):
return self._output_types
@tf_export("data.experimental.map_and_batch")
def map_and_batch(map_func,
batch_size,
num_parallel_batches=None,
drop_remainder=False,
num_parallel_calls=None):
"""Fused implementation of `map` and `batch`.
Maps `map_func` across `batch_size` consecutive elements of this dataset
and then combines them into a batch. Functionally, it is equivalent to `map`
followed by `batch`. However, by fusing the two transformations together, the
implementation can be more efficient. Surfacing this transformation in the API
is temporary. Once automatic input pipeline optimization is implemented,
the fusing of `map` and `batch` will happen automatically and this API will be
deprecated.
Args:
map_func: A function mapping a nested structure of tensors to another
nested structure of tensors.
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,
representing the number of batches to create in parallel. On one hand,
higher values can help mitigate the effect of stragglers. On the other
hand, higher values can increase contention if CPU is scarce.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in case its size is smaller than
desired; the default behavior is not to drop the smaller batch.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process in parallel. If not
specified, `batch_size * num_parallel_batches` elements will be
processed in parallel.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: If both `num_parallel_batches` and `num_parallel_calls` are
specified.
"""
if num_parallel_batches is None and num_parallel_calls is None:
num_parallel_calls = batch_size
elif num_parallel_batches is not None and num_parallel_calls is None:
num_parallel_calls = batch_size * num_parallel_batches
elif num_parallel_batches is not None and num_parallel_calls is not None:
raise ValueError("The `num_parallel_batches` and `num_parallel_calls` "
"arguments are mutually exclusive.")
def _apply_fn(dataset):
return _MapAndBatchDataset(dataset, map_func, batch_size,
num_parallel_calls, drop_remainder)
return _apply_fn
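# The example below is an illustrative, hedged sketch of `map_and_batch`;
# `_map_and_batch_example` is an assumed name for documentation purposes only
# and is never called here.
def _map_and_batch_example():
  """Squares each element and groups the results into fused batches of 10."""
  dataset = dataset_ops.Dataset.range(100)
  return dataset.apply(
      map_and_batch(lambda x: x * x, batch_size=10, num_parallel_calls=4))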
#pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
from distutils.version import LooseVersion # pylint:disable=W0611
import pytest
from mock import patch
from ..scatter_widget import ScatterWidget
from ..mpl_widget import MplCanvas
from .... import core
from . import simple_session
from matplotlib import __version__ as mpl_version # pylint:disable=W0611
class TestScatterWidget(object):
def setup_method(self, method):
s = simple_session()
self.hub = s.hub
self.d1 = core.Data(x=[1, 2, 3], y=[2, 3, 4],
z=[3, 4, 5], w=[4, 5, 6])
self.d1.label = 'd1'
self.d2 = core.Data(x=[1, 2, 3], y=[2, 3, 4],
z=[3, 4, 5], w=[4, 5, 6])
self.d2.label = 'd2'
self.data = [self.d1, self.d2]
self.collect = s.data_collection
self.collect.append(self.data)
self.widget = ScatterWidget(s)
self.session = s
self.connect_to_hub()
def teardown_method(self, method):
self.assert_widget_synced()
def assert_widget_synced(self):
cl = self.widget.client
w = self.widget
assert abs(w.xmin - cl.xmin) < 1e-3
assert abs(w.xmax - cl.xmax) < 1e-3
assert w.xlog == cl.xlog
assert w.ylog == cl.ylog
assert w.xflip == cl.xflip
assert w.yflip == cl.yflip
assert abs(w.ymin - cl.ymin) < 1e-3
assert abs(w.ymax - cl.ymax) < 1e-3
def connect_to_hub(self):
self.widget.register_to_hub(self.hub)
self.collect.register_to_hub(self.hub)
def add_layer_via_hub(self):
layer = self.data[0]
layer.label = 'Test Layer'
self.collect.append(layer)
return layer
def add_layer_via_method(self, index=0):
layer = self.data[index]
self.widget.add_data(layer)
return layer
def plot_data(self, layer):
""" Return the data bounds for a given layer (data or subset)
Output format: [xmin, xmax], [ymin, ymax]
"""
client = self.widget.client
x, y = client.artists[layer][0].get_data()
assert x.size > 0
assert y.size > 0
xmin = x.min()
xmax = x.max()
ymin = y.min()
ymax = y.max()
return [xmin, xmax], [ymin, ymax]
def plot_limits(self):
""" Return the plot limits
Output format [xmin, xmax], [ymin, ymax]
"""
ax = self.widget.client.axes
xlim = ax.get_xlim()
ylim = ax.get_ylim()
return xlim, ylim
def assert_layer_inside_limits(self, layer):
"""Assert that points of a layer are within plot limits """
xydata = self.plot_data(layer)
xylimits = self.plot_limits()
assert xydata[0][0] >= xylimits[0][0]
assert xydata[1][0] >= xylimits[1][0]
assert xydata[0][1] <= xylimits[0][1]
assert xydata[1][1] <= xylimits[1][1]
def is_layer_present(self, layer):
return self.widget.client.is_layer_present(layer)
def is_layer_visible(self, layer):
return self.widget.client.is_visible(layer)
def test_rescaled_on_init(self):
layer = self.add_layer_via_method()
self.assert_layer_inside_limits(layer)
def test_hub_data_add_is_ignored(self):
layer = self.add_layer_via_hub()
assert not self.widget.client.is_layer_present(layer)
def test_valid_add_data_via_method(self):
layer = self.add_layer_via_method()
assert self.is_layer_present(layer)
def test_add_first_data_updates_combos(self):
layer = self.add_layer_via_method()
xatt = str(self.widget.ui.xAxisComboBox.currentText())
yatt = str(self.widget.ui.yAxisComboBox.currentText())
assert xatt is not None
assert yatt is not None
def test_flip_x(self):
layer = self.add_layer_via_method()
self.widget.xflip = True
assert self.widget.client.xflip
self.widget.xflip = False
assert not self.widget.client.xflip
def test_flip_y(self):
layer = self.add_layer_via_method()
self.widget.yflip = True
assert self.widget.client.yflip
self.widget.yflip = False
assert not self.widget.client.yflip
def test_log_x(self):
layer = self.add_layer_via_method()
self.widget.xlog = True
assert self.widget.client.xlog
self.widget.xlog = False
assert not self.widget.client.xlog
def test_log_y(self):
self.widget.ylog = True
assert self.widget.client.ylog
self.widget.ylog = False
assert not self.widget.client.ylog
def test_double_add_ignored(self):
layer = self.add_layer_via_method()
nobj = self.widget.ui.xAxisComboBox.count()
layer = self.add_layer_via_method()
assert self.widget.ui.xAxisComboBox.count() == nobj
def test_subsets_dont_duplicate_fields(self):
layer = self.add_layer_via_method()
nobj = self.widget.ui.xAxisComboBox.count()
subset = layer.new_subset()
subset.register()
assert self.widget.ui.xAxisComboBox.count() == nobj
def test_correct_title_single_data(self):
ct = self.widget.client.layer_count
assert ct == 0
layer = self.add_layer_via_method()
ct = self.widget.client.layer_count
assert ct == 1
assert len(layer.label) > 0
assert self.widget.windowTitle() == layer.label
def test_title_updates_with_label_change(self):
layer = self.add_layer_via_method()
assert layer.hub is self.hub
layer.label = "changed label"
assert self.widget.windowTitle() == layer.label
def test_title_updates_with_second_data(self):
l1 = self.add_layer_via_method(0)
l2 = self.add_layer_via_method(1)
expected = '%s | %s' % (l1.label, l2.label)
        assert self.widget.windowTitle() == expected
def test_second_data_add_preserves_plot_variables(self):
l1 = self.add_layer_via_method(0)
self.widget.ui.xAxisComboBox.setCurrentIndex(3)
self.widget.ui.yAxisComboBox.setCurrentIndex(2)
l2 = self.add_layer_via_method(1)
assert self.widget.ui.xAxisComboBox.currentIndex() == 3
assert self.widget.ui.yAxisComboBox.currentIndex() == 2
def test_set_limits(self):
l1 = self.add_layer_via_method(0)
w = self.widget
c = self.widget.client
ax = self.widget.client.axes
        print(w.xmin, w.xmax, w.ymin, w.ymax)
        print(c.xmin, c.xmax, c.ymin, c.ymax)
        print(ax.get_xlim(), ax.get_ylim())
        self.widget.xmax = 20
        print(w.xmin, w.xmax, w.ymin, w.ymax)
        print(c.xmin, c.xmax, c.ymin, c.ymax)
        print(ax.get_xlim(), ax.get_ylim())
        self.widget.xmin = 10
        print(w.xmin, w.xmax, w.ymin, w.ymax)
        print(c.xmin, c.xmax, c.ymin, c.ymax)
        print(ax.get_xlim(), ax.get_ylim())
        self.widget.ymax = 40
        print(w.xmin, w.xmax, w.ymin, w.ymax)
        print(c.xmin, c.xmax, c.ymin, c.ymax)
        print(ax.get_xlim(), ax.get_ylim())
        self.widget.ymin = 30
        print(w.xmin, w.xmax, w.ymin, w.ymax)
        print(c.xmin, c.xmax, c.ymin, c.ymax)
        print(ax.get_xlim(), ax.get_ylim())
assert self.widget.client.axes.get_xlim() == (10, 20)
assert self.widget.client.axes.get_ylim() == (30, 40)
assert float(self.widget.ui.xmin.text()) == 10
assert float(self.widget.ui.xmax.text()) == 20
assert float(self.widget.ui.ymin.text()) == 30
assert float(self.widget.ui.ymax.text()) == 40
def test_widget_props_synced_with_client(self):
self.widget.client.xmax = 100
assert self.widget.xmax == 100
self.widget.client.ymax = 200
assert self.widget.ymax == 200
self.widget.client.xmin = 10
assert self.widget.xmin == 10
self.widget.client.ymin = 30
assert self.widget.ymin == 30
@pytest.mark.xfail("LooseVersion(mpl_version) <= LooseVersion('1.1.0')")
def test_labels_sync_with_plot_limits(self):
"""For some reason, manually calling draw() doesnt trigger the
draw_event in MPL 1.1.0. Ths functionality nevertheless seems
to work when actually using Glue"""
l1 = self.add_layer_via_method(0)
self.widget.client.axes.set_xlim((3, 4))
self.widget.client.axes.set_ylim((5, 6))
#call MPL draw to force render, not Glue draw
super(MplCanvas, self.widget.client.axes.figure.canvas).draw()
assert float(self.widget.ui.xmin.text()) == 3
assert float(self.widget.ui.xmax.text()) == 4
assert float(self.widget.ui.ymin.text()) == 5
assert float(self.widget.ui.ymax.text()) == 6
def assert_component_present(self, label):
ui = self.widget.ui
for combo in [ui.xAxisComboBox, ui.yAxisComboBox]:
atts = [combo.itemText(i) for i in range(combo.count())]
assert label in atts
def test_component_change_syncs_with_combo(self):
l1 = self.add_layer_via_method()
cid = l1.add_component(l1[l1.components[0]], 'testing')
self.assert_component_present('testing')
def test_swap_axes(self):
l1 = self.add_layer_via_method()
cl = self.widget.client
cl.xlog, cl.xflip = True, True
cl.ylog, cl.yflip = False, False
x, y = cl.xatt, cl.yatt
self.widget.swap_axes()
assert (cl.xlog, cl.xflip) == (False, False)
assert (cl.ylog, cl.yflip) == (True, True)
assert (cl.xatt, cl.yatt) == (y, x)
def test_hidden(self):
l1 = self.add_layer_via_method()
xcombo = self.widget.ui.xAxisComboBox
self.widget.hidden = False
assert xcombo.count() == 4
self.widget.hidden = True
assert xcombo.count() == 6
self.widget.hidden = False
assert xcombo.count() == 4
def test_add_subset_preserves_plot_variables(self):
l1 = self.add_layer_via_method(0)
        print(self.widget.client.layer_count)
self.widget.ui.xAxisComboBox.setCurrentIndex(3)
self.widget.ui.yAxisComboBox.setCurrentIndex(2)
assert self.widget.ui.xAxisComboBox.currentIndex() == 3
assert self.widget.ui.yAxisComboBox.currentIndex() == 2
s = self.data[1].new_subset(label='new')
self.widget.add_subset(s)
assert self.widget.ui.xAxisComboBox.currentIndex() == 3
assert self.widget.ui.yAxisComboBox.currentIndex() == 2
class TestDrawCount(TestScatterWidget):
def patch_draw(self):
return patch('glue.qt.widgets.mpl_widget.MplCanvas.draw')
def test_xatt_redraws_once(self):
self.add_layer_via_method()
with self.patch_draw() as draw:
self.widget.yatt = self.widget.xatt
assert draw.call_count == 1
def test_swap_redraws_once(self):
self.add_layer_via_method()
with self.patch_draw() as draw:
self.widget.swap_axes()
assert draw.call_count == 1