repo_name (stringlengths 5–100) | path (stringlengths 4–375) | copies (stringclasses, 991 values) | size (stringlengths 4–7) | content (stringlengths 666–1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
AlexanderFabisch/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 46 | 2798 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in an 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as the first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset into training and test sets:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the cross-validated scores for each parameter set
# explored by the grid search
# (model_selection.GridSearchCV exposes its results via the cv_results_
# dict; the old grid_scores_ attribute only existed on the deprecated
# sklearn.grid_search variant)
print(grid_search.cv_results_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
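# Example invocation (hypothetical path; the folder must contain one
# sub-directory per class, as noted above):
#   python exercise_02_sentiment.py ./txt_sentoken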
| bsd-3-clause |
meh/servo | tests/wpt/update/upstream.py | 43 | 13551 | import os
import re
import subprocess
import sys
import urlparse
from wptrunner.update.sync import LoadManifest
from wptrunner.update.tree import get_unique_name
from wptrunner.update.base import Step, StepRunner, exit_clean, exit_unclean
from .tree import Commit, GitTree, Patch
import github
from .github import GitHub
def rewrite_patch(patch, strip_dir):
"""Take a Patch and convert to a different repository by stripping a prefix from the
file paths. Also rewrite the message to remove the bug number and reviewer, but add
a bugzilla link in the summary.
:param patch: the Patch to convert
:param strip_dir: the path prefix to remove
"""
if not strip_dir.startswith("/"):
strip_dir = "/%s"% strip_dir
new_diff = []
line_starts = ["diff ", "+++ ", "--- "]
for line in patch.diff.split("\n"):
for start in line_starts:
if line.startswith(start):
new_diff.append(line.replace(strip_dir, "").encode("utf8"))
break
else:
new_diff.append(line)
new_diff = "\n".join(new_diff)
assert new_diff != patch.diff
return Patch(patch.author, patch.email, rewrite_message(patch), new_diff)
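# Illustration (hypothetical paths) of what rewrite_patch does to diff headers:
# with strip_dir="/testing/web-platform/tests", a header line such as
#   --- a/testing/web-platform/tests/dom/foo.html
# becomes
#   --- a/dom/foo.html
# so the patch applies cleanly at the root of the upstream repository.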
def rewrite_message(patch):
rest = patch.message.body
if patch.message.bug is not None:
return "\n".join([patch.message.summary,
patch.message.body,
"",
"Upstreamed from https://bugzilla.mozilla.org/show_bug.cgi?id=%s" %
patch.message.bug])
return "\n".join([patch.message.full_summary, rest])
class SyncToUpstream(Step):
"""Sync local changes to upstream"""
def create(self, state):
if not state.kwargs["upstream"]:
return
if not isinstance(state.local_tree, GitTree):
self.logger.error("Cannot sync with upstream from a non-Git checkout.")
return exit_clean
try:
import requests
except ImportError:
self.logger.error("Upstream sync requires the requests module to be installed")
return exit_clean
if not state.sync_tree:
os.makedirs(state.sync["path"])
state.sync_tree = GitTree(root=state.sync["path"])
kwargs = state.kwargs
with state.push(["local_tree", "sync_tree", "tests_path", "metadata_path",
"sync"]):
state.token = kwargs["token"]
runner = SyncToUpstreamRunner(self.logger, state)
runner.run()
class CheckoutBranch(Step):
"""Create a branch in the sync tree pointing at the last upstream sync commit
and check it out"""
provides = ["branch"]
def create(self, state):
self.logger.info("Updating sync tree from %s" % state.sync["remote_url"])
state.branch = state.sync_tree.unique_branch_name(
"outbound_update_%s" % state.test_manifest.rev)
state.sync_tree.update(state.sync["remote_url"],
state.sync["branch"],
state.branch)
state.sync_tree.checkout(state.test_manifest.rev, state.branch, force=True)
class GetLastSyncCommit(Step):
"""Find the gecko commit at which we last performed a sync with upstream."""
provides = ["last_sync_path", "last_sync_commit"]
def create(self, state):
self.logger.info("Looking for last sync commit")
state.last_sync_path = os.path.join(state.metadata_path, "mozilla-sync")
with open(state.last_sync_path) as f:
last_sync_sha1 = f.read().strip()
state.last_sync_commit = Commit(state.local_tree, last_sync_sha1)
if not state.local_tree.contains_commit(state.last_sync_commit):
self.logger.error("Could not find last sync commit %s" % last_sync_sha1)
return exit_clean
self.logger.info("Last sync to web-platform-tests happened in %s" % state.last_sync_commit.sha1)
class GetBaseCommit(Step):
"""Find the latest upstream commit on the branch that we are syncing with"""
provides = ["base_commit"]
def create(self, state):
state.base_commit = state.sync_tree.get_remote_sha1(state.sync["remote_url"],
state.sync["branch"])
self.logger.debug("New base commit is %s" % state.base_commit.sha1)
class LoadCommits(Step):
"""Get a list of commits in the gecko tree that need to be upstreamed"""
provides = ["source_commits"]
def create(self, state):
state.source_commits = state.local_tree.log(state.last_sync_commit,
state.tests_path)
update_regexp = re.compile(r"Bug \d+ - Update web-platform-tests to revision [0-9a-f]{40}")
for i, commit in enumerate(state.source_commits[:]):
if update_regexp.match(commit.message.text):
# This is a previous update commit so ignore it
state.source_commits.remove(commit)
continue
if commit.message.backouts:
#TODO: Add support for collapsing backouts
raise NotImplementedError("Need to get the Git->Hg commits for backouts and remove the backed out patch")
if not commit.message.bug:
self.logger.error("Commit %i (%s) doesn't have an associated bug number." %
(i + 1, commit.sha1))
return exit_unclean
self.logger.debug("Source commits: %s" % state.source_commits)
class SelectCommits(Step):
"""Provide a UI to select which commits to upstream"""
def create(self, state):
if not state.source_commits:
return
while True:
commits = state.source_commits[:]
for i, commit in enumerate(commits):
print "%i:\t%s" % (i, commit.message.summary)
remove = raw_input("Provide a space-separated list of any commit numbers to remove from the list to upstream:\n").strip()
remove_idx = set()
invalid = False
for item in remove.split(" "):
try:
item = int(item)
except ValueError:
invalid = True
break
if item < 0 or item >= len(commits):
invalid = True
break
remove_idx.add(item)
if invalid:
continue
keep_commits = [(i, cmt) for i, cmt in enumerate(commits) if i not in remove_idx]
# TODO: consider printing the removed commits as well
print "Selected the following commits to keep:"
for i, commit in keep_commits:
print "%i:\t%s" % (i, commit.message.summary)
confirm = raw_input("Keep the above commits? y/n\n").strip().lower()
if confirm == "y":
state.source_commits = [item[1] for item in keep_commits]
break
class MovePatches(Step):
"""Convert gecko commits into patches against upstream and commit these to the sync tree."""
provides = ["commits_loaded"]
def create(self, state):
state.commits_loaded = 0
strip_path = os.path.relpath(state.tests_path,
state.local_tree.root)
self.logger.debug("Stripping patch %s" % strip_path)
for commit in state.source_commits[state.commits_loaded:]:
i = state.commits_loaded + 1
self.logger.info("Moving commit %i: %s" % (i, commit.message.full_summary))
patch = commit.export_patch(state.tests_path)
stripped_patch = rewrite_patch(patch, strip_path)
try:
state.sync_tree.import_patch(stripped_patch)
except:
print patch.diff
raise
state.commits_loaded = i
class RebaseCommits(Step):
"""Rebase commits from the current branch on top of the upstream destination branch.
This step is particularly likely to fail if the rebase generates merge conflicts.
In that case the conflicts can be fixed up locally and the sync process restarted
with --continue.
"""
provides = ["rebased_commits"]
def create(self, state):
self.logger.info("Rebasing local commits")
continue_rebase = False
# Check if there's a rebase in progress
if (os.path.exists(os.path.join(state.sync_tree.root,
".git",
"rebase-merge")) or
os.path.exists(os.path.join(state.sync_tree.root,
".git",
"rebase-apply"))):
continue_rebase = True
try:
state.sync_tree.rebase(state.base_commit, continue_rebase=continue_rebase)
except subprocess.CalledProcessError:
self.logger.info("Rebase failed, fix merge and run %s again with --continue" % sys.argv[0])
raise
state.rebased_commits = state.sync_tree.log(state.base_commit)
self.logger.info("Rebase successful")
class CheckRebase(Step):
"""Check if there are any commits remaining after rebase"""
def create(self, state):
if not state.rebased_commits:
self.logger.info("Nothing to upstream, exiting")
return exit_clean
class MergeUpstream(Step):
"""Run steps to push local commits as seperate PRs and merge upstream."""
provides = ["merge_index", "gh_repo"]
def create(self, state):
gh = GitHub(state.token)
if "merge_index" not in state:
state.merge_index = 0
org, name = urlparse.urlsplit(state.sync["remote_url"]).path[1:].split("/")
if name.endswith(".git"):
name = name[:-4]
state.gh_repo = gh.repo(org, name)
for commit in state.rebased_commits[state.merge_index:]:
with state.push(["gh_repo", "sync_tree"]):
state.commit = commit
pr_merger = PRMergeRunner(self.logger, state)
rv = pr_merger.run()
if rv is not None:
return rv
state.merge_index += 1
class UpdateLastSyncCommit(Step):
"""Update the gecko commit at which we last performed a sync with upstream."""
provides = []
def create(self, state):
self.logger.info("Updating last sync commit")
with open(state.last_sync_path, "w") as f:
f.write(state.local_tree.rev)
# This gets added to the patch later on
class MergeLocalBranch(Step):
"""Create a local branch pointing at the commit to upstream"""
provides = ["local_branch"]
def create(self, state):
branch_prefix = "sync_%s" % state.commit.sha1
local_branch = state.sync_tree.unique_branch_name(branch_prefix)
state.sync_tree.create_branch(local_branch, state.commit)
state.local_branch = local_branch
class MergeRemoteBranch(Step):
"""Get an unused remote branch name to use for the PR"""
provides = ["remote_branch"]
def create(self, state):
remote_branch = "sync_%s" % state.commit.sha1
branches = [ref[len("refs/heads/"):] for sha1, ref in
state.sync_tree.list_remote(state.gh_repo.url)
if ref.startswith("refs/heads")]
state.remote_branch = get_unique_name(branches, remote_branch)
class PushUpstream(Step):
"""Push local branch to remote"""
def create(self, state):
self.logger.info("Pushing commit upstream")
state.sync_tree.push(state.gh_repo.url,
state.local_branch,
state.remote_branch)
class CreatePR(Step):
"""Create a PR for the remote branch"""
provides = ["pr"]
def create(self, state):
self.logger.info("Creating a PR")
commit = state.commit
state.pr = state.gh_repo.create_pr(commit.message.full_summary,
state.remote_branch,
"master",
commit.message.body if commit.message.body else "")
class PRAddComment(Step):
"""Add an issue comment indicating that the code has been reviewed already"""
def create(self, state):
state.pr.issue.add_comment("Code reviewed upstream.")
class MergePR(Step):
"""Merge the PR"""
def create(self, state):
self.logger.info("Merging PR")
state.pr.merge()
class PRDeleteBranch(Step):
"""Delete the remote branch"""
def create(self, state):
self.logger.info("Deleting remote branch")
state.sync_tree.push(state.gh_repo.url, "", state.remote_branch)
class SyncToUpstreamRunner(StepRunner):
"""Runner for syncing local changes to upstream"""
steps = [LoadManifest,
CheckoutBranch,
GetLastSyncCommit,
GetBaseCommit,
LoadCommits,
SelectCommits,
MovePatches,
RebaseCommits,
CheckRebase,
MergeUpstream,
UpdateLastSyncCommit]
class PRMergeRunner(StepRunner):
"""(Sub)Runner for creating and merging a PR"""
steps = [
MergeLocalBranch,
MergeRemoteBranch,
PushUpstream,
CreatePR,
PRAddComment,
MergePR,
PRDeleteBranch,
]
| mpl-2.0 |
ubc/compair | compair/kaltura/media.py | 1 | 3320 | import requests
from flask import current_app
from compair.core import abort
from . import KalturaCore
class Media(object):
@classmethod
def generate_media_entry(cls, ks, upload_token_id, media_type):
entry = cls._api_add(ks, media_type)
entry = cls._api_add_content(ks, entry.get('id'), upload_token_id)
return entry
@classmethod
def get_media_entry(cls, ks, entry_id):
return cls._api_get(ks, entry_id)
@classmethod
def update_media_entry(cls, ks, entry_id, update_params):
return cls._api_update(ks, entry_id, update_params)
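# Example usage (hypothetical session string, upload token, and media type id):
#   entry = Media.generate_media_entry(ks, upload_token_id, media_type=1)
#   entry = Media.update_media_entry(ks, entry['id'], {'mediaEntry[name]': 'Renamed entry'})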
@classmethod
def _api_add(cls, ks, media_type):
url = KalturaCore.base_url()+"/service/media/action/add"
params = {
'entry[mediaType]': media_type,
'ks': ks,
'format': 1, # json return value
}
result = requests.get(url, params=params, verify=KalturaCore.enforce_ssl())
if result.status_code == 200:
return result.json()
else:
current_app.logger.error(result)
abort(400, title="File Not Uploaded",
message="There was a problem with the Kaltura server. Please try again later.")
@classmethod
def _api_add_content(cls, ks, entry_id, upload_token_id):
url = KalturaCore.base_url()+"/service/media/action/addContent"
params = {
'entryId': entry_id,
'resource[objectType]': 'KalturaUploadedFileTokenResource',
'resource[token]': upload_token_id,
'ks': ks,
'format': 1, # json return value
}
result = requests.get(url, params=params, verify=KalturaCore.enforce_ssl())
if result.status_code == 200:
return result.json()
else:
current_app.logger.error(result)
abort(400, title="File Not Uploaded",
message="There was a problem with the Kaltura server. Please try again later.")
@classmethod
def _api_get(cls, ks, entry_id):
url = KalturaCore.base_url()+"/service/media/action/get"
params = {
'entryId': entry_id,
'ks': ks,
'format': 1, # json return value
}
result = requests.get(url, params=params, verify=KalturaCore.enforce_ssl())
if result.status_code == 200:
return result.json()
else:
current_app.logger.error(result)
abort(400, title="File Not Accessible",
message="There was a problem with the Kaltura server. Please try again later.")
@classmethod
def _api_update(cls, ks, entry_id, update_params={}):
url = KalturaCore.base_url()+"/service/media/action/update"
params = {
'entryId': entry_id,
'ks': ks,
'format': 1, # json return value
'mediaEntry[objectType]': "KalturaMediaEntry"
}
params.update(update_params)
result = requests.get(url, params=params, verify=KalturaCore.enforce_ssl())
if result.status_code == 200:
return result.json()
else:
current_app.logger.error(result)
abort(400, title="File Not Updated",
message="There was a problem with the Kaltura server. Please try again later.") | gpl-3.0 |
paladin74/neural-network-animation | matplotlib/tests/test_dviread.py | 15 | 1788 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from nose.tools import assert_equal
import matplotlib.dviread as dr
import os.path
original_find_tex_file = dr.find_tex_file
def setup():
dr.find_tex_file = lambda x: x
def teardown():
dr.find_tex_file = original_find_tex_file
def test_PsfontsMap():
filename = os.path.join(
os.path.dirname(__file__),
'baseline_images', 'dviread', 'test.map')
fontmap = dr.PsfontsMap(filename)
# Check all properties of a few fonts
for n in [1, 2, 3, 4, 5]:
key = 'TeXfont%d' % n
entry = fontmap[key]
assert_equal(entry.texname, key)
assert_equal(entry.psname, 'PSfont%d' % n)
if n not in [3, 5]:
assert_equal(entry.encoding, 'font%d.enc' % n)
elif n == 3:
assert_equal(entry.encoding, 'enc3.foo')
# We don't care about the encoding of TeXfont5, which specifies
# multiple encodings.
if n not in [1, 5]:
assert_equal(entry.filename, 'font%d.pfa' % n)
else:
assert_equal(entry.filename, 'font%d.pfb' % n)
if n == 4:
assert_equal(entry.effects, {'slant': -0.1, 'extend': 2.2})
else:
assert_equal(entry.effects, {})
# Some special cases
entry = fontmap['TeXfont6']
assert_equal(entry.filename, None)
assert_equal(entry.encoding, None)
entry = fontmap['TeXfont7']
assert_equal(entry.filename, None)
assert_equal(entry.encoding, 'font7.enc')
entry = fontmap['TeXfont8']
assert_equal(entry.filename, 'font8.pfb')
assert_equal(entry.encoding, None)
entry = fontmap['TeXfont9']
assert_equal(entry.filename, '/absolute/font9.pfb')
| mit |
alexgleith/Quantum-GIS | python/plugins/sextante/algs/QGISAlgorithmProvider.py | 2 | 8315 | # -*- coding: utf-8 -*-
"""
***************************************************************************
QGISAlgorithmProvider.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from sextante.algs.Polygonize import Polygonize
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtGui import *
from sextante.core.AlgorithmProvider import AlgorithmProvider
from sextante.algs.ftools.PointsInPolygon import PointsInPolygon
from sextante.algs.ftools.PointsInPolygonUnique import PointsInPolygonUnique
from sextante.algs.ftools.PointsInPolygonWeighted import PointsInPolygonWeighted
from sextante.algs.ftools.SumLines import SumLines
from sextante.algs.ftools.BasicStatisticsNumbers import BasicStatisticsNumbers
from sextante.algs.ftools.BasicStatisticsStrings import BasicStatisticsStrings
from sextante.algs.ftools.NearestNeighbourAnalysis import NearestNeighbourAnalysis
from sextante.algs.ftools.LinesIntersection import LinesIntersection
from sextante.algs.ftools.MeanCoords import MeanCoords
from sextante.algs.ftools.PointDistance import PointDistance
from sextante.algs.ftools.UniqueValues import UniqueValues
from sextante.algs.ftools.ReprojectLayer import ReprojectLayer
from sextante.algs.ftools.ExportGeometryInfo import ExportGeometryInfo
from sextante.algs.ftools.Centroids import Centroids
from sextante.algs.ftools.Delaunay import Delaunay
from sextante.algs.ftools.VoronoiPolygons import VoronoiPolygons
from sextante.algs.ftools.DensifyGeometries import DensifyGeometries
from sextante.algs.ftools.MultipartToSingleparts import MultipartToSingleparts
from sextante.algs.ftools.SimplifyGeometries import SimplifyGeometries
from sextante.algs.ftools.LinesToPolygons import LinesToPolygons
from sextante.algs.ftools.PolygonsToLines import PolygonsToLines
from sextante.algs.ftools.SinglePartsToMultiparts import SinglePartsToMultiparts
from sextante.algs.ftools.ExtractNodes import ExtractNodes
from sextante.algs.ftools.ConvexHull import ConvexHull
from sextante.algs.ftools.FixedDistanceBuffer import FixedDistanceBuffer
from sextante.algs.ftools.VariableDistanceBuffer import VariableDistanceBuffer
from sextante.algs.ftools.Clip import Clip
from sextante.algs.ftools.Difference import Difference
from sextante.algs.ftools.Dissolve import Dissolve
from sextante.algs.ftools.Intersection import Intersection
from sextante.algs.ftools.ExtentFromLayer import ExtentFromLayer
from sextante.algs.ftools.RandomSelection import RandomSelection
from sextante.algs.ftools.RandomSelectionWithinSubsets import RandomSelectionWithinSubsets
from sextante.algs.ftools.SelectByLocation import SelectByLocation
from sextante.algs.ftools.Union import Union
from sextante.algs.ftools.DensifyGeometriesInterval import DensifyGeometriesInterval
from sextante.algs.mmqgisx.MMQGISXAlgorithms import (mmqgisx_delete_columns_algorithm,
mmqgisx_delete_duplicate_geometries_algorithm,
mmqgisx_geometry_convert_algorithm,
mmqgisx_grid_algorithm, mmqgisx_gridify_algorithm,
mmqgisx_hub_distance_algorithm, mmqgisx_hub_lines_algorithm,
mmqgisx_merge_algorithm, mmqgisx_select_algorithm,
mmqgisx_text_to_float_algorithm)
from sextante.algs.RasterLayerStatistics import RasterLayerStatistics
from sextante.algs.StatisticsByCategories import StatisticsByCategories
from sextante.algs.EquivalentNumField import EquivalentNumField
from sextante.algs.AddTableField import AddTableField
from sextante.algs.FieldsCalculator import FieldsCalculator
from sextante.algs.SaveSelectedFeatures import SaveSelectedFeatures
from sextante.algs.Explode import Explode
from sextante.algs.AutoincrementalField import AutoincrementalField
from sextante.algs.FieldPyculator import FieldsPyculator
from sextante.algs.JoinAttributes import JoinAttributes
from sextante.algs.CreateConstantRaster import CreateConstantRaster
from sextante.algs.PointsLayerFromTable import PointsLayerFromTable
#from sextante.algs.VectorLayerHistogram import VectorLayerHistogram
#from sextante.algs.VectorLayerScatterplot import VectorLayerScatterplot
#from sextante.algs.MeanAndStdDevPlot import MeanAndStdDevPlot
#from sextante.algs.BarPlot import BarPlot
#from sextante.algs.PolarPlot import PolarPlot
#from sextante.algs.RasterLayerHistogram import RasterLayerHistogram
import sextante.resources_rc
class QGISAlgorithmProvider(AlgorithmProvider):
def __init__(self):
AlgorithmProvider.__init__(self)
self.alglist = [# ------ fTools ------
SumLines(), PointsInPolygon(), PointsInPolygonWeighted(),
PointsInPolygonUnique(), BasicStatisticsStrings(),
BasicStatisticsNumbers(), NearestNeighbourAnalysis(),
MeanCoords(), LinesIntersection(), UniqueValues(), PointDistance(),
# data management
ReprojectLayer(),
# geometry
ExportGeometryInfo(), Centroids(), Delaunay(), VoronoiPolygons(),
SimplifyGeometries(), DensifyGeometries(), DensifyGeometriesInterval(),
MultipartToSingleparts(), SinglePartsToMultiparts(), PolygonsToLines(),
LinesToPolygons(), ExtractNodes(),
# geoprocessing
ConvexHull(), FixedDistanceBuffer(), VariableDistanceBuffer(),
Dissolve(), Difference(), Intersection(), Union(), Clip(),
# research
ExtentFromLayer(), RandomSelection(), RandomSelectionWithinSubsets(),
SelectByLocation(),
# ------ mmqgisx ------
mmqgisx_delete_columns_algorithm(),
mmqgisx_delete_duplicate_geometries_algorithm(),
mmqgisx_geometry_convert_algorithm(),
mmqgisx_grid_algorithm(),
mmqgisx_gridify_algorithm(),
mmqgisx_hub_distance_algorithm(),
mmqgisx_hub_lines_algorithm(),
mmqgisx_merge_algorithm(),
mmqgisx_select_algorithm(),
mmqgisx_text_to_float_algorithm(),
# ------ native algs ------
AddTableField(), FieldsCalculator(), SaveSelectedFeatures(), JoinAttributes(),
AutoincrementalField(), Explode(), FieldsPyculator(), EquivalentNumField(),
PointsLayerFromTable(), StatisticsByCategories(), Polygonize(),
# ------ raster ------
#CreateConstantRaster(),
RasterLayerStatistics()
# ------ graphics ------
#VectorLayerHistogram(), VectorLayerScatterplot(), RasterLayerHistogram(),
#MeanAndStdDevPlot(), BarPlot(), PolarPlot()
]
def initializeSettings(self):
AlgorithmProvider.initializeSettings(self)
def unload(self):
AlgorithmProvider.unload(self)
def getName(self):
return "qgis"
def getDescription(self):
return "QGIS geoalgorithms"
def getIcon(self):
return QIcon(":/sextante/images/qgis.png")
def _loadAlgorithms(self):
self.algs = self.alglist
def supportsNonFileBasedOutput(self):
return True
| gpl-2.0 |
Novasoft-India/OperERP-AM-Motors | openerp/conf/deprecation.py | 76 | 1602 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Regroup variables for deprecated features.
To keep the OpenERP server backward compatible with older modules, some
additional code is needed throughout the core library. This module keeps
track of those specific measures by providing variables that can be unset
by the user to check if her code is future proof.
"""
# If True, the Python modules inside the openerp namespace are made available
# without the 'openerp.' prefix. E.g. openerp.osv.osv and osv.osv refer to the
# same module.
# Introduced around 2011.02.
open_openerp_namespace = True
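# Illustrative check (assuming a legacy module that still does `from osv import
# osv`): flipping open_openerp_namespace to False makes that bare import fail
# with ImportError, while `from openerp.osv import osv` keeps working.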
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
eezee-it/account-invoicing | account_invoice_merge_payment/models/account_invoice.py | 24 | 1583 | # -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of account_invoice_merge_payment,
# an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# account_invoice_merge_payment is free software:
# you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software
# Foundation,either version 3 of the License, or (at your option) any
# later version.
#
# account_invoice_merge_payment is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with account_invoice_merge_payment.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
@api.model
def _get_invoice_key_cols(self):
return super(AccountInvoice, self)._get_invoice_key_cols() + [
'payment_mode_id',
]
@api.model
def _get_first_invoice_fields(self, invoice):
res = super(AccountInvoice, self)._get_first_invoice_fields(invoice)
res.update({'payment_mode_id': invoice.payment_mode_id.id})
return res
| agpl-3.0 |
patricklaw/pants | src/python/pants/backend/python/goals/setup_py_test.py | 3 | 33865 | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import textwrap
from typing import Iterable, Type
import pytest
from pants.backend.python import target_types_rules
from pants.backend.python.goals.setup_py import (
AmbiguousOwnerError,
DependencyOwner,
ExportedTarget,
ExportedTargetRequirements,
FirstPartyDependencyVersionScheme,
InvalidEntryPoint,
InvalidSetupPyArgs,
NoOwnerError,
OwnedDependencies,
OwnedDependency,
SetupKwargs,
SetupKwargsRequest,
SetupPyChroot,
SetupPyChrootRequest,
SetupPyGeneration,
SetupPySources,
SetupPySourcesRequest,
declares_pkg_resources_namespace_package,
determine_setup_kwargs,
distutils_repr,
generate_chroot,
get_exporting_owner,
get_owned_dependencies,
get_requirements,
get_sources,
validate_commands,
)
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.target_types import (
PexBinary,
PythonDistribution,
PythonLibrary,
PythonRequirementLibrary,
)
from pants.backend.python.util_rules import python_sources
from pants.core.target_types import Files, Resources
from pants.engine.addresses import Address
from pants.engine.fs import Snapshot
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.rules import SubsystemRule, rule
from pants.engine.target import Targets
from pants.engine.unions import UnionRule
from pants.testutil.rule_runner import QueryRule, RuleRunner
_namespace_decl = "__import__('pkg_resources').declare_namespace(__name__)"
def create_setup_py_rule_runner(*, rules: Iterable) -> RuleRunner:
rule_runner = RuleRunner(
rules=rules,
target_types=[
PexBinary,
PythonDistribution,
PythonLibrary,
PythonRequirementLibrary,
Resources,
Files,
],
objects={"setup_py": PythonArtifact},
)
rule_runner.set_options([], env_inherit={"PATH", "PYENV_ROOT", "HOME"})
return rule_runner
# We use a trivial test to check that our SetupKwargs plugin hook works.
class PluginSetupKwargsRequest(SetupKwargsRequest):
@classmethod
def is_applicable(cls, _) -> bool:
return True
@rule
def setup_kwargs_plugin(request: PluginSetupKwargsRequest) -> SetupKwargs:
if "setup_script" in request.explicit_kwargs:
kwargs = request.explicit_kwargs
else:
kwargs = {**request.explicit_kwargs, "plugin_demo": "hello world"}
return SetupKwargs(kwargs, address=request.target.address)
@pytest.fixture
def chroot_rule_runner() -> RuleRunner:
return create_setup_py_rule_runner(
rules=[
determine_setup_kwargs,
generate_chroot,
get_sources,
get_requirements,
get_owned_dependencies,
get_exporting_owner,
*python_sources.rules(),
*target_types_rules.rules(),
setup_kwargs_plugin,
SubsystemRule(SetupPyGeneration),
UnionRule(SetupKwargsRequest, PluginSetupKwargsRequest),
QueryRule(SetupPyChroot, (SetupPyChrootRequest,)),
]
)
def assert_chroot(
rule_runner: RuleRunner,
expected_files: list[str],
expected_setup_script: str,
expected_setup_kwargs,
addr: Address,
) -> None:
tgt = rule_runner.get_target(addr)
chroot = rule_runner.request(
SetupPyChroot,
[SetupPyChrootRequest(ExportedTarget(tgt), py2=False)],
)
snapshot = rule_runner.request(Snapshot, [chroot.digest])
assert sorted(expected_files) == sorted(snapshot.files)
assert chroot.setup_script == expected_setup_script
assert expected_setup_kwargs == chroot.setup_kwargs.kwargs
def assert_chroot_error(rule_runner: RuleRunner, addr: Address, exc_cls: Type[Exception]) -> None:
tgt = rule_runner.get_target(addr)
with pytest.raises(ExecutionError) as excinfo:
rule_runner.request(
SetupPyChroot,
[SetupPyChrootRequest(ExportedTarget(tgt), py2=False)],
)
ex = excinfo.value
assert len(ex.wrapped_exceptions) == 1
assert type(ex.wrapped_exceptions[0]) == exc_cls
def test_use_existing_setup_script(chroot_rule_runner) -> None:
chroot_rule_runner.add_to_build_file("src/python/foo/bar", "python_library()")
chroot_rule_runner.create_file("src/python/foo/bar/__init__.py")
chroot_rule_runner.create_file("src/python/foo/bar/bar.py")
# Add a `.pyi` stub file to ensure we include it in the final result.
chroot_rule_runner.create_file("src/python/foo/bar/bar.pyi")
chroot_rule_runner.add_to_build_file(
"src/python/foo/resources", 'resources(sources=["js/code.js"])'
)
chroot_rule_runner.create_file("src/python/foo/resources/js/code.js")
chroot_rule_runner.add_to_build_file("files", 'files(sources=["README.txt"])')
chroot_rule_runner.create_file("files/README.txt")
chroot_rule_runner.add_to_build_file(
"src/python/foo",
textwrap.dedent(
"""
python_distribution(
name='foo-dist',
dependencies=[
':foo',
],
provides=setup_py(
setup_script='src/python/foo/setup.py',
name='foo', version='1.2.3'
)
)
python_library(
dependencies=[
'src/python/foo/bar',
'src/python/foo/resources',
'files',
]
)
"""
),
)
chroot_rule_runner.create_file("src/python/foo/__init__.py", _namespace_decl)
chroot_rule_runner.create_file("src/python/foo/foo.py")
chroot_rule_runner.create_file(
"src/python/foo/setup.py",
textwrap.dedent(
"""
from setuptools import setup
setup(
name = "foo",
version = "1.2.3",
packages = ["foo"],
)
"""
),
)
assert_chroot(
chroot_rule_runner,
[
"files/README.txt",
"foo/bar/__init__.py",
"foo/bar/bar.py",
"foo/bar/bar.pyi",
"foo/resources/js/code.js",
"foo/__init__.py",
"foo/foo.py",
"foo/setup.py",
],
"foo/setup.py",
{
"name": "foo",
"version": "1.2.3",
},
Address("src/python/foo", target_name="foo-dist"),
)
def test_generate_chroot(chroot_rule_runner: RuleRunner) -> None:
chroot_rule_runner.add_to_build_file(
"src/python/foo/bar/baz",
textwrap.dedent(
"""
python_distribution(
name="baz-dist",
dependencies=[':baz'],
provides=setup_py(
name='baz',
version='1.1.1'
)
)
python_library()
"""
),
)
chroot_rule_runner.create_file("src/python/foo/bar/baz/baz.py")
chroot_rule_runner.add_to_build_file(
"src/python/foo/qux",
textwrap.dedent(
"""
python_library()
pex_binary(name="bin", entry_point="foo.qux.bin:main")
"""
),
)
chroot_rule_runner.create_file("src/python/foo/qux/__init__.py")
chroot_rule_runner.create_file("src/python/foo/qux/qux.py")
# Add a `.pyi` stub file to ensure we include it in the final result.
chroot_rule_runner.create_file("src/python/foo/qux/qux.pyi")
chroot_rule_runner.add_to_build_file(
"src/python/foo/resources", 'resources(sources=["js/code.js"])'
)
chroot_rule_runner.create_file("src/python/foo/resources/js/code.js")
chroot_rule_runner.add_to_build_file("files", 'files(sources=["README.txt"])')
chroot_rule_runner.create_file("files/README.txt")
chroot_rule_runner.add_to_build_file(
"src/python/foo",
textwrap.dedent(
"""
python_distribution(
name='foo-dist',
dependencies=[
':foo',
],
provides=setup_py(
name='foo', version='1.2.3'
).with_binaries(
foo_main='src/python/foo/qux:bin'
)
)
python_library(
dependencies=[
'src/python/foo/bar/baz',
'src/python/foo/qux',
'src/python/foo/resources',
'files',
]
)
"""
),
)
chroot_rule_runner.create_file("src/python/foo/__init__.py", _namespace_decl)
chroot_rule_runner.create_file("src/python/foo/foo.py")
assert_chroot(
chroot_rule_runner,
[
"src/files/README.txt",
"src/foo/qux/__init__.py",
"src/foo/qux/qux.py",
"src/foo/qux/qux.pyi",
"src/foo/resources/js/code.js",
"src/foo/__init__.py",
"src/foo/foo.py",
"setup.py",
"MANIFEST.in",
],
"setup.py",
{
"name": "foo",
"version": "1.2.3",
"plugin_demo": "hello world",
"package_dir": {"": "src"},
"packages": ("foo", "foo.qux"),
"namespace_packages": ("foo",),
"package_data": {"foo": ("resources/js/code.js",)},
"install_requires": ("baz==1.1.1",),
"entry_points": {"console_scripts": ["foo_main=foo.qux.bin:main"]},
},
Address("src/python/foo", target_name="foo-dist"),
)
def test_invalid_binary(chroot_rule_runner: RuleRunner) -> None:
chroot_rule_runner.create_files("src/python/invalid_binary", ["app1.py", "app2.py"])
chroot_rule_runner.add_to_build_file(
"src/python/invalid_binary",
textwrap.dedent(
"""
python_library(name='not_a_binary', sources=[])
pex_binary(name='invalid_entrypoint_unowned1', entry_point='app1.py')
pex_binary(name='invalid_entrypoint_unowned2', entry_point='invalid_binary.app2')
python_distribution(
name='invalid_bin1',
provides=setup_py(
name='invalid_bin1', version='1.1.1'
).with_binaries(foo=':not_a_binary')
)
python_distribution(
name='invalid_bin2',
provides=setup_py(
name='invalid_bin2', version='1.1.1'
).with_binaries(foo=':invalid_entrypoint_unowned1')
)
python_distribution(
name='invalid_bin3',
provides=setup_py(
name='invalid_bin3', version='1.1.1'
).with_binaries(foo=':invalid_entrypoint_unowned2')
)
"""
),
)
assert_chroot_error(
chroot_rule_runner,
Address("src/python/invalid_binary", target_name="invalid_bin1"),
InvalidEntryPoint,
)
assert_chroot_error(
chroot_rule_runner,
Address("src/python/invalid_binary", target_name="invalid_bin2"),
InvalidEntryPoint,
)
assert_chroot_error(
chroot_rule_runner,
Address("src/python/invalid_binary", target_name="invalid_bin3"),
InvalidEntryPoint,
)
def test_binary_shorthand(chroot_rule_runner: RuleRunner) -> None:
chroot_rule_runner.create_file("src/python/project/app.py")
chroot_rule_runner.add_to_build_file(
"src/python/project",
textwrap.dedent(
"""
python_library()
pex_binary(name='bin', entry_point='app.py:func')
python_distribution(
name='dist',
provides=setup_py(
name='bin', version='1.1.1'
).with_binaries(foo=':bin')
)
"""
),
)
assert_chroot(
chroot_rule_runner,
["src/project/app.py", "setup.py", "MANIFEST.in"],
"setup.py",
{
"name": "bin",
"version": "1.1.1",
"plugin_demo": "hello world",
"package_dir": {"": "src"},
"packages": ("project",),
"namespace_packages": (),
"install_requires": (),
"package_data": {},
"entry_points": {"console_scripts": ["foo=project.app:func"]},
},
Address("src/python/project", target_name="dist"),
)
def test_get_sources() -> None:
rule_runner = create_setup_py_rule_runner(
rules=[
get_sources,
*python_sources.rules(),
QueryRule(SetupPySources, (SetupPySourcesRequest,)),
]
)
rule_runner.add_to_build_file(
"src/python/foo/bar/baz",
textwrap.dedent(
"""
python_library(name='baz1', sources=['baz1.py'])
python_library(name='baz2', sources=['baz2.py'])
"""
),
)
rule_runner.create_file("src/python/foo/bar/baz/baz1.py")
rule_runner.create_file("src/python/foo/bar/baz/baz2.py")
rule_runner.create_file("src/python/foo/bar/__init__.py", _namespace_decl)
rule_runner.add_to_build_file("src/python/foo/qux", "python_library()")
rule_runner.create_file("src/python/foo/qux/__init__.py")
rule_runner.create_file("src/python/foo/qux/qux.py")
rule_runner.add_to_build_file("src/python/foo/resources", 'resources(sources=["js/code.js"])')
rule_runner.create_file("src/python/foo/resources/js/code.js")
rule_runner.create_file("src/python/foo/__init__.py")
def assert_sources(
expected_files,
expected_packages,
expected_namespace_packages,
expected_package_data,
addrs,
):
targets = Targets(rule_runner.get_target(addr) for addr in addrs)
srcs = rule_runner.request(
SetupPySources,
[SetupPySourcesRequest(targets, py2=False)],
)
chroot_snapshot = rule_runner.request(Snapshot, [srcs.digest])
assert sorted(expected_files) == sorted(chroot_snapshot.files)
assert sorted(expected_packages) == sorted(srcs.packages)
assert sorted(expected_namespace_packages) == sorted(srcs.namespace_packages)
assert expected_package_data == dict(srcs.package_data)
assert_sources(
expected_files=["foo/bar/baz/baz1.py", "foo/bar/__init__.py", "foo/__init__.py"],
expected_packages=["foo", "foo.bar", "foo.bar.baz"],
expected_namespace_packages=["foo.bar"],
expected_package_data={},
addrs=[Address("src/python/foo/bar/baz", target_name="baz1")],
)
assert_sources(
expected_files=["foo/bar/baz/baz2.py", "foo/bar/__init__.py", "foo/__init__.py"],
expected_packages=["foo", "foo.bar", "foo.bar.baz"],
expected_namespace_packages=["foo.bar"],
expected_package_data={},
addrs=[Address("src/python/foo/bar/baz", target_name="baz2")],
)
assert_sources(
expected_files=["foo/qux/qux.py", "foo/qux/__init__.py", "foo/__init__.py"],
expected_packages=["foo", "foo.qux"],
expected_namespace_packages=[],
expected_package_data={},
addrs=[Address("src/python/foo/qux")],
)
assert_sources(
expected_files=[
"foo/bar/baz/baz1.py",
"foo/bar/__init__.py",
"foo/qux/qux.py",
"foo/qux/__init__.py",
"foo/__init__.py",
"foo/resources/js/code.js",
],
expected_packages=["foo", "foo.bar", "foo.bar.baz", "foo.qux"],
expected_namespace_packages=["foo.bar"],
expected_package_data={"foo": ("resources/js/code.js",)},
addrs=[
Address("src/python/foo/bar/baz", target_name="baz1"),
Address("src/python/foo/qux"),
Address("src/python/foo/resources"),
],
)
assert_sources(
expected_files=[
"foo/bar/baz/baz1.py",
"foo/bar/baz/baz2.py",
"foo/bar/__init__.py",
"foo/qux/qux.py",
"foo/qux/__init__.py",
"foo/__init__.py",
"foo/resources/js/code.js",
],
expected_packages=["foo", "foo.bar", "foo.bar.baz", "foo.qux"],
expected_namespace_packages=["foo.bar"],
expected_package_data={"foo": ("resources/js/code.js",)},
addrs=[
Address("src/python/foo/bar/baz", target_name="baz1"),
Address("src/python/foo/bar/baz", target_name="baz2"),
Address("src/python/foo/qux"),
Address("src/python/foo/resources"),
],
)
def test_get_requirements() -> None:
rule_runner = create_setup_py_rule_runner(
rules=[
determine_setup_kwargs,
get_requirements,
get_owned_dependencies,
get_exporting_owner,
SubsystemRule(SetupPyGeneration),
QueryRule(ExportedTargetRequirements, (DependencyOwner,)),
]
)
rule_runner.add_to_build_file(
"3rdparty",
textwrap.dedent(
"""
python_requirement_library(
name='ext1',
requirements=['ext1==1.22.333'],
)
python_requirement_library(
name='ext2',
requirements=['ext2==4.5.6'],
)
python_requirement_library(
name='ext3',
requirements=['ext3==0.0.1'],
)
"""
),
)
rule_runner.add_to_build_file(
"src/python/foo/bar/baz",
"python_library(dependencies=['3rdparty:ext1'], sources=[])",
)
rule_runner.add_to_build_file(
"src/python/foo/bar/qux",
"python_library(dependencies=['3rdparty:ext2', 'src/python/foo/bar/baz'], sources=[])",
)
rule_runner.add_to_build_file(
"src/python/foo/bar",
textwrap.dedent(
"""
python_distribution(
name='bar-dist',
dependencies=[':bar'],
provides=setup_py(name='bar', version='9.8.7'),
)
python_library(
sources=[],
dependencies=['src/python/foo/bar/baz', 'src/python/foo/bar/qux'],
)
"""
),
)
rule_runner.add_to_build_file(
"src/python/foo/corge",
textwrap.dedent(
"""
python_distribution(
name='corge-dist',
# Tests having a 3rdparty requirement directly on a python_distribution.
dependencies=[':corge', '3rdparty:ext3'],
provides=setup_py(name='corge', version='2.2.2'),
)
python_library(
sources=[],
dependencies=['src/python/foo/bar'],
)
"""
),
)
assert_requirements(
rule_runner,
["ext1==1.22.333", "ext2==4.5.6"],
Address("src/python/foo/bar", target_name="bar-dist"),
)
assert_requirements(
rule_runner,
["ext3==0.0.1", "bar==9.8.7"],
Address("src/python/foo/corge", target_name="corge-dist"),
)
assert_requirements(
rule_runner,
["ext3==0.0.1", "bar~=9.8.7"],
Address("src/python/foo/corge", target_name="corge-dist"),
version_scheme=FirstPartyDependencyVersionScheme.COMPATIBLE,
)
assert_requirements(
rule_runner,
["ext3==0.0.1", "bar"],
Address("src/python/foo/corge", target_name="corge-dist"),
version_scheme=FirstPartyDependencyVersionScheme.ANY,
)
def test_get_requirements_with_exclude() -> None:
rule_runner = create_setup_py_rule_runner(
rules=[
determine_setup_kwargs,
get_requirements,
get_owned_dependencies,
get_exporting_owner,
SubsystemRule(SetupPyGeneration),
QueryRule(ExportedTargetRequirements, (DependencyOwner,)),
]
)
rule_runner.add_to_build_file(
"3rdparty",
textwrap.dedent(
"""
python_requirement_library(
name='ext1',
requirements=['ext1==1.22.333'],
)
python_requirement_library(
name='ext2',
requirements=['ext2==4.5.6'],
)
python_requirement_library(
name='ext3',
requirements=['ext3==0.0.1'],
)
"""
),
)
rule_runner.add_to_build_file(
"src/python/foo/bar/baz",
"python_library(dependencies=['3rdparty:ext1'], sources=[])",
)
rule_runner.add_to_build_file(
"src/python/foo/bar/qux",
"python_library(dependencies=['3rdparty:ext2', 'src/python/foo/bar/baz'], sources=[])",
)
rule_runner.add_to_build_file(
"src/python/foo/bar",
textwrap.dedent(
"""
python_distribution(
name='bar-dist',
dependencies=['!!3rdparty:ext2',':bar'],
provides=setup_py(name='bar', version='9.8.7'),
)
python_library(
sources=[],
dependencies=['src/python/foo/bar/baz', 'src/python/foo/bar/qux'],
)
"""
),
)
assert_requirements(
rule_runner, ["ext1==1.22.333"], Address("src/python/foo/bar", target_name="bar-dist")
)
def assert_requirements(
rule_runner,
expected_req_strs,
addr: Address,
*,
version_scheme: FirstPartyDependencyVersionScheme = FirstPartyDependencyVersionScheme.EXACT,
):
rule_runner.set_options(
[f"--setup-py-generation-first-party-dependency-version-scheme={version_scheme.value}"],
env_inherit={"PATH", "PYENV_ROOT", "HOME"},
)
tgt = rule_runner.get_target(addr)
reqs = rule_runner.request(
ExportedTargetRequirements,
[DependencyOwner(ExportedTarget(tgt))],
)
assert sorted(expected_req_strs) == list(reqs)
def test_owned_dependencies() -> None:
rule_runner = create_setup_py_rule_runner(
rules=[
get_owned_dependencies,
get_exporting_owner,
QueryRule(OwnedDependencies, (DependencyOwner,)),
]
)
rule_runner.add_to_build_file(
"src/python/foo/bar/baz",
textwrap.dedent(
"""
python_library(name='baz1', sources=[])
python_library(name='baz2', sources=[])
"""
),
)
rule_runner.add_to_build_file(
"src/python/foo/bar",
textwrap.dedent(
"""
python_distribution(
name='bar1-dist',
dependencies=[':bar1'],
provides=setup_py(name='bar1', version='1.1.1'),
)
python_library(
name='bar1',
sources=[],
dependencies=['src/python/foo/bar/baz:baz1'],
)
python_library(
name='bar2',
sources=[],
dependencies=[':bar-resources', 'src/python/foo/bar/baz:baz2'],
)
resources(name='bar-resources', sources=[])
"""
),
)
rule_runner.add_to_build_file(
"src/python/foo",
textwrap.dedent(
"""
python_distribution(
name='foo-dist',
dependencies=[':foo'],
provides=setup_py(name='foo', version='3.4.5'),
)
python_library(
sources=[],
dependencies=['src/python/foo/bar:bar1', 'src/python/foo/bar:bar2'],
)
"""
),
)
def assert_owned(owned: Iterable[str], exported: Address):
tgt = rule_runner.get_target(exported)
assert sorted(owned) == sorted(
od.target.address.spec
for od in rule_runner.request(
OwnedDependencies,
[DependencyOwner(ExportedTarget(tgt))],
)
)
assert_owned(
["src/python/foo/bar:bar1", "src/python/foo/bar:bar1-dist", "src/python/foo/bar/baz:baz1"],
Address("src/python/foo/bar", target_name="bar1-dist"),
)
assert_owned(
[
"src/python/foo",
"src/python/foo:foo-dist",
"src/python/foo/bar:bar2",
"src/python/foo/bar:bar-resources",
"src/python/foo/bar/baz:baz2",
],
Address("src/python/foo", target_name="foo-dist"),
)
@pytest.fixture
def exporting_owner_rule_runner() -> RuleRunner:
return create_setup_py_rule_runner(
rules=[
get_exporting_owner,
QueryRule(ExportedTarget, (OwnedDependency,)),
]
)
def assert_is_owner(rule_runner: RuleRunner, owner: str, owned: Address):
tgt = rule_runner.get_target(owned)
assert (
owner
== rule_runner.request(
ExportedTarget,
[OwnedDependency(tgt)],
).target.address.spec
)
def assert_owner_error(rule_runner, owned: Address, exc_cls: Type[Exception]):
tgt = rule_runner.get_target(owned)
with pytest.raises(ExecutionError) as excinfo:
rule_runner.request(
ExportedTarget,
[OwnedDependency(tgt)],
)
ex = excinfo.value
assert len(ex.wrapped_exceptions) == 1
assert type(ex.wrapped_exceptions[0]) == exc_cls
def assert_no_owner(rule_runner: RuleRunner, owned: Address):
assert_owner_error(rule_runner, owned, NoOwnerError)
def assert_ambiguous_owner(rule_runner: RuleRunner, owned: Address):
assert_owner_error(rule_runner, owned, AmbiguousOwnerError)
def test_get_owner_simple(exporting_owner_rule_runner: RuleRunner) -> None:
exporting_owner_rule_runner.add_to_build_file(
"src/python/foo/bar/baz",
textwrap.dedent(
"""
python_library(name='baz1', sources=[])
python_library(name='baz2', sources=[])
"""
),
)
exporting_owner_rule_runner.add_to_build_file(
"src/python/foo/bar",
textwrap.dedent(
"""
python_distribution(
name='bar1',
dependencies=['src/python/foo/bar/baz:baz1'],
provides=setup_py(name='bar1', version='1.1.1'),
)
python_library(
name='bar2',
sources=[],
dependencies=[':bar-resources', 'src/python/foo/bar/baz:baz2'],
)
resources(name='bar-resources', sources=[])
"""
),
)
exporting_owner_rule_runner.add_to_build_file(
"src/python/foo",
textwrap.dedent(
"""
python_distribution(
name='foo1',
dependencies=['src/python/foo/bar/baz:baz2'],
provides=setup_py(name='foo1', version='0.1.2'),
)
python_library(name='foo2', sources=[])
python_distribution(
name='foo3',
dependencies=['src/python/foo/bar:bar2'],
provides=setup_py(name='foo3', version='3.4.5'),
)
"""
),
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/foo/bar:bar1",
Address("src/python/foo/bar", target_name="bar1"),
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/foo/bar:bar1",
Address("src/python/foo/bar/baz", target_name="baz1"),
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/foo:foo1",
Address("src/python/foo", target_name="foo1"),
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/foo:foo3",
Address("src/python/foo", target_name="foo3"),
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/foo:foo3",
Address("src/python/foo/bar", target_name="bar2"),
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/foo:foo3",
Address("src/python/foo/bar", target_name="bar-resources"),
)
assert_no_owner(exporting_owner_rule_runner, Address("src/python/foo", target_name="foo2"))
assert_ambiguous_owner(
exporting_owner_rule_runner, Address("src/python/foo/bar/baz", target_name="baz2")
)
def test_get_owner_siblings(exporting_owner_rule_runner: RuleRunner) -> None:
exporting_owner_rule_runner.add_to_build_file(
"src/python/siblings",
textwrap.dedent(
"""
python_library(name='sibling1', sources=[])
python_distribution(
name='sibling2',
dependencies=['src/python/siblings:sibling1'],
provides=setup_py(name='siblings', version='2.2.2'),
)
"""
),
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/siblings:sibling2",
Address("src/python/siblings", target_name="sibling1"),
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/siblings:sibling2",
Address("src/python/siblings", target_name="sibling2"),
)
def test_get_owner_not_an_ancestor(exporting_owner_rule_runner: RuleRunner) -> None:
exporting_owner_rule_runner.add_to_build_file(
"src/python/notanancestor/aaa",
textwrap.dedent(
"""
python_library(name='aaa', sources=[])
"""
),
)
exporting_owner_rule_runner.add_to_build_file(
"src/python/notanancestor/bbb",
textwrap.dedent(
"""
python_distribution(
name='bbb',
dependencies=['src/python/notanancestor/aaa'],
provides=setup_py(name='bbb', version='11.22.33'),
)
"""
),
)
assert_no_owner(exporting_owner_rule_runner, Address("src/python/notanancestor/aaa"))
assert_is_owner(
exporting_owner_rule_runner,
"src/python/notanancestor/bbb",
Address("src/python/notanancestor/bbb"),
)
def test_get_owner_multiple_ancestor_generations(exporting_owner_rule_runner: RuleRunner) -> None:
exporting_owner_rule_runner.add_to_build_file(
"src/python/aaa/bbb/ccc",
textwrap.dedent(
"""
python_library(name='ccc', sources=[])
"""
),
)
exporting_owner_rule_runner.add_to_build_file(
"src/python/aaa/bbb",
textwrap.dedent(
"""
python_distribution(
name='bbb',
dependencies=['src/python/aaa/bbb/ccc'],
provides=setup_py(name='bbb', version='1.1.1'),
)
"""
),
)
exporting_owner_rule_runner.add_to_build_file(
"src/python/aaa",
textwrap.dedent(
"""
python_distribution(
name='aaa',
dependencies=['src/python/aaa/bbb/ccc'],
provides=setup_py(name='aaa', version='2.2.2'),
)
"""
),
)
assert_is_owner(
exporting_owner_rule_runner, "src/python/aaa/bbb", Address("src/python/aaa/bbb/ccc")
)
assert_is_owner(
exporting_owner_rule_runner, "src/python/aaa/bbb", Address("src/python/aaa/bbb")
)
assert_is_owner(exporting_owner_rule_runner, "src/python/aaa", Address("src/python/aaa"))
def test_validate_args() -> None:
with pytest.raises(InvalidSetupPyArgs):
validate_commands(("bdist_wheel", "upload"))
with pytest.raises(InvalidSetupPyArgs):
validate_commands(("sdist", "-d", "new_distdir/"))
with pytest.raises(InvalidSetupPyArgs):
validate_commands(("--dist-dir", "new_distdir/", "sdist"))
validate_commands(("sdist",))
validate_commands(("bdist_wheel", "--foo"))
def test_distutils_repr() -> None:
testdata = {
"foo": "bar",
"baz": {"qux": [123, 456], "quux": ("abc", b"xyz"), "corge": {1, 2, 3}},
"various_strings": ["x'y", "aaa\nbbb"],
}
expected = """
{
'foo': 'bar',
'baz': {
'qux': [
123,
456,
],
'quux': (
'abc',
'xyz',
),
'corge': {
1,
2,
3,
},
},
'various_strings': [
'x\\\'y',
\"\"\"aaa\nbbb\"\"\",
],
}
""".strip()
assert expected == distutils_repr(testdata)
@pytest.mark.parametrize(
"python_src",
[
"__import__('pkg_resources').declare_namespace(__name__)",
"\n__import__('pkg_resources').declare_namespace(__name__) # type: ignore[attr-defined]",
"import pkg_resources; pkg_resources.declare_namespace(__name__)",
"from pkg_resources import declare_namespace; declare_namespace(__name__)",
],
)
def test_declares_pkg_resources_namespace_package(python_src: str) -> None:
assert declares_pkg_resources_namespace_package(python_src)
@pytest.mark.parametrize(
"python_src",
[
"",
"import os\n\nos.getcwd()",
"__path__ = 'foo'",
"import pkg_resources",
"add(1, 2); foo(__name__); self.shoot(__name__)",
"declare_namespace(bonk)",
"just nonsense, not even parseable",
],
)
def test_does_not_declare_pkg_resources_namespace_package(python_src: str) -> None:
assert not declares_pkg_resources_namespace_package(python_src)
| apache-2.0 |
xin3liang/platform_external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/common/system/logutils.py | 68 | 7376 | # Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Supports webkitpy logging."""
# FIXME: Move this file to webkitpy/python24 since logging needs to
# be configured prior to running version-checking code.
import logging
import os
import sys
import webkitpy
_log = logging.getLogger(__name__)
# We set these directory paths lazily in get_logger() below.
_scripts_dir = ""
"""The normalized, absolute path to the ...Scripts directory."""
_webkitpy_dir = ""
"""The normalized, absolute path to the ...Scripts/webkitpy directory."""
def _normalize_path(path):
"""Return the given path normalized.
Converts a path to an absolute path, removes any trailing slashes,
removes any extension, and lower-cases it.
"""
path = os.path.abspath(path)
path = os.path.normpath(path)
path = os.path.splitext(path)[0] # Remove the extension, if any.
path = path.lower()
return path
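# For example (illustrative, POSIX): _normalize_path("/Foo/Bar/../baz/Qux.py")
# returns "/foo/baz/qux" -- absolute, normalized, extension-free, lower-cased.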
# Observe that the implementation of this function does not require
# the use of any hard-coded strings like "webkitpy", etc.
#
# The main benefit this function has over using--
#
# _log = logging.getLogger(__name__)
#
# is that get_logger() returns the same value even if __name__ is
# "__main__" -- i.e. even if the module is the script being executed
# from the command-line.
def get_logger(path):
"""Return a logging.logger for the given path.
Returns:
A logger whose name is the name of the module corresponding to
the given path. If the module is in webkitpy, the name is
the fully-qualified dotted module name beginning with webkitpy....
Otherwise, the name is the base name of the module (i.e. without
any dotted module name prefix).
Args:
path: The path of the module. Normally, this parameter should be
the __file__ variable of the module.
Sample usage:
from webkitpy.common.system import logutils
_log = logutils.get_logger(__file__)
"""
# Since we assign to _scripts_dir and _webkitpy_dir in this function,
# we need to declare them global.
global _scripts_dir
global _webkitpy_dir
path = _normalize_path(path)
# Lazily evaluate _webkitpy_dir and _scripts_dir.
if not _scripts_dir:
# The normalized, absolute path to ...Scripts/webkitpy/__init__.
webkitpy_path = _normalize_path(webkitpy.__file__)
_webkitpy_dir = os.path.split(webkitpy_path)[0]
_scripts_dir = os.path.split(_webkitpy_dir)[0]
if path.startswith(_webkitpy_dir):
# Remove the initial Scripts directory portion, so the path
# starts with /webkitpy, for example "/webkitpy/init/logutils".
path = path[len(_scripts_dir):]
parts = []
while True:
(path, tail) = os.path.split(path)
if not tail:
break
parts.insert(0, tail)
logger_name = ".".join(parts) # For example, webkitpy.common.system.logutils.
else:
# The path is outside of webkitpy. Default to the basename
# without the extension.
basename = os.path.basename(path)
logger_name = os.path.splitext(basename)[0]
return logging.getLogger(logger_name)
def _default_handlers(stream, logging_level):
"""Return a list of the default logging handlers to use.
Args:
stream: See the configure_logging() docstring.
"""
# Create the filter.
def should_log(record):
"""Return whether a logging.LogRecord should be logged."""
if record.name.startswith("webkitpy.thirdparty"):
return False
return True
logging_filter = logging.Filter()
logging_filter.filter = should_log
# Create the handler.
handler = logging.StreamHandler(stream)
if logging_level == logging.DEBUG:
formatter = logging.Formatter("%(name)s: [%(levelname)s] %(message)s")
else:
formatter = logging.Formatter("%(message)s")
handler.setFormatter(formatter)
handler.addFilter(logging_filter)
return [handler]
def configure_logging(logging_level=None, logger=None, stream=None,
handlers=None):
"""Configure logging for standard purposes.
Returns:
A list of references to the logging handlers added to the root
logger. This allows the caller to later remove the handlers
using logger.removeHandler. This is useful primarily during unit
testing where the caller may want to configure logging temporarily
and then undo the configuring.
Args:
logging_level: The minimum logging level to log. Defaults to
logging.INFO.
logger: A logging.logger instance to configure. This parameter
should be used only in unit tests. Defaults to the
root logger.
stream: A file-like object to which to log used in creating the default
handlers. The stream must define an "encoding" data attribute,
or else logging raises an error. Defaults to sys.stderr.
handlers: A list of logging.Handler instances to add to the logger
being configured. If this parameter is provided, then the
stream parameter is not used.
"""
# If the stream does not define an "encoding" data attribute, the
# logging module can throw an error like the following:
#
# Traceback (most recent call last):
# File "/System/Library/Frameworks/Python.framework/Versions/2.6/...
# lib/python2.6/logging/__init__.py", line 761, in emit
# self.stream.write(fs % msg.encode(self.stream.encoding))
# LookupError: unknown encoding: unknown
if logging_level is None:
logging_level = logging.INFO
if logger is None:
logger = logging.getLogger()
if stream is None:
stream = sys.stderr
if handlers is None:
handlers = _default_handlers(stream, logging_level)
logger.setLevel(logging_level)
for handler in handlers:
logger.addHandler(handler)
_log.debug("Debug logging enabled.")
return handlers
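# Example of temporary configuration in a unit test, as described in the
# docstring above (illustrative only):
#
#   handlers = configure_logging(logging_level=logging.DEBUG)
#   try:
#       ...  # exercise code that logs
#   finally:
#       for handler in handlers:
#           logging.getLogger().removeHandler(handler)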
| bsd-3-clause |
SerCeMan/intellij-community | plugins/hg4idea/testData/bin/hgext/convert/cvsps.py | 91 | 31477 | # Mercurial built-in replacement for cvsps.
#
# Copyright 2008, Frank Kingswood <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os
import re
import cPickle as pickle
from mercurial import util
from mercurial.i18n import _
from mercurial import hook
from mercurial import util
class logentry(object):
'''Class logentry has the following attributes:
.author - author name as CVS knows it
.branch - name of branch this revision is on
.branches - revision tuple of branches starting at this revision
.comment - commit message
.commitid - CVS commitid or None
.date - the commit date as a (time, tz) tuple
.dead - true if file revision is dead
.file - Name of file
.lines - a tuple (+lines, -lines) or None
.parent - Previous revision of this entry
.rcs - name of file as returned from CVS
.revision - revision number as tuple
.tags - list of tags on the file
.synthetic - is this a synthetic "file ... added on ..." revision?
.mergepoint - the branch that has been merged from (if present in
rlog output) or None
.branchpoints - the branches that start at the current entry or empty
'''
def __init__(self, **entries):
self.synthetic = False
self.__dict__.update(entries)
def __repr__(self):
items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
return "%s(%s)"%(type(self).__name__, ", ".join(items))
class logerror(Exception):
pass
def getrepopath(cvspath):
"""Return the repository path from a CVS path.
>>> getrepopath('/foo/bar')
'/foo/bar'
>>> getrepopath('c:/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:10/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:10c:/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:c:/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:[email protected]:/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:[email protected]:c:/foo/bar')
'/foo/bar'
>>> getrepopath('user@server/path/to/repository')
'/path/to/repository'
"""
# According to CVS manual, CVS paths are expressed like:
# [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
#
    # cvspath is split into parts and then the position of the first
    # occurrence of the '/' char after the '@' is located. The repository
    # path is the rest of the string from that '/' on (inclusive).
parts = cvspath.split(':')
atposition = parts[-1].find('@')
start = 0
if atposition != -1:
start = atposition
repopath = parts[-1][parts[-1].find('/', start):]
return repopath
def createlog(ui, directory=None, root="", rlog=True, cache=None):
'''Collect the CVS rlog'''
# Because we store many duplicate commit log messages, reusing strings
# saves a lot of memory and pickle storage space.
_scache = {}
def scache(s):
"return a shared version of a string"
return _scache.setdefault(s, s)
ui.status(_('collecting CVS rlog\n'))
log = [] # list of logentry objects containing the CVS state
# patterns to match in CVS (r)log output, by state of use
re_00 = re.compile('RCS file: (.+)$')
re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
re_02 = re.compile('cvs (r?log|server): (.+)\n$')
re_03 = re.compile("(Cannot access.+CVSROOT)|"
"(can't create temporary directory.+)$")
re_10 = re.compile('Working file: (.+)$')
re_20 = re.compile('symbolic names:')
re_30 = re.compile('\t(.+): ([\\d.]+)$')
re_31 = re.compile('----------------------------$')
re_32 = re.compile('======================================='
'======================================$')
re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
r'(\s+commitid:\s+([^;]+);)?'
r'(.*mergepoint:\s+([^;]+);)?')
re_70 = re.compile('branches: (.+);$')
file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
prefix = '' # leading path to strip of what we get from CVS
if directory is None:
# Current working directory
# Get the real directory in the repository
try:
prefix = open(os.path.join('CVS','Repository')).read().strip()
directory = prefix
if prefix == ".":
prefix = ""
except IOError:
raise logerror(_('not a CVS sandbox'))
if prefix and not prefix.endswith(os.sep):
prefix += os.sep
# Use the Root file in the sandbox, if it exists
try:
root = open(os.path.join('CVS','Root')).read().strip()
except IOError:
pass
if not root:
root = os.environ.get('CVSROOT', '')
# read log cache if one exists
oldlog = []
date = None
if cache:
cachedir = os.path.expanduser('~/.hg.cvsps')
if not os.path.exists(cachedir):
os.mkdir(cachedir)
# The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sorts of nasties
# in it, slashes, colons and such. So here we take just the
# alphanumeric characters, concatenated in a way that does not
# mix up the various components, so that
# :pserver:user@server:/path
# and
# /pserver/user/server/path
# are mapped to different cache file names.
cachefile = root.split(":") + [directory, "cache"]
cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
cachefile = os.path.join(cachedir,
'.'.join([s for s in cachefile if s]))
if cache == 'update':
try:
ui.note(_('reading cvs log cache %s\n') % cachefile)
oldlog = pickle.load(open(cachefile))
for e in oldlog:
if not (util.safehasattr(e, 'branchpoints') and
util.safehasattr(e, 'commitid') and
util.safehasattr(e, 'mergepoint')):
ui.status(_('ignoring old cache\n'))
oldlog = []
break
ui.note(_('cache has %d log entries\n') % len(oldlog))
except Exception, e:
ui.note(_('error reading cache: %r\n') % e)
if oldlog:
date = oldlog[-1].date # last commit date as a (time,tz) tuple
date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
# build the CVS commandline
cmd = ['cvs', '-q']
if root:
cmd.append('-d%s' % root)
p = util.normpath(getrepopath(root))
if not p.endswith('/'):
p += '/'
if prefix:
# looks like normpath replaces "" by "."
prefix = p + util.normpath(prefix)
else:
prefix = p
cmd.append(['log', 'rlog'][rlog])
if date:
# no space between option and date string
cmd.append('-d>%s' % date)
cmd.append(directory)
# state machine begins here
tags = {} # dictionary of revisions on current file with their tags
branchmap = {} # mapping between branch names and revision numbers
state = 0
store = False # set when a new record can be appended
cmd = [util.shellquote(arg) for arg in cmd]
ui.note(_("running %s\n") % (' '.join(cmd)))
ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
pfp = util.popen(' '.join(cmd))
peek = pfp.readline()
while True:
line = peek
if line == '':
break
peek = pfp.readline()
if line.endswith('\n'):
line = line[:-1]
#ui.debug('state=%d line=%r\n' % (state, line))
if state == 0:
# initial state, consume input until we see 'RCS file'
match = re_00.match(line)
if match:
rcs = match.group(1)
tags = {}
if rlog:
filename = util.normpath(rcs[:-2])
if filename.startswith(prefix):
filename = filename[len(prefix):]
if filename.startswith('/'):
filename = filename[1:]
if filename.startswith('Attic/'):
filename = filename[6:]
else:
filename = filename.replace('/Attic/', '/')
state = 2
continue
state = 1
continue
match = re_01.match(line)
if match:
raise logerror(match.group(1))
match = re_02.match(line)
if match:
raise logerror(match.group(2))
if re_03.match(line):
raise logerror(line)
elif state == 1:
# expect 'Working file' (only when using log instead of rlog)
match = re_10.match(line)
assert match, _('RCS file must be followed by working file')
filename = util.normpath(match.group(1))
state = 2
elif state == 2:
# expect 'symbolic names'
if re_20.match(line):
branchmap = {}
state = 3
elif state == 3:
# read the symbolic names and store as tags
match = re_30.match(line)
if match:
rev = [int(x) for x in match.group(2).split('.')]
# Convert magic branch number to an odd-numbered one
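                # (e.g. CVS stores branch 1.2.4 as the "magic" revision
                # number 1.2.0.4; dropping the 0 recovers the real branch)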
revn = len(rev)
if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
rev = rev[:-2] + rev[-1:]
rev = tuple(rev)
if rev not in tags:
tags[rev] = []
tags[rev].append(match.group(1))
branchmap[match.group(1)] = match.group(2)
elif re_31.match(line):
state = 5
elif re_32.match(line):
state = 0
elif state == 4:
# expecting '------' separator before first revision
if re_31.match(line):
state = 5
else:
assert not re_32.match(line), _('must have at least '
'some revisions')
elif state == 5:
# expecting revision number and possibly (ignored) lock indication
# we create the logentry here from values stored in states 0 to 4,
# as this state is re-entered for subsequent revisions of a file.
match = re_50.match(line)
assert match, _('expected revision number')
e = logentry(rcs=scache(rcs),
file=scache(filename),
revision=tuple([int(x) for x in
match.group(1).split('.')]),
branches=[],
parent=None,
commitid=None,
mergepoint=None,
branchpoints=set())
state = 6
elif state == 6:
# expecting date, author, state, lines changed
match = re_60.match(line)
assert match, _('revision must be followed by date line')
d = match.group(1)
if d[2] == '/':
# Y2K
d = '19' + d
if len(d.split()) != 3:
# cvs log dates always in GMT
d = d + ' UTC'
e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
'%Y/%m/%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S'])
e.author = scache(match.group(2))
e.dead = match.group(3).lower() == 'dead'
if match.group(5):
if match.group(6):
e.lines = (int(match.group(5)), int(match.group(6)))
else:
e.lines = (int(match.group(5)), 0)
elif match.group(6):
e.lines = (0, int(match.group(6)))
else:
e.lines = None
if match.group(7): # cvs 1.12 commitid
e.commitid = match.group(8)
if match.group(9): # cvsnt mergepoint
myrev = match.group(10).split('.')
if len(myrev) == 2: # head
e.mergepoint = 'HEAD'
else:
myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
branches = [b for b in branchmap if branchmap[b] == myrev]
assert len(branches) == 1, ('unknown branch: %s'
% e.mergepoint)
e.mergepoint = branches[0]
e.comment = []
state = 7
elif state == 7:
# read the revision numbers of branches that start at this revision
# or store the commit log message otherwise
m = re_70.match(line)
if m:
e.branches = [tuple([int(y) for y in x.strip().split('.')])
for x in m.group(1).split(';')]
state = 8
elif re_31.match(line) and re_50.match(peek):
state = 5
store = True
elif re_32.match(line):
state = 0
store = True
else:
e.comment.append(line)
elif state == 8:
# store commit log message
if re_31.match(line):
cpeek = peek
if cpeek.endswith('\n'):
cpeek = cpeek[:-1]
if re_50.match(cpeek):
state = 5
store = True
else:
e.comment.append(line)
elif re_32.match(line):
state = 0
store = True
else:
e.comment.append(line)
# When a file is added on a branch B1, CVS creates a synthetic
# dead trunk revision 1.1 so that the branch has a root.
# Likewise, if you merge such a file to a later branch B2 (one
# that already existed when the file was added on B1), CVS
# creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
# these revisions now, but mark them synthetic so
# createchangeset() can take care of them.
if (store and
e.dead and
e.revision[-1] == 1 and # 1.1 or 1.1.x.1
len(e.comment) == 1 and
file_added_re.match(e.comment[0])):
ui.debug('found synthetic revision in %s: %r\n'
% (e.rcs, e.comment[0]))
e.synthetic = True
if store:
# clean up the results and save in the log.
store = False
e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
e.comment = scache('\n'.join(e.comment))
revn = len(e.revision)
if revn > 3 and (revn % 2) == 0:
e.branch = tags.get(e.revision[:-1], [None])[0]
else:
e.branch = None
# find the branches starting from this revision
branchpoints = set()
for branch, revision in branchmap.iteritems():
revparts = tuple([int(i) for i in revision.split('.')])
if len(revparts) < 2: # bad tags
continue
if revparts[-2] == 0 and revparts[-1] % 2 == 0:
# normal branch
if revparts[:-2] == e.revision:
branchpoints.add(branch)
elif revparts == (1, 1, 1): # vendor branch
if revparts in e.branches:
branchpoints.add(branch)
e.branchpoints = branchpoints
log.append(e)
if len(log) % 100 == 0:
ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
log.sort(key=lambda x: (x.rcs, x.revision))
# find parent revisions of individual files
versions = {}
for e in log:
branch = e.revision[:-1]
p = versions.get((e.rcs, branch), None)
if p is None:
p = e.revision[:-2]
e.parent = p
versions[(e.rcs, branch)] = e.revision
# update the log cache
if cache:
if log:
# join up the old and new logs
log.sort(key=lambda x: x.date)
if oldlog and oldlog[-1].date >= log[0].date:
raise logerror(_('log cache overlaps with new log entries,'
' re-run without cache.'))
log = oldlog + log
# write the new cachefile
ui.note(_('writing cvs log cache %s\n') % cachefile)
pickle.dump(log, open(cachefile, 'w'))
else:
log = oldlog
ui.status(_('%d log entries\n') % len(log))
hook.hook(ui, None, "cvslog", True, log=log)
return log
class changeset(object):
'''Class changeset has the following attributes:
.id - integer identifying this changeset (list index)
.author - author name as CVS knows it
.branch - name of branch this changeset is on, or None
.comment - commit message
.commitid - CVS commitid or None
.date - the commit date as a (time,tz) tuple
.entries - list of logentry objects in this changeset
.parents - list of one or two parent changesets
.tags - list of tags on this changeset
.synthetic - from synthetic revision "file ... added on branch ..."
.mergepoint- the branch that has been merged from or None
.branchpoints- the branches that start at the current entry or empty
'''
def __init__(self, **entries):
self.synthetic = False
self.__dict__.update(entries)
def __repr__(self):
items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
return "%s(%s)"%(type(self).__name__, ", ".join(items))
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
'''Convert log into changesets.'''
ui.status(_('creating changesets\n'))
# try to order commitids by date
mindate = {}
for e in log:
if e.commitid:
            # default to e.date so min() never compares a date against None
            mindate[e.commitid] = min(e.date, mindate.get(e.commitid, e.date))
# Merge changesets
log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment,
x.author, x.branch, x.date, x.branchpoints))
changesets = []
files = set()
c = None
for i, e in enumerate(log):
# Check if log entry belongs to the current changeset or not.
# Since CVS is file-centric, two different file revisions with
# different branchpoints should be treated as belonging to two
# different changesets (and the ordering is important and not
# honoured by cvsps at this point).
#
# Consider the following case:
# foo 1.1 branchpoints: [MYBRANCH]
# bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
#
# Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
# later version of foo may be in MYBRANCH2, so foo should be the
# first changeset and bar the next and MYBRANCH and MYBRANCH2
# should both start off of the bar changeset. No provisions are
# made to ensure that this is, in fact, what happens.
if not (c and e.branchpoints == c.branchpoints and
(# cvs commitids
(e.commitid is not None and e.commitid == c.commitid) or
(# no commitids, use fuzzy commit detection
(e.commitid is None or c.commitid is None) and
e.comment == c.comment and
e.author == c.author and
e.branch == c.branch and
((c.date[0] + c.date[1]) <=
(e.date[0] + e.date[1]) <=
(c.date[0] + c.date[1]) + fuzz) and
e.file not in files))):
c = changeset(comment=e.comment, author=e.author,
branch=e.branch, date=e.date,
entries=[], mergepoint=e.mergepoint,
branchpoints=e.branchpoints, commitid=e.commitid)
changesets.append(c)
files = set()
if len(changesets) % 100 == 0:
t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
ui.status(util.ellipsis(t, 80) + '\n')
c.entries.append(e)
files.add(e.file)
c.date = e.date # changeset date is date of latest commit in it
# Mark synthetic changesets
for c in changesets:
# Synthetic revisions always get their own changeset, because
# the log message includes the filename. E.g. if you add file3
# and file4 on a branch, you get four log entries and three
# changesets:
# "File file3 was added on branch ..." (synthetic, 1 entry)
# "File file4 was added on branch ..." (synthetic, 1 entry)
# "Add file3 and file4 to fix ..." (real, 2 entries)
# Hence the check for 1 entry here.
c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
# Sort files in each changeset
def entitycompare(l, r):
'Mimic cvsps sorting order'
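        # e.g. this orders 'Makefile' before 'src/Makefile' before
        # 'src/main.c': shallower paths sort first, otherwise paths are
        # compared component by component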
l = l.file.split('/')
r = r.file.split('/')
nl = len(l)
nr = len(r)
n = min(nl, nr)
for i in range(n):
if i + 1 == nl and nl < nr:
return -1
elif i + 1 == nr and nl > nr:
return +1
elif l[i] < r[i]:
return -1
elif l[i] > r[i]:
return +1
return 0
for c in changesets:
c.entries.sort(entitycompare)
# Sort changesets by date
def cscmp(l, r):
d = sum(l.date) - sum(r.date)
if d:
return d
# detect vendor branches and initial commits on a branch
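        # (a changeset containing an entry whose parent revision lives in
        # the other changeset must sort after it; this breaks ties between
        # dependent changesets that share the same date)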
le = {}
for e in l.entries:
le[e.rcs] = e.revision
re = {}
for e in r.entries:
re[e.rcs] = e.revision
d = 0
for e in l.entries:
if re.get(e.rcs, None) == e.parent:
assert not d
d = 1
break
for e in r.entries:
if le.get(e.rcs, None) == e.parent:
assert not d
d = -1
break
return d
changesets.sort(cscmp)
# Collect tags
globaltags = {}
for c in changesets:
for e in c.entries:
for tag in e.tags:
# remember which is the latest changeset to have this tag
globaltags[tag] = c
for c in changesets:
tags = set()
for e in c.entries:
tags.update(e.tags)
# remember tags only if this is the latest changeset to have it
c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
# Find parent changesets, handle {{mergetobranch BRANCHNAME}}
# by inserting dummy changesets with two parents, and handle
# {{mergefrombranch BRANCHNAME}} by setting two parents.
if mergeto is None:
mergeto = r'{{mergetobranch ([-\w]+)}}'
if mergeto:
mergeto = re.compile(mergeto)
if mergefrom is None:
mergefrom = r'{{mergefrombranch ([-\w]+)}}'
if mergefrom:
mergefrom = re.compile(mergefrom)
versions = {} # changeset index where we saw any particular file version
branches = {} # changeset index where we saw a branch
n = len(changesets)
i = 0
while i < n:
c = changesets[i]
for f in c.entries:
versions[(f.rcs, f.revision)] = i
p = None
if c.branch in branches:
p = branches[c.branch]
else:
# first changeset on a new branch
# the parent is a changeset with the branch in its
# branchpoints such that it is the latest possible
# commit without any intervening, unrelated commits.
for candidate in xrange(i):
if c.branch not in changesets[candidate].branchpoints:
if p is not None:
break
continue
p = candidate
c.parents = []
if p is not None:
p = changesets[p]
# Ensure no changeset has a synthetic changeset as a parent.
while p.synthetic:
assert len(p.parents) <= 1, \
_('synthetic changeset cannot have multiple parents')
if p.parents:
p = p.parents[0]
else:
p = None
break
if p is not None:
c.parents.append(p)
if c.mergepoint:
if c.mergepoint == 'HEAD':
c.mergepoint = None
c.parents.append(changesets[branches[c.mergepoint]])
if mergefrom:
m = mergefrom.search(c.comment)
if m:
m = m.group(1)
if m == 'HEAD':
m = None
try:
candidate = changesets[branches[m]]
except KeyError:
ui.warn(_("warning: CVS commit message references "
"non-existent branch %r:\n%s\n")
% (m, c.comment))
if m in branches and c.branch != m and not candidate.synthetic:
c.parents.append(candidate)
if mergeto:
m = mergeto.search(c.comment)
if m:
if m.groups():
m = m.group(1)
if m == 'HEAD':
m = None
else:
m = None # if no group found then merge to HEAD
if m in branches and c.branch != m:
# insert empty changeset for merge
cc = changeset(
author=c.author, branch=m, date=c.date,
comment='convert-repo: CVS merge from branch %s'
% c.branch,
entries=[], tags=[],
parents=[changesets[branches[m]], c])
changesets.insert(i + 1, cc)
branches[m] = i + 1
# adjust our loop counters now we have inserted a new entry
n += 1
i += 2
continue
branches[c.branch] = i
i += 1
# Drop synthetic changesets (safe now that we have ensured no other
# changesets can have them as parents).
i = 0
while i < len(changesets):
if changesets[i].synthetic:
del changesets[i]
else:
i += 1
# Number changesets
for i, c in enumerate(changesets):
c.id = i + 1
ui.status(_('%d changeset entries\n') % len(changesets))
hook.hook(ui, None, "cvschangesets", True, changesets=changesets)
return changesets
def debugcvsps(ui, *args, **opts):
'''Read CVS rlog for current directory or named path in
repository, and convert the log to changesets based on matching
commit log entries and dates.
'''
if opts["new_cache"]:
cache = "write"
elif opts["update_cache"]:
cache = "update"
else:
cache = None
revisions = opts["revisions"]
try:
if args:
log = []
for d in args:
log += createlog(ui, d, root=opts["root"], cache=cache)
else:
log = createlog(ui, root=opts["root"], cache=cache)
except logerror, e:
ui.write("%r\n"%e)
return
changesets = createchangeset(ui, log, opts["fuzz"])
del log
# Print changesets (optionally filtered)
off = len(revisions)
branches = {} # latest version number in each branch
ancestors = {} # parent branch
for cs in changesets:
if opts["ancestors"]:
if cs.branch not in branches and cs.parents and cs.parents[0].id:
ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
cs.parents[0].id)
branches[cs.branch] = cs.id
# limit by branches
if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
continue
if not off:
# Note: trailing spaces on several lines here are needed to have
# bug-for-bug compatibility with cvsps.
ui.write('---------------------\n')
ui.write(('PatchSet %d \n' % cs.id))
ui.write(('Date: %s\n' % util.datestr(cs.date,
'%Y/%m/%d %H:%M:%S %1%2')))
ui.write(('Author: %s\n' % cs.author))
ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
','.join(cs.tags) or '(none)')))
if cs.branchpoints:
ui.write(('Branchpoints: %s \n') %
', '.join(sorted(cs.branchpoints)))
if opts["parents"] and cs.parents:
if len(cs.parents) > 1:
ui.write(('Parents: %s\n' %
(','.join([str(p.id) for p in cs.parents]))))
else:
ui.write(('Parent: %d\n' % cs.parents[0].id))
if opts["ancestors"]:
b = cs.branch
r = []
while b:
b, c = ancestors[b]
r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
if r:
ui.write(('Ancestors: %s\n' % (','.join(r))))
ui.write(('Log:\n'))
ui.write('%s\n\n' % cs.comment)
ui.write(('Members: \n'))
for f in cs.entries:
fn = f.file
if fn.startswith(opts["prefix"]):
fn = fn[len(opts["prefix"]):]
ui.write('\t%s:%s->%s%s \n' % (
fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
'.'.join([str(x) for x in f.revision]),
['', '(DEAD)'][f.dead]))
ui.write('\n')
# have we seen the start tag?
if revisions and off:
if revisions[0] == str(cs.id) or \
revisions[0] in cs.tags:
off = False
# see if we reached the end tag
if len(revisions) > 1 and not off:
if revisions[1] == str(cs.id) or \
revisions[1] in cs.tags:
break
| apache-2.0 |
scith/htpc-manager_ynh | sources/libs/cherrypy/test/test_routes.py | 22 | 2383 | import os
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
import cherrypy
from cherrypy.test import helper
import nose
class RoutesDispatchTest(helper.CPWebCase):
def setup_server():
try:
import routes
except ImportError:
raise nose.SkipTest('Install routes to test RoutesDispatcher code')
class Dummy:
def index(self):
return "I said good day!"
class City:
def __init__(self, name):
self.name = name
self.population = 10000
def index(self, **kwargs):
return "Welcome to %s, pop. %s" % (self.name, self.population)
index._cp_config = {
'tools.response_headers.on': True,
'tools.response_headers.headers': [
('Content-Language', 'en-GB')
]
}
def update(self, **kwargs):
self.population = kwargs['pop']
return "OK"
d = cherrypy.dispatch.RoutesDispatcher()
d.connect(action='index', name='hounslow', route='/hounslow',
controller=City('Hounslow'))
d.connect(
name='surbiton', route='/surbiton', controller=City('Surbiton'),
action='index', conditions=dict(method=['GET']))
d.mapper.connect('/surbiton', controller='surbiton',
action='update', conditions=dict(method=['POST']))
d.connect('main', ':action', controller=Dummy())
conf = {'/': {'request.dispatch': d}}
cherrypy.tree.mount(root=None, config=conf)
setup_server = staticmethod(setup_server)
def test_Routes_Dispatch(self):
self.getPage("/hounslow")
self.assertStatus("200 OK")
self.assertBody("Welcome to Hounslow, pop. 10000")
self.getPage("/foo")
self.assertStatus("404 Not Found")
self.getPage("/surbiton")
self.assertStatus("200 OK")
self.assertBody("Welcome to Surbiton, pop. 10000")
self.getPage("/surbiton", method="POST", body="pop=1327")
self.assertStatus("200 OK")
self.assertBody("OK")
self.getPage("/surbiton")
self.assertStatus("200 OK")
self.assertHeader("Content-Language", "en-GB")
self.assertBody("Welcome to Surbiton, pop. 1327")
| gpl-3.0 |
QualiSystems/Ansible-Shell | package/cloudshell/cm/ansible/domain/temp_folder_scope.py | 1 | 1075 | import os
from .file_system_service import FileSystemService
from logging import Logger
class TempFolderScope(object):
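    """Context manager that creates a temp folder, makes it the current
    working directory for the duration of the block, and restores the
    previous working directory (deleting the folder) on exit.

    Illustrative usage (``fs`` and ``log`` are placeholders for a
    FileSystemService and a Logger, not names defined in this module):

        with TempFolderScope(fs, log) as folder:
            ...  # the working dir is now ``folder``
    """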
def __init__(self, file_system, logger):
"""
:type file_system: FileSystemService
:type logger: Logger
"""
self.file_system = file_system
self.logger = logger
def __enter__(self):
"""
:rtype: str
"""
self.logger.info('Creating temp folder and making it the working dir...')
self.folder = self.file_system.create_temp_folder()
self.prev_working_dir = self.file_system.get_working_dir()
self.file_system.set_working_dir(self.folder)
self.logger.info('Done (folder: %s)' % self.folder)
return self.folder
def __exit__(self, exc_type, exc_val, exc_tb):
self.logger.info('Deleting temp folder and restoring the previous working dir...')
self.file_system.set_working_dir(self.prev_working_dir)
self.file_system.delete_temp_folder(self.folder)
        self.logger.info('Done (folder: %s)' % self.folder)
| apache-2.0 |
eamars/shadowsocks | tests/nose_plugin.py | 1072 | 1164 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nose
from nose.plugins.base import Plugin
class ExtensionPlugin(Plugin):
name = "ExtensionPlugin"
def options(self, parser, env):
Plugin.options(self, parser, env)
def configure(self, options, config):
Plugin.configure(self, options, config)
self.enabled = True
def wantFile(self, file):
return file.endswith('.py')
def wantDirectory(self, directory):
return True
def wantModule(self, file):
return True
if __name__ == '__main__':
nose.main(addplugins=[ExtensionPlugin()])
| apache-2.0 |
sargas/scipy | scipy/weave/examples/md5_speed.py | 3 | 2569 | """
Storing actual strings instead of their md5 value appears to
be about 10 times faster.
>>> md5_speed.run(200,50000)
md5 build(len,sec): 50000 0.870999932289
md5 retrv(len,sec): 50000 0.680999994278
std build(len,sec): 50000 0.259999990463
std retrv(len,sec): 50000 0.0599999427795
This test actually takes several minutes to generate the random
keys used to populate the dictionaries. Here is a smaller run,
but with longer keys.
>>> md5_speed.run(1000,4000)
md5 build(len,sec,per): 4000 0.129999995232 3.24999988079e-005
md5 retrv(len,sec,per): 4000 0.129999995232 3.24999988079e-005
std build(len,sec,per): 4000 0.0500000715256 1.25000178814e-005
std retrv(len,sec,per): 4000 0.00999999046326 2.49999761581e-006
Results are similar, though not statistically sound because of
the short times used and the available clock resolution.
Still, I think it is safe to say that, for speed, it is better
to store entire strings instead of using md5 versions of
their strings. Yeah, the expected result, but it never hurts
to check...
"""
from __future__ import absolute_import, print_function
import random, md5, time, cStringIO
def speed(n,m):
s = 'a'*n
t1 = time.time()
for i in range(m):
        q = md5.new(s).digest()
t2 = time.time()
print((t2 - t1) / m)
#speed(50,1e6)
def generate_random(avg_length,count):
all_str = []
alphabet = 'abcdefghijklmnopqrstuvwxyz'
lo,hi = [30,avg_length*2+30]
for i in range(count):
new_str = cStringIO.StringIO()
l = random.randrange(lo,hi)
        for _ in range(l):
new_str.write(random.choice(alphabet))
all_str.append(new_str.getvalue())
return all_str
def md5_dict(lst):
catalog = {}
t1 = time.time()
for s in lst:
        key = md5.new(s).digest()
catalog[key] = None
t2 = time.time()
print('md5 build(len,sec,per):', len(lst), t2 - t1, (t2-t1)/len(lst))
t1 = time.time()
for s in lst:
        key = md5.new(s).digest()
val = catalog[key]
t2 = time.time()
print('md5 retrv(len,sec,per):', len(lst), t2 - t1, (t2-t1)/len(lst))
def std_dict(lst):
catalog = {}
t1 = time.time()
for s in lst:
catalog[s] = None
t2 = time.time()
print('std build(len,sec,per):', len(lst), t2 - t1, (t2-t1)/len(lst))
t1 = time.time()
for s in lst:
val = catalog[s]
t2 = time.time()
print('std retrv(len,sec,per):', len(lst), t2 - t1, (t2-t1)/len(lst))
def run(m=200,n=10):
lst = generate_random(m,n)
md5_dict(lst)
std_dict(lst)
run(2000,100)
| bsd-3-clause |
BellScurry/gem5-fault-injection | src/python/m5/main.py | 9 | 14422 | # Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import code
import datetime
import os
import socket
import sys
__all__ = [ 'options', 'arguments', 'main' ]
usage="%prog [gem5 options] script.py [script options]"
version="%prog 2.0"
brief_copyright=\
"gem5 is copyrighted software; use the --copyright option for details."
def parse_options():
import config
from options import OptionParser
options = OptionParser(usage=usage, version=version,
description=brief_copyright)
option = options.add_option
group = options.set_group
# Help options
option('-B', "--build-info", action="store_true", default=False,
help="Show build information")
option('-C', "--copyright", action="store_true", default=False,
help="Show full copyright information")
option('-R', "--readme", action="store_true", default=False,
help="Show the readme")
# Options for configuring the base simulator
option('-d', "--outdir", metavar="DIR", default="m5out",
help="Set the output directory to DIR [Default: %default]")
option('-r', "--redirect-stdout", action="store_true", default=False,
help="Redirect stdout (& stderr, without -e) to file")
option('-e', "--redirect-stderr", action="store_true", default=False,
help="Redirect stderr to file")
option("--stdout-file", metavar="FILE", default="simout",
help="Filename for -r redirection [Default: %default]")
option("--stderr-file", metavar="FILE", default="simerr",
help="Filename for -e redirection [Default: %default]")
option('-i', "--interactive", action="store_true", default=False,
help="Invoke the interactive interpreter after running the script")
option("--pdb", action="store_true", default=False,
help="Invoke the python debugger before running the script")
option('-p', "--path", metavar="PATH[:PATH]", action='append', split=':',
help="Prepend PATH to the system path when invoking the script")
option('-q', "--quiet", action="count", default=0,
help="Reduce verbosity")
option('-v', "--verbose", action="count", default=0,
help="Increase verbosity")
# Statistics options
group("Statistics Options")
option("--stats-file", metavar="FILE", default="stats.txt",
help="Sets the output file for statistics [Default: %default]")
# Configuration Options
group("Configuration Options")
option("--dump-config", metavar="FILE", default="config.ini",
help="Dump configuration output file [Default: %default]")
option("--json-config", metavar="FILE", default="config.json",
help="Create JSON output of the configuration [Default: %default]")
option("--dot-config", metavar="FILE", default="config.dot",
help="Create DOT & pdf outputs of the configuration [Default: %default]")
option("--dot-dvfs-config", metavar="FILE", default=None,
help="Create DOT & pdf outputs of the DVFS configuration" + \
" [Default: %default]")
# Debugging options
group("Debugging Options")
option("--debug-break", metavar="TICK[,TICK]", action='append', split=',',
help="Create breakpoint(s) at TICK(s) " \
"(kills process if no debugger attached)")
option("--debug-help", action='store_true',
help="Print help on debug flags")
option("--debug-flags", metavar="FLAG[,FLAG]", action='append', split=',',
help="Sets the flags for debug output (-FLAG disables a flag)")
option("--debug-start", metavar="TICK", type='int',
help="Start debug output at TICK")
option("--debug-end", metavar="TICK", type='int',
help="End debug output at TICK")
option("--debug-file", metavar="FILE", default="cout",
help="Sets the output file for debug [Default: %default]")
option("--debug-ignore", metavar="EXPR", action='append', split=':',
help="Ignore EXPR sim objects")
option("--remote-gdb-port", type='int', default=7000,
help="Remote gdb base port (set to 0 to disable listening)")
# Help options
group("Help Options")
option("--list-sim-objects", action='store_true', default=False,
help="List all built-in SimObjects, their params and default values")
# load the options.py config file to allow people to set their own
# default options
options_file = config.get('options.py')
if options_file:
scope = { 'options' : options }
execfile(options_file, scope)
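        # Such a file sees the parser through the 'options' name injected
        # above, so it could e.g. call options.add_option(...) to register
        # site-specific options (illustrative, not a requirement).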
arguments = options.parse_args()
return options,arguments
def interact(scope):
banner = "gem5 Interactive Console"
ipshell = None
prompt_in1 = "gem5 \\#> "
prompt_out = "gem5 \\#: "
# Is IPython version 0.10 or earlier available?
try:
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed(argv=["-prompt_in1", prompt_in1,
"-prompt_out", prompt_out],
banner=banner, user_ns=scope)
except ImportError:
pass
# Is IPython version 0.11 or later available?
if not ipshell:
try:
import IPython
from IPython.config.loader import Config
from IPython.terminal.embed import InteractiveShellEmbed
cfg = Config()
cfg.PromptManager.in_template = prompt_in1
cfg.PromptManager.out_template = prompt_out
ipshell = InteractiveShellEmbed(config=cfg, user_ns=scope,
banner1=banner)
except ImportError:
pass
if ipshell:
ipshell()
else:
# Use the Python shell in the standard library if IPython
# isn't available.
code.InteractiveConsole(scope).interact(banner)
def main(*args):
import m5
import core
import debug
import defines
import event
import info
import stats
import trace
from util import fatal
if len(args) == 0:
options, arguments = parse_options()
elif len(args) == 2:
options, arguments = args
else:
raise TypeError, "main() takes 0 or 2 arguments (%d given)" % len(args)
m5.options = options
def check_tracing():
if defines.TRACING_ON:
return
fatal("Tracing is not enabled. Compile with TRACING_ON")
# Set the main event queue for the main thread.
event.mainq = event.getEventQueue(0)
event.setEventQueue(event.mainq)
if not os.path.isdir(options.outdir):
os.makedirs(options.outdir)
# These filenames are used only if the redirect_std* options are set
stdout_file = os.path.join(options.outdir, options.stdout_file)
stderr_file = os.path.join(options.outdir, options.stderr_file)
# Print redirection notices here before doing any redirection
if options.redirect_stdout and not options.redirect_stderr:
print "Redirecting stdout and stderr to", stdout_file
else:
if options.redirect_stdout:
print "Redirecting stdout to", stdout_file
if options.redirect_stderr:
print "Redirecting stderr to", stderr_file
# Now redirect stdout/stderr as desired
if options.redirect_stdout:
        redir_fd = os.open(stdout_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.dup2(redir_fd, sys.stdout.fileno())
if not options.redirect_stderr:
os.dup2(redir_fd, sys.stderr.fileno())
if options.redirect_stderr:
        redir_fd = os.open(stderr_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.dup2(redir_fd, sys.stderr.fileno())
done = False
if options.build_info:
done = True
print 'Build information:'
print
        print 'compiled %s' % defines.compileDate
print 'build options:'
keys = defines.buildEnv.keys()
keys.sort()
for key in keys:
val = defines.buildEnv[key]
print ' %s = %s' % (key, val)
print
if options.copyright:
done = True
print info.COPYING
print
if options.readme:
done = True
print 'Readme:'
print
print info.README
print
if options.debug_help:
done = True
check_tracing()
debug.help()
if options.list_sim_objects:
import SimObject
done = True
print "SimObjects:"
objects = SimObject.allClasses.keys()
objects.sort()
for name in objects:
obj = SimObject.allClasses[name]
print " %s" % obj
params = obj._params.keys()
params.sort()
for pname in params:
param = obj._params[pname]
default = getattr(param, 'default', '')
print " %s" % pname
if default:
print " default: %s" % default
print " desc: %s" % param.desc
print
print
if done:
sys.exit(0)
# setting verbose and quiet at the same time doesn't make sense
if options.verbose > 0 and options.quiet > 0:
options.usage(2)
verbose = options.verbose - options.quiet
if verbose >= 0:
print "gem5 Simulator System. http://gem5.org"
print brief_copyright
print
print "gem5 compiled %s" % defines.compileDate;
print "gem5 started %s" % \
datetime.datetime.now().strftime("%b %e %Y %X")
print "gem5 executing on %s, pid %d" % \
(socket.gethostname(), os.getpid())
# in Python 3 pipes.quote() is moved to shlex.quote()
import pipes
print "command line:", " ".join(map(pipes.quote, sys.argv))
print
# check to make sure we can find the listed script
if not arguments or not os.path.isfile(arguments[0]):
if arguments and not os.path.isfile(arguments[0]):
print "Script %s not found" % arguments[0]
options.usage(2)
# tell C++ about output directory
core.setOutputDir(options.outdir)
# update the system path with elements from the -p option
sys.path[0:0] = options.path
# set stats options
stats.initText(options.stats_file)
# set debugging options
debug.setRemoteGDBPort(options.remote_gdb_port)
for when in options.debug_break:
debug.schedBreak(int(when))
if options.debug_flags:
check_tracing()
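        # A leading '-' disables a flag, so a command line such as
        # --debug-flags=Exec,-ExecTicks (hypothetical flag names) enables
        # Exec and disables ExecTicks.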
on_flags = []
off_flags = []
for flag in options.debug_flags:
off = False
if flag.startswith('-'):
flag = flag[1:]
off = True
if flag not in debug.flags:
print >>sys.stderr, "invalid debug flag '%s'" % flag
sys.exit(1)
if off:
debug.flags[flag].disable()
else:
debug.flags[flag].enable()
if options.debug_start:
check_tracing()
e = event.create(trace.enable, event.Event.Debug_Enable_Pri)
event.mainq.schedule(e, options.debug_start)
else:
trace.enable()
if options.debug_end:
check_tracing()
e = event.create(trace.disable, event.Event.Debug_Enable_Pri)
event.mainq.schedule(e, options.debug_end)
trace.output(options.debug_file)
for ignore in options.debug_ignore:
check_tracing()
trace.ignore(ignore)
sys.argv = arguments
sys.path = [ os.path.dirname(sys.argv[0]) ] + sys.path
filename = sys.argv[0]
filedata = file(filename, 'r').read()
filecode = compile(filedata, filename, 'exec')
scope = { '__file__' : filename,
'__name__' : '__m5_main__' }
# we want readline if we're doing anything interactive
if options.interactive or options.pdb:
exec "import readline" in scope
# if pdb was requested, execfile the thing under pdb, otherwise,
# just do the execfile normally
if options.pdb:
import pdb
import traceback
pdb = pdb.Pdb()
try:
pdb.run(filecode, scope)
except SystemExit:
print "The program exited via sys.exit(). Exit status: ",
print sys.exc_info()[1]
except:
traceback.print_exc()
print "Uncaught exception. Entering post mortem debugging"
t = sys.exc_info()[2]
while t.tb_next is not None:
t = t.tb_next
pdb.interaction(t.tb_frame,t)
else:
exec filecode in scope
# once the script is done
if options.interactive:
interact(scope)
if __name__ == '__main__':
from pprint import pprint
options, arguments = parse_options()
print 'opts:'
pprint(options, indent=4)
print
print 'args:'
pprint(arguments, indent=4)
| bsd-3-clause |
napkindrawing/ansible | lib/ansible/plugins/callback/logstash.py | 22 | 6893 | # (C) 2016, Ievgen Khmelenko <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import socket
import uuid
import logging
try:
import logstash
HAS_LOGSTASH = True
except ImportError:
HAS_LOGSTASH = False
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
"""
ansible logstash callback plugin
ansible.cfg:
callback_plugins = <path_to_callback_plugins_folder>
callback_whitelist = logstash
and put the plugin in <path_to_callback_plugins_folder>
logstash config:
input {
tcp {
port => 5000
codec => json
}
}
Requires:
python-logstash
This plugin makes use of the following environment variables:
LOGSTASH_SERVER (optional): defaults to localhost
LOGSTASH_PORT (optional): defaults to 5000
LOGSTASH_TYPE (optional): defaults to ansible
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'logstash'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
super(CallbackModule, self).__init__()
if not HAS_LOGSTASH:
self.disabled = True
self._display.warning("The required python-logstash is not installed. "
"pip install python-logstash")
else:
self.logger = logging.getLogger('python-logstash-logger')
self.logger.setLevel(logging.DEBUG)
self.handler = logstash.TCPLogstashHandler(
os.getenv('LOGSTASH_SERVER', 'localhost'),
int(os.getenv('LOGSTASH_PORT', 5000)),
version=1,
message_type=os.getenv('LOGSTASH_TYPE', 'ansible')
)
self.logger.addHandler(self.handler)
self.hostname = socket.gethostname()
self.session = str(uuid.uuid1())
self.errors = 0
def v2_playbook_on_start(self, playbook):
self.playbook = playbook._file_name
data = {
'status': "OK",
'host': self.hostname,
'session': self.session,
'ansible_type': "start",
'ansible_playbook': self.playbook,
}
self.logger.info("ansible start", extra=data)
def v2_playbook_on_stats(self, stats):
summarize_stat = {}
for host in stats.processed.keys():
summarize_stat[host] = stats.summarize(host)
if self.errors == 0:
status = "OK"
else:
status = "FAILED"
data = {
'status': status,
'host': self.hostname,
'session': self.session,
'ansible_type': "finish",
'ansible_playbook': self.playbook,
'ansible_result': json.dumps(summarize_stat),
}
self.logger.info("ansible stats", extra=data)
def v2_runner_on_ok(self, result, **kwargs):
data = {
'status': "OK",
'host': self.hostname,
'session': self.session,
'ansible_type': "task",
'ansible_playbook': self.playbook,
'ansible_host': result._host.name,
'ansible_task': result._task,
'ansible_result': self._dump_results(result._result)
}
self.logger.info("ansible ok", extra=data)
def v2_runner_on_skipped(self, result, **kwargs):
data = {
'status': "SKIPPED",
'host': self.hostname,
'session': self.session,
'ansible_type': "task",
'ansible_playbook': self.playbook,
'ansible_task': result._task,
'ansible_host': result._host.name
}
self.logger.info("ansible skipped", extra=data)
def v2_playbook_on_import_for_host(self, result, imported_file):
data = {
'status': "IMPORTED",
'host': self.hostname,
'session': self.session,
'ansible_type': "import",
'ansible_playbook': self.playbook,
'ansible_host': result._host.name,
'imported_file': imported_file
}
self.logger.info("ansible import", extra=data)
def v2_playbook_on_not_import_for_host(self, result, missing_file):
data = {
'status': "NOT IMPORTED",
'host': self.hostname,
'session': self.session,
'ansible_type': "import",
'ansible_playbook': self.playbook,
'ansible_host': result._host.name,
'missing_file': missing_file
}
self.logger.info("ansible import", extra=data)
def v2_runner_on_failed(self, result, **kwargs):
data = {
'status': "FAILED",
'host': self.hostname,
'session': self.session,
'ansible_type': "task",
'ansible_playbook': self.playbook,
'ansible_host': result._host.name,
'ansible_task': result._task,
'ansible_result': self._dump_results(result._result)
}
self.errors += 1
self.logger.error("ansible failed", extra=data)
def v2_runner_on_unreachable(self, result, **kwargs):
data = {
'status': "UNREACHABLE",
'host': self.hostname,
'session': self.session,
'ansible_type': "task",
'ansible_playbook': self.playbook,
'ansible_host': result._host.name,
'ansible_task': result._task,
'ansible_result': self._dump_results(result._result)
}
self.logger.error("ansible unreachable", extra=data)
def v2_runner_on_async_failed(self, result, **kwargs):
data = {
'status': "FAILED",
'host': self.hostname,
'session': self.session,
'ansible_type': "task",
'ansible_playbook': self.playbook,
'ansible_host': result._host.name,
'ansible_task': result._task,
'ansible_result': self._dump_results(result._result)
}
self.errors += 1
self.logger.error("ansible async", extra=data)
| gpl-3.0 |
zuowang/incubator-hawq | tools/bin/gppylib/commands/dca.py | 54 | 1705 | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2011. All Rights Reserved.
#
"""
Module for commands that are DCA specific
"""
import os
from gppylib.gplog import get_default_logger
from base import Command, LOCAL, REMOTE
logger = get_default_logger()
# NOTE: this is a check for a 1040-or-later appliance
def is_dca_appliance():
try:
if os.path.isfile('/opt/dca/bin/dca_gpdb_initialized'):
return True
except:
pass
return False
#-----------------------------------------------
class DcaGpdbInitialized(Command):
def __init__(self, name, ctxt=LOCAL, remoteHost=None):
self.cmdStr="/opt/dca/bin/dca_gpdb_initialized"
Command.__init__(self, name, self.cmdStr, ctxt, remoteHost)
@staticmethod
def local():
try:
cmd=DcaGpdbInitialized('dcainit')
cmd.run(validateAfter=True)
except Exception, e:
logger.error(e.__str__())
logger.error("Exception running dca initialization")
except:
logger.error("Exception running dca initialization")
#-----------------------------------------------
class DcaGpdbStopped(Command):
def __init__(self, name, ctxt=LOCAL, remoteHost=None):
self.cmdStr="/opt/dca/bin/dca_gpdb_stopped"
Command.__init__(self, name, self.cmdStr, ctxt, remoteHost)
@staticmethod
def local():
try:
cmd=DcaGpdbStopped('dcastop')
cmd.run(validateAfter=True)
except Exception, e:
logger.error(e.__str__())
logger.error("Exception running dca de-initialization")
except:
logger.error("Exception running dca de-initialization")
| apache-2.0 |
bksim/chinese-translation | nltk/metrics/scores.py | 5 | 7814 | # Natural Language Toolkit: Evaluation
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function
from itertools import izip
from math import fabs
import operator
from random import shuffle
try:
from scipy.stats.stats import betai
except ImportError:
betai = None
from nltk.util import LazyConcatenation, LazyMap
def accuracy(reference, test):
"""
Given a list of reference values and a corresponding list of test
values, return the fraction of corresponding values that are
equal. In particular, return the fraction of indices
    ``0<=i<len(test)`` such that ``test[i] == reference[i]``.
:type reference: list
:param reference: An ordered list of reference values.
:type test: list
:param test: A list of values to compare against the corresponding
reference values.
:raise ValueError: If ``reference`` and ``length`` do not have the
same length.
"""
if len(reference) != len(test):
raise ValueError("Lists must have the same length.")
return float(sum(x == y for x, y in izip(reference, test))) / len(test)
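# Illustrative example (made-up tag sequences, not from the NLTK tests):
#   accuracy(['DET', 'NN', 'VB'], ['DET', 'NN', 'NN'])  ->  2/3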
def precision(reference, test):
"""
Given a set of reference values and a set of test values, return
the fraction of test values that appear in the reference set.
In particular, return card(``reference`` intersection ``test``)/card(``test``).
If ``test`` is empty, then return None.
:type reference: set
:param reference: A set of reference values.
:type test: set
:param test: A set of values to compare against the reference set.
:rtype: float or None
"""
if (not hasattr(reference, 'intersection') or
not hasattr(test, 'intersection')):
raise TypeError('reference and test should be sets')
if len(test) == 0:
return None
else:
return float(len(reference.intersection(test)))/len(test)
def recall(reference, test):
"""
Given a set of reference values and a set of test values, return
the fraction of reference values that appear in the test set.
In particular, return card(``reference`` intersection ``test``)/card(``reference``).
If ``reference`` is empty, then return None.
:type reference: set
:param reference: A set of reference values.
:type test: set
:param test: A set of values to compare against the reference set.
:rtype: float or None
"""
if (not hasattr(reference, 'intersection') or
not hasattr(test, 'intersection')):
raise TypeError('reference and test should be sets')
if len(reference) == 0:
return None
else:
return float(len(reference.intersection(test)))/len(reference)
def f_measure(reference, test, alpha=0.5):
"""
Given a set of reference values and a set of test values, return
the f-measure of the test values, when compared against the
reference values. The f-measure is the harmonic mean of the
``precision`` and ``recall``, weighted by ``alpha``. In particular,
given the precision *p* and recall *r* defined by:
- *p* = card(``reference`` intersection ``test``)/card(``test``)
- *r* = card(``reference`` intersection ``test``)/card(``reference``)
The f-measure is:
- *1/(alpha/p + (1-alpha)/r)*
If either ``reference`` or ``test`` is empty, then ``f_measure``
returns None.
:type reference: set
:param reference: A set of reference values.
:type test: set
:param test: A set of values to compare against the reference set.
:rtype: float or None
"""
p = precision(reference, test)
r = recall(reference, test)
if p is None or r is None:
return None
if p == 0 or r == 0:
return 0
return 1.0/(alpha/p + (1-alpha)/r)
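# Worked illustration with assumed cardinalities: if the intersection has
# 1 element, card(test) = 2 and card(reference) = 4, then p = 0.5 and
# r = 0.25, so with the default alpha = 0.5:
#   f = 1/(0.5/0.5 + 0.5/0.25) = 1/(1 + 2) = 1/3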
def log_likelihood(reference, test):
"""
Given a list of reference values and a corresponding list of test
probability distributions, return the average log likelihood of
the reference values, given the probability distributions.
:param reference: A list of reference values
:type reference: list
:param test: A list of probability distributions over values to
compare against the corresponding reference values.
:type test: list(ProbDistI)
"""
if len(reference) != len(test):
raise ValueError("Lists must have the same length.")
# Return the average value of dist.logprob(val).
total_likelihood = sum(dist.logprob(val)
for (val, dist) in izip(reference, test))
return total_likelihood/len(reference)
def approxrand(a, b, **kwargs):
"""
Returns an approximate significance level between two lists of
independently generated test values.
Approximate randomization calculates significance by randomly drawing
from a sample of the possible permutations. At the limit of the number
of possible permutations, the significance level is exact. The
approximate significance level is the sample mean number of times the
statistic of the permutated lists varies from the actual statistic of
the unpermuted argument lists.
:return: a tuple containing an approximate significance level, the count
of the number of times the pseudo-statistic varied from the
actual statistic, and the number of shuffles
:rtype: tuple
:param a: a list of test values
:type a: list
:param b: another list of independently generated test values
:type b: list
"""
shuffles = kwargs.get('shuffles', 999)
# there's no point in trying to shuffle beyond all possible permutations
shuffles = \
min(shuffles, reduce(operator.mul, xrange(1, len(a) + len(b) + 1)))
stat = kwargs.get('statistic', lambda lst: float(sum(lst)) / len(lst))
verbose = kwargs.get('verbose', False)
if verbose:
print('shuffles: %d' % shuffles)
actual_stat = fabs(stat(a) - stat(b))
if verbose:
print('actual statistic: %f' % actual_stat)
print('-' * 60)
c = 1e-100
lst = LazyConcatenation([a, b])
indices = range(len(a) + len(b))
for i in range(shuffles):
if verbose and i % 10 == 0:
print('shuffle: %d' % i)
shuffle(indices)
pseudo_stat_a = stat(LazyMap(lambda i: lst[i], indices[:len(a)]))
pseudo_stat_b = stat(LazyMap(lambda i: lst[i], indices[len(a):]))
pseudo_stat = fabs(pseudo_stat_a - pseudo_stat_b)
if pseudo_stat >= actual_stat:
c += 1
if verbose and i % 10 == 0:
print('pseudo-statistic: %f' % pseudo_stat)
print('significance: %f' % (float(c + 1) / (i + 1)))
print('-' * 60)
significance = float(c + 1) / (shuffles + 1)
if verbose:
print('significance: %f' % significance)
if betai:
for phi in [0.01, 0.05, 0.10, 0.15, 0.25, 0.50]:
print("prob(phi<=%f): %f" % (phi, betai(c, shuffles, phi)))
return (significance, c, shuffles)
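# Usage sketch (hypothetical data; the exact result varies with the random
# shuffles drawn, so only the general magnitude is meaningful):
#
# >>> a = [0.2, 0.4, 0.5, 0.9, 0.6]
# >>> b = [0.1, 0.2, 0.3, 0.4, 0.2]
# >>> significance, count, shuffles = approxrand(a, b, shuffles=999)
# >>> significance < 0.05  # small values suggest the two means really differ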
def demo():
print('-'*75)
reference = 'DET NN VB DET JJ NN NN IN DET NN'.split()
test = 'DET VB VB DET NN NN NN IN DET NN'.split()
print('Reference =', reference)
print('Test =', test)
print('Accuracy:', accuracy(reference, test))
print('-'*75)
reference_set = set(reference)
test_set = set(test)
print('Reference =', reference_set)
print('Test = ', test_set)
print('Precision:', precision(reference_set, test_set))
print(' Recall:', recall(reference_set, test_set))
print('F-Measure:', f_measure(reference_set, test_set))
print('-'*75)
if __name__ == '__main__':
demo()
| apache-2.0 |
damdam-s/OpenUpgrade | addons/mass_mailing/tests/test_mail.py | 388 | 1221 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.common import TestMail
class test_message_compose(TestMail):
def test_OO_mail_mail_tracking(self):
""" Tests designed for mail_mail tracking (opened, replied, bounced) """
pass
| agpl-3.0 |
youprofit/zato | code/zato-common/src/zato/common/util.py | 6 | 44974 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2010 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import copy, errno, gc, inspect, json, linecache, logging, os, random, re, signal, string, threading, traceback, sys
from ast import literal_eval
from contextlib import closing
from cStringIO import StringIO
from datetime import datetime
from glob import glob
from hashlib import sha256
from importlib import import_module
from itertools import ifilter, izip, izip_longest, tee
from operator import itemgetter
from os import getuid
from os.path import abspath, isabs, join
from pprint import pprint as _pprint, PrettyPrinter
from pwd import getpwuid
from random import getrandbits
from socket import gethostname, getfqdn
from string import Template
from tempfile import NamedTemporaryFile
from threading import current_thread
from traceback import format_exc
from urlparse import urlparse
# alembic
from alembic import op
# anyjson
from anyjson import dumps, loads
# base32_crockford
from base32_crockford import encode as b32_crockford_encode
# Bunch
from bunch import Bunch, bunchify
# ConfigObj
from configobj import ConfigObj
# dateutil
from dateutil.parser import parse
# gevent
from gevent.greenlet import Greenlet
from gevent.hub import Hub
# lxml
from lxml import etree, objectify
# OpenSSL
from OpenSSL import crypto
# Paste
from paste.util.converters import asbool
# pip
from pip.download import is_archive_file, unpack_file_url
# portalocker
import portalocker
# psutil
import psutil
# pytz
import pytz
# requests
import requests
# Spring Python
from springpython.context import ApplicationContext
from springpython.remoting.http import CAValidatingHTTPSConnection
from springpython.remoting.xmlrpc import SSLClientTransport
# SQLAlchemy
import sqlalchemy as sa
from sqlalchemy.exc import IntegrityError, ProgrammingError
from sqlalchemy import orm
# Texttable
from texttable import Texttable
# validate
from validate import is_boolean, is_integer, VdtTypeError
# Zato
from zato.common import CHANNEL, DATA_FORMAT, engine_def, engine_def_sqlite, KVDB, MISC, SECRET_SHADOW, SIMPLE_IO, soap_body_path, \
soap_body_xpath, TLS, TRACE1, ZatoException, ZATO_NOT_GIVEN
from zato.common.broker_message import SERVICE
from zato.common.crypto import CryptoManager
from zato.common.odb.model import HTTPBasicAuth, HTTPSOAP, IntervalBasedJob, Job, Server, Service
from zato.common.odb.query import _service as _service
logger = logging.getLogger(__name__)
logging.addLevelName(TRACE1, "TRACE1")
_repr_template = Template('<$class_name at $mem_loc$attrs>')
_uncamelify_re = re.compile(r'((?<=[a-z])[A-Z]|(?<!\A)[A-Z](?=[a-z]))')
_epoch = datetime.utcfromtimestamp(0) # Start of UNIX epoch
random.seed()
# ################################################################################################################################
TLS_KEY_TYPE = {
crypto.TYPE_DSA: 'DSA',
crypto.TYPE_RSA: 'RSA'
}
# ################################################################################################################################
def absolutize_path(base, path):
""" Turns a path into an absolute path if it's relative to the base
location. If the path is already an absolute path, it is returned as-is.
"""
if isabs(path):
return path
return abspath(join(base, path))
def current_host():
return gethostname() + '/' + getfqdn()
def pprint(obj):
""" Pretty-print an object into a string buffer.
"""
# Get dicts' items.
if hasattr(obj, "items"):
obj = sorted(obj.items())
buf = StringIO()
_pprint(obj, buf)
value = buf.getvalue()
buf.close()
return value
def encrypt(data, priv_key, b64=True):
""" Encrypt data using a public key derived from the private key.
data - data to be encrypted
priv_key - private key to use (as a PEM string)
b64 - should the encrypted data be BASE64-encoded before being returned, defaults to True
"""
cm = CryptoManager(priv_key=priv_key)
cm.load_keys()
return cm.encrypt(data, b64)
def decrypt(data, priv_key, b64=True):
""" Decrypts data using the given private key.
data - data to be encrypted
priv_key - private key to use (as a PEM string)
b64 - should the data be BASE64-decoded before being decrypted, defaults to True
"""
cm = CryptoManager(priv_key=priv_key)
cm.load_keys()
return cm.decrypt(data, b64)
def get_executable():
""" Returns the wrapper buildout uses for executing Zato commands. This has
all the dependencies added to PYTHONPATH.
"""
return os.path.join(os.path.dirname(sys.executable), 'py')
def get_zato_command():
""" Returns the full path to the 'zato' command' in a buildout environment.
"""
return os.path.join(os.path.dirname(sys.executable), 'zato')
# Based on
# http://stackoverflow.com/questions/384076/how-can-i-make-the-python-logging-output-to-be-colored
class ColorFormatter(logging.Formatter):
# TODO: Make it all configurable
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLORS = {
'WARNING': YELLOW,
'INFO': WHITE,
'DEBUG': BLUE,
'CRITICAL': YELLOW,
'ERROR': RED,
'TRACE1': YELLOW
}
def __init__(self, fmt):
self.use_color = True
msg = self.formatter_msg(fmt, self.use_color)
logging.Formatter.__init__(self, msg)
def formatter_msg(self, msg, use_color=True):
if use_color:
msg = msg.replace("$RESET", self.RESET_SEQ).replace("$BOLD", self.BOLD_SEQ)
else:
msg = msg.replace("$RESET", "").replace("$BOLD", "")
return msg
def format(self, record):
levelname = record.levelname
if self.use_color and levelname in self.COLORS:
fore_color = 30 + self.COLORS[levelname]
levelname_color = self.COLOR_SEQ % fore_color + levelname + self.RESET_SEQ
record.levelname = levelname_color
return logging.Formatter.format(self, record)
def object_attrs(_object, ignore_double_underscore, to_avoid_list, sort):
attrs = dir(_object)
if ignore_double_underscore:
attrs = ifilter(lambda elem: not elem.startswith("__"), attrs)
_to_avoid_list = getattr(_object, to_avoid_list, None) # Don't swallow exceptions
if _to_avoid_list is not None:
attrs = ifilter(lambda elem: not elem in _to_avoid_list, attrs)
if sort:
attrs = sorted(attrs)
return attrs
def make_repr(_object, ignore_double_underscore=True, to_avoid_list='repr_to_avoid', sort=True):
""" Makes a nice string representation of an object, suitable for logging purposes.
"""
attrs = object_attrs(_object, ignore_double_underscore, to_avoid_list, sort)
buff = StringIO()
for attr in attrs:
attr_obj = getattr(_object, attr)
if not callable(attr_obj):
buff.write(' ')
buff.write('%s:`%r`' % (attr, attr_obj))
out = _repr_template.safe_substitute(
class_name=_object.__class__.__name__, mem_loc=hex(id(_object)), attrs=buff.getvalue())
buff.close()
return out
def to_form(_object):
""" Reads public attributes of an object and creates a dictionary out of it;
handy for providing initial data to a Django form which isn't backed by
a true Django model.
"""
out = {}
attrs = object_attrs(_object, True, "repr_to_avoid", False)
for attr in attrs:
out[attr] = getattr(_object, attr)
return out
def get_lb_client(lb_host, lb_agent_port, ssl_ca_certs, ssl_key_file, ssl_cert_file, timeout):
""" Returns an SSL XML-RPC client to the load-balancer.
"""
from zato.agent.load_balancer.client import LoadBalancerAgentClient
agent_uri = "https://{host}:{port}/RPC2".format(host=lb_host, port=lb_agent_port)
# See the 'Problems with XML-RPC over SSL' thread for details
# https://lists.springsource.com/archives/springpython-users/2011-June/000480.html
if sys.version_info >= (2, 7):
class Python27CompatTransport(SSLClientTransport):
def make_connection(self, host):
return CAValidatingHTTPSConnection(
host, strict=self.strict, ca_certs=self.ca_certs,
keyfile=self.keyfile, certfile=self.certfile, cert_reqs=self.cert_reqs,
ssl_version=self.ssl_version, timeout=self.timeout)
transport = Python27CompatTransport
else:
transport = None
return LoadBalancerAgentClient(
agent_uri, ssl_ca_certs, ssl_key_file, ssl_cert_file, transport=transport, timeout=timeout)
def tech_account_password(password_clear, salt):
return sha256(password_clear + ':' + salt).hexdigest()
def new_cid():
""" Returns a new 128-bit correlation identifier. It's *not* safe to use the ID
for any cryptographic purposes; it's only meant to be used as a conveniently
formatted ticket attached to each of the requests processed by Zato servers.
Changed in 2.0: the number is now 28 characters long, not 40 as in previous versions.
"""
# The number below (27) needs to be kept in sync with zato.common.log_message.CID_LENGTH.
# There is nothing special in the 'K' prefix, it's just so that a CID always
# begins with a letter and 'K' seems like something
# that can't be taken for some other ASCII letter (e.g. is it Z or 2 etc.)
return 'K{0:0>27}'.format(b32_crockford_encode(getrandbits(127) + (1 << 127)))
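# Example output (illustrative only -- every call returns a fresh random value):
#
# >>> new_cid()
# 'K2B4Z7X0F9Q3M1C8R5T6W2Y4D1SG'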
def get_config(repo_location, config_name, bunchified=True):
""" Returns the configuration object. Will load additional user-defined config files,
if any are available at all.
"""
conf = ConfigObj(os.path.join(repo_location, config_name))
conf = bunchify(conf) if bunchified else conf
conf.user_config_items = {}
# user_config is new in 2.0
user_config = conf.get('user_config')
if user_config:
for name, path in user_config.items():
if not isabs(path):
path = os.path.expanduser(path)
if not isabs(path):
path = os.path.normpath(os.path.join(repo_location, path))
if not os.path.exists(path):
logger.warn('User config not found `%s`, name:`%s`', path, name)
else:
user_conf = ConfigObj(path)
user_conf = bunchify(user_conf) if bunchified else user_conf
conf.user_config_items[name] = user_conf
return conf
def _get_ioc_config(location, config_class):
""" Instantiates an Inversion of Control container from the given location
if the location exists at all.
"""
stat = os.stat(location)
if stat.st_size:
config = config_class(location)
else:
config = None
return config
def get_app_context(config):
""" Returns the Zato's Inversion of Control application context.
"""
ctx_class_path = config['spring']['context_class']
ctx_class_path = ctx_class_path.split('.')
mod_name, class_name = '.'.join(ctx_class_path[:-1]), ctx_class_path[-1:][0]
mod = import_module(mod_name)
class_ = getattr(mod, class_name)()
return ApplicationContext(class_)
def get_crypto_manager(repo_location, app_context, config, load_keys=True):
""" Returns a tool for crypto manipulations.
"""
crypto_manager = app_context.get_object('crypto_manager')
priv_key_location = config['crypto']['priv_key_location']
cert_location = config['crypto']['cert_location']
ca_certs_location = config['crypto']['ca_certs_location']
priv_key_location = absolutize_path(repo_location, priv_key_location)
cert_location = absolutize_path(repo_location, cert_location)
ca_certs_location = absolutize_path(repo_location, ca_certs_location)
crypto_manager.priv_key_location = priv_key_location
crypto_manager.cert_location = cert_location
crypto_manager.ca_certs_location = ca_certs_location
if load_keys:
crypto_manager.load_keys()
return crypto_manager
def get_current_user():
return getpwuid(getuid()).pw_name
def service_name_from_impl(impl_name):
""" Turns a Zato internal service's implementation name into a shorter
service name.
"""
return impl_name.replace('server.service.internal.', '')
def deployment_info(method, object_, timestamp, fs_location, remote_host='', remote_user=''):
""" Returns a JSON document containing information who deployed a service
onto a server, where from and when it was.
"""
return {
'method': method,
'object': object_,
'timestamp': timestamp,
'fs_location':fs_location,
'remote_host': remote_host,
'remote_user': remote_user,
'current_host': current_host(),
'current_user': get_current_user(),
}
def get_body_payload(body):
body_children_count = body[0].countchildren()
if body_children_count == 0:
body_payload = None
elif body_children_count == 1:
body_payload = body[0].getchildren()[0]
else:
body_payload = body[0].getchildren()
return body_payload
def payload_from_request(cid, request, data_format, transport):
""" Converts a raw request to a payload suitable for usage with SimpleIO.
"""
if request is not None:
if data_format == DATA_FORMAT.XML:
if transport == 'soap':
if isinstance(request, objectify.ObjectifiedElement):
soap = request
else:
soap = objectify.fromstring(request)
body = soap_body_xpath(soap)
if not body:
raise ZatoException(cid, 'Client did not send the [{}] element'.format(soap_body_path))
payload = get_body_payload(body)
else:
if isinstance(request, objectify.ObjectifiedElement):
payload = request
else:
payload = objectify.fromstring(request)
elif data_format in(DATA_FORMAT.DICT, DATA_FORMAT.JSON):
if not request:
return ''
if isinstance(request, basestring) and data_format == DATA_FORMAT.JSON:
payload = loads(request)
else:
payload = request
else:
payload = request
else:
payload = request
return payload
def is_python_file(name):
""" Is it a Python file we can import Zato services from?
"""
for suffix in('py', 'pyw'):
if name.endswith(suffix):
return True
def fs_safe_name(value):
return re.sub('[{}]'.format(string.punctuation + string.whitespace), '_', value)
def fs_safe_now():
""" Returns a UTC timestamp with any characters unsafe for filesystem names
removed.
"""
return fs_safe_name(str(datetime.utcnow()))
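# For instance, fs_safe_name turns '2014-01-02 03:04:05.123456' into
# '2014_01_02_03_04_05_123456', which is also what fs_safe_now produces
# for the current UTC timestamp.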
class _DummyLink(object):
""" A dummy class for staying consistent with pip's API in certain places
below.
"""
def __init__(self, url):
self.url = url
def decompress(archive, dir_name):
""" Decompresses an archive into a directory, the directory must already exist.
"""
unpack_file_url(_DummyLink('file:' + archive), dir_name)
def visit_py_source(dir_name):
for pattern in('*.py', '*.pyw'):
glob_path = os.path.join(dir_name, pattern)
for py_path in sorted(glob(glob_path)):
yield py_path
def _os_remove(path):
""" A helper function so it's easier to mock it in unittests.
"""
return os.remove(path)
def hot_deploy(parallel_server, file_name, path, delete_path=True, notify=True):
""" Hot-deploys a package if it looks like a Python module or archive.
"""
if is_python_file(file_name) or is_archive_file(file_name):
logger.debug('About to hot-deploy [{}]'.format(path))
now = datetime.utcnow()
di = dumps(deployment_info('hot-deploy', file_name, now.isoformat(), path))
# Insert the package into the DB ..
package_id = parallel_server.odb.hot_deploy(
now, di, file_name, open(path, 'rb').read(), parallel_server.id)
# .. and optionally notify all the servers they're to pick up a delivery
if notify:
parallel_server.notify_new_package(package_id)
if delete_path:
_os_remove(path)
return package_id
else:
logger.warn('Ignoring {}'.format(path))
# As taken from http://wiki.python.org/moin/SortingListsOfDictionaries
def multikeysort(items, columns):
comparers = [((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]
def comparer(left, right):
for fn, mult in comparers:
result = cmp(fn(left), fn(right))
if result:
return mult * result
else:
return 0
return sorted(items, cmp=comparer)
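# Usage sketch (hypothetical rows): sort ascending by 'age', then descending
# by 'name' for rows that tie on 'age':
#
# rows = [{'name': 'a', 'age': 30}, {'name': 'b', 'age': 30}, {'name': 'c', 'age': 20}]
# multikeysort(rows, ['age', '-name'])  # -> rows c, b, a in that order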
# From http://docs.python.org/release/2.7/library/itertools.html#recipes
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return izip_longest(fillvalue=fillvalue, *args)
def translation_name(system1, key1, value1, system2, key2):
return KVDB.SEPARATOR.join((KVDB.TRANSLATION, system1, key1, value1, system2, key2))
def dict_item_name(system, key, value):
return KVDB.SEPARATOR.join((system, key, value))
# From http://docs.python.org/release/2.7/library/itertools.html#recipes
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return izip(a, b)
def from_local_to_utc(dt, tz_name, dayfirst=True):
""" What is the UTC time given the local time and the timezone's name?
"""
if not isinstance(dt, datetime):
dt = parse(dt, dayfirst=dayfirst)
dt = pytz.timezone(tz_name).localize(dt)
utc_dt = pytz.utc.normalize(dt.astimezone(pytz.utc))
return utc_dt
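# For example, 18:00 in Warsaw during CEST (UTC+2) corresponds to 16:00 UTC:
#
# >>> from_local_to_utc('2014-07-01 18:00:00', 'Europe/Warsaw')
# datetime.datetime(2014, 7, 1, 16, 0, tzinfo=<UTC>)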
def from_utc_to_local(dt, tz_name):
""" What is the local time in the user-provided time zone name?
"""
if not isinstance(dt, datetime):
dt = parse(dt)
local_tz = pytz.timezone(tz_name)
dt = local_tz.normalize(dt.astimezone(local_tz))
return dt
# ##############################################################################
def _utcnow():
""" See zato.common.util.utcnow for docstring.
"""
return datetime.utcnow()
def utcnow():
""" A thin wrapper around datetime.utcnow added so that tests can mock it
out and return their own timestamps at will.
"""
return _utcnow()
def _now(tz):
""" See zato.common.util.utcnow for docstring.
"""
return datetime.now(tz)
def now(tz=None):
""" A thin wrapper around datetime.now added so that tests can mock it
out and return their own timestamps at will.
"""
return _now(tz)
def datetime_to_seconds(dt):
""" Converts a datetime object to a number of seconds since UNIX epoch.
"""
return (dt - _epoch).total_seconds()
# ##############################################################################
def clear_locks(kvdb, server_token, kvdb_config=None, decrypt_func=None):
""" Clears out any KVDB locks held by Zato servers.
"""
if kvdb_config:
kvdb.config = kvdb_config
if decrypt_func:
kvdb.decrypt_func = decrypt_func
kvdb.init()
for name in kvdb.conn.keys('{}*{}*'.format(KVDB.LOCK_PREFIX, server_token)):
value = kvdb.conn.get(name)
logger.debug('Deleting lock:[{}], value:[{}]'.format(name, value))
kvdb.conn.delete(name)
kvdb.close()
# Inspired by http://stackoverflow.com/a/9283563
def uncamelify(s, separator='-', elem_func=unicode.lower):
""" Converts a CamelCaseName into a more readable one, e.g.
will turn ILikeToReadWSDLDocsNotReallyNOPENotMeQ into
i-like-to-read-wsdl-docs-not-really-nope-not-me-q or a similar one,
depending on the value of separator and elem_func.
"""
return separator.join(elem_func(elem) for elem in re.sub(_uncamelify_re, r' \1', s).split())
def get_component_name(prefix='parallel'):
""" Returns a name of the component issuing a given request so it's possible
to trace which Zato component issued it.
"""
return '{}/{}/{}/{}'.format(prefix, current_host(), os.getpid(), current_thread().name)
def dotted_getattr(o, path):
return reduce(getattr, path.split('.'), o)
def get_service_by_name(session, cluster_id, name):
logger.debug('Looking for name:[{}] in cluster_id:[{}]'.format(name, cluster_id))
return _service(session, cluster_id).\
filter(Service.name==name).\
one()
def add_startup_jobs(cluster_id, odb, stats_jobs):
""" Adds one of the interval jobs to the ODB. Note that it isn't being added
directly to the scheduler because we want users to be able to fine-tune the job's
settings.
"""
with closing(odb.session()) as session:
for item in stats_jobs:
try:
service_id = get_service_by_name(session, cluster_id, item['service'])[0]
now = datetime.utcnow()
job = Job(None, item['name'], True, 'interval_based', now, item.get('extra', '').encode('utf-8'),
cluster_id=cluster_id, service_id=service_id)
kwargs = {}
for name in('seconds', 'minutes'):
if name in item:
kwargs[name] = item[name]
ib_job = IntervalBasedJob(None, job, **kwargs)
session.add(job)
session.add(ib_job)
session.commit()
except(IntegrityError, ProgrammingError), e:
session.rollback()
logger.debug('Caught an expected error, carrying on anyway, e:[%s]', format_exc(e).decode('utf-8'))
def hexlify(item):
""" Returns a nice hex version of a string given on input.
"""
return ' '.join([elem1+elem2 for (elem1, elem2) in grouper(2, item.encode('hex'))])
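# For instance, hexlify('ab') returns '61 62' -- each input byte becomes
# a space-separated pair of hex digits.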
def validate_input_dict(cid, *validation_info):
""" Checks that input belongs is one of allowed values.
"""
for key_name, key, source in validation_info:
if not source.has(key):
msg = 'Invalid {}:[{}]'.format(key_name, key)
log_msg = '{} (attrs: {})'.format(msg, source.attrs)
logger.warn(log_msg)
raise ZatoException(cid, msg)
# ################################################################################################################################
# Code below taken from tripod https://github.com/shayne/tripod/blob/master/tripod/sampler.py and slightly modified
# under the terms of LGPL (see LICENSE.txt file for details).
class SafePrettyPrinter(PrettyPrinter, object):
def format(self, obj, context, maxlevels, level):
try:
return super(SafePrettyPrinter, self).format(
obj, context, maxlevels, level)
except Exception:
return object.__repr__(obj)[:-1] + ' (bad repr)>', True, False
def spformat(obj, depth=None):
return SafePrettyPrinter(indent=1, width=76, depth=depth).pformat(obj)
def formatvalue(v):
s = spformat(v, depth=1).replace('\n', '')
if len(s) > 12500:
s = object.__repr__(v)[:-1] + ' (really long repr)>'
return '=' + s
def get_stack(f, with_locals=False):
limit = getattr(sys, 'tracebacklimit', None)
frames = []
n = 0
while f is not None and (limit is None or n < limit):
lineno, co = f.f_lineno, f.f_code
name, filename = co.co_name, co.co_filename
args = inspect.getargvalues(f)
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line:
line = line.strip()
else:
line = None
frames.append((filename, lineno, name, line, f.f_locals, args))
f = f.f_back
n += 1
frames.reverse()
out = []
for filename, lineno, name, line, localvars, args in frames:
out.append(' File "%s", line %d, in %s' % (filename, lineno, name))
if line:
out.append(' %s' % line.strip())
if with_locals:
args = inspect.formatargvalues(formatvalue=formatvalue, *args)
out.append('\n Arguments: %s%s' % (name, args))
if with_locals and localvars:
out.append(' Local variables:\n')
try:
reprs = spformat(localvars)
except Exception:
reprs = "failed to format local variables"
out += [' ' + l for l in reprs.splitlines()]
out.append('')
return '\n'.join(out)
# ################################################################################################################################
def get_threads_traceback(pid):
result = {}
id_name = dict([(th.ident, th.name) for th in threading.enumerate()])
for thread_id, frame in sys._current_frames().items():
key = '{}:{}'.format(pid, id_name.get(thread_id, '(No name)'))
result[key] = get_stack(frame, True)
return result
def get_greenlets_traceback(pid):
result = {}
for item in gc.get_objects():
if not isinstance(item, (Greenlet, Hub)):
continue
if not item:
continue
key = '{}:{}'.format(pid, repr(item))
result[key] = ''.join(get_stack(item.gr_frame, True))
return result
def dump_stacks(*ignored):
pid = os.getpid()
table = Texttable()
table.set_cols_width((30, 90))
table.set_cols_dtype(['t', 't'])
rows = [['Proc:Thread/Greenlet', 'Traceback']]
rows.extend(sorted(get_threads_traceback(pid).items()))
rows.extend(sorted(get_greenlets_traceback(pid).items()))
table.add_rows(rows)
logger.info('\n' + table.draw())
# Taken from https://stackoverflow.com/a/16589622
def get_full_stack():
exc = sys.exc_info()[0]
stack = traceback.extract_stack()[:-1] # last one would be full_stack()
if not exc is None: # i.e. if an exception is present
del stack[-1] # remove call of full_stack, the printed exception
# will contain the caught exception caller instead
trc = 'Traceback (most recent call last):\n'
stackstr = trc + ''.join(traceback.format_list(stack))
if not exc is None:
stackstr += ' ' + traceback.format_exc().decode('utf-8').lstrip(trc)
return stackstr
def register_diag_handlers():
""" Registers diagnostic handlers dumping stacks, threads and greenlets on receiving a signal.
"""
signal.signal(signal.SIGURG, dump_stacks)
# ################################################################################################################################
def parse_extra_into_dict(lines, convert_bool=True):
""" Creates a dictionary out of key=value lines.
"""
_extra = {}
if lines:
extra = ';'.join(lines.splitlines())
for line in extra.split(';'):
original_line = line
if line:
line = line.split('=')
if not len(line) == 2:
raise ValueError('Each line must be a single key=value entry, not [{}]'.format(original_line))
key, value = line
value = value.strip()
if convert_bool:
try:
value = is_boolean(value)
except VdtTypeError:
# It's cool, not a boolean
pass
try:
value = is_integer(value)
except VdtTypeError:
# OK, not an integer
pass
# Could be a dict or another simple type then
try:
value = literal_eval(value)
except Exception:
pass
# OK, let's just treat it as string
_extra[key.strip()] = value
return _extra
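# For example (note that values are converted where possible):
#
# parse_extra_into_dict('a=5\nis_active=True\nname=abc')
# -> {'a': 5, 'is_active': True, 'name': 'abc'} (key order may vary)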
# ################################################################################################################################
# Taken from http://plumberjack.blogspot.cz/2009/09/how-to-treat-logger-like-output-stream.html
class LoggerWriter:
def __init__(self, logger, level):
self.logger = logger
self.level = level
def write(self, message):
if message != '\n':
self.logger.log(self.level, message)
# ################################################################################################################################
def validate_xpath(expr):
""" Evaluates an XPath expression thus confirming it is correct.
"""
etree.XPath(expr)
return True
# ################################################################################################################################
# Taken from http://grodola.blogspot.com/2014/04/reimplementing-netstat-in-cpython.html
def is_port_taken(port):
for conn in psutil.net_connections(kind='tcp'):
if conn.laddr[1] == port and conn.status == psutil.CONN_LISTEN:
return True
return False
# ################################################################################################################################
def get_haproxy_pidfile(component_dir):
json_config = json.loads(open(os.path.join(component_dir, 'config', 'repo', 'lb-agent.conf')).read())
return os.path.abspath(os.path.join(component_dir, json_config['pid_file']))
def store_pidfile(component_dir):
open(os.path.join(component_dir, MISC.PIDFILE), 'w').write('{}'.format(os.getpid()))
# ################################################################################################################################
def get_kvdb_config_for_log(config):
config = copy.deepcopy(config)
if config.shadow_password_in_logs:
config.password = SECRET_SHADOW
return config
def has_redis_sentinels(config):
return asbool(config.get('use_redis_sentinels', False))
# ################################################################################################################################
def alter_column_nullable_false(table_name, column_name, default_value, column_type):
column = sa.sql.table(table_name, sa.sql.column(column_name))
op.execute(column.update().values({column_name:default_value}))
op.alter_column(table_name, column_name, type_=column_type, existing_type=column_type, nullable=False)
# ################################################################################################################################
def validate_tls_from_payload(payload, is_key=False):
with NamedTemporaryFile(prefix='zato-tls-') as tf:
tf.write(payload)
tf.flush()
pem = open(tf.name).read()
cert_info = crypto.load_certificate(crypto.FILETYPE_PEM, pem)
cert_info = sorted(dict(cert_info.get_subject().get_components()).items())
cert_info = '; '.join('{}={}'.format(k, v) for k, v in cert_info)
if is_key:
key_info = crypto.load_privatekey(crypto.FILETYPE_PEM, pem)
key_info = '{}; {} bits'.format(TLS_KEY_TYPE[key_info.type()], key_info.bits())
return '{}; {}'.format(key_info, cert_info)
else:
return cert_info
get_tls_from_payload = validate_tls_from_payload
def get_tls_full_path(root_dir, component, info):
return os.path.join(root_dir, component, fs_safe_name(info) + '.pem')
def get_tls_ca_cert_full_path(root_dir, info):
return get_tls_full_path(root_dir, TLS.DIR_CA_CERTS, info)
def get_tls_key_cert_full_path(root_dir, info):
return get_tls_full_path(root_dir, TLS.DIR_KEYS_CERTS, info)
def store_tls(root_dir, payload, is_key=False):
# Raises exception if it's not really a certificate.
info = get_tls_from_payload(payload, is_key)
pem_file_path = get_tls_full_path(root_dir, TLS.DIR_KEYS_CERTS if is_key else TLS.DIR_CA_CERTS, info)
pem_file = open(pem_file_path, 'w')
try:
portalocker.lock(pem_file, portalocker.LOCK_EX)
pem_file.write(payload)
pem_file.close()
os.chmod(pem_file_path, 0o640)
return pem_file_path
except portalocker.LockException:
pass # It's OK, something else is doing the same thing right now
# ################################################################################################################################
def replace_private_key(orig_payload):
if isinstance(orig_payload, basestring):
for item in TLS.BEGIN_END:
begin = '-----BEGIN {}PRIVATE KEY-----'.format(item)
if begin in orig_payload:
end = '-----END {}PRIVATE KEY-----'.format(item)
begin_last_idx = orig_payload.find(begin) + len(begin) + 1
end_preceding_idx = orig_payload.find(end) - 1
return orig_payload[0:begin_last_idx] + SECRET_SHADOW + orig_payload[end_preceding_idx:]
# No private key at all in payload
return orig_payload
# ################################################################################################################################
def delete_tls_material_from_fs(server, info, full_path_func):
try:
os.remove(full_path_func(server.tls_dir, info))
except OSError, e:
if e.errno == errno.ENOENT:
# It's ok - some other worker must have deleted it already
pass
else:
raise
# ################################################################################################################################
def ping_solr(config):
result = urlparse(config.address)
requests.get('{}://{}{}'.format(result.scheme, result.netloc, config.ping_path))
# ################################################################################################################################
def ping_odoo(conn):
user_model = conn.get_model('res.users')
ids = user_model.search([('login', '=', conn.login)])
user_model.read(ids[0], ['login'])['login']
# ################################################################################################################################
class StaticConfig(Bunch):
def __init__(self, path):
super(StaticConfig, self).__init__()
self.read(path)
def read(self, path):
for item in os.listdir(path):
f = open(os.path.join(path, item))
value = f.read()
f.close()
self[item] = value
# ################################################################################################################################
def add_scheduler_jobs(server, spawn=True):
for(id, name, is_active, job_type, start_date, extra, service_name, _,
_, weeks, days, hours, minutes, seconds, repeats, cron_definition)\
in server.odb.get_job_list(server.cluster_id):
if is_active:
job_data = Bunch({'id':id, 'name':name, 'is_active':is_active,
'job_type':job_type, 'start_date':start_date,
'extra':extra, 'service':service_name, 'weeks':weeks,
'days':days, 'hours':hours, 'minutes':minutes,
'seconds':seconds, 'repeats':repeats,
'cron_definition':cron_definition})
server.singleton_server.scheduler.create_edit('create', job_data, spawn=spawn)
# ################################################################################################################################
def get_basic_auth_credentials(auth):
if not auth:
return None, None
prefix = 'Basic '
if not auth.startswith(prefix):
return None, None
_, auth = auth.split(prefix)
auth = auth.strip().decode('base64')
return auth.split(':', 1)
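# For example, with 'user:pass' BASE64-encoded as 'dXNlcjpwYXNz':
#
# >>> get_basic_auth_credentials('Basic dXNlcjpwYXNz')
# ['user', 'pass']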
# ################################################################################################################################
def parse_tls_channel_security_definition(value):
if not value:
raise ValueError('No definition given `{}`'.format(repr(value)))
for line in value.splitlines():
line = line.strip()
if not line:
continue
if not '=' in line:
raise ValueError("Line `{}` has no '=' key/value separator".format(line))
# It's possible we will have multiple '=' symbols.
sep_index = line.find('=')
key, value = line[:sep_index], line[sep_index+1:]
if not key:
raise ValueError('Key missing in line `{}`'.format(line))
if not value:
raise ValueError('Value missing in line `{}`'.format(line))
yield 'HTTP_X_ZATO_TLS_{}'.format(key.upper()), value
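# Each non-empty line becomes one WSGI-style header pair, e.g.:
#
# list(parse_tls_channel_security_definition('cn=my.client\nou=devops'))
# -> [('HTTP_X_ZATO_TLS_CN', 'my.client'), ('HTTP_X_ZATO_TLS_OU', 'devops')]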
# ################################################################################################################################
def get_http_json_channel(name, service, cluster, security):
return HTTPSOAP(None, '{}.json'.format(name), True, True, 'channel', 'plain_http', None, '/zato/json/{}'.format(name),
None, '', None, SIMPLE_IO.FORMAT.JSON, service=service, cluster=cluster, security=security)
def get_http_soap_channel(name, service, cluster, security):
return HTTPSOAP(None, name, True, True, 'channel', 'soap', None, '/zato/soap', None, name, '1.1',
SIMPLE_IO.FORMAT.XML, service=service, cluster=cluster, security=security)
# ################################################################################################################################
def get_engine(args):
return sa.create_engine(get_engine_url(args))
def get_session(engine):
session = orm.sessionmaker() # noqa
session.configure(bind=engine)
return session()
# ################################################################################################################################
def get_crypto_manager_from_server_config(config, repo_dir):
priv_key_location = os.path.abspath(os.path.join(repo_dir, config.crypto.priv_key_location))
cm = CryptoManager(priv_key_location=priv_key_location)
cm.load_keys()
return cm
# ################################################################################################################################
def get_odb_session_from_server_config(config, cm):
engine_args = Bunch()
engine_args.odb_type = config.odb.engine
engine_args.odb_user = config.odb.username
engine_args.odb_password = cm.decrypt(config.odb.password) if config.odb.password else ''
engine_args.odb_host = config.odb.host
engine_args.odb_port = config.odb.port
engine_args.odb_db_name = config.odb.db_name
return get_session(get_engine(engine_args))
# ################################################################################################################################
def get_server_client_auth(config, repo_dir):
""" Returns credentials to authenticate with against Zato's own /zato/admin/invoke channel.
"""
session = get_odb_session_from_server_config(config, get_crypto_manager_from_server_config(config, repo_dir))
with closing(session) as session:
cluster = session.query(Server).\
filter(Server.token == config.main.token).\
one().cluster
channel = session.query(HTTPSOAP).\
filter(HTTPSOAP.cluster_id == cluster.id).\
filter(HTTPSOAP.url_path == '/zato/admin/invoke').\
filter(HTTPSOAP.connection == 'channel').\
one()
if channel.security_id:
security = session.query(HTTPBasicAuth).\
filter(HTTPBasicAuth.id == channel.security_id).\
first()
if security:
return (security.username, security.password)
def get_client_from_server_conf(server_dir):
from zato.client import get_client_from_server_conf as client_get_client_from_server_conf
return client_get_client_from_server_conf(server_dir, get_server_client_auth, get_config)
# ################################################################################################################################
django_sa_mappings = {
'NAME': 'db_name',
'HOST': 'host',
'PORT': 'port',
'USER': 'username',
'PASSWORD': 'password',
'odb_type': 'engine',
'db_type': 'engine',
}
cli_sa_mappings = {
'odb_db_name': 'db_name',
'odb_host': 'host',
'odb_port': 'port',
'odb_user': 'username',
'odb_password': 'password',
'odb_type': 'engine',
}
def get_engine_url(args):
attrs = {}
is_sqlite = False
is_django = 'NAME' in args
has_get = getattr(args, 'get', False)
odb_type = getattr(args, 'odb_type', None)
if odb_type:
is_sqlite = odb_type == 'sqlite'
else:
is_sqlite = args.get('engine') == 'sqlite' or args.get('db_type') == 'sqlite'
names = ('engine', 'username', 'password', 'host', 'port', 'name', 'db_name', 'db_type', 'sqlite_path', 'odb_type',
'odb_user', 'odb_password', 'odb_host', 'odb_port', 'odb_db_name', 'odb_type', 'ENGINE', 'NAME', 'HOST', 'USER',
'PASSWORD', 'PORT')
for name in names:
if has_get:
attrs[name] = args.get(name, '')
else:
attrs[name] = getattr(args, name, '')
# Re-map Django params into SQLAlchemy params
if is_django:
for name in django_sa_mappings:
value = attrs.get(name, ZATO_NOT_GIVEN)
if value != ZATO_NOT_GIVEN:
if not value and name in ('db_type', 'odb_type'):
continue
attrs[django_sa_mappings[name]] = value
# Zato CLI to SQLAlchemy
if not attrs.get('engine'):
for name in cli_sa_mappings:
value = attrs.get(name, ZATO_NOT_GIVEN)
if value != ZATO_NOT_GIVEN:
attrs[cli_sa_mappings[name]] = value
# Re-map server ODB params into SQLAlchemy params
if attrs['engine'] == 'sqlite':
db_name = attrs.get('db_name')
sqlite_path = attrs.get('sqlite_path')
if db_name:
attrs['sqlite_path'] = db_name
if sqlite_path:
attrs['db_name'] = sqlite_path
return (engine_def_sqlite if is_sqlite else engine_def).format(**attrs)
# ################################################################################################################################
def startup_service_payload_from_path(name, value, repo_location):
""" Reads payload from a local file. Abstracted out to ease in testing.
"""
orig_path = value.replace('file://', '')
if not os.path.isabs(orig_path):
path = os.path.normpath(os.path.join(repo_location, orig_path))
else:
path = orig_path
try:
payload = open(path).read()
except Exception, e:
logger.warn(
'Could not open payload path:`%s` `%s`, skipping startup service:`%s`, e:`%s`', orig_path, path, name, format_exc(e))
else:
return payload
def invoke_startup_services(
source, key, fs_server_config, repo_location, broker_client=None, service_name=None, skip_include=True, worker_store=None):
""" Invoked when we are the first worker and we know we have a broker client and all the other config ready
so we can publish the request to execute startup services. In the worst case the requests will get back to us but it's
also possible that other workers are already running. In short, there is no guarantee that any server or worker in particular
will receive the requests, only that exactly one of them will.
"""
for name, payload in fs_server_config.get(key, {}).items():
if service_name:
# We are to skip this service:
if skip_include:
if name == service_name:
continue
# We are to include this service only, any other is rejected
else:
if name != service_name:
continue
if payload.startswith('file://'):
payload = startup_service_payload_from_path(name, payload, repo_location)
if not payload:
continue
cid = new_cid()
msg = {}
msg['action'] = SERVICE.PUBLISH.value
msg['service'] = name
msg['payload'] = payload
msg['cid'] = cid
msg['channel'] = CHANNEL.STARTUP_SERVICE
if broker_client:
broker_client.invoke_async(msg)
else:
worker_store.on_message_invoke_service(msg, msg['channel'], msg['action'])
# ################################################################################################################################
| gpl-3.0 |
rohitwaghchaure/frappe-alec | frappe/website/permissions.py | 28 | 2434 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
def remove_empty_permissions():
permissions_cache_to_be_cleared = frappe.db.sql_list("""select distinct user
from `tabWebsite Route Permission`
where ifnull(`read`, 0)=0 and ifnull(`write`, 0)=0 and ifnull(`admin`, 0)=0""")
frappe.db.sql("""delete from `tabWebsite Route Permission`
where ifnull(`read`, 0)=0 and ifnull(`write`, 0)=0 and ifnull(`admin`, 0)=0""")
clear_permissions(permissions_cache_to_be_cleared)
def get_access(doc, pathname, user=None):
user = user or frappe.session.user
key = "website_route_permissions:{}".format(user)
cache = frappe.cache()
permissions = cache.get_value(key) or {}
if not permissions.get(doc.name):
permissions[doc.name] = _get_access(doc, pathname, user)
cache.set_value(key, permissions)
return permissions.get(doc.name)
def _get_access(doc, pathname, user):
read = write = admin = private_read = 0
if user == "Guest":
return { "read": doc.public_read, "write": 0, "admin": 0 }
if doc.public_write:
read = write = 1
elif doc.public_read:
read = 1
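# A permission applies when its group is an ancestor of (or the same as)
# the route's group in the nested-set tree, i.e. perm.lft <= doc.lft and
# perm.rgt >= doc.rgt; groups are visited from the root downwards.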
for perm in frappe.db.sql("""select
`tabWebsite Route Permission`.`read`,
`tabWebsite Route Permission`.`write`,
`tabWebsite Route Permission`.`admin`,
`tabWebsite Group`.lft,
`tabWebsite Group`.rgt
from
`tabWebsite Route Permission`, `tabWebsite Group`
where
`tabWebsite Route Permission`.website_route = %s and
`tabWebsite Route Permission`.user = %s and
`tabWebsite Route Permission`.reference = `tabWebsite Group`.name
order by `tabWebsite Group`.lft asc""", (user, pathname), as_dict=True):
if perm.lft <= doc.lft and perm.rgt >= doc.rgt:
if not (doc.public_read or private_read):
private_read = perm.read
if not read: read = perm.read
if not write: write = perm.write
if not admin: admin = perm.admin
if write: read = write
if read and write and admin:
break
else:
read = write = admin = private_read = 1
return { "read": read, "write": write, "admin": admin, "private_read": private_read }
def clear_permissions(users=None):
if isinstance(users, basestring):
users = [users]
elif users is None:
users = frappe.db.sql_list("""select name from `tabUser`""")
cache = frappe.cache()
for user in users:
cache.delete_value("website_route_permissions:{}".format(user))
| mit |
nexusz99/boto | boto/ses/exceptions.py | 151 | 1830 | """
Various exceptions that are specific to the SES module.
"""
from boto.exception import BotoServerError
class SESError(BotoServerError):
"""
Sub-class all SES-related errors from here. Don't raise this error
directly from anywhere. The only thing this gets us is the ability to
catch SESErrors separately from the more generic, top-level
BotoServerError exception.
"""
pass
class SESAddressNotVerifiedError(SESError):
"""
Raised when a "Reply-To" address has not been validated in SES yet.
"""
pass
class SESIdentityNotVerifiedError(SESError):
"""
Raised when an identity (domain or address) has not been verified in SES yet.
"""
pass
class SESDomainNotConfirmedError(SESError):
"""
"""
pass
class SESAddressBlacklistedError(SESError):
"""
After you attempt to send mail to an address, and delivery repeatedly
fails, said address is blacklisted for at least 24 hours. The blacklisting
eventually expires, and you are able to attempt delivery again. If you
attempt to send mail to a blacklisted email, this is raised.
"""
pass
class SESDailyQuotaExceededError(SESError):
"""
Your account's daily (rolling 24 hour total) allotment of outbound emails
has been exceeded.
"""
pass
class SESMaxSendingRateExceededError(SESError):
"""
Your account's requests/second limit has been exceeded.
"""
pass
class SESDomainEndsWithDotError(SESError):
"""
Recipient's email address' domain ends with a period/dot.
"""
pass
class SESLocalAddressCharacterError(SESError):
"""
An address contained a control or whitespace character.
"""
pass
class SESIllegalAddressError(SESError):
"""
Raised when an illegal address is encountered.
"""
pass
| mit |
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/production_ml/labs/samples/contrib/azure-samples/databricks-pipelines/databricks_notebook_pipeline.py | 3 | 5367 | """Import a notebook into a Databricks workspace and submit a job run to execute it in a cluster.
Notebook will accept some parameters and access a file in DBFS and some secrets in a secret scope.
"""
from pathlib import Path
import base64
import kfp.dsl as dsl
import kfp.compiler as compiler
import databricks
def create_dbfsblock(block_name):
return databricks.CreateDbfsBlockOp(
name="createdbfsblock",
block_name=block_name,
data="QWxlamFuZHJvIENhbXBvcyBNYWdlbmNpbw==",
path="/data/foo.txt"
)
def create_secretscope(scope_name):
return databricks.CreateSecretScopeOp(
name="createsecretscope",
scope_name=scope_name,
initial_manage_principal="users",
secrets=[
{
"key": "string-secret",
"string_value": "helloworld"
},
{
"key": "byte-secret",
"byte_value": "aGVsbG93b3JsZA=="
},
{
"key": "ref-secret",
"value_from": {
"secret_key_ref": {
"name": "mysecret",
"key": "username"
}
}
}
]
)
def import_workspace_item(item_name, user):
current_path = Path(__file__).parent
notebook_file_name = current_path.joinpath("notebooks", "ScalaExampleNotebook")
notebook = open(notebook_file_name).read().encode("utf-8")
notebook_base64 = base64.b64encode(notebook)
return databricks.ImportWorkspaceItemOp(
name="importworkspaceitem",
item_name=item_name,
content=notebook_base64,
path=f"/Users/{user}/ScalaExampleNotebook",
language="SCALA",
file_format="SOURCE"
)
def create_cluster(cluster_name):
return databricks.CreateClusterOp(
name="createcluster",
cluster_name=cluster_name,
spark_version="5.3.x-scala2.11",
node_type_id="Standard_D3_v2",
spark_conf={
"spark.speculation": "true"
},
num_workers=2
)
def create_job(job_name, cluster_id, user):
return databricks.CreateJobOp(
name="createjob",
job_name=job_name,
existing_cluster_id=cluster_id,
notebook_task={
"notebook_path": f"/Users/{user}/ScalaExampleNotebook"
}
)
def submit_run(run_name, job_name, parameter1, parameter2):
return databricks.SubmitRunOp(
name="submitrun",
run_name=run_name,
job_name=job_name,
notebook_params={
"param1": parameter1,
"param2": parameter2
}
)
def delete_run(run_name):
return databricks.DeleteRunOp(
name="deleterun",
run_name=run_name
)
def delete_job(job_name):
return databricks.DeleteJobOp(
name="deletejob",
job_name=job_name
)
def delete_cluster(cluster_name):
return databricks.DeleteClusterOp(
name="deletecluster",
cluster_name=cluster_name
)
def delete_workspace_item(item_name):
return databricks.DeleteWorkspaceItemOp(
name="deleteworkspaceitem",
item_name=item_name
)
def delete_secretscope(scope_name):
return databricks.DeleteSecretScopeOp(
name="deletesecretscope",
scope_name=scope_name
)
def delete_dbfsblock(block_name):
return databricks.DeleteDbfsBlockOp(
name="deletedbfsblock",
block_name=block_name
)
@dsl.pipeline(
name="Databrick",
description="A toy pipeline that runs a sample notebook in a Databricks cluster."
)
def calc_pipeline(
dbfsblock_name="test-block",
secretescope_name="test-scope",
workspaceitem_name="test-item",
cluster_name="test-cluster",
job_name="test-job",
run_name="test-run",
user="[email protected]",
parameter1="38",
parameter2="43"):
create_dbfsblock_task = create_dbfsblock(dbfsblock_name)
create_secretscope_task = create_secretscope(secretescope_name)
import_workspace_item_task = import_workspace_item(workspaceitem_name, user)
create_cluster_task = create_cluster(cluster_name)
create_job_task = create_job(job_name, create_cluster_task.outputs["cluster_id"], user)
submit_run_task = submit_run(run_name, job_name, parameter1, parameter2)
submit_run_task.after(create_dbfsblock_task)
submit_run_task.after(create_secretscope_task)
submit_run_task.after(import_workspace_item_task)
submit_run_task.after(create_job_task)
delete_run_task = delete_run(run_name)
delete_run_task.after(submit_run_task)
delete_job_task = delete_job(job_name)
delete_job_task.after(delete_run_task)
delete_cluster_task = delete_cluster(cluster_name)
delete_cluster_task.after(delete_job_task)
delete_workspace_item_task = delete_workspace_item(workspaceitem_name)
delete_workspace_item_task.after(submit_run_task)
delete_secretscope_task = delete_secretscope(secretescope_name)
delete_secretscope_task.after(submit_run_task)
delete_dbfsblock_task = delete_dbfsblock(dbfsblock_name)
delete_dbfsblock_task.after(submit_run_task)
if __name__ == "__main__":
compiler.Compiler()._create_and_write_workflow(
pipeline_func=calc_pipeline,
package_path=__file__ + ".tar.gz")
| apache-2.0 |
celiafish/VisTrails | vistrails/core/modules/abstraction.py | 2 | 8400 | ###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import os
import re
from itertools import chain
from vistrails.core import debug
from vistrails.core.configuration import get_vistrails_configuration
from vistrails.core.modules.vistrails_module import Module, ModuleError
from vistrails.core.modules.sub_module import read_vistrail, new_abstraction, \
get_abstraction_dependencies, save_abstraction
import vistrails.core.modules.module_registry
from vistrails.core.system import vistrails_version, get_vistrails_directory
from vistrails.core.utils import InvalidPipeline
name = 'My SubWorkflows'
version = '1.6'
identifier = 'local.abstractions'
my_vistrails = {}
def initialize(*args, **kwargs):
import vistrails.core.packagemanager
manager = vistrails.core.packagemanager.get_package_manager()
reg = vistrails.core.modules.module_registry.get_module_registry()
abs_vistrails = my_vistrails
last_count = len(my_vistrails) + 1
missing_depends = {}
cannot_load = {}
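# Keep registering abstractions until a fixed point is reached: each pass
# adds the ones whose dependencies are now satisfied, and the loop stops
# once a full pass adds nothing new.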
while len(abs_vistrails) > 0 and len(abs_vistrails) < last_count:
new_vistrails = {}
for (abs_name, abs_info) in abs_vistrails.iteritems():
(abs_vistrail, abs_fname, abs_depends) = abs_info
packages = get_abstraction_dependencies(abs_vistrail)
add_abstraction = True
for package, inter_depends in packages.iteritems():
if package != identifier:
if not manager.has_package(package):
add_abstraction = False
cannot_load[abs_name] = (abs_vistrail, "Missing package dependency: %s" % package)
break
else:
for descriptor_info in inter_depends:
if not reg.has_descriptor_with_name(*descriptor_info):
add_abstraction = False
new_vistrails[abs_name] = abs_info
missing_depends[abs_name] = "Missing module '%s:%s'"\
% (descriptor_info[0],
descriptor_info[1])
break
if add_abstraction:
abstraction = None
try:
abstraction = \
new_abstraction(abs_name, abs_vistrail, abs_fname)
except InvalidPipeline, e:
# handle_invalid_pipeline will raise it's own InvalidPipeline
# exception if it fails
try:
import vistrails.core.vistrail.controller
module_version = abs_vistrail.get_latest_version()
# Use a "dummy" controller to handle the upgrade
controller = vistrails.core.vistrail.controller.VistrailController(abs_vistrail)
(new_version, new_pipeline) = \
controller.handle_invalid_pipeline(e, long(module_version),
abs_vistrail, False, True)
del controller
save_abstraction(abs_vistrail, abs_fname)
abstraction = new_abstraction(abs_name, abs_vistrail, abs_fname,
new_version, new_pipeline)
except Exception, _e:
cannot_load[abs_name] = (abs_vistrail, _e)
except Exception, e:
cannot_load[abs_name] = (abs_vistrail, e)
if abstraction is not None:
options = {'namespace': abstraction.uuid,
'hide_namespace': True,
'version': str(abstraction.internal_version)}
reg.auto_add_module((abstraction, options))
reg.auto_add_ports(abstraction)
# print "Added subworkflow", abs_name, abstraction.uuid
elif abs_name not in cannot_load:
cannot_load[abs_name] = (abs_vistrail, '')
last_count = len(abs_vistrails)
abs_vistrails = new_vistrails
for abs_name, (_, e) in cannot_load.iteritems():
debug.critical("Cannot load subworkflow '%s'" % abs_name)
if e:
debug.critical("- %s" % e)
for abs_name in abs_vistrails:
if abs_name in missing_depends:
debug.critical("Cannot load subworkflow '%s'" % abs_name,
missing_depends[abs_name])
else:
debug.critical("Cannot load subworkflow '%s'" % abs_name)
def package_dependencies():
import vistrails.core.packagemanager
manager = vistrails.core.packagemanager.get_package_manager()
reg = vistrails.core.modules.module_registry.get_module_registry()
conf = get_vistrails_configuration()
abstraction_dir = get_vistrails_directory("subworkflowsDir")
if abstraction_dir is None:
debug.log("Subworkflows directory unset, cannot add any abstractions")
return []
p = re.compile(r".*\.xml")
all_packages = set()
for abstraction in os.listdir(abstraction_dir):
if p.match(abstraction):
abs_fname = os.path.join(abstraction_dir, abstraction)
vistrail = read_vistrail(abs_fname)
try:
dependencies = get_abstraction_dependencies(vistrail)
except vistrails.core.modules.module_registry.MissingPackage, e:
dependencies = {e._identifier: set()}
add_abstraction = True
inter_depends = []
for package, depends in dependencies.iteritems():
if package != identifier:
if not manager.has_package(package):
add_abstraction = False
break
else:
inter_depends.append(depends)
if add_abstraction:
# print 'adding', abstraction[:-4]
                all_packages.update(pkg for pkg in dependencies.iterkeys()
                                    if pkg != identifier)
my_vistrails[abstraction[:-4]] = \
(vistrail, abs_fname, inter_depends)
else:
debug.critical(("Subworkflow '%s' is missing packages it " +
"depends on") % abstraction)
# print 'all_packages:', all_packages
return list(all_packages)
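# Usage sketch (the identifier below is illustrative): the package manager can
# call package_dependencies() to learn which other packages the stored
# subworkflows need, e.g. ['org.vistrails.vistrails.matplotlib'], before
# loading this one.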
| bsd-3-clause |
shivarammysore/faucet | faucet/check_faucet_config.py | 7 | 2275 | #!/usr/bin/env python
"""Standalone script to check FAUCET configuration, return 0 if provided config OK."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pprint
import sys
from faucet import valve
from faucet.config_parser import dp_parser
from faucet.conf import InvalidConfigError
def check_config(conf_files, debug_level, check_output_file):
"""Return True and successful config dict, if all config can be parsed."""
logname = os.devnull
logger = logging.getLogger('%s.config' % logname)
logger_handler = logging.StreamHandler(stream=sys.stderr)
logger.addHandler(logger_handler)
logger.propagate = 0
logger.setLevel(debug_level)
check_output = []
if conf_files:
for conf_file in conf_files:
check_result = False
try:
_, _, dps, _ = dp_parser(conf_file, logname)
if dps is not None:
dps_conf = [(valve.valve_factory(dp), dp.to_conf()) for dp in dps]
check_output.extend([conf for _, conf in dps_conf])
check_result = True
                    continue
                # a config file that yields no datapaths is a failure; break so
                # a later file cannot mask it by setting check_result = True
                break
except InvalidConfigError as config_err:
check_output = [config_err]
break
else:
check_result = False
check_output = ['no files specified']
pprint.pprint(check_output, stream=check_output_file)
return check_result
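# Example invocation (the config path is illustrative, not shipped with faucet):
#
#     ok = check_config(['/etc/faucet/faucet.yaml'], logging.INFO, sys.stdout)
#
# pretty-prints each datapath's parsed config (or the parse error) to the given
# stream and returns True when the files parsed cleanly.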
def main():
"""Mainline."""
sys.exit(not check_config(sys.argv[1:], logging.DEBUG, sys.stdout))
if __name__ == '__main__':
main()
| apache-2.0 |
eos87/Booktype | lib/booktype/convert/assets.py | 7 | 2181 | # This file is part of Booktype.
# Copyright (c) 2013 Borko Jandras <[email protected]>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
import os
import requests
from django.template.defaultfilters import slugify
from booktype.utils import config
class AssetCollection(object):
def __init__(self, base_path):
self.base_path = base_path
self.files = {}
def add_files(self, files):
for (asset_id, file_path) in files.iteritems():
self.files[asset_id] = AssetFile(asset_id, file_path)
def add_urls(self, urls):
for (asset_id, url) in urls.iteritems():
file_path = os.path.join(self.base_path, slugify(asset_id))
download(url, file_path)
self.files[asset_id] = AssetFile(asset_id, file_path, original_url=url)
def get(self, asset_id):
return self.files.get(asset_id)
def __repr__(self):
return repr(self.files)
class AssetFile(object):
def __init__(self, asset_id, file_path, original_url=None):
self.asset_id = asset_id
self.file_path = file_path
self.file_url = "file://" + file_path
self.original_url = original_url
def __repr__(self):
return "<%s %s: %s>" % ("AssetFile", repr(self.asset_id), repr(self.file_path))
def download(src_url, dst_file):
req = requests.get(src_url, stream=True, verify=config.get_configuration('REQUESTS_VERIFY_SSL_CERT'))
if req.status_code == 200:
with open(dst_file, 'wb') as dst:
for chunk in req:
dst.write(chunk)
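# Minimal usage sketch (the asset id and URL below are hypothetical):
#
#     assets = AssetCollection('/tmp/assets')
#     assets.add_urls({'cover-image': 'https://example.com/cover.jpg'})
#     asset = assets.get('cover-image')
#     print asset.file_url  # file:///tmp/assets/cover-image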
| agpl-3.0 |
sajeeshcs/nested_quota_final | nova/ipv6/account_identifier.py | 97 | 1952 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""IPv6 address generation with account identifier embedded."""
import hashlib
import netaddr
from nova.i18n import _
def to_global(prefix, mac, project_id):
project_hash = netaddr.IPAddress(
int(hashlib.sha1(project_id).hexdigest()[:8], 16) << 32)
static_num = netaddr.IPAddress(0xff << 24)
try:
mac_suffix = netaddr.EUI(mac).words[3:]
int_addr = int(''.join(['%02x' % i for i in mac_suffix]), 16)
mac_addr = netaddr.IPAddress(int_addr)
maskIP = netaddr.IPNetwork(prefix).ip
return (project_hash ^ static_num ^ mac_addr | maskIP).format()
except netaddr.AddrFormatError:
raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
except TypeError:
raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix)
except NameError:
raise TypeError(_('Bad project_id for to_global_ipv6: %s') %
project_id)
def to_mac(ipv6_address):
address = netaddr.IPAddress(ipv6_address)
mask1 = netaddr.IPAddress('::ff:ffff')
mac = netaddr.EUI(int(address & mask1)).words
return ':'.join(['02', '16', '3e'] + ['%02x' % i for i in mac[3:6]])
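# Worked sketch (values are illustrative; the hash term depends on the sha1 of
# the project id):
#
#     addr = to_global('2001:db8::/64', '02:16:3e:aa:bb:cc', 'my-project')
#     # The low 24 bits of addr carry the MAC suffix (the project hash sits in
#     # bits 32-63 and the 0xff marker in bits 24-31), so the suffix round-trips:
#     to_mac(addr)  # -> '02:16:3e:aa:bb:cc'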
| apache-2.0 |
krkini16/Floascope | server.py | 1 | 1533 | from flask import Flask
import eventlet
eventlet.monkey_patch()
from flask import render_template
from sniffer import Sniffer
from flask_socketio import SocketIO, emit
import argparse
PORT = 8000
app = Flask(__name__)
app.config['SECRET_KEY'] = 'kmh_floascope'
socketio = SocketIO(app, async_mode="eventlet")
app.debug = True
thread = None
pcap_file = None  # set from the --pcap option in __main__
@app.route("/")
def sankey():
"""
The default route will serve sankey.html.
"""
return render_template("sankey.html")
@app.route("/ts")
def timeseries():
"""
The default route will serve sankey.html.
"""
return render_template("timeseries.html")
@app.route("/<path:path>")
def static_proxy(path):
"""
Serves static files (e.g. CSS, JS) from static/ directory.
"""
return app.send_static_file(path)
@socketio.on('connect', namespace='/')
def test_connect():
print("Got a connection")
global thread
global pcap_file
if thread is None:
thread = socketio.start_background_task(target=lambda: Sniffer(socketio, pcap_file=pcap_file).run())
@socketio.on('disconnect', namespace='/')
def test_disconnect():
print('Client disconnected')
@socketio.on('custom_message')
def handle_my_custom_event(json):
print('received json: ' + str(json))
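# Typical invocations (assuming this file is saved as server.py):
#   python server.py                   # sniff live traffic
#   python server.py --pcap trace.pcap # replay packets from a capture file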
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run Floascope.')
parser.add_argument('--pcap', help='Read data from .pcap file.')
args = parser.parse_args()
pcap_file = args.pcap
socketio.run(app, port=PORT)
| mit |
TwinkleChawla/nova | nova/api/openstack/compute/extended_status.py | 24 | 2633 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Status Admin API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
ALIAS = "os-extended-status"
authorize = extensions.os_compute_soft_authorizer(ALIAS)
class ExtendedStatusController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ExtendedStatusController, self).__init__(*args, **kwargs)
def _extend_server(self, server, instance):
# Note(gmann): Removed 'locked_by' from extended status
# to make it same as V2. If needed it can be added with
# microversion.
for state in ['task_state', 'vm_state', 'power_state']:
key = "%s:%s" % ('OS-EXT-STS', state)
server[key] = instance[state]
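        # e.g. after this loop the server dict gains keys such as
        # 'OS-EXT-STS:task_state', 'OS-EXT-STS:vm_state' and
        # 'OS-EXT-STS:power_state'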
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show' method.
self._extend_server(server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'detail' method.
self._extend_server(server, db_instance)
class ExtendedStatus(extensions.V21APIExtensionBase):
"""Extended Status support."""
name = "ExtendedStatus"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = ExtendedStatusController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
| apache-2.0 |
sysadminmatmoz/odoo-clearcorp | product_category_code/__init__.py | 3 | 1059 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_category_code
| agpl-3.0 |
alexanderturner/ansible | lib/ansible/modules/cloud/amazon/ec2.py | 10 | 62187 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ec2
short_description: create, terminate, start or stop an instance in ec2
description:
- Creates or terminates ec2 instances.
- C(state=restarted) was added in 2.2
version_added: "0.9"
options:
key_name:
description:
- key pair to use on the instance
required: false
default: null
aliases: ['keypair']
id:
version_added: "1.1"
description:
- identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
required: false
default: null
aliases: []
group:
description:
- security group (or list of groups) to use with the instance
required: false
default: null
aliases: [ 'groups' ]
group_id:
version_added: "1.1"
description:
- security group id (or list of ids) to use with the instance
required: false
default: null
aliases: []
region:
version_added: "1.2"
description:
- The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
zone:
version_added: "1.2"
description:
- AWS availability zone in which to launch the instance
required: false
default: null
aliases: [ 'aws_zone', 'ec2_zone' ]
instance_type:
description:
- instance type to use for the instance, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
required: true
default: null
aliases: []
tenancy:
version_added: "1.9"
description:
- An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
required: false
default: default
choices: [ "default", "dedicated" ]
aliases: []
spot_price:
version_added: "1.5"
description:
      - Maximum spot price to bid. If not set, a regular on-demand instance is requested. A spot request is made with this maximum bid; when it is fulfilled, the instance is started.
required: false
default: null
aliases: []
spot_type:
version_added: "2.0"
description:
- Type of spot request; one of "one-time" or "persistent". Defaults to "one-time" if not supplied.
required: false
default: "one-time"
choices: [ "one-time", "persistent" ]
aliases: []
image:
description:
- I(ami) ID to use for the instance
required: true
default: null
aliases: []
kernel:
description:
- kernel I(eki) to use for the instance
required: false
default: null
aliases: []
ramdisk:
description:
- ramdisk I(eri) to use for the instance
required: false
default: null
aliases: []
wait:
description:
- wait for the instance to reach its desired state before returning. Does not wait for SSH, see 'wait_for' example for details.
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
aliases: []
spot_wait_timeout:
version_added: "1.5"
description:
- how long to wait for the spot instance request to be fulfilled
default: 600
aliases: []
count:
description:
- number of instances to launch
required: False
default: 1
aliases: []
monitoring:
version_added: "1.1"
description:
- enable detailed monitoring (CloudWatch) for instance
required: false
default: null
choices: [ "yes", "no" ]
aliases: []
user_data:
version_added: "0.9"
description:
- opaque blob of data which is made available to the ec2 instance
required: false
default: null
aliases: []
instance_tags:
version_added: "1.0"
description:
- a hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: false
default: null
aliases: []
placement_group:
version_added: "1.3"
description:
- placement group for the instance when using EC2 Clustered Compute
required: false
default: null
aliases: []
vpc_subnet_id:
version_added: "1.1"
description:
- the subnet ID in which to launch the instance (VPC)
required: false
default: null
aliases: []
assign_public_ip:
version_added: "1.5"
description:
- when provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+
required: false
default: null
choices: [ "yes", "no" ]
aliases: []
private_ip:
version_added: "1.2"
description:
- the private ip address to assign the instance (from the vpc subnet)
required: false
default: null
aliases: []
instance_profile_name:
version_added: "1.3"
description:
- Name of the IAM instance profile to use. Boto library must be 2.5.0+
required: false
default: null
aliases: []
instance_ids:
version_added: "1.3"
description:
- "list of instance ids, currently used for states: absent, running, stopped"
required: false
default: null
aliases: ['instance_id']
source_dest_check:
version_added: "1.6"
description:
- Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers)
required: false
default: yes
choices: [ "yes", "no" ]
termination_protection:
version_added: "2.0"
description:
- Enable or Disable the Termination Protection
required: false
default: no
choices: [ "yes", "no" ]
instance_initiated_shutdown_behavior:
version_added: "2.2"
description:
- Set whether AWS will Stop or Terminate an instance on shutdown
required: false
default: 'stop'
choices: [ "stop", "terminate" ]
state:
version_added: "1.3"
description:
- create or terminate instances
required: false
default: 'present'
aliases: []
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
volumes:
version_added: "1.5"
description:
- a list of hash/dictionaries of volumes to add to the new instance; '[{"key":"value", "key":"value"}]'; keys allowed are - device_name (str; required), delete_on_termination (bool; False), device_type (deprecated), ephemeral (str), encrypted (bool; False), snapshot (str), volume_type (str), iops (int) - device_type is deprecated use volume_type, iops must be set when volume_type='io1', ephemeral and snapshot are mutually exclusive.
required: false
default: null
aliases: []
ebs_optimized:
version_added: "1.6"
description:
- whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
required: false
default: 'false'
exact_count:
version_added: "1.5"
description:
      - An integer value which indicates how many instances matching the 'count_tag' parameter should be running. Instances are either created or terminated based on this value.
required: false
default: null
aliases: []
count_tag:
version_added: "1.5"
description:
- Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running. This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers that are tagged with "class=webserver". The specified tag must already exist or be passed in as the 'instance_tags' option.
required: false
default: null
aliases: []
network_interfaces:
version_added: "2.0"
description:
- A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces, none of the assign_public_ip, private_ip, vpc_subnet_id, group, or group_id parameters may be used. (Those parameters are for creating a new network interface at launch.)
required: false
default: null
aliases: ['network_interface']
spot_launch_group:
version_added: "2.1"
description:
- Launch group for spot request, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group)
required: false
default: null
author:
- "Tim Gerla (@tgerla)"
- "Lester Wade (@lwade)"
- "Seth Vidal"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic provisioning example
- ec2:
key_name: mykey
instance_type: t2.micro
image: ami-123456
wait: yes
group: webserver
count: 3
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Advanced example with tagging and CloudWatch
- ec2:
key_name: mykey
group: databases
instance_type: t2.micro
image: ami-123456
wait: yes
wait_timeout: 500
count: 5
instance_tags:
db: postgres
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Single instance with additional IOPS volume from snapshot and volume delete on termination
- ec2:
key_name: mykey
group: webserver
instance_type: c3.medium
image: ami-123456
wait: yes
wait_timeout: 500
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
volume_type: io1
iops: 1000
volume_size: 100
delete_on_termination: true
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Single instance with ssd gp2 root volume
- ec2:
key_name: mykey
group: webserver
instance_type: c3.medium
image: ami-123456
wait: yes
wait_timeout: 500
volumes:
- device_name: /dev/xvda
volume_type: gp2
volume_size: 8
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
exact_count: 1
# Multiple groups example
- ec2:
key_name: mykey
group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
instance_tags:
db: postgres
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Multiple instances with additional volume from snapshot
- ec2:
key_name: mykey
group: webserver
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
volume_size: 10
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Dedicated tenancy example
- local_action:
module: ec2
assign_public_ip: yes
group_id: sg-1dc53f72
key_name: mykey
image: ami-6e649707
instance_type: m1.small
tenancy: dedicated
vpc_subnet_id: subnet-29e63245
wait: yes
# Spot instance example
- ec2:
spot_price: 0.24
spot_wait_timeout: 600
keypair: mykey
group_id: sg-1dc53f72
instance_type: m1.small
image: ami-6e649707
wait: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
spot_launch_group: report_generators
# Examples using pre-existing network interfaces
- ec2:
key_name: mykey
instance_type: t2.small
image: ami-f005ba11
network_interface: eni-deadbeef
- ec2:
key_name: mykey
instance_type: t2.small
image: ami-f005ba11
network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e']
# Launch instances, runs some tasks
# and then terminate them
- name: Create a sandbox instance
hosts: localhost
gather_facts: False
vars:
key_name: my_keypair
instance_type: m1.small
security_group: my_securitygroup
image: my_ami_id
region: us-east-1
tasks:
- name: Launch instance
ec2:
key_name: "{{ keypair }}"
group: "{{ security_group }}"
instance_type: "{{ instance_type }}"
image: "{{ image }}"
wait: true
region: "{{ region }}"
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
register: ec2
- name: Add new instance to host group
add_host:
hostname: "{{ item.public_ip }}"
groupname: launched
with_items: "{{ ec2.instances }}"
- name: Wait for SSH to come up
wait_for:
host: "{{ item.public_dns_name }}"
port: 22
delay: 60
timeout: 320
state: started
with_items: "{{ ec2.instances }}"
- name: Configure instance(s)
hosts: launched
become: True
gather_facts: True
roles:
- my_awesome_role
- my_awesome_test
- name: Terminate instances
hosts: localhost
connection: local
tasks:
- name: Terminate instances that were previously launched
ec2:
state: 'absent'
instance_ids: '{{ ec2.instance_ids }}'
# Start a few existing instances, run some tasks
# and stop the instances
- name: Start sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Start the sandbox instances
ec2:
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: running
wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
roles:
- do_neat_stuff
- do_more_neat_stuff
- name: Stop sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Stop the sandbox instances
ec2:
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: stopped
wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# Start stopped instances specified by tag
#
- local_action:
module: ec2
instance_tags:
Name: ExtraPower
state: running
#
# Restart instances specified by tag
#
- local_action:
module: ec2
instance_tags:
Name: ExtraPower
state: restarted
#
# Enforce that 5 instances with a tag "foo" are running
# (Highly recommended!)
#
- ec2:
key_name: mykey
instance_type: c1.medium
image: ami-40603AD1
wait: yes
group: webserver
instance_tags:
foo: bar
exact_count: 5
count_tag: foo
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# Enforce that 5 running instances named "database" with a "dbtype" of "postgres"
#
- ec2:
key_name: mykey
instance_type: c1.medium
image: ami-40603AD1
wait: yes
group: webserver
instance_tags:
Name: database
dbtype: postgres
exact_count: 5
count_tag:
Name: database
dbtype: postgres
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# count_tag complex argument examples
#
# instances with tag foo
count_tag:
foo:
# instances with tag foo=bar
count_tag:
foo: bar
# instances with tags foo=bar & baz
count_tag:
foo: bar
baz:
# instances with tags foo & bar & baz=bang
count_tag:
- foo
- bar
- baz: bang
'''
import time
import traceback
from ast import literal_eval
from ansible.module_utils.six import iteritems
from ansible.module_utils.six import get_function_code
from distutils.version import LooseVersion
try:
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from boto.exception import EC2ResponseError
from boto.vpc import VPCConnection
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def find_running_instances_by_count_tag(module, ec2, count_tag, zone=None):
# get reservations for instances that match tag(s) and are running
reservations = get_reservations(module, ec2, tags=count_tag, state="running", zone=zone)
instances = []
for res in reservations:
if hasattr(res, 'instances'):
for inst in res.instances:
instances.append(inst)
return reservations, instances
def _set_none_to_blank(dictionary):
result = dictionary
for k in result:
if isinstance(result[k], dict):
result[k] = _set_none_to_blank(result[k])
elif not result[k]:
result[k] = ""
return result
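# e.g. _set_none_to_blank({'foo': None, 'bar': {'baz': None}})
# returns {'foo': '', 'bar': {'baz': ''}}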
def get_reservations(module, ec2, tags=None, state=None, zone=None):
# TODO: filters do not work with tags that have underscores
filters = dict()
if tags is not None:
if isinstance(tags, str):
try:
tags = literal_eval(tags)
except:
pass
# if string, we only care that a tag of that name exists
if isinstance(tags, str):
filters.update({"tag-key": tags})
# if list, append each item to filters
if isinstance(tags, list):
for x in tags:
if isinstance(x, dict):
x = _set_none_to_blank(x)
filters.update(dict(("tag:"+tn, tv) for (tn,tv) in iteritems(x)))
else:
filters.update({"tag-key": x})
# if dict, add the key and value to the filter
if isinstance(tags, dict):
tags = _set_none_to_blank(tags)
filters.update(dict(("tag:"+tn, tv) for (tn,tv) in iteritems(tags)))
if state:
# http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
filters.update({'instance-state-name': state})
if zone:
filters.update({'availability-zone': zone})
results = ec2.get_all_instances(filters=filters)
return results
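# For illustration, the tag forms accepted above map to EC2 filters as follows
# (the tag names are hypothetical):
#   tags="env"                        -> {"tag-key": "env"}
#   tags=["env", {"role": "web"}]     -> {"tag-key": "env", "tag:role": "web"}
#   tags={"env": "prod", "tmp": None} -> {"tag:env": "prod", "tag:tmp": ""}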
def get_instance_info(inst):
"""
Retrieves instance information from an instance
ID and returns it as a dictionary
"""
instance_info = {'id': inst.id,
'ami_launch_index': inst.ami_launch_index,
'private_ip': inst.private_ip_address,
'private_dns_name': inst.private_dns_name,
'public_ip': inst.ip_address,
'dns_name': inst.dns_name,
'public_dns_name': inst.public_dns_name,
'state_code': inst.state_code,
'architecture': inst.architecture,
'image_id': inst.image_id,
'key_name': inst.key_name,
'placement': inst.placement,
'region': inst.placement[:-1],
'kernel': inst.kernel,
'ramdisk': inst.ramdisk,
'launch_time': inst.launch_time,
'instance_type': inst.instance_type,
'root_device_type': inst.root_device_type,
'root_device_name': inst.root_device_name,
'state': inst.state,
'hypervisor': inst.hypervisor,
'tags': inst.tags,
'groups': dict((group.id, group.name) for group in inst.groups),
}
try:
instance_info['virtualization_type'] = getattr(inst,'virtualization_type')
except AttributeError:
instance_info['virtualization_type'] = None
try:
instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized')
except AttributeError:
instance_info['ebs_optimized'] = False
try:
bdm_dict = {}
bdm = getattr(inst, 'block_device_mapping')
for device_name in bdm.keys():
bdm_dict[device_name] = {
'status': bdm[device_name].status,
'volume_id': bdm[device_name].volume_id,
'delete_on_termination': bdm[device_name].delete_on_termination
}
instance_info['block_device_mapping'] = bdm_dict
except AttributeError:
instance_info['block_device_mapping'] = False
try:
instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
except AttributeError:
instance_info['tenancy'] = 'default'
return instance_info
def boto_supports_associate_public_ip_address(ec2):
"""
Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification
class. Added in Boto 2.13.0
ec2: authenticated ec2 connection object
Returns:
True if Boto library accepts associate_public_ip_address argument, else false
"""
try:
network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification()
getattr(network_interface, "associate_public_ip_address")
return True
except AttributeError:
return False
def boto_supports_profile_name_arg(ec2):
"""
    Check if Boto library has instance_profile_name argument. instance_profile_name was added in Boto 2.5.0
ec2: authenticated ec2 connection object
Returns:
True if Boto library accept instance_profile_name argument, else false
"""
run_instances_method = getattr(ec2, 'run_instances')
return 'instance_profile_name' in get_function_code(run_instances_method).co_varnames
def boto_supports_volume_encryption():
"""
Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
Returns:
        True if the installed boto version is at least 2.29.0 (which added EBS volume encryption), else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def create_block_device(module, ec2, volume):
    # Not aware of a way to determine this programmatically
# http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
MAX_IOPS_TO_SIZE_RATIO = 30
# device_type has been used historically to represent volume_type,
# however ec2_vol uses volume_type, as does the BlockDeviceType, so
# we add handling for either/or but not both
if all(key in volume for key in ['device_type','volume_type']):
module.fail_json(msg = 'device_type is a deprecated name for volume_type. Do not use both device_type and volume_type')
# get whichever one is set, or NoneType if neither are set
volume_type = volume.get('device_type') or volume.get('volume_type')
if 'snapshot' not in volume and 'ephemeral' not in volume:
if 'volume_size' not in volume:
module.fail_json(msg = 'Size must be specified when creating a new volume or modifying the root volume')
if 'snapshot' in volume:
if volume_type == 'io1' and 'iops' not in volume:
module.fail_json(msg = 'io1 volumes must have an iops value set')
if 'iops' in volume:
snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
size = volume.get('volume_size', snapshot.volume_size)
if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
module.fail_json(msg = 'IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
if 'encrypted' in volume:
module.fail_json(msg = 'You can not set encryption when creating a volume from a snapshot')
if 'ephemeral' in volume:
if 'snapshot' in volume:
module.fail_json(msg = 'Cannot set both ephemeral and snapshot')
if boto_supports_volume_encryption():
return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'),
size=volume.get('volume_size'),
volume_type=volume_type,
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'),
encrypted=volume.get('encrypted', None))
else:
return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'),
size=volume.get('volume_size'),
volume_type=volume_type,
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'))
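# For example, an illustrative volume spec such as
#   {'device_name': '/dev/sdb', 'volume_type': 'io1', 'volume_size': 100,
#    'iops': 1000, 'delete_on_termination': True}
# yields BlockDeviceType(size=100, volume_type='io1', iops=1000,
# delete_on_termination=True), ready to be placed in a BlockDeviceMapping
# keyed by its device_name.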
def boto_supports_param_in_spot_request(ec2, param):
"""
Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
ec2: authenticated ec2 connection object
Returns:
True if boto library has the named param as an argument on the request_spot_instances method, else False
"""
method = getattr(ec2, 'request_spot_instances')
return param in get_function_code(method).co_varnames
def await_spot_requests(module, ec2, spot_requests, count):
"""
Wait for a group of spot requests to be fulfilled, or fail.
module: Ansible module object
ec2: authenticated ec2 connection object
spot_requests: boto.ec2.spotinstancerequest.SpotInstanceRequest object returned by ec2.request_spot_instances
count: Total number of instances to be created by the spot requests
Returns:
list of instance ID's created by the spot request(s)
"""
spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
wait_complete = time.time() + spot_wait_timeout
spot_req_inst_ids = dict()
while time.time() < wait_complete:
reqs = ec2.get_all_spot_instance_requests()
for sirb in spot_requests:
if sirb.id in spot_req_inst_ids:
continue
for sir in reqs:
if sir.id != sirb.id:
continue # this is not our spot instance
if sir.instance_id is not None:
spot_req_inst_ids[sirb.id] = sir.instance_id
elif sir.state == 'open':
continue # still waiting, nothing to do here
elif sir.state == 'active':
continue # Instance is created already, nothing to do here
elif sir.state == 'failed':
module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
sir.id, sir.status.code, sir.fault.code, sir.fault.message))
elif sir.state == 'cancelled':
module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id)
elif sir.state == 'closed':
                        # The instance is terminating or marked for termination.
                        # This may be intentional on the part of the operator,
                        # or it may have been terminated by AWS due to capacity,
                        # price, or group constraints. In this case, we'll fail
                        # the module if the reason for the state is anything
                        # other than termination by user. Codes are documented at
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html
if sir.status.code == 'instance-terminated-by-user':
# do nothing, since the user likely did this on purpose
pass
else:
spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s"
module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message))
if len(spot_req_inst_ids) < count:
time.sleep(5)
else:
return spot_req_inst_ids.values()
module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime())
def enforce_count(module, ec2, vpc):
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
zone = module.params.get('zone')
# fail here if the exact count was specified without filtering
    # on a tag, as this may lead to an undesired removal of instances
if exact_count and count_tag is None:
module.fail_json(msg="you must use the 'count_tag' option with exact_count")
reservations, instances = find_running_instances_by_count_tag(module, ec2, count_tag, zone)
changed = None
checkmode = False
instance_dict_array = []
changed_instance_ids = None
if len(instances) == exact_count:
changed = False
elif len(instances) < exact_count:
changed = True
to_create = exact_count - len(instances)
if not checkmode:
(instance_dict_array, changed_instance_ids, changed) \
= create_instances(module, ec2, vpc, override_count=to_create)
for inst in instance_dict_array:
instances.append(inst)
elif len(instances) > exact_count:
changed = True
to_remove = len(instances) - exact_count
if not checkmode:
all_instance_ids = sorted([ x.id for x in instances ])
remove_ids = all_instance_ids[0:to_remove]
instances = [ x for x in instances if x.id not in remove_ids]
(changed, instance_dict_array, changed_instance_ids) \
= terminate_instances(module, ec2, remove_ids)
terminated_list = []
for inst in instance_dict_array:
inst['state'] = "terminated"
terminated_list.append(inst)
instance_dict_array = terminated_list
# ensure all instances are dictionaries
all_instances = []
for inst in instances:
if not isinstance(inst, dict):
inst = get_instance_info(inst)
all_instances.append(inst)
return (all_instances, instance_dict_array, changed_instance_ids, changed)
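# For example, with count_tag={'foo': 'bar'} and exact_count=5: if three
# matching instances are running, two more are launched; if seven are running,
# the two with the lexicographically smallest instance ids are terminated.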
def create_instances(module, ec2, vpc, override_count=None):
"""
Creates new instances
module : AnsibleModule object
ec2: authenticated ec2 connection object
Returns:
A list of dictionaries with instance information
about the instances that were launched
"""
key_name = module.params.get('key_name')
id = module.params.get('id')
group_name = module.params.get('group')
group_id = module.params.get('group_id')
zone = module.params.get('zone')
instance_type = module.params.get('instance_type')
tenancy = module.params.get('tenancy')
spot_price = module.params.get('spot_price')
spot_type = module.params.get('spot_type')
image = module.params.get('image')
if override_count:
count = override_count
else:
count = module.params.get('count')
monitoring = module.params.get('monitoring')
kernel = module.params.get('kernel')
ramdisk = module.params.get('ramdisk')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
placement_group = module.params.get('placement_group')
user_data = module.params.get('user_data')
instance_tags = module.params.get('instance_tags')
vpc_subnet_id = module.params.get('vpc_subnet_id')
assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
private_ip = module.params.get('private_ip')
instance_profile_name = module.params.get('instance_profile_name')
volumes = module.params.get('volumes')
ebs_optimized = module.params.get('ebs_optimized')
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
source_dest_check = module.boolean(module.params.get('source_dest_check'))
termination_protection = module.boolean(module.params.get('termination_protection'))
network_interfaces = module.params.get('network_interfaces')
spot_launch_group = module.params.get('spot_launch_group')
instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior')
# group_id and group_name are exclusive of each other
if group_id and group_name:
module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)"))
vpc_id = None
if vpc_subnet_id:
if not vpc:
module.fail_json(msg="region must be specified")
else:
vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
else:
vpc_id = None
try:
# Here we try to lookup the group id from the security group name - if group is set.
if group_name:
if vpc_id:
grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
else:
grp_details = ec2.get_all_security_groups()
if isinstance(group_name, basestring):
group_name = [group_name]
unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
if len(unmatched) > 0:
module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
# Now we try to lookup the group id testing if group exists.
elif group_id:
#wrap the group_id in a list if it's not one already
if isinstance(group_id, basestring):
group_id = [group_id]
grp_details = ec2.get_all_security_groups(group_ids=group_id)
group_name = [grp_item.name for grp_item in grp_details]
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
    # Look up any instances that match our run id.
running_instances = []
count_remaining = int(count)
if id is not None:
filter_dict = {'client-token':id, 'instance-state-name' : 'running'}
previous_reservations = ec2.get_all_instances(None, filter_dict)
for res in previous_reservations:
for prev_instance in res.instances:
running_instances.append(prev_instance)
count_remaining = count_remaining - len(running_instances)
    # Both min_count and max_count equal the count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want.
if count_remaining == 0:
changed = False
else:
changed = True
try:
params = {'image_id': image,
'key_name': key_name,
'monitoring_enabled': monitoring,
'placement': zone,
'instance_type': instance_type,
'kernel_id': kernel,
'ramdisk_id': ramdisk,
'user_data': user_data}
if ebs_optimized:
params['ebs_optimized'] = ebs_optimized
# 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
if not spot_price:
params['tenancy'] = tenancy
if boto_supports_profile_name_arg(ec2):
params['instance_profile_name'] = instance_profile_name
else:
if instance_profile_name is not None:
module.fail_json(
msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
if assign_public_ip:
if not boto_supports_associate_public_ip_address(ec2):
module.fail_json(
msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.")
elif not vpc_subnet_id:
module.fail_json(
msg="assign_public_ip only available with vpc_subnet_id")
else:
if private_ip:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=vpc_subnet_id,
private_ip_address=private_ip,
groups=group_id,
associate_public_ip_address=assign_public_ip)
else:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=vpc_subnet_id,
groups=group_id,
associate_public_ip_address=assign_public_ip)
interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
params['network_interfaces'] = interfaces
else:
if network_interfaces:
if isinstance(network_interfaces, basestring):
network_interfaces = [network_interfaces]
interfaces = []
for i, network_interface_id in enumerate(network_interfaces):
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
network_interface_id=network_interface_id,
device_index=i)
interfaces.append(interface)
params['network_interfaces'] = \
boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces)
else:
params['subnet_id'] = vpc_subnet_id
if vpc_subnet_id:
params['security_group_ids'] = group_id
else:
params['security_groups'] = group_name
if volumes:
bdm = BlockDeviceMapping()
for volume in volumes:
if 'device_name' not in volume:
module.fail_json(msg = 'Device name must be set for volume')
# Minimum volume size is 1GB. We'll use volume size explicitly set to 0
# to be a signal not to create this volume
if 'volume_size' not in volume or int(volume['volume_size']) > 0:
bdm[volume['device_name']] = create_block_device(module, ec2, volume)
params['block_device_map'] = bdm
# check to see if we're using spot pricing first before starting instances
if not spot_price:
if assign_public_ip and private_ip:
params.update(dict(
min_count = count_remaining,
max_count = count_remaining,
client_token = id,
placement_group = placement_group,
))
else:
params.update(dict(
min_count = count_remaining,
max_count = count_remaining,
client_token = id,
placement_group = placement_group,
private_ip_address = private_ip,
))
# For ordinary (not spot) instances, we can select 'stop'
# (the default) or 'terminate' here.
params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'
res = ec2.run_instances(**params)
instids = [ i.id for i in res.instances ]
while True:
try:
ec2.get_all_instances(instids)
break
except boto.exception.EC2ResponseError as e:
if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
# there's a race between start and get an instance
continue
else:
module.fail_json(msg = str(e))
# The instances returned through ec2.run_instances above can be in
# terminated state due to idempotency. See commit 7f11c3d for a complete
# explanation.
terminated_instances = [
str(instance.id) for instance in res.instances if instance.state == 'terminated'
]
if terminated_instances:
module.fail_json(msg = "Instances with id(s) %s " % terminated_instances +
"were created previously but have since been terminated - " +
"use a (possibly different) 'instanceid' parameter")
else:
if private_ip:
module.fail_json(
msg='private_ip only available with on-demand (non-spot) instances')
if boto_supports_param_in_spot_request(ec2, 'placement_group'):
params['placement_group'] = placement_group
            elif placement_group:
module.fail_json(
msg="placement_group parameter requires Boto version 2.3.0 or higher.")
# You can't tell spot instances to 'stop'; they will always be
# 'terminate'd. For convenience, we'll ignore the latter value.
if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate':
module.fail_json(
msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.")
if spot_launch_group and isinstance(spot_launch_group, basestring):
params['launch_group'] = spot_launch_group
params.update(dict(
count = count_remaining,
type = spot_type,
))
res = ec2.request_spot_instances(spot_price, **params)
# Now we have to do the intermediate waiting
if wait:
instids = await_spot_requests(module, ec2, res, count)
except boto.exception.BotoServerError as e:
module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message))
# wait here until the instances are up
num_running = 0
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and num_running < len(instids):
try:
res_list = ec2.get_all_instances(instids)
except boto.exception.BotoServerError as e:
if e.error_code == 'InvalidInstanceID.NotFound':
time.sleep(1)
continue
else:
raise
num_running = 0
for res in res_list:
num_running += len([ i for i in res.instances if i.state=='running' ])
if len(res_list) <= 0:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
continue
if wait and num_running < len(instids):
time.sleep(5)
else:
break
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())
    # We do this after the loop ends so that we end up with one list
for res in res_list:
running_instances.extend(res.instances)
# Enabled by default by AWS
if source_dest_check is False:
for inst in res.instances:
inst.modify_attribute('sourceDestCheck', False)
# Disabled by default by AWS
if termination_protection is True:
for inst in res.instances:
inst.modify_attribute('disableApiTermination', True)
# Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
if instance_tags:
try:
ec2.create_tags(instids, instance_tags)
except boto.exception.EC2ResponseError as e:
module.fail_json(msg = "Instance tagging failed => %s: %s" % (e.error_code, e.error_message))
instance_dict_array = []
created_instance_ids = []
for inst in running_instances:
inst.update()
d = get_instance_info(inst)
created_instance_ids.append(inst.id)
instance_dict_array.append(d)
return (instance_dict_array, created_instance_ids, changed)
def terminate_instances(module, ec2, instance_ids):
"""
Terminates a list of instances
module: Ansible module object
ec2: authenticated ec2 connection object
termination_list: a list of instances to terminate in the form of
[ {id: <inst-id>}, ..]
Returns a dictionary of instance information
about the instances terminated.
If the instance to be terminated is running
"changed" will be set to False.
"""
# Whether to wait for termination to complete before returning
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
terminated_instance_ids = []
for res in ec2.get_all_instances(instance_ids):
for inst in res.instances:
if inst.state == 'running' or inst.state == 'stopped':
terminated_instance_ids.append(inst.id)
instance_dict_array.append(get_instance_info(inst))
try:
ec2.terminate_instances([inst.id])
except EC2ResponseError as e:
module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
changed = True
# wait here until the instances are 'terminated'
if wait:
num_terminated = 0
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
response = ec2.get_all_instances( \
instance_ids=terminated_instance_ids, \
filters={'instance-state-name':'terminated'})
try:
num_terminated = sum([len(res.instances) for res in response])
except Exception as e:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
continue
if num_terminated < len(terminated_instance_ids):
time.sleep(5)
# waiting took too long
if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
module.fail_json(msg = "wait for instance termination timeout on %s" % time.asctime())
#Lets get the current state of the instances after terminating - issue600
instance_dict_array = []
for res in ec2.get_all_instances(instance_ids=terminated_instance_ids,\
filters={'instance-state-name':'terminated'}):
for inst in res.instances:
instance_dict_array.append(get_instance_info(inst))
return (changed, instance_dict_array, terminated_instance_ids)
def startstop_instances(module, ec2, instance_ids, state, instance_tags):
"""
Starts or stops a list of existing instances
module: Ansible module object
ec2: authenticated ec2 connection object
instance_ids: The list of instances to start in the form of
[ {id: <inst-id>}, ..]
instance_tags: A dict of tag keys and values in the form of
{key: value, ... }
state: Intended state ("running" or "stopped")
Returns a dictionary of instance information
about the instances started/stopped.
If the instance was not able to change state,
"changed" will be set to False.
Note that if instance_ids and instance_tags are both non-empty,
this method will process the intersection of the two
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
source_dest_check = module.params.get('source_dest_check')
termination_protection = module.params.get('termination_protection')
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
# Fail unless the user defined instance tags
if not instance_tags:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
# To make an EC2 tag filter, we need to prepend 'tag:' to each key.
# An empty filter does no filtering, so it's safe to pass it to the
# get_all_instances method even if the user did not specify instance_tags
filters = {}
if instance_tags:
for key, value in instance_tags.items():
filters["tag:" + key] = value
    # Check which of our instances are not already in the desired state
    # Check (and eventually change) instance attributes and instance state
existing_instances_array = []
for res in ec2.get_all_instances(instance_ids, filters=filters):
for inst in res.instances:
# Check "source_dest_check" attribute
try:
if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
inst.modify_attribute('sourceDestCheck', source_dest_check)
changed = True
except boto.exception.EC2ResponseError as exc:
# instances with more than one Elastic Network Interface will
# fail, because they have the sourceDestCheck attribute defined
# per-interface
if exc.code == 'InvalidInstanceID':
for interface in inst.interfaces:
if interface.source_dest_check != source_dest_check:
ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
changed = True
else:
module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
                                      exception=traceback.format_exc())
# Check "termination_protection" attribute
if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection
and termination_protection is not None):
inst.modify_attribute('disableApiTermination', termination_protection)
changed = True
# Check instance state
if inst.state != state:
instance_dict_array.append(get_instance_info(inst))
try:
if state == 'running':
inst.start()
else:
inst.stop()
except EC2ResponseError as e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
changed = True
existing_instances_array.append(inst.id)
instance_ids = list(set(existing_instances_array + (instance_ids or [])))
## Wait for all the instances to finish starting or stopping
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time():
instance_dict_array = []
matched_instances = []
for res in ec2.get_all_instances(instance_ids):
for i in res.instances:
if i.state == state:
instance_dict_array.append(get_instance_info(i))
matched_instances.append(i)
if len(matched_instances) < len(instance_ids):
time.sleep(5)
else:
break
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())
return (changed, instance_dict_array, instance_ids)
def restart_instances(module, ec2, instance_ids, state, instance_tags):
"""
Restarts a list of existing instances
module: Ansible module object
ec2: authenticated ec2 connection object
instance_ids: The list of instances to start in the form of
[ {id: <inst-id>}, ..]
instance_tags: A dict of tag keys and values in the form of
{key: value, ... }
state: Intended state ("restarted")
Returns a dictionary of instance information
about the instances.
If the instance was not able to change state,
"changed" will be set to False.
    Wait will not apply here as this is an OS-level operation.
Note that if instance_ids and instance_tags are both non-empty,
this method will process the intersection of the two.
"""
source_dest_check = module.params.get('source_dest_check')
termination_protection = module.params.get('termination_protection')
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
# Fail unless the user defined instance tags
if not instance_tags:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
# To make an EC2 tag filter, we need to prepend 'tag:' to each key.
# An empty filter does no filtering, so it's safe to pass it to the
# get_all_instances method even if the user did not specify instance_tags
filters = {}
if instance_tags:
for key, value in instance_tags.items():
filters["tag:" + key] = value
    # Check which of our instances are not already in the desired state
    # Check (and eventually change) instance attributes and instance state
for res in ec2.get_all_instances(instance_ids, filters=filters):
for inst in res.instances:
# Check "source_dest_check" attribute
try:
if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
inst.modify_attribute('sourceDestCheck', source_dest_check)
changed = True
except boto.exception.EC2ResponseError as exc:
# instances with more than one Elastic Network Interface will
# fail, because they have the sourceDestCheck attribute defined
# per-interface
if exc.code == 'InvalidInstanceID':
for interface in inst.interfaces:
if interface.source_dest_check != source_dest_check:
ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
changed = True
else:
module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
                                      exception=traceback.format_exc())
# Check "termination_protection" attribute
if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection
and termination_protection is not None):
inst.modify_attribute('disableApiTermination', termination_protection)
changed = True
# Check instance state
if inst.state != state:
instance_dict_array.append(get_instance_info(inst))
try:
inst.reboot()
except EC2ResponseError as e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
changed = True
return (changed, instance_dict_array, instance_ids)
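# A minimal usage sketch of the helper above (the module, connection and
# instance id below are hypothetical placeholders, not values defined here):
#
#     (changed, info, ids) = restart_instances(module, ec2, ['i-0abc123'],
#                                              'restarted', None)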
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
key_name = dict(aliases = ['keypair']),
id = dict(),
group = dict(type='list', aliases=['groups']),
group_id = dict(type='list'),
zone = dict(aliases=['aws_zone', 'ec2_zone']),
instance_type = dict(aliases=['type']),
spot_price = dict(),
spot_type = dict(default='one-time', choices=["one-time", "persistent"]),
spot_launch_group = dict(),
image = dict(),
kernel = dict(),
count = dict(type='int', default='1'),
monitoring = dict(type='bool', default=False),
ramdisk = dict(),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
spot_wait_timeout = dict(default=600),
placement_group = dict(),
user_data = dict(),
instance_tags = dict(type='dict'),
vpc_subnet_id = dict(),
assign_public_ip = dict(type='bool', default=False),
private_ip = dict(),
instance_profile_name = dict(),
instance_ids = dict(type='list', aliases=['instance_id']),
source_dest_check = dict(type='bool', default=True),
termination_protection = dict(type='bool', default=None),
state = dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
instance_initiated_shutdown_behavior=dict(default=None, choices=['stop', 'terminate']),
exact_count = dict(type='int', default=None),
count_tag = dict(),
volumes = dict(type='list'),
ebs_optimized = dict(type='bool', default=False),
tenancy = dict(default='default'),
network_interfaces = dict(type='list', aliases=['network_interface'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [
['exact_count', 'count'],
['exact_count', 'state'],
['exact_count', 'instance_ids'],
['network_interfaces', 'assign_public_ip'],
['network_interfaces', 'group'],
['network_interfaces', 'group_id'],
['network_interfaces', 'private_ip'],
['network_interfaces', 'vpc_subnet_id'],
],
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
ec2 = ec2_connect(module)
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if region:
try:
vpc = connect_to_aws(boto.vpc, region, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
else:
vpc = None
tagged_instances = []
state = module.params['state']
if state == 'absent':
instance_ids = module.params['instance_ids']
if not instance_ids:
module.fail_json(msg='instance_ids list is required for absent state')
(changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
elif state in ('running', 'stopped'):
instance_ids = module.params.get('instance_ids')
instance_tags = module.params.get('instance_tags')
if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
            module.fail_json(msg='instance_ids must be a list of instance ids or instance_tags a dict of tags, got: %s' % instance_ids)
(changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)
    elif state == 'restarted':
instance_ids = module.params.get('instance_ids')
instance_tags = module.params.get('instance_tags')
if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
            module.fail_json(msg='instance_ids must be a list of instance ids or instance_tags a dict of tags, got: %s' % instance_ids)
(changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags)
elif state == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
if module.params.get('exact_count') is None:
(instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
else:
(tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
nitin-cherian/LifeLongLearning | Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/IPython/lib/inputhook.py | 12 | 23594 | # coding: utf-8
"""
Deprecated since IPython 5.0
Inputhook management for GUI event loop integration.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
try:
import ctypes
except ImportError:
ctypes = None
except SystemError: # IronPython issue, 2/8/2014
ctypes = None
import os
import platform
import sys
from distutils.version import LooseVersion as V
from warnings import warn
warn("`IPython.lib.inputhook` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Constants for identifying the GUI toolkits.
GUI_WX = 'wx'
GUI_QT = 'qt'
GUI_QT4 = 'qt4'
GUI_GTK = 'gtk'
GUI_TK = 'tk'
GUI_OSX = 'osx'
GUI_GLUT = 'glut'
GUI_PYGLET = 'pyglet'
GUI_GTK3 = 'gtk3'
GUI_NONE = 'none' # i.e. disable
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def _stdin_ready_posix():
"""Return True if there's something to read on stdin (posix version)."""
infds, outfds, erfds = select.select([sys.stdin],[],[],0)
return bool(infds)
def _stdin_ready_nt():
"""Return True if there's something to read on stdin (nt version)."""
return msvcrt.kbhit()
def _stdin_ready_other():
"""Return True, assuming there's something to read on stdin."""
return True
def _use_appnope():
"""Should we use appnope for dealing with OS X app nap?
Checks if we are on OS X 10.9 or greater.
"""
return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9')
def _ignore_CTRL_C_posix():
"""Ignore CTRL+C (SIGINT)."""
signal.signal(signal.SIGINT, signal.SIG_IGN)
def _allow_CTRL_C_posix():
"""Take CTRL+C into account (SIGINT)."""
signal.signal(signal.SIGINT, signal.default_int_handler)
def _ignore_CTRL_C_other():
"""Ignore CTRL+C (not implemented)."""
pass
def _allow_CTRL_C_other():
"""Take CTRL+C into account (not implemented)."""
pass
if os.name == 'posix':
import select
import signal
stdin_ready = _stdin_ready_posix
ignore_CTRL_C = _ignore_CTRL_C_posix
allow_CTRL_C = _allow_CTRL_C_posix
elif os.name == 'nt':
import msvcrt
stdin_ready = _stdin_ready_nt
ignore_CTRL_C = _ignore_CTRL_C_other
allow_CTRL_C = _allow_CTRL_C_other
else:
stdin_ready = _stdin_ready_other
ignore_CTRL_C = _ignore_CTRL_C_other
allow_CTRL_C = _allow_CTRL_C_other
#-----------------------------------------------------------------------------
# Main InputHookManager class
#-----------------------------------------------------------------------------
class InputHookManager(object):
"""DEPRECATED since IPython 5.0
Manage PyOS_InputHook for different GUI toolkits.
    This class installs various hooks under ``PyOS_InputHook`` to handle
GUI event loop integration.
"""
def __init__(self):
if ctypes is None:
warn("IPython GUI event loop requires ctypes, %gui will not be available")
else:
self.PYFUNC = ctypes.PYFUNCTYPE(ctypes.c_int)
self.guihooks = {}
self.aliases = {}
self.apps = {}
self._reset()
def _reset(self):
self._callback_pyfunctype = None
self._callback = None
self._installed = False
self._current_gui = None
def get_pyos_inputhook(self):
"""DEPRECATED since IPython 5.0
Return the current PyOS_InputHook as a ctypes.c_void_p."""
warn("`get_pyos_inputhook` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
return ctypes.c_void_p.in_dll(ctypes.pythonapi,"PyOS_InputHook")
def get_pyos_inputhook_as_func(self):
"""DEPRECATED since IPython 5.0
        Return the current PyOS_InputHook as a ctypes.PYFUNCTYPE."""
warn("`get_pyos_inputhook_as_func` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
return self.PYFUNC.in_dll(ctypes.pythonapi,"PyOS_InputHook")
def set_inputhook(self, callback):
"""DEPRECATED since IPython 5.0
Set PyOS_InputHook to callback and return the previous one."""
# On platforms with 'readline' support, it's all too likely to
# have a KeyboardInterrupt signal delivered *even before* an
# initial ``try:`` clause in the callback can be executed, so
# we need to disable CTRL+C in this situation.
ignore_CTRL_C()
self._callback = callback
self._callback_pyfunctype = self.PYFUNC(callback)
pyos_inputhook_ptr = self.get_pyos_inputhook()
original = self.get_pyos_inputhook_as_func()
pyos_inputhook_ptr.value = \
ctypes.cast(self._callback_pyfunctype, ctypes.c_void_p).value
self._installed = True
return original
def clear_inputhook(self, app=None):
"""DEPRECATED since IPython 5.0
Set PyOS_InputHook to NULL and return the previous one.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`."""
warn("`clear_inputhook` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
pyos_inputhook_ptr = self.get_pyos_inputhook()
original = self.get_pyos_inputhook_as_func()
pyos_inputhook_ptr.value = ctypes.c_void_p(None).value
allow_CTRL_C()
self._reset()
return original
def clear_app_refs(self, gui=None):
"""DEPRECATED since IPython 5.0
Clear IPython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
            If None, clear all app references. If one of ('wx', 'qt4'), clear
            the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
"""
warn("`clear_app_refs` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
if gui is None:
self.apps = {}
elif gui in self.apps:
del self.apps[gui]
def register(self, toolkitname, *aliases):
"""DEPRECATED since IPython 5.0
Register a class to provide the event loop for a given GUI.
This is intended to be used as a class decorator. It should be passed
the names with which to register this GUI integration. The classes
themselves should subclass :class:`InputHookBase`.
::
@inputhook_manager.register('qt')
class QtInputHook(InputHookBase):
def enable(self, app=None):
...
"""
warn("`register` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
def decorator(cls):
if ctypes is not None:
inst = cls(self)
self.guihooks[toolkitname] = inst
for a in aliases:
self.aliases[a] = toolkitname
return cls
return decorator
def current_gui(self):
"""DEPRECATED since IPython 5.0
Return a string indicating the currently active GUI or None."""
warn("`current_gui` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
return self._current_gui
def enable_gui(self, gui=None, app=None):
"""DEPRECATED since IPython 5.0
Switch amongst GUI input hooks by name.
This is a higher level method than :meth:`set_inputhook` - it uses the
GUI name to look up a registered object which enables the input hook
for that GUI.
Parameters
----------
gui : optional, string or None
If None (or 'none'), clears input hook, otherwise it must be one
of the recognized GUI names (see ``GUI_*`` constants in module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if ``gui=="GTK"`` will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
"""
warn("`enable_gui` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
if gui in (None, GUI_NONE):
return self.disable_gui()
if gui in self.aliases:
return self.enable_gui(self.aliases[gui], app)
try:
gui_hook = self.guihooks[gui]
except KeyError:
e = "Invalid GUI request {!r}, valid ones are: {}"
raise ValueError(e.format(gui, ', '.join(self.guihooks)))
self._current_gui = gui
app = gui_hook.enable(app)
if app is not None:
app._in_event_loop = True
self.apps[gui] = app
return app
def disable_gui(self):
"""DEPRECATED since IPython 5.0
Disable GUI event loop integration.
If an application was registered, this sets its ``_in_event_loop``
attribute to False. It then calls :meth:`clear_inputhook`.
"""
warn("`disable_gui` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
gui = self._current_gui
if gui in self.apps:
self.apps[gui]._in_event_loop = False
return self.clear_inputhook()
class InputHookBase(object):
"""DEPRECATED since IPython 5.0
Base class for input hooks for specific toolkits.
Subclasses should define an :meth:`enable` method with one argument, ``app``,
which will either be an instance of the toolkit's application class, or None.
They may also define a :meth:`disable` method with no arguments.
"""
def __init__(self, manager):
self.manager = manager
def disable(self):
pass
inputhook_manager = InputHookManager()
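# A minimal usage sketch of the manager (deprecated API; the callback below is
# a hypothetical no-op, not something defined in this module):
#
#     def _noop_hook():
#         return 0  # PyOS_InputHook callbacks return an int
#
#     original = inputhook_manager.set_inputhook(_noop_hook)
#     inputhook_manager.clear_inputhook()  # restore NULL, re-enable CTRL+C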
@inputhook_manager.register('osx')
class NullInputHook(InputHookBase):
"""DEPRECATED since IPython 5.0
A null inputhook that doesn't need to do anything"""
def enable(self, app=None):
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
@inputhook_manager.register('wx')
class WxInputHook(InputHookBase):
def enable(self, app=None):
"""DEPRECATED since IPython 5.0
Enable event loop integration with wxPython.
Parameters
----------
app : WX Application, optional.
Running application to use. If not given, we probe WX for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the ``PyOS_InputHook`` for wxPython, which allows
        wxPython to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`wx.App` as
follows::
import wx
app = wx.App(redirect=False, clearSigInt=False)
"""
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
import wx
wx_version = V(wx.__version__).version
if wx_version < [2, 8]:
raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__)
from IPython.lib.inputhookwx import inputhook_wx
self.manager.set_inputhook(inputhook_wx)
if _use_appnope():
from appnope import nope
nope()
import wx
if app is None:
app = wx.GetApp()
if app is None:
app = wx.App(redirect=False, clearSigInt=False)
return app
def disable(self):
"""DEPRECATED since IPython 5.0
Disable event loop integration with wxPython.
        This restores app nap on OS X
"""
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
if _use_appnope():
from appnope import nap
nap()
@inputhook_manager.register('qt', 'qt4')
class Qt4InputHook(InputHookBase):
def enable(self, app=None):
"""DEPRECATED since IPython 5.0
Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the PyOS_InputHook for PyQt4, which allows
        PyQt4 to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
            from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
"""
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
from IPython.lib.inputhookqt4 import create_inputhook_qt4
app, inputhook_qt4 = create_inputhook_qt4(self.manager, app)
self.manager.set_inputhook(inputhook_qt4)
if _use_appnope():
from appnope import nope
nope()
return app
def disable_qt4(self):
"""DEPRECATED since IPython 5.0
Disable event loop integration with PyQt4.
        This restores app nap on OS X
"""
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
if _use_appnope():
from appnope import nap
nap()
@inputhook_manager.register('qt5')
class Qt5InputHook(Qt4InputHook):
def enable(self, app=None):
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
os.environ['QT_API'] = 'pyqt5'
return Qt4InputHook.enable(self, app)
@inputhook_manager.register('gtk')
class GtkInputHook(InputHookBase):
def enable(self, app=None):
"""DEPRECATED since IPython 5.0
Enable event loop integration with PyGTK.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for PyGTK, which allows
        PyGTK to integrate with terminal based applications like
IPython.
"""
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
import gtk
try:
gtk.set_interactive(True)
except AttributeError:
# For older versions of gtk, use our own ctypes version
from IPython.lib.inputhookgtk import inputhook_gtk
self.manager.set_inputhook(inputhook_gtk)
@inputhook_manager.register('tk')
class TkInputHook(InputHookBase):
def enable(self, app=None):
"""DEPRECATED since IPython 5.0
Enable event loop integration with Tk.
Parameters
----------
app : toplevel :class:`Tkinter.Tk` widget, optional.
Running toplevel widget to use. If not given, we probe Tk for an
existing one, and create a new one if none is found.
Notes
-----
If you have already created a :class:`Tkinter.Tk` object, the only
thing done by this method is to register with the
:class:`InputHookManager`, since creating that object automatically
sets ``PyOS_InputHook``.
"""
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
if app is None:
try:
from tkinter import Tk # Py 3
except ImportError:
from Tkinter import Tk # Py 2
app = Tk()
app.withdraw()
self.manager.apps[GUI_TK] = app
return app
@inputhook_manager.register('glut')
class GlutInputHook(InputHookBase):
def enable(self, app=None):
"""DEPRECATED since IPython 5.0
Enable event loop integration with GLUT.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for GLUT, which allows GLUT to
        integrate with terminal based applications like IPython. Due to GLUT
        limitations, it is currently not possible to start the event loop
        without first creating a window. You should thus not create another
        window but use the created one instead. See 'gui-glut.py' in the
docs/examples/lib directory.
The default screen mode is set to:
glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
"""
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
import OpenGL.GLUT as glut
from IPython.lib.inputhookglut import glut_display_mode, \
glut_close, glut_display, \
glut_idle, inputhook_glut
if GUI_GLUT not in self.manager.apps:
glut.glutInit( sys.argv )
glut.glutInitDisplayMode( glut_display_mode )
# This is specific to freeglut
if bool(glut.glutSetOption):
glut.glutSetOption( glut.GLUT_ACTION_ON_WINDOW_CLOSE,
glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS )
glut.glutCreateWindow( sys.argv[0] )
glut.glutReshapeWindow( 1, 1 )
glut.glutHideWindow( )
glut.glutWMCloseFunc( glut_close )
glut.glutDisplayFunc( glut_display )
glut.glutIdleFunc( glut_idle )
else:
glut.glutWMCloseFunc( glut_close )
glut.glutDisplayFunc( glut_display )
glut.glutIdleFunc( glut_idle)
self.manager.set_inputhook( inputhook_glut )
def disable(self):
"""DEPRECATED since IPython 5.0
Disable event loop integration with glut.
        This sets PyOS_InputHook to NULL, sets the display function to a
        dummy one, and sets the timer to a dummy timer that will trigger
        very far in the future.
"""
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
import OpenGL.GLUT as glut
from glut_support import glutMainLoopEvent
glut.glutHideWindow() # This is an event to be processed below
glutMainLoopEvent()
super(GlutInputHook, self).disable()
@inputhook_manager.register('pyglet')
class PygletInputHook(InputHookBase):
def enable(self, app=None):
"""DEPRECATED since IPython 5.0
Enable event loop integration with pyglet.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the ``PyOS_InputHook`` for pyglet, which allows
pyglet to integrate with terminal based applications like
IPython.
"""
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
from IPython.lib.inputhookpyglet import inputhook_pyglet
self.manager.set_inputhook(inputhook_pyglet)
return app
@inputhook_manager.register('gtk3')
class Gtk3InputHook(InputHookBase):
def enable(self, app=None):
"""DEPRECATED since IPython 5.0
Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for Gtk3, which allows
        Gtk3 to integrate with terminal based applications like
IPython.
"""
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
from IPython.lib.inputhookgtk3 import inputhook_gtk3
self.manager.set_inputhook(inputhook_gtk3)
clear_inputhook = inputhook_manager.clear_inputhook
set_inputhook = inputhook_manager.set_inputhook
current_gui = inputhook_manager.current_gui
clear_app_refs = inputhook_manager.clear_app_refs
enable_gui = inputhook_manager.enable_gui
disable_gui = inputhook_manager.disable_gui
register = inputhook_manager.register
guis = inputhook_manager.guihooks
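# Typical interactive use of the module-level aliases above (sketch only;
# each call assumes the corresponding GUI toolkit is importable):
#
#     app = enable_gui('qt4')  # install the Qt4 hook, returns the QApplication
#     disable_gui()            # remove the hook again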
def _deprecated_disable():
warn("This function is deprecated since IPython 4.0 use disable_gui() instead",
DeprecationWarning, stacklevel=2)
inputhook_manager.disable_gui()
disable_wx = disable_qt4 = disable_gtk = disable_gtk3 = disable_glut = \
disable_pyglet = disable_osx = _deprecated_disable
| mit |
davidbuzz/ardupilot | Tools/ardupilotwaf/boards.py | 1 | 30037 | #!/usr/bin/env python
# encoding: utf-8
from collections import OrderedDict
import sys, os
import fnmatch
import waflib
from waflib import Utils
from waflib.Configure import conf
_board_classes = {}
_board = None
class BoardMeta(type):
def __init__(cls, name, bases, dct):
super(BoardMeta, cls).__init__(name, bases, dct)
if 'abstract' not in cls.__dict__:
cls.abstract = False
if cls.abstract:
return
if not hasattr(cls, 'toolchain'):
cls.toolchain = 'native'
board_name = getattr(cls, 'name', name)
if board_name in _board_classes:
raise Exception('board named %s already exists' % board_name)
_board_classes[board_name] = cls
class Board:
abstract = True
def __init__(self):
self.with_can = False
def configure(self, cfg):
cfg.env.TOOLCHAIN = cfg.options.toolchain or self.toolchain
cfg.env.ROMFS_FILES = []
cfg.load('toolchain')
cfg.load('cxx_checks')
env = waflib.ConfigSet.ConfigSet()
self.configure_env(cfg, env)
        # Set up scripting; deferred to this point so the board flash size can be checked first
if ((not cfg.options.disable_scripting) and
(not cfg.env.DISABLE_SCRIPTING) and
((cfg.env.BOARD_FLASH_SIZE is None) or
(cfg.env.BOARD_FLASH_SIZE == []) or
(cfg.env.BOARD_FLASH_SIZE > 1024))):
env.DEFINES.update(
ENABLE_SCRIPTING = 1,
LUA_32BITS = 1,
)
env.AP_LIBRARIES += [
'AP_Scripting',
'AP_Scripting/lua/src',
]
else:
cfg.options.disable_scripting = True
# allow GCS disable for AP_DAL example
if cfg.options.no_gcs:
env.CXXFLAGS += ['-DHAL_NO_GCS=1']
d = env.get_merged_dict()
# Always prepend so that arguments passed in the command line get
# the priority.
for k, val in d.items():
# Dictionaries (like 'DEFINES') are converted to lists to
# conform to waf conventions.
if isinstance(val, dict):
keys = list(val.keys())
if not isinstance(val, OrderedDict):
keys.sort()
val = ['%s=%s' % (vk, val[vk]) for vk in keys]
if k in cfg.env and isinstance(cfg.env[k], list):
cfg.env.prepend_value(k, val)
else:
cfg.env[k] = val
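        # Illustrative merge (hypothetical values): env.DEFINES of
        # {'A': 1, 'B': 2} becomes ['A=1', 'B=2'] and is prepended to any
        # existing cfg.env['DEFINES'] list, so command-line values win.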
cfg.ap_common_checks()
cfg.env.prepend_value('INCLUDES', [
cfg.srcnode.find_dir('libraries/AP_Common/missing').abspath()
])
def cc_version_gte(self, cfg, want_major, want_minor):
(major, minor, patchlevel) = cfg.env.CC_VERSION
return (int(major) > want_major or
(int(major) == want_major and int(minor) >= want_minor))
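    # e.g. with cfg.env.CC_VERSION == ('7', '5', '0') (a hypothetical value),
    # cc_version_gte(cfg, 7, 4) is True and cc_version_gte(cfg, 8, 0) is False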
def configure_env(self, cfg, env):
        # Use a dictionary instead of the conventional list for definitions to
        # make it easy to override them. Convert back to list before consumption.
env.DEFINES = {}
env.CFLAGS += [
'-ffunction-sections',
'-fdata-sections',
'-fsigned-char',
'-Wall',
'-Wextra',
'-Werror=format',
'-Wpointer-arith',
'-Wcast-align',
'-Wno-missing-field-initializers',
'-Wno-unused-parameter',
'-Wno-redundant-decls',
'-Wno-unknown-pragmas',
'-Wno-trigraphs',
'-Werror=shadow',
'-Werror=return-type',
'-Werror=unused-result',
'-Werror=unused-variable',
'-Werror=narrowing',
'-Werror=attributes',
'-Werror=overflow',
'-Werror=parentheses',
'-Werror=format-extra-args',
'-Werror=ignored-qualifiers',
'-Werror=undef',
'-DARDUPILOT_BUILD',
]
if cfg.options.scripting_checks:
env.DEFINES.update(
AP_SCRIPTING_CHECKS = 1,
)
cfg.msg("CXX Compiler", "%s %s" % (cfg.env.COMPILER_CXX, ".".join(cfg.env.CC_VERSION)))
if 'clang' in cfg.env.COMPILER_CC:
env.CFLAGS += [
'-fcolor-diagnostics',
'-Wno-gnu-designator',
'-Wno-inconsistent-missing-override',
'-Wno-mismatched-tags',
'-Wno-gnu-variable-sized-type-not-at-end',
'-Werror=implicit-fallthrough',
]
else:
env.CFLAGS += [
'-Wno-format-contains-nul',
]
if self.cc_version_gte(cfg, 7, 4):
env.CXXFLAGS += [
'-Werror=implicit-fallthrough',
]
if cfg.env.DEBUG:
env.CFLAGS += [
'-g',
'-O0',
]
env.DEFINES.update(
HAL_DEBUG_BUILD = 1,
)
if cfg.options.bootloader:
# don't let bootloaders try and pull scripting in
cfg.options.disable_scripting = True
else:
env.DEFINES.update(
ENABLE_HEAP = 1,
)
if cfg.options.enable_math_check_indexes:
env.CXXFLAGS += ['-DMATH_CHECK_INDEXES']
env.CXXFLAGS += [
'-std=gnu++11',
'-fdata-sections',
'-ffunction-sections',
'-fno-exceptions',
'-fsigned-char',
'-Wall',
'-Wextra',
'-Wpointer-arith',
'-Wno-unused-parameter',
'-Wno-missing-field-initializers',
'-Wno-reorder',
'-Wno-redundant-decls',
'-Wno-unknown-pragmas',
'-Wno-expansion-to-defined',
'-Werror=cast-align',
'-Werror=attributes',
'-Werror=format-security',
'-Werror=format-extra-args',
'-Werror=enum-compare',
'-Werror=format',
'-Werror=array-bounds',
'-Werror=uninitialized',
'-Werror=init-self',
'-Werror=narrowing',
'-Werror=return-type',
'-Werror=switch',
'-Werror=sign-compare',
'-Werror=type-limits',
'-Werror=undef',
'-Werror=unused-result',
'-Werror=shadow',
'-Werror=unused-value',
'-Werror=unused-variable',
'-Werror=delete-non-virtual-dtor',
'-Wfatal-errors',
'-Wno-trigraphs',
'-Werror=parentheses',
'-DARDUPILOT_BUILD',
]
if 'clang++' in cfg.env.COMPILER_CXX:
env.CXXFLAGS += [
'-fcolor-diagnostics',
'-Werror=address-of-packed-member',
'-Werror=inconsistent-missing-override',
'-Werror=overloaded-virtual',
# catch conversion issues:
'-Werror=bitfield-enum-conversion',
'-Werror=bool-conversion',
'-Werror=constant-conversion',
'-Werror=enum-conversion',
'-Werror=int-conversion',
'-Werror=literal-conversion',
'-Werror=non-literal-null-conversion',
'-Werror=null-conversion',
'-Werror=objc-literal-conversion',
# '-Werror=shorten-64-to-32', # ARRAY_SIZE() creates this all over the place as the caller typically takes a uint32_t not a size_t
'-Werror=string-conversion',
# '-Werror=sign-conversion', # can't use as we assign into AP_Int8 from uint8_ts
'-Wno-gnu-designator',
'-Wno-mismatched-tags',
'-Wno-gnu-variable-sized-type-not-at-end',
'-Werror=implicit-fallthrough',
]
else:
env.CXXFLAGS += [
'-Wno-format-contains-nul',
'-Werror=unused-but-set-variable'
]
if self.cc_version_gte(cfg, 5, 2):
env.CXXFLAGS += [
'-Werror=suggest-override',
]
if self.cc_version_gte(cfg, 7, 4):
env.CXXFLAGS += [
'-Werror=implicit-fallthrough',
]
if cfg.options.Werror:
errors = ['-Werror',
'-Werror=missing-declarations',
'-Werror=float-equal',
'-Werror=undef',
]
env.CFLAGS += errors
env.CXXFLAGS += errors
if cfg.env.DEBUG:
env.CXXFLAGS += [
'-g',
'-O0',
]
if cfg.env.DEST_OS == 'darwin':
env.LINKFLAGS += [
'-Wl,-dead_strip',
]
else:
env.LINKFLAGS += [
'-Wl,--gc-sections',
]
if self.with_can:
env.AP_LIBRARIES += [
'AP_UAVCAN',
'modules/uavcan/libuavcan/src/**/*.cpp'
]
env.CXXFLAGS += [
'-Wno-error=cast-align',
]
env.DEFINES.update(
UAVCAN_CPP_VERSION = 'UAVCAN_CPP03',
UAVCAN_NO_ASSERTIONS = 1,
UAVCAN_NULLPTR = 'nullptr'
)
env.INCLUDES += [
cfg.srcnode.find_dir('modules/uavcan/libuavcan/include').abspath()
]
if cfg.options.build_dates:
env.build_dates = True
# We always want to use PRI format macros
cfg.define('__STDC_FORMAT_MACROS', 1)
if cfg.options.disable_ekf2:
env.CXXFLAGS += ['-DHAL_NAVEKF2_AVAILABLE=0']
if cfg.options.disable_ekf3:
env.CXXFLAGS += ['-DHAL_NAVEKF3_AVAILABLE=0']
if cfg.options.osd or cfg.options.osd_fonts:
env.CXXFLAGS += ['-DOSD_ENABLED=1', '-DHAL_MSP_ENABLED=1']
if cfg.options.osd_fonts:
for f in os.listdir('libraries/AP_OSD/fonts'):
if fnmatch.fnmatch(f, "font*bin"):
env.ROMFS_FILES += [(f,'libraries/AP_OSD/fonts/'+f)]
def pre_build(self, bld):
'''pre-build hook that gets called before dynamic sources'''
if bld.env.ROMFS_FILES:
self.embed_ROMFS_files(bld)
def build(self, bld):
bld.ap_version_append_str('GIT_VERSION', bld.git_head_hash(short=True))
import time
ltime = time.localtime()
if bld.env.build_dates:
bld.ap_version_append_int('BUILD_DATE_YEAR', ltime.tm_year)
bld.ap_version_append_int('BUILD_DATE_MONTH', ltime.tm_mon)
bld.ap_version_append_int('BUILD_DATE_DAY', ltime.tm_mday)
def embed_ROMFS_files(self, ctx):
'''embed some files using AP_ROMFS'''
import embed
header = ctx.bldnode.make_node('ap_romfs_embedded.h').abspath()
if not embed.create_embedded_h(header, ctx.env.ROMFS_FILES, ctx.env.ROMFS_UNCOMPRESSED):
ctx.fatal("Failed to created ap_romfs_embedded.h")
Board = BoardMeta('Board', Board.__bases__, dict(Board.__dict__))
def add_dynamic_boards():
    '''add boards based on the existence of hwdef.dat in ChibiOS hwdef subdirectories'''
dirname, dirlist, filenames = next(os.walk('libraries/AP_HAL_ChibiOS/hwdef'))
for d in dirlist:
if d in _board_classes.keys():
continue
hwdef = os.path.join(dirname, d, 'hwdef.dat')
if os.path.exists(hwdef):
newclass = type(d, (chibios,), {'name': d})
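            # Note: creating the class is enough; BoardMeta.__init__ registers
            # it in _board_classes as a side effect, so `newclass` needs no
            # further use here.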
def get_boards_names():
add_dynamic_boards()
return sorted(list(_board_classes.keys()), key=str.lower)
def get_removed_boards():
'''list of boards which have been removed'''
return sorted(['px4-v1', 'px4-v2', 'px4-v3', 'px4-v4', 'px4-v4pro'])
@conf
def get_board(ctx):
global _board
if not _board:
if not ctx.env.BOARD:
ctx.fatal('BOARD environment variable must be set before first call to get_board()')
if ctx.env.BOARD in get_removed_boards():
ctx.fatal('''
The board target %s has been removed from ArduPilot with the removal of NuttX support and HAL_PX4.
Please use a replacement build as follows:
px4-v2 Use Pixhawk1 build
px4-v3 Use Pixhawk1 or CubeBlack builds
px4-v4 Use Pixracer build
px4-v4pro Use DrotekP3Pro build
''' % ctx.env.BOARD)
boards = _board_classes.keys()
if not ctx.env.BOARD in boards:
ctx.fatal("Invalid board '%s': choices are %s" % (ctx.env.BOARD, ', '.join(sorted(boards, key=str.lower))))
_board = _board_classes[ctx.env.BOARD]()
return _board
# NOTE: Keeping all the board definitions together so we can easily
# identify opportunities to simplify common flags. In the future it might
# be worthwhile to keep board definitions in files of their own.
class sitl(Board):
def __init__(self):
if Utils.unversioned_sys_platform().startswith("linux"):
self.with_can = True
else:
self.with_can = False
def configure_env(self, cfg, env):
super(sitl, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD = 'HAL_BOARD_SITL',
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_NONE',
AP_SCRIPTING_CHECKS = 1, # SITL should always do runtime scripting checks
)
if self.with_can:
cfg.define('HAL_NUM_CAN_IFACES', 2)
cfg.define('UAVCAN_EXCEPTIONS', 0)
env.CXXFLAGS += [
'-Werror=float-equal'
]
if not cfg.env.DEBUG:
env.CXXFLAGS += [
'-O3',
]
if 'clang++' in cfg.env.COMPILER_CXX and cfg.options.asan:
env.CXXFLAGS += [
'-fsanitize=address',
'-fno-omit-frame-pointer',
]
env.LIB += [
'm',
]
cfg.check_librt(env)
cfg.check_feenableexcept()
env.LINKFLAGS += ['-pthread',]
if cfg.env.DEBUG and 'clang++' in cfg.env.COMPILER_CXX and cfg.options.asan:
env.LINKFLAGS += ['-fsanitize=address']
env.AP_LIBRARIES += [
'AP_HAL_SITL',
]
if not cfg.env.AP_PERIPH:
env.AP_LIBRARIES += [
'SITL',
]
if cfg.options.enable_sfml:
if not cfg.check_SFML(env):
cfg.fatal("Failed to find SFML libraries")
if cfg.options.enable_sfml_joystick:
if not cfg.check_SFML(env):
cfg.fatal("Failed to find SFML libraries")
env.CXXFLAGS += ['-DSFML_JOYSTICK']
if cfg.options.sitl_osd:
env.CXXFLAGS += ['-DWITH_SITL_OSD','-DOSD_ENABLED=1']
for f in os.listdir('libraries/AP_OSD/fonts'):
if fnmatch.fnmatch(f, "font*bin"):
env.ROMFS_FILES += [(f,'libraries/AP_OSD/fonts/'+f)]
for f in os.listdir('Tools/autotest/models'):
if fnmatch.fnmatch(f, "*.json") or fnmatch.fnmatch(f, "*.parm"):
env.ROMFS_FILES += [('models/'+f,'Tools/autotest/models/'+f)]
# embed any scripts from ROMFS/scripts
if os.path.exists('ROMFS/scripts'):
for f in os.listdir('ROMFS/scripts'):
if fnmatch.fnmatch(f, "*.lua"):
env.ROMFS_FILES += [('scripts/'+f,'ROMFS/scripts/'+f)]
if len(env.ROMFS_FILES) > 0:
env.CXXFLAGS += ['-DHAL_HAVE_AP_ROMFS_EMBEDDED_H']
if cfg.options.sitl_rgbled:
env.CXXFLAGS += ['-DWITH_SITL_RGBLED']
if cfg.options.enable_sfml_audio:
if not cfg.check_SFML_Audio(env):
cfg.fatal("Failed to find SFML Audio libraries")
env.CXXFLAGS += ['-DWITH_SITL_TONEALARM']
if cfg.options.sitl_flash_storage:
env.CXXFLAGS += ['-DSTORAGE_USE_FLASH=1']
if cfg.env.DEST_OS == 'cygwin':
env.LIB += [
'winmm',
]
if Utils.unversioned_sys_platform() == 'cygwin':
env.CXXFLAGS += ['-DCYGWIN_BUILD']
if 'clang++' in cfg.env.COMPILER_CXX:
print("Disabling SLP for clang++")
env.CXXFLAGS += [
'-fno-slp-vectorize' # compiler bug when trying to use SLP
]
def srcpath(path):
return cfg.srcnode.make_node(path).abspath()
env.SRCROOT = srcpath('')
class sitl_periph_gps(sitl):
def configure_env(self, cfg, env):
cfg.env.AP_PERIPH = 1
cfg.env.DISABLE_SCRIPTING = 1
super(sitl_periph_gps, self).configure_env(cfg, env)
env.DEFINES.update(
HAL_BUILD_AP_PERIPH = 1,
PERIPH_FW = 1,
CAN_APP_NODE_NAME = '"org.ardupilot.ap_periph_gps"',
HAL_PERIPH_ENABLE_GPS = 1,
HAL_WITH_DSP = 1,
HAL_CAN_DEFAULT_NODE_ID = 0,
HAL_RAM_RESERVE_START = 0,
APJ_BOARD_ID = 100,
HAL_NO_GCS = 1,
HAL_NO_LOGGING = 1,
)
# libcanard is written for 32bit platforms
env.CXXFLAGS += [
'-m32',
]
env.CFLAGS += [
'-m32',
]
env.LDFLAGS += [
'-m32',
]
class chibios(Board):
abstract = True
toolchain = 'arm-none-eabi'
def configure_env(self, cfg, env):
super(chibios, self).configure_env(cfg, env)
cfg.load('chibios')
env.BOARD = self.name
env.DEFINES.update(
CONFIG_HAL_BOARD = 'HAL_BOARD_CHIBIOS',
HAVE_STD_NULLPTR_T = 0,
USE_LIBC_REALLOC = 0,
)
env.AP_LIBRARIES += [
'AP_HAL_ChibiOS',
]
# make board name available for USB IDs
env.CHIBIOS_BOARD_NAME = 'HAL_BOARD_NAME="%s"' % self.name
env.CFLAGS += cfg.env.CPU_FLAGS + [
'-Wno-cast-align',
'-Wlogical-op',
'-Wframe-larger-than=1300',
'-fsingle-precision-constant',
'-Wno-attributes',
'-fno-exceptions',
'-Wall',
'-Wextra',
'-Wno-sign-compare',
'-Wfloat-equal',
'-Wpointer-arith',
'-Wmissing-declarations',
'-Wno-unused-parameter',
'-Werror=array-bounds',
'-Wfatal-errors',
'-Werror=uninitialized',
'-Werror=init-self',
'-Werror=unused-but-set-variable',
'-Wno-missing-field-initializers',
'-Wno-trigraphs',
'-fno-strict-aliasing',
'-fomit-frame-pointer',
'-falign-functions=16',
'-ffunction-sections',
'-fdata-sections',
'-fno-strength-reduce',
'-fno-builtin-printf',
'-fno-builtin-fprintf',
'-fno-builtin-vprintf',
'-fno-builtin-vfprintf',
'-fno-builtin-puts',
'-mno-thumb-interwork',
'-mthumb',
'--specs=nano.specs',
'-specs=nosys.specs',
'-DCHIBIOS_BOARD_NAME="%s"' % self.name,
'-D__USE_CMSIS',
'-Werror=deprecated-declarations'
]
if not cfg.options.Werror:
env.CFLAGS += [
'-Wno-error=double-promotion',
'-Wno-error=missing-declarations',
'-Wno-error=float-equal',
'-Wno-error=undef',
'-Wno-error=cpp',
]
env.CXXFLAGS += env.CFLAGS + [
'-fno-rtti',
'-fno-threadsafe-statics',
]
env.CFLAGS += [
'-std=c11'
]
if Utils.unversioned_sys_platform() == 'cygwin':
env.CXXFLAGS += ['-DCYGWIN_BUILD']
bldnode = cfg.bldnode.make_node(self.name)
env.BUILDROOT = bldnode.make_node('').abspath()
env.LINKFLAGS = cfg.env.CPU_FLAGS + [
'-fomit-frame-pointer',
'-falign-functions=16',
'-ffunction-sections',
'-fdata-sections',
'-u_port_lock',
'-u_port_unlock',
'-u_exit',
'-u_kill',
'-u_getpid',
'-u_errno',
'-uchThdExit',
'-fno-common',
'-nostartfiles',
'-mno-thumb-interwork',
'-mthumb',
'-specs=nano.specs',
'-specs=nosys.specs',
'-L%s' % env.BUILDROOT,
'-L%s' % cfg.srcnode.make_node('modules/ChibiOS/os/common/startup/ARMCMx/compilers/GCC/ld/').abspath(),
'-L%s' % cfg.srcnode.make_node('libraries/AP_HAL_ChibiOS/hwdef/common/').abspath(),
'-Wl,--gc-sections,--no-warn-mismatch,--library-path=/ld,--script=ldscript.ld,--defsym=__process_stack_size__=%s,--defsym=__main_stack_size__=%s' % (cfg.env.PROCESS_STACK, cfg.env.MAIN_STACK)
]
if cfg.env.DEBUG:
env.CFLAGS += [
'-gdwarf-4',
'-g3',
]
env.LINKFLAGS += [
'-gdwarf-4',
'-g3',
]
if cfg.env.ENABLE_ASSERTS:
cfg.msg("Enabling ChibiOS asserts", "yes")
env.CFLAGS += [ '-DHAL_CHIBIOS_ENABLE_ASSERTS' ]
env.CXXFLAGS += [ '-DHAL_CHIBIOS_ENABLE_ASSERTS' ]
else:
cfg.msg("Enabling ChibiOS asserts", "no")
if cfg.env.ENABLE_MALLOC_GUARD:
cfg.msg("Enabling malloc guard", "yes")
env.CFLAGS += [ '-DHAL_CHIBIOS_ENABLE_MALLOC_GUARD' ]
env.CXXFLAGS += [ '-DHAL_CHIBIOS_ENABLE_MALLOC_GUARD' ]
else:
cfg.msg("Enabling malloc guard", "no")
env.LIB += ['gcc', 'm']
env.GIT_SUBMODULES += [
'ChibiOS',
]
env.INCLUDES += [
cfg.srcnode.find_dir('libraries/AP_GyroFFT/CMSIS_5/include').abspath()
]
# whitelist of compilers which we should build with -Werror
gcc_whitelist = [
('4','9','3'),
('6','3','1'),
('9','2','1'),
('9','3','1'),
]
if cfg.options.Werror or cfg.env.CC_VERSION in gcc_whitelist:
cfg.msg("Enabling -Werror", "yes")
if '-Werror' not in env.CXXFLAGS:
env.CXXFLAGS += [ '-Werror' ]
else:
cfg.msg("Enabling -Werror", "no")
try:
import intelhex
env.HAVE_INTEL_HEX = True
cfg.msg("Checking for intelhex module:", 'OK')
except Exception:
cfg.msg("Checking for intelhex module:", 'disabled', color='YELLOW')
env.HAVE_INTEL_HEX = False
def build(self, bld):
super(chibios, self).build(bld)
bld.ap_version_append_str('CHIBIOS_GIT_VERSION', bld.git_submodule_head_hash('ChibiOS', short=True))
bld.load('chibios')
def pre_build(self, bld):
'''pre-build hook that gets called before dynamic sources'''
from waflib.Context import load_tool
module = load_tool('chibios', [], with_sys_path=True)
fun = getattr(module, 'pre_build', None)
if fun:
fun(bld)
super(chibios, self).pre_build(bld)
class linux(Board):
def configure_env(self, cfg, env):
super(linux, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD = 'HAL_BOARD_LINUX',
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_NONE',
)
if not cfg.env.DEBUG:
env.CXXFLAGS += [
'-O3',
]
env.LIB += [
'm',
]
cfg.check_librt(env)
cfg.check_lttng(env)
cfg.check_libdl(env)
cfg.check_libiio(env)
env.LINKFLAGS += ['-pthread',]
env.AP_LIBRARIES += [
'AP_HAL_Linux',
]
if self.with_can:
cfg.define('UAVCAN_EXCEPTIONS', 0)
if cfg.options.apstatedir:
cfg.define('AP_STATEDIR', cfg.options.apstatedir)
def build(self, bld):
super(linux, self).build(bld)
if bld.options.upload:
waflib.Options.commands.append('rsync')
# Avoid infinite recursion
bld.options.upload = False
class navigator(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(navigator, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE='HAL_BOARD_SUBTYPE_LINUX_NAVIGATOR',
)
class erleboard(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(erleboard, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_ERLEBOARD',
)
class navio(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(navio, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_NAVIO',
)
class navio2(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(navio2, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_NAVIO2',
)
class edge(linux):
toolchain = 'arm-linux-gnueabihf'
def __init__(self):
self.with_can = True
def configure_env(self, cfg, env):
super(edge, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_EDGE',
)
class zynq(linux):
toolchain = 'arm-xilinx-linux-gnueabi'
def configure_env(self, cfg, env):
super(zynq, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_ZYNQ',
)
class ocpoc_zynq(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(ocpoc_zynq, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_OCPOC_ZYNQ',
)
class bbbmini(linux):
toolchain = 'arm-linux-gnueabihf'
def __init__(self):
self.with_can = True
def configure_env(self, cfg, env):
super(bbbmini, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BBBMINI',
)
class blue(linux):
toolchain = 'arm-linux-gnueabihf'
def __init__(self):
self.with_can = True
def configure_env(self, cfg, env):
super(blue, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BLUE',
)
class pocket(linux):
toolchain = 'arm-linux-gnueabihf'
def __init__(self):
self.with_can = True
def configure_env(self, cfg, env):
super(pocket, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_POCKET',
)
class pxf(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(pxf, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_PXF',
)
class bebop(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(bebop, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BEBOP',
)
class disco(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(disco, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_DISCO',
)
class erlebrain2(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(erlebrain2, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_ERLEBRAIN2',
)
class bhat(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(bhat, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BH',
)
class dark(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(dark, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_DARK',
)
class pxfmini(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(pxfmini, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_PXFMINI',
)
class aero(linux):
def __init__(self):
self.with_can = True
def configure_env(self, cfg, env):
super(aero, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_AERO',
)
class rst_zynq(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(rst_zynq, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_RST_ZYNQ',
)
class SITL_static(sitl):
def configure_env(self, cfg, env):
super(SITL_static, self).configure_env(cfg, env)
cfg.env.STATIC_LINKING = True
class SITL_x86_64_linux_gnu(SITL_static):
toolchain = 'x86_64-linux-gnu'
class SITL_arm_linux_gnueabihf(SITL_static):
toolchain = 'arm-linux-gnueabihf'
| gpl-3.0 |
Harunx9/Transaltors | Lexer/Lexer/env/Lib/site-packages/pip/_vendor/html5lib/html5parser.py | 310 | 117029 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import with_metaclass
import types
from . import inputstream
from . import tokenizer
from . import treebuilders
from .treebuilders._base import Marker
from . import utils
from . import constants
from .constants import spaceCharacters, asciiUpper2Lower
from .constants import specialElements
from .constants import headingElements
from .constants import cdataElements, rcdataElements
from .constants import tokenTypes, ReparseException, namespaces
from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
from .constants import adjustForeignAttributes as adjustForeignAttributesMap
def parse(doc, treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
"""Parse a string or file-like object into a tree"""
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parseFragment(doc, container=container, encoding=encoding)
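# Illustrative usage of the two module-level helpers above (the markup
# strings are hypothetical; the returned objects depend on the treebuilder):
#
#     tree = parse("<p>hello</p>")                          # full document
#     frag = parseFragment("<b>bold</b>", container="div")  # fragment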
def method_decorator_metaclass(function):
class Decorated(type):
def __new__(meta, classname, bases, classDict):
for attributeName, attribute in classDict.items():
if isinstance(attribute, types.FunctionType):
attribute = function(attribute)
classDict[attributeName] = attribute
return type.__new__(meta, classname, bases, classDict)
return Decorated
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer,
strict=False, namespaceHTMLElements=True, debug=False):
"""
strict - raise an exception when a parse error is encountered
tree - a treebuilder class controlling the type of tree that will be
returned. Built in treebuilders can be accessed through
html5lib.treebuilders.getTreeBuilder(treeType)
tokenizer - a class that provides a stream of tokens to the treebuilder.
        This may be replaced, e.g. by a sanitizer which converts some tags to
        text
"""
# Raise an exception on the first error encountered
self.strict = strict
if tree is None:
tree = treebuilders.getTreeBuilder("etree")
self.tree = tree(namespaceHTMLElements)
self.tokenizer_class = tokenizer
self.errors = []
self.phases = dict([(name, cls(self, self.tree)) for name, cls in
getPhases(debug).items()])
def _parse(self, stream, innerHTML=False, container="div",
encoding=None, parseMeta=True, useChardet=True, **kwargs):
self.innerHTMLMode = innerHTML
self.container = container
self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
parseMeta=parseMeta,
useChardet=useChardet,
parser=self, **kwargs)
self.reset()
while True:
try:
self.mainLoop()
break
except ReparseException:
self.reset()
def reset(self):
self.tree.reset()
self.firstStartTag = False
self.errors = []
self.log = [] # only used with debug mode
# "quirks" / "limited quirks" / "no quirks"
self.compatMode = "no quirks"
if self.innerHTMLMode:
self.innerHTML = self.container.lower()
if self.innerHTML in cdataElements:
self.tokenizer.state = self.tokenizer.rcdataState
elif self.innerHTML in rcdataElements:
self.tokenizer.state = self.tokenizer.rawtextState
elif self.innerHTML == 'plaintext':
self.tokenizer.state = self.tokenizer.plaintextState
else:
# state already is data state
# self.tokenizer.state = self.tokenizer.dataState
pass
self.phase = self.phases["beforeHtml"]
self.phase.insertHtmlElement()
self.resetInsertionMode()
else:
self.innerHTML = False
self.phase = self.phases["initial"]
self.lastPhase = None
self.beforeRCDataPhase = None
self.framesetOK = True
def isHTMLIntegrationPoint(self, element):
if (element.name == "annotation-xml" and
element.namespace == namespaces["mathml"]):
return ("encoding" in element.attributes and
element.attributes["encoding"].translate(
asciiUpper2Lower) in
("text/html", "application/xhtml+xml"))
else:
return (element.namespace, element.name) in htmlIntegrationPointElements
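    # Illustrative example: an <annotation-xml encoding="text/HTML"> element
    # in the MathML namespace is an HTML integration point, since the encoding
    # value is case-folded before comparison against the two allowed values.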
def isMathMLTextIntegrationPoint(self, element):
return (element.namespace, element.name) in mathmlTextIntegrationPointElements
def mainLoop(self):
CharactersToken = tokenTypes["Characters"]
SpaceCharactersToken = tokenTypes["SpaceCharacters"]
StartTagToken = tokenTypes["StartTag"]
EndTagToken = tokenTypes["EndTag"]
CommentToken = tokenTypes["Comment"]
DoctypeToken = tokenTypes["Doctype"]
ParseErrorToken = tokenTypes["ParseError"]
for token in self.normalizedTokens():
new_token = token
while new_token is not None:
currentNode = self.tree.openElements[-1] if self.tree.openElements else None
currentNodeNamespace = currentNode.namespace if currentNode else None
currentNodeName = currentNode.name if currentNode else None
type = new_token["type"]
if type == ParseErrorToken:
self.parseError(new_token["data"], new_token.get("datavars", {}))
new_token = None
else:
if (len(self.tree.openElements) == 0 or
currentNodeNamespace == self.tree.defaultNamespace or
(self.isMathMLTextIntegrationPoint(currentNode) and
((type == StartTagToken and
token["name"] not in frozenset(["mglyph", "malignmark"])) or
type in (CharactersToken, SpaceCharactersToken))) or
(currentNodeNamespace == namespaces["mathml"] and
currentNodeName == "annotation-xml" and
token["name"] == "svg") or
(self.isHTMLIntegrationPoint(currentNode) and
type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
phase = self.phase
else:
phase = self.phases["inForeignContent"]
if type == CharactersToken:
new_token = phase.processCharacters(new_token)
elif type == SpaceCharactersToken:
new_token = phase.processSpaceCharacters(new_token)
elif type == StartTagToken:
new_token = phase.processStartTag(new_token)
elif type == EndTagToken:
new_token = phase.processEndTag(new_token)
elif type == CommentToken:
new_token = phase.processComment(new_token)
elif type == DoctypeToken:
new_token = phase.processDoctype(new_token)
if (type == StartTagToken and token["selfClosing"]
and not token["selfClosingAcknowledged"]):
self.parseError("non-void-element-with-trailing-solidus",
{"name": token["name"]})
# When the loop finishes it's EOF
reprocess = True
phases = []
while reprocess:
phases.append(self.phase)
reprocess = self.phase.processEOF()
if reprocess:
assert self.phase not in phases
def normalizedTokens(self):
for token in self.tokenizer:
yield self.normalizeToken(token)
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
"""Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, innerHTML=False, encoding=encoding,
parseMeta=parseMeta, useChardet=useChardet)
return self.tree.getDocument()
def parseFragment(self, stream, container="div", encoding=None,
parseMeta=False, useChardet=True):
"""Parse a HTML fragment into a well-formed tree fragment
container - name of the element we're setting the innerHTML property
if set to None, default to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, True, container=container, encoding=encoding)
return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars={}):
# XXX The idea is to make errorcode mandatory.
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
if self.strict:
raise ParseError
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
token["data"] = dict(token["data"][::-1])
return token
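    # Note: token["data"] arrives as a list of (name, value) pairs; reversing
    # it before dict() means that for duplicate attributes the earliest
    # occurrence in the tag wins, matching HTML5 duplicate-attribute handling.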
def adjustMathMLAttributes(self, token):
replacements = {"definitionurl": "definitionURL"}
for k, v in replacements.items():
if k in token["data"]:
token["data"][v] = token["data"][k]
del token["data"][k]
def adjustSVGAttributes(self, token):
replacements = {
"attributename": "attributeName",
"attributetype": "attributeType",
"basefrequency": "baseFrequency",
"baseprofile": "baseProfile",
"calcmode": "calcMode",
"clippathunits": "clipPathUnits",
"contentscripttype": "contentScriptType",
"contentstyletype": "contentStyleType",
"diffuseconstant": "diffuseConstant",
"edgemode": "edgeMode",
"externalresourcesrequired": "externalResourcesRequired",
"filterres": "filterRes",
"filterunits": "filterUnits",
"glyphref": "glyphRef",
"gradienttransform": "gradientTransform",
"gradientunits": "gradientUnits",
"kernelmatrix": "kernelMatrix",
"kernelunitlength": "kernelUnitLength",
"keypoints": "keyPoints",
"keysplines": "keySplines",
"keytimes": "keyTimes",
"lengthadjust": "lengthAdjust",
"limitingconeangle": "limitingConeAngle",
"markerheight": "markerHeight",
"markerunits": "markerUnits",
"markerwidth": "markerWidth",
"maskcontentunits": "maskContentUnits",
"maskunits": "maskUnits",
"numoctaves": "numOctaves",
"pathlength": "pathLength",
"patterncontentunits": "patternContentUnits",
"patterntransform": "patternTransform",
"patternunits": "patternUnits",
"pointsatx": "pointsAtX",
"pointsaty": "pointsAtY",
"pointsatz": "pointsAtZ",
"preservealpha": "preserveAlpha",
"preserveaspectratio": "preserveAspectRatio",
"primitiveunits": "primitiveUnits",
"refx": "refX",
"refy": "refY",
"repeatcount": "repeatCount",
"repeatdur": "repeatDur",
"requiredextensions": "requiredExtensions",
"requiredfeatures": "requiredFeatures",
"specularconstant": "specularConstant",
"specularexponent": "specularExponent",
"spreadmethod": "spreadMethod",
"startoffset": "startOffset",
"stddeviation": "stdDeviation",
"stitchtiles": "stitchTiles",
"surfacescale": "surfaceScale",
"systemlanguage": "systemLanguage",
"tablevalues": "tableValues",
"targetx": "targetX",
"targety": "targetY",
"textlength": "textLength",
"viewbox": "viewBox",
"viewtarget": "viewTarget",
"xchannelselector": "xChannelSelector",
"ychannelselector": "yChannelSelector",
"zoomandpan": "zoomAndPan"
}
for originalName in list(token["data"].keys()):
if originalName in replacements:
svgName = replacements[originalName]
token["data"][svgName] = token["data"][originalName]
del token["data"][originalName]
def adjustForeignAttributes(self, token):
replacements = adjustForeignAttributesMap
for originalName in token["data"].keys():
if originalName in replacements:
foreignName = replacements[originalName]
token["data"][foreignName] = token["data"][originalName]
del token["data"][originalName]
def reparseTokenNormal(self, token):
self.parser.phase()
def resetInsertionMode(self):
# The name of this method is mostly historical. (It's also used in the
# specification.)
last = False
newModes = {
"select": "inSelect",
"td": "inCell",
"th": "inCell",
"tr": "inRow",
"tbody": "inTableBody",
"thead": "inTableBody",
"tfoot": "inTableBody",
"caption": "inCaption",
"colgroup": "inColumnGroup",
"table": "inTable",
"head": "inBody",
"body": "inBody",
"frameset": "inFrameset",
"html": "beforeHead"
}
for node in self.tree.openElements[::-1]:
nodeName = node.name
new_phase = None
if node == self.tree.openElements[0]:
assert self.innerHTML
last = True
nodeName = self.innerHTML
# Check for conditions that should only happen in the innerHTML
# case
if nodeName in ("select", "colgroup", "head", "html"):
assert self.innerHTML
if not last and node.namespace != self.tree.defaultNamespace:
continue
if nodeName in newModes:
new_phase = self.phases[newModes[nodeName]]
break
elif last:
new_phase = self.phases["inBody"]
break
self.phase = new_phase
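    # Example walk (assumed stack, innermost last): with open elements
    # [html, body, table, tbody, tr] the loop stops at "tr" and selects the
    # "inRow" phase; for a fragment parse whose container is "td", the
    # innerHTML branch selects "inCell".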
def parseRCDataRawtext(self, token, contentType):
"""Generic RCDATA/RAWTEXT Parsing algorithm
contentType - RCDATA or RAWTEXT
"""
assert contentType in ("RAWTEXT", "RCDATA")
self.tree.insertElement(token)
if contentType == "RAWTEXT":
self.tokenizer.state = self.tokenizer.rawtextState
else:
self.tokenizer.state = self.tokenizer.rcdataState
self.originalPhase = self.phase
self.phase = self.phases["text"]
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
constants.tokenTypes.items())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
                info = {"type": type_names[token['type']]}
if token['type'] in constants.tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
class Phase(with_metaclass(getMetaclass(debug, log))):
"""Base class for helper object that implements each phase of processing
"""
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
            # For most phases the following is correct. Where it's not, it
            # will be overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
return self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
if not self.parser.firstStartTag and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].items():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
def processSpaceCharacters(self, token):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId is not None or
systemId is not None and systemId != "about:legacy-compat"):
self.parser.parseError("unknown-doctype")
if publicId is None:
publicId = ""
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
if (not correct or token["name"] != "html"
or publicId.startswith(
("+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//"))
or publicId in
("-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html")
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is None
or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
self.parser.compatMode = "quirks"
elif (publicId.startswith(
("-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//"))
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is not None):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def anythingElse(self):
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.anythingElse()
return token
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.anythingElse()
return True
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
return True
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
return token
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
return token
def processEndTag(self, token):
if token["name"] not in ("head", "body", "html", "br"):
self.parser.parseError("unexpected-end-tag-before-html",
{"name": token["name"]})
else:
self.insertHtmlElement()
return token
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("head", "body", "html", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
return True
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
("script", self.startTagScript),
(("base", "basefont", "bgsound", "command", "link"),
self.startTagBaseLinkCommand),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# the real thing
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif ("content" in attributes and
"http-equiv" in attributes and
attributes["http-equiv"].lower() == "content-type"):
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string, and just use the
# ContentAttrParser on that, but using UTF-8 allows all chars
                    # to be encoded and, as an ASCII superset, works.
data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
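        # Illustrative consequence (assumes a stream whose encoding is still
        # "tentative"): encountering <meta charset="iso-8859-2"> calls
        # changeEncoding, which may restart parsing of the input under the
        # newly declared encoding.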
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoScriptNoFramesStyle(self, token):
# Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s" % node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
# XXX If we implement a parser for which scripting is disabled we need to
# implement this phase.
#
# class InHeadNoScriptPhase(Phase):
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
"style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
# Keep a ref to this for special handling of whitespace in <pre>
self.processSpaceCharactersNonPre = self.processSpaceCharacters
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"noframes", "script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "details",
"details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext", self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"), self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
(("noembed", "noframes", "noscript"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("body", self.endTagBody),
("html", self.endTagHtml),
(("address", "article", "aside", "blockquote", "button", "center",
"details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p", self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
if node1.name != node2.name or node1.namespace != node2.namespace:
return False
elif len(node1.attributes) != len(node2.attributes):
return False
else:
attributes1 = sorted(node1.attributes.items())
attributes2 = sorted(node2.attributes.items())
for attr1, attr2 in zip(attributes1, attributes2):
if attr1 != attr2:
return False
return True
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
element = self.tree.openElements[-1]
matchingElements = []
for node in self.tree.activeFormattingElements[::-1]:
if node is Marker:
break
elif self.isMatchingFormattingElement(node, element):
matchingElements.append(node)
assert len(matchingElements) <= 3
if len(matchingElements) == 3:
self.tree.activeFormattingElements.remove(matchingElements[-1])
self.tree.activeFormattingElements.append(element)
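        # This implements the spec's "Noah's Ark" clause: at most three
        # identical formatting elements (same name, namespace and attributes)
        # may sit between the last marker and the end of the active
        # formatting elements list. Illustrative effect (hypothetical input):
        # after parsing "<b><b><b><b>", only the three most recent <b>
        # entries remain in the list.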
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
# Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea")
and not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == "\u0000":
# The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
# This must be bad for performance
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
def processSpaceCharacters(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1
or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
self.parser.framesetOK = False
for attr, value in token["data"].items():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
def startTagPreListing(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError("unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
self.parser.framesetOK = False
stopNamesMap = {"li": ["li"],
"dt": ["dt", "dd"],
"dd": ["dt", "dd"]}
stopNames = stopNamesMap[token["name"]]
for node in reversed(self.tree.openElements):
if node.name in stopNames:
self.parser.phase.processEndTag(
impliedTagToken(node.name, "EndTag"))
break
if (node.nameTuple in specialElements and
node.name not in ("address", "div", "p")):
break
if self.tree.elementInScope("p", variant="button"):
self.parser.phase.processEndTag(
impliedTagToken("p", "EndTag"))
self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagA(self, token):
afeAElement = self.tree.elementInActiveFormattingElements("a")
if afeAElement:
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "a", "endName": "a"})
self.endTagFormatting(impliedTagToken("a"))
if afeAElement in self.tree.openElements:
self.tree.openElements.remove(afeAElement)
if afeAElement in self.tree.activeFormattingElements:
self.tree.activeFormattingElements.remove(afeAElement)
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagNobr(self, token):
self.tree.reconstructActiveFormattingElements()
if self.tree.elementInScope("nobr"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "nobr", "endName": "nobr"})
self.processEndTag(impliedTagToken("nobr"))
# XXX Need tests that trigger the following
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagButton(self, token):
if self.tree.elementInScope("button"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "button", "endName": "button"})
self.processEndTag(impliedTagToken("button"))
return token
else:
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p", variant="button"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagInput(self, token):
framesetOK = self.parser.framesetOK
self.startTagVoidFormatting(token)
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
# input type=hidden doesn't change framesetOK
self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
# No really...
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
self.parser.parseError("deprecated-tag", {"name": "isindex"})
if self.tree.formPointer:
return
form_attrs = {}
if "action" in token["data"]:
form_attrs["action"] = token["data"]["action"]
self.processStartTag(impliedTagToken("form", "StartTag",
attributes=form_attrs))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processStartTag(impliedTagToken("label", "StartTag"))
# XXX Localization ...
if "prompt" in token["data"]:
prompt = token["data"]["prompt"]
else:
prompt = "This is a searchable index. Enter search keywords: "
self.processCharacters(
{"type": tokenTypes["Characters"], "data": prompt})
attributes = token["data"].copy()
if "action" in attributes:
del attributes["action"]
if "prompt" in attributes:
del attributes["prompt"]
attributes["name"] = "isindex"
self.processStartTag(impliedTagToken("input", "StartTag",
attributes=attributes,
selfClosing=
token["selfClosing"]))
self.processEndTag(impliedTagToken("label"))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
def startTagIFrame(self, token):
self.parser.framesetOK = False
self.startTagRawtext(token)
def startTagRawtext(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
def startTagSelect(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
if self.parser.phase in (self.parser.phases["inTable"],
self.parser.phases["inCaption"],
self.parser.phases["inColumnGroup"],
self.parser.phases["inTableBody"],
self.parser.phases["inRow"],
self.parser.phases["inCell"]):
self.parser.phase = self.parser.phases["inSelectInTable"]
else:
self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
self.tree.insertElement(token)
def startTagMath(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustMathMLAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["mathml"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagSvg(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["svg"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
def endTagP(self, token):
if not self.tree.elementInScope("p", variant="button"):
self.startTagCloseP(impliedTagToken("p", "StartTag"))
self.parser.parseError("unexpected-end-tag", {"name": "p"})
self.endTagP(impliedTagToken("p", "EndTag"))
else:
self.tree.generateImpliedEndTags("p")
if self.tree.openElements[-1].name != "p":
self.parser.parseError("unexpected-end-tag", {"name": "p"})
node = self.tree.openElements.pop()
while node.name != "p":
node = self.tree.openElements.pop()
def endTagBody(self, token):
if not self.tree.elementInScope("body"):
self.parser.parseError()
return
elif self.tree.openElements[-1].name != "body":
for node in self.tree.openElements[2:]:
if node.name not in frozenset(("dd", "dt", "li", "optgroup",
"option", "p", "rp", "rt",
"tbody", "td", "tfoot",
"th", "thead", "tr", "body",
"html")):
# Not sure this is the correct name for the parse error
self.parser.parseError(
"expected-one-end-tag-but-got-another",
{"expectedName": "body", "gotName": node.name})
break
self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
# We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
return token
def endTagBlock(self, token):
# Put us back in the right whitespace handling mode
if token["name"] == "pre":
self.processSpaceCharacters = self.processSpaceCharactersNonPre
inScope = self.tree.elementInScope(token["name"])
if inScope:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if inScope:
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagForm(self, token):
node = self.tree.formPointer
self.tree.formPointer = None
if node is None or not self.tree.elementInScope(node):
self.parser.parseError("unexpected-end-tag",
{"name": "form"})
else:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1] != node:
self.parser.parseError("end-tag-too-early-ignored",
{"name": "form"})
self.tree.openElements.remove(node)
def endTagListItem(self, token):
if token["name"] == "li":
variant = "list"
else:
variant = None
if not self.tree.elementInScope(token["name"], variant=variant):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
else:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError(
"end-tag-too-early",
{"name": token["name"]})
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagHeading(self, token):
for item in headingElements:
if self.tree.elementInScope(item):
self.tree.generateImpliedEndTags()
break
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
for item in headingElements:
if self.tree.elementInScope(item):
item = self.tree.openElements.pop()
while item.name not in headingElements:
item = self.tree.openElements.pop()
break
def endTagFormatting(self, token):
"""The much-feared adoption agency algorithm"""
# http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
# XXX Better parseError messages appreciated.
# Step 1
outerLoopCounter = 0
# Step 2
while outerLoopCounter < 8:
# Step 3
outerLoopCounter += 1
# Step 4:
# Let the formatting element be the last element in
# the list of active formatting elements that:
# - is between the end of the list and the last scope
# marker in the list, if any, or the start of the list
# otherwise, and
# - has the same tag name as the token.
formattingElement = self.tree.elementInActiveFormattingElements(
token["name"])
if (not formattingElement or
(formattingElement in self.tree.openElements and
not self.tree.elementInScope(formattingElement.name))):
# If there is no such node, then abort these steps
# and instead act as described in the "any other
# end tag" entry below.
self.endTagOther(token)
return
# Otherwise, if there is such a node, but that node is
# not in the stack of open elements, then this is a
# parse error; remove the element from the list, and
# abort these steps.
elif formattingElement not in self.tree.openElements:
self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
self.tree.activeFormattingElements.remove(formattingElement)
return
# Otherwise, if there is such a node, and that node is
# also in the stack of open elements, but the element
# is not in scope, then this is a parse error; ignore
# the token, and abort these steps.
elif not self.tree.elementInScope(formattingElement.name):
self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
return
# Otherwise, there is a formatting element and that
# element is in the stack and is in scope. If the
# element is not the current node, this is a parse
# error. In any case, proceed with the algorithm as
# written in the following steps.
else:
if formattingElement != self.tree.openElements[-1]:
self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
# Step 5:
# Let the furthest block be the topmost node in the
# stack of open elements that is lower in the stack
# than the formatting element, and is an element in
# the special category. There might not be one.
afeIndex = self.tree.openElements.index(formattingElement)
furthestBlock = None
for element in self.tree.openElements[afeIndex:]:
if element.nameTuple in specialElements:
furthestBlock = element
break
# Step 6:
# If there is no furthest block, then the UA must
# first pop all the nodes from the bottom of the stack
# of open elements, from the current node up to and
# including the formatting element, then remove the
# formatting element from the list of active
# formatting elements, and finally abort these steps.
if furthestBlock is None:
element = self.tree.openElements.pop()
while element != formattingElement:
element = self.tree.openElements.pop()
self.tree.activeFormattingElements.remove(element)
return
# Step 7
commonAncestor = self.tree.openElements[afeIndex - 1]
# Step 8:
# The bookmark is supposed to help us identify where to reinsert
# nodes in step 15. We have to ensure that we reinsert nodes after
# the node before the active formatting element. Note the bookmark
# can move in step 9.7
bookmark = self.tree.activeFormattingElements.index(formattingElement)
# Step 9
lastNode = node = furthestBlock
innerLoopCounter = 0
index = self.tree.openElements.index(node)
while innerLoopCounter < 3:
innerLoopCounter += 1
# Node is element before node in open elements
index -= 1
node = self.tree.openElements[index]
if node not in self.tree.activeFormattingElements:
self.tree.openElements.remove(node)
continue
# Step 9.6
if node == formattingElement:
break
# Step 9.7
if lastNode == furthestBlock:
bookmark = self.tree.activeFormattingElements.index(node) + 1
# Step 9.8
clone = node.cloneNode()
# Replace node with clone
self.tree.activeFormattingElements[
self.tree.activeFormattingElements.index(node)] = clone
self.tree.openElements[
self.tree.openElements.index(node)] = clone
node = clone
# Step 9.9
# Remove lastNode from its parents, if any
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
node.appendChild(lastNode)
# Step 9.10
lastNode = node
# Step 10
                # If commonAncestor is a table, tbody, tfoot, thead, or tr
                # element, foster-parent lastNode instead of appending it
                # directly
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
parent, insertBefore = self.tree.getTableMisnestedNodePosition()
parent.insertBefore(lastNode, insertBefore)
else:
commonAncestor.appendChild(lastNode)
# Step 11
clone = formattingElement.cloneNode()
# Step 12
furthestBlock.reparentChildren(clone)
# Step 13
furthestBlock.appendChild(clone)
# Step 14
self.tree.activeFormattingElements.remove(formattingElement)
self.tree.activeFormattingElements.insert(bookmark, clone)
# Step 15
self.tree.openElements.remove(formattingElement)
self.tree.openElements.insert(
self.tree.openElements.index(furthestBlock) + 1, clone)
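        # Worked example (hypothetical markup, outcome per the spec): for
        # "<b><p>X</b>Y</p>" the </b> end tag makes <b> the formatting
        # element and <p> the furthest block; the algorithm clones <b>
        # around the <p> contents, yielding roughly
        # <b></b><p><b>X</b>Y</p>.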
def endTagAppletMarqueeObject(self, token):
if self.tree.elementInScope(token["name"]):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if self.tree.elementInScope(token["name"]):
element = self.tree.openElements.pop()
while element.name != token["name"]:
element = self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
def endTagOther(self, token):
for node in self.tree.openElements[::-1]:
if node.name == token["name"]:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while self.tree.openElements.pop() != node:
pass
break
else:
if node.nameTuple in specialElements:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
break
class TextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("script", self.endTagScript)])
self.endTagHandler.default = self.endTagOther
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processEOF(self):
self.parser.parseError("expected-named-closing-tag-but-got-eof",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
return True
def startTagOther(self, token):
assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
def endTagScript(self, token):
node = self.tree.openElements.pop()
assert node.name == "script"
self.parser.phase = self.parser.originalPhase
# The rest of this method is all stuff that only happens if
# document.write works
def endTagOther(self, token):
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("caption", self.startTagCaption),
("colgroup", self.startTagColgroup),
("col", self.startTagCol),
(("tbody", "tfoot", "thead"), self.startTagRowGroup),
(("td", "th", "tr"), self.startTagImplyTbody),
("table", self.startTagTable),
(("style", "script"), self.startTagStyleScript),
("input", self.startTagInput),
("form", self.startTagForm)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "tbody", "td",
"tfoot", "th", "thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableContext(self):
# "clear the stack back to a table context"
while self.tree.openElements[-1].name not in ("table", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
# When the current node is <html> it's an innerHTML case
# processing methods
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-table")
else:
assert self.parser.innerHTML
# Stop parsing
def processSpaceCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processSpaceCharacters(token)
def processCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processCharacters(token)
def insertText(self, token):
# If we get here there must be at least one non-whitespace character
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processCharacters(token)
self.tree.insertFromTable = False
def startTagCaption(self, token):
self.clearStackToTableContext()
self.tree.activeFormattingElements.append(Marker)
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCaption"]
def startTagColgroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inColumnGroup"]
def startTagCol(self, token):
self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
return token
def startTagRowGroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inTableBody"]
def startTagImplyTbody(self, token):
self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
return token
def startTagTable(self, token):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "table", "endName": "table"})
self.parser.phase.processEndTag(impliedTagToken("table"))
if not self.parser.innerHTML:
return token
def startTagStyleScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagInput(self, token):
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
self.parser.parseError("unexpected-hidden-input-in-table")
self.tree.insertElement(token)
# XXX associate with form
self.tree.openElements.pop()
else:
self.startTagOther(token)
def startTagForm(self, token):
self.parser.parseError("unexpected-form-in-table")
if self.tree.formPointer is None:
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
self.tree.openElements.pop()
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processStartTag(token)
self.tree.insertFromTable = False
def endTagTable(self, token):
if self.tree.elementInScope("table", variant="table"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "table":
self.parser.parseError("end-tag-too-early-named",
{"gotName": "table",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "table":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processEndTag(token)
self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type": tokenTypes["Characters"], "data": data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
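        # E.g. (per the spec's table text handling): the "x" in
        # "<table>x</table>" contains non-whitespace, so it is
        # foster-parented out of the table via insertText above, while
        # "<table> </table>" keeps the whitespace-only text inside the
        # table.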
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
# pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
class InCaptionPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableElement)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("caption", self.endTagCaption),
("table", self.endTagTable),
(("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableElement(self, token):
self.parser.parseError()
# XXX Have to duplicate logic here to find out if the tag is ignored
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagCaption(self, token):
if not self.ignoreEndTagCaption():
# AT this code is quite similar to endTagTable in "InTable"
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "caption":
self.parser.parseError("expected-one-end-tag-but-got-another",
{"gotName": "caption",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "caption":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inTable"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
self.parser.parseError()
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-column
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("col", self.startTagCol)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("colgroup", self.endTagColgroup),
("col", self.endTagCol)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagColgroup(self):
return self.tree.openElements[-1].name == "html"
def processEOF(self):
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
return
else:
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return True
def processCharacters(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def startTagCol(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def endTagColgroup(self, token):
if self.ignoreEndTagColgroup():
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
else:
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
def endTagCol(self, token):
self.parser.parseError("no-end-tag", {"name": "col"})
def endTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("tr", self.startTagTr),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
return token
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead",
"tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("tr", self.endTagTr),
("table", self.endTagTable),
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
(("body", "caption", "col", "colgroup", "html", "td", "th"),
self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
while self.tree.openElements[-1].name not in ("tr", "html"):
self.parser.parseError("unexpected-implied-end-tag-in-table-row",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
def ignoreEndTagTr(self):
return not self.tree.elementInScope("tr", variant="table")
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTableCell(self, token):
self.clearStackToTableRowContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCell"]
self.tree.activeFormattingElements.append(Marker)
def startTagTableOther(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTr(self, token):
if not self.ignoreEndTagTr():
self.clearStackToTableRowContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTableBody"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# Reprocess the current tag if the tr end tag was not ignored
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagTr(impliedTagToken("tr"))
return token
else:
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-row",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-cell
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("td", "th"), self.endTagTableCell),
(("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
(("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
])
self.endTagHandler.default = self.endTagOther
# helper
def closeCell(self):
if self.tree.elementInScope("td", variant="table"):
self.endTagTableCell(impliedTagToken("td"))
elif self.tree.elementInScope("th", variant="table"):
self.endTagTableCell(impliedTagToken("th"))
# the rest
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableOther(self, token):
if (self.tree.elementInScope("td", variant="table") or
self.tree.elementInScope("th", variant="table")):
self.closeCell()
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagTableCell(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.tree.generateImpliedEndTags(token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-cell-end-tag",
{"name": token["name"]})
while True:
node = self.tree.openElements.pop()
if node.name == token["name"]:
break
else:
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inRow"]
else:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagImply(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.closeCell()
return token
else:
# sometimes innerHTML case
self.parser.parseError()
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("option", self.startTagOption),
("optgroup", self.startTagOptgroup),
("select", self.startTagSelect),
(("input", "keygen", "textarea"), self.startTagInput),
("script", self.startTagScript)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("option", self.endTagOption),
("optgroup", self.endTagOptgroup),
("select", self.endTagSelect)
])
self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-select")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect(impliedTagToken("select"))
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="select"):
self.endTagSelect(impliedTagToken("select"))
return token
else:
assert self.parser.innerHTML
def startTagScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
# </optgroup> implicitly closes <option>
if (self.tree.openElements[-1].name == "option" and
self.tree.openElements[-2].name == "optgroup"):
self.tree.openElements.pop()
# It also closes </optgroup>
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
# But nothing else
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "optgroup"})
def endTagSelect(self, token):
if self.tree.elementInScope("select", variant="select"):
node = self.tree.openElements.pop()
while node.name != "select":
node = self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
class InSelectInTablePhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.startTagTable)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.endTagTable)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inSelect"].processCharacters(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
self.endTagOther(impliedTagToken("select"))
return token
def startTagOther(self, token):
return self.parser.phases["inSelect"].processStartTag(token)
def endTagTable(self, token):
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagOther(impliedTagToken("select"))
return token
def endTagOther(self, token):
return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
"center", "code", "dd", "div", "dl", "dt",
"em", "embed", "h1", "h2", "h3",
"h4", "h5", "h6", "head", "hr", "i", "img",
"li", "listing", "menu", "meta", "nobr",
"ol", "p", "pre", "ruby", "s", "small",
"span", "strong", "strike", "sub", "sup",
"table", "tt", "u", "ul", "var"])
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
def adjustSVGTagNames(self, token):
replacements = {"altglyph": "altGlyph",
"altglyphdef": "altGlyphDef",
"altglyphitem": "altGlyphItem",
"animatecolor": "animateColor",
"animatemotion": "animateMotion",
"animatetransform": "animateTransform",
"clippath": "clipPath",
"feblend": "feBlend",
"fecolormatrix": "feColorMatrix",
"fecomponenttransfer": "feComponentTransfer",
"fecomposite": "feComposite",
"feconvolvematrix": "feConvolveMatrix",
"fediffuselighting": "feDiffuseLighting",
"fedisplacementmap": "feDisplacementMap",
"fedistantlight": "feDistantLight",
"feflood": "feFlood",
"fefunca": "feFuncA",
"fefuncb": "feFuncB",
"fefuncg": "feFuncG",
"fefuncr": "feFuncR",
"fegaussianblur": "feGaussianBlur",
"feimage": "feImage",
"femerge": "feMerge",
"femergenode": "feMergeNode",
"femorphology": "feMorphology",
"feoffset": "feOffset",
"fepointlight": "fePointLight",
"fespecularlighting": "feSpecularLighting",
"fespotlight": "feSpotLight",
"fetile": "feTile",
"feturbulence": "feTurbulence",
"foreignobject": "foreignObject",
"glyphref": "glyphRef",
"lineargradient": "linearGradient",
"radialgradient": "radialGradient",
"textpath": "textPath"}
if token["name"] in replacements:
token["name"] = replacements[token["name"]]
def processCharacters(self, token):
if token["data"] == "\u0000":
token["data"] = "\uFFFD"
elif (self.parser.framesetOK and
any(char not in spaceCharacters for char in token["data"])):
self.parser.framesetOK = False
Phase.processCharacters(self, token)
def processStartTag(self, token):
currentNode = self.tree.openElements[-1]
if (token["name"] in self.breakoutElements or
(token["name"] == "font" and
set(token["data"].keys()) & set(["color", "face", "size"]))):
self.parser.parseError("unexpected-html-element-in-foreign-content",
{"name": token["name"]})
while (self.tree.openElements[-1].namespace !=
self.tree.defaultNamespace and
not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
self.tree.openElements.pop()
return token
else:
if currentNode.namespace == namespaces["mathml"]:
self.parser.adjustMathMLAttributes(token)
elif currentNode.namespace == namespaces["svg"]:
self.adjustSVGTagNames(token)
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = currentNode.namespace
self.tree.insertElement(token)
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def processEndTag(self, token):
nodeIndex = len(self.tree.openElements) - 1
node = self.tree.openElements[-1]
if node.name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while True:
if node.name.translate(asciiUpper2Lower) == token["name"]:
# XXX this isn't in the spec but it seems necessary
if self.parser.phase == self.parser.phases["inTableText"]:
self.parser.phase.flushCharacters()
self.parser.phase = self.parser.phase.originalPhase
while self.tree.openElements.pop() != node:
assert self.tree.openElements
new_token = None
break
nodeIndex -= 1
node = self.tree.openElements[nodeIndex]
if node.namespace != self.tree.defaultNamespace:
continue
else:
new_token = self.parser.phase.processEndTag(token)
break
return new_token
class AfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processComment(self, token):
# This is needed because data is to be appended to the <html> element
# here and not to whatever is currently open.
self.tree.insertComment(token, self.tree.openElements[0])
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-body")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def endTagHtml(self, name):
if self.parser.innerHTML:
self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
else:
self.parser.phase = self.parser.phases["afterAfterBody"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class InFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("frameset", self.startTagFrameset),
("frame", self.startTagFrame),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("frameset", self.endTagFrameset)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-frameset")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.parser.parseError("unexpected-char-in-frameset")
def startTagFrameset(self, token):
self.tree.insertElement(token)
def startTagFrame(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagNoframes(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-frameset",
{"name": token["name"]})
def endTagFrameset(self, token):
if self.tree.openElements[-1].name == "html":
# innerHTML case
self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
else:
self.tree.openElements.pop()
if (not self.parser.innerHTML and
self.tree.openElements[-1].name != "frameset"):
                # If we're not in innerHTML mode and the current node is not a
# "frameset" element (anymore) then switch.
self.parser.phase = self.parser.phases["afterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
class AfterFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#after3
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("html", self.endTagHtml)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-frameset")
def startTagNoframes(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-frameset",
{"name": token["name"]})
def endTagHtml(self, token):
self.parser.phase = self.parser.phases["afterAfterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-frameset",
{"name": token["name"]})
class AfterAfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class AfterAfterFramesetPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoFrames)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagNoFrames(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
# XXX "inHeadNoscript": InHeadNoScriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
# XXX after after frameset
}
def impliedTagToken(name, type="EndTag", attributes=None,
selfClosing=False):
if attributes is None:
attributes = {}
return {"type": tokenTypes[type], "name": name, "data": attributes,
"selfClosing": selfClosing}
class ParseError(Exception):
"""Error in parsed document"""
pass
| mit |
Alwnikrotikz/visvis.dev | core/events.py | 5 | 18145 | # -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
""" Module events
Defines the events and a timer class.
"""
import sys
import time
import traceback
import weakref
class CallableObject:
""" CallableObject(callable)
A class to hold a callable using weak references.
It can distinguish between functions and methods.
"""
def __init__(self, c):
if hasattr(c,'im_func'):
self._func = weakref.ref(c.im_func)
self._ob = weakref.ref(c.im_self)
else:
self._func = weakref.ref(c)
self._ob = None
def isdead(self):
""" Get whether the weak ref is dead.
"""
if self._func() is None or (self._ob and self._ob() is None):
return True
else:
return False
def call(self, *args):
""" Call the callable.
"""
func = self._func()
if self._ob:
return func(self._ob(), *args)
else:
return func(*args)
def compare(self, other):
""" compare this instance with another.
"""
# compare func
if self._func() is not other._func():
return False
# compare object
if self._ob and other._ob and self._ob() is other._ob():
return True
elif self._ob is None and other._ob is None:
return True
else:
return False
def __str__(self):
return self._func().__str__()
class BaseEvent:
""" The BaseEvent is the simplest type of event.
The purpose of the event class is to provide a way to bind/unbind
to events and to fire them. At the same time, it is the place where
    the properties of the event are stored (such as mouse location, key
being pressed, ...).
One can Bind() or Unbind() a callable to the event. When fired, all
    handlers that are bound to this event are called, until the event is
handled (a handler returns True). The handlers are called with the
event object as an argument. The event.owner provides a reference of
what wobject/wibject fired the event.
"""
def __init__(self, owner):
# users should not change type, owner or handlers.
self._owner = weakref.ref(owner)
self._handlers = []
self._modifiers = ()
def Set(self, modifiers=()):
""" Set(modifiers)
Set the event properties before firing it. In the base event
the only property is the modifiers state, a tuple of the
modifier keys currently pressed.
"""
self._modifiers = modifiers
@property
def owner(self):
""" The object that this event belongs to.
"""
return self._owner()
@property
def type(self):
""" The type (__class__) of this event.
"""
return self.__class__
@property
def modifiers(self):
""" The modifier keys active when the event occurs.
"""
return self._modifiers
def Bind(self, func):
""" Bind(func)
Add an eventhandler to this event.
The callback/handler (func) must be a callable. It is called
with one argument: the event instance, which contains the mouse
location for the mouse event and the keycode for the key event.
"""
# check
if not hasattr(func, '__call__'):
raise ValueError('Warning: can only bind callables.')
# make callable object
cnew = CallableObject(func)
# check -> warn
for c in self._handlers:
if cnew.compare(c):
print "Warning: handler %s already present for %s" %(func, self)
return
# add the handler
self._handlers.append( cnew )
def Unbind(self, func=None):
""" Unbind(func=None)
        Unsubscribe a handler. If func is None, all handlers are removed.
"""
if func is None:
self._handlers[:] = []
else:
cref = CallableObject(func)
for c in [c for c in self._handlers]:
# remove if callable matches func or object is destroyed
if c.compare(cref) or c.isdead():
self._handlers.remove( c )
def Fire(self):
""" Fire()
Fire the event, calling all functions that are bound
        to it, until the event is handled (a handler returns True).
"""
# remove dead weakrefs
for c in [c for c in self._handlers]:
if c.isdead():
self._handlers.remove( c )
# get list of callable functions
L = self._handlers
# call event handlers. Call last added first!
handled = False
for func in reversed( L ):
if handled:
break
try:
handled = func.call(self)
except Exception:
# get easier func name
s = str(func)
i = s.find("function")
if i<0:
i = s.find("method")
if i>= 0:
i1 = s.find(" ",i+1)
i2 = s.find(" ",i1+1)
if i1>=0 and i2>=0:
s = s[i1+1:i2]
# get traceback and store
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
# Show traceback
tblist = traceback.extract_tb(tb)
list = traceback.format_list(tblist[2:]) # remove "Fire"
list.extend( traceback.format_exception_only(type, value) )
# print
print "ERROR calling '%s':" % s
tmp = ""
for i in list:
tmp += i
print tmp
class MouseEvent(BaseEvent):
""" MouseEvent(owner)
A MouseEvent is an event for things that happen with the mouse.
"""
def __init__(self, owner):
BaseEvent.__init__(self, owner)
self._x, self._y = 0, 0
self._x2d, self._y2d = 0, 0
self._but = 0
def Set(self, absx, absy, but, modifiers=()):
""" Set(absx, absy, but)
Set the event properties before firing it.
"""
BaseEvent.Set(self, modifiers)
        # Set properties we can always set
self._absx = absx
self._absy = absy
self._but = but
# Init other properties
self._x = absx
self._y = absy
self._x2d = 0
self._y2d = 0
# Try getting more information on the owning object
owner = self._owner()
if owner:
# Can we Set the event at all?
if owner._destroyed:
return
# Determine axes (for Wobjects)
axes = None
if hasattr(owner, 'GetAxes'):
axes = owner.GetAxes()
if not axes:
return
if not hasattr(axes, '_cameras'):
axes = None # For example a legend
if hasattr(owner, 'position'):
# A Wibject: use relative coordinates if not a figure
if owner.parent:
self._x -= owner.position.absLeft
self._y -= owner.position.absTop
elif axes:
# A Wobject: use axes coordinates
self._x -= axes.position.absLeft
self._y -= axes.position.absTop
if axes or hasattr(owner, '_cameras'):
# Also give 2D coordinates
if axes:
cam = axes._cameras['TwoDCamera']
else:
cam = owner._cameras['TwoDCamera']
            if owner.parent: # otherwise screen-to-world cannot be calculated
self._x2d, self._y2d = cam.ScreenToWorld((self._x, self._y))
@property
def absx(self):
""" The absolute x position in screen coordinates when the event
happened.
"""
return self._absx
@property
def absy(self):
""" The absolute y position in screen coordinates when the event
happened.
"""
return self._absy
@property
def x(self):
""" The x position in screen coordinates relative to the owning object
when the event happened. (For Wobjects, relative to the Axes.)
"""
return self._x
@property
def y(self):
""" The y position in screen coordinates relative to the owning object
when the event happened. (For Wobjects, relative to the Axes.)
"""
return self._y
@property
def x2d(self):
""" The x position in 2D world coordinates when the event happened.
This is only valid when the used camera is 2D.
"""
return self._x2d
@property
def y2d(self):
""" The y position in 2D world coordinates when the event happened.
This is only valid when the used camera is 2D.
"""
return self._y2d
@property
def button(self):
""" The The mouse button that was pressed, 0=none, 1=left, 2=right.
"""
return self._but
class KeyEvent(BaseEvent):
""" KeyEvent(owner)
A KeyEvent event is an event for things that happen with the keyboard.
"""
def __init__(self, owner):
BaseEvent.__init__(self, owner)
self._key = 0
self._text = ''
def Set(self, key, text='', modifiers=()):
""" Set(key, text='')
Set the event properties before firing it.
"""
BaseEvent.Set(self, modifiers)
self._key = key
self._text = text
@property
def key(self):
""" The integer keycode of the key.
"""
return self._key
@property
def text(self):
""" The text that the key represents (if available).
"""
return self._text
## Specific events for all objects
# Make classes for each specific event that is standard for all object,
# to help introspection.
class EventMouseDown(MouseEvent):
""" EventMouseDown(owner)
Fired when the mouse is pressed down on this object. (Also
fired the first click of a double click.)
"""
pass
class EventMouseUp(MouseEvent):
""" EventMouseUp(owner)
Fired when the mouse is released after having been clicked down
on this object (even if the mouse is now not over the object). (Also
fired on the first click of a double click.)
"""
pass
class EventDoubleClick(MouseEvent):
""" EventDoubleClick(owner)
Fired when the mouse is double-clicked on this object.
"""
pass
class EventEnter(MouseEvent):
""" EventEnter(owner)
Fired when the mouse enters this object or one of its children.
"""
pass
class EventLeave(MouseEvent):
""" EventLeave(owner)
Fired when the mouse leaves this object (and is also not over any
of it's children).
"""
pass
class EventMotion(MouseEvent):
""" EventMotion(owner)
Fired when the mouse is moved anywhere in the figure.
"""
pass
class EventKeyDown(KeyEvent):
""" EventKeyDown(owner)
Fired when a key is pressed down while the figure is active.
"""
pass
class EventKeyUp(KeyEvent):
""" EventKeyUp(owner)
Fired when a key is released while the figure is active.
"""
pass
## Only for wibjects
class EventPosition(BaseEvent):
""" EventPosition(owner)
Fired when the position (or size) of this wibject changes.
"""
pass
## Processing events + timers
# For callLater function
_callLater_callables = {}
def processVisvisEvents():
""" processVisvisEvents()
Process all visvis events. Checks the status of all timers
and fires the ones that need to be fired. This method
needs to be called every now and then.
All backends implement a timer that periodically calls this function.
To keep a figure responsive while running, periodically call
Figure.DrawNow() or vv.processEvents().
"""
Timer._TestAllTimers()
class Timer(BaseEvent):
""" Timer(owner, interval=1000, oneshot=True)
Timer class. You can bind callbacks to the timer. The timer is
fired when it runs out of time. You can do one-shot runs and
continuous runs.
Setting timer.nolag to True will prevent the timer from falling
    behind. If the previous Fire() was a bit too late, the next Fire
    will take place sooner, so that at an interval of 1000 ms exactly
    3600 events are fired in one hour.
"""
_timers = []
def __init__(self, owner, interval=1000, oneshot=True):
# register
Timer._timers.append( weakref.ref(self) )
# store info being an event
self._owner = weakref.ref(owner)
self._handlers = []
# store Timer specific properties
self.interval = interval
self.oneshot = oneshot
self.nolag = False
self._running = False
self._timestamp = 0
def Start(self, interval=None, oneshot=None):
""" Start(interval=None, oneshot=None)
        Start the timer. If interval and oneshot are not given,
their current values are used.
"""
# set properties?
if interval is not None:
self.interval = interval
if oneshot is not None:
self.oneshot = oneshot
# put on
self._running = True
self._timestamp = time.time() + (self.interval/1000.0)
def Stop(self):
""" Stop()
Stop the timer from running.
"""
self._running = False
def Destroy(self):
""" Destroy()
        Destroy the timer, preventing it from ever firing again.
"""
self.Stop()
tmp = weakref.ref(self)
if tmp in Timer._timers:
Timer._timers.remove(tmp)
@property
def isRunning(self):
""" Get whether the timer is running.
"""
return self._running
@classmethod
def _TestAllTimers(self):
""" Method used to test all timers whether they should be
fired. If so, it fires them.
"""
        # test the callLater callables first
for calltime in _callLater_callables.keys():
if calltime < time.time():
callable, args, kwargs = _callLater_callables.pop(calltime)
callable(*args, **kwargs)
timersToRemove = []
for timerRef in Timer._timers:
timer = timerRef()
# check if timer exists, otherwise remove.
if timer is None:
timersToRemove.append(timerRef)
continue
# is it on?
if not timer._running:
continue
# has the time passed yet?
if time.time() > timer._timestamp:
timer.Fire()
else:
continue
# do we need to stop it?
if timer.oneshot:
timer._running = False
else:
if timer.nolag:
timer._timestamp += (timer.interval/1000.0)
else:
timer._timestamp = time.time() + (timer.interval/1000.0)
# clean up any dead references
for timerRef in timersToRemove:
try:
Timer._timers.remove(timerRef)
except Exception:
pass
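# Hedged usage sketch (illustrative; `fig` stands for any visvis object that
# can own events). Note that processVisvisEvents() must be called regularly
# for timers to fire; the backends normally take care of this.
#
#   def onTimer(event):
#       print 'timer owned by', event.owner
#
#   timer = Timer(fig, interval=500, oneshot=False)
#   timer.Bind(onTimer)
#   timer.Start()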
class App:
""" App()
The App class wraps a GUI backend with a simple interface that is
the same for all backends. It can be used to start the GUI toolkit's
main-loop, or process all pending events.
To obtain an instance of this class, the user should call vv.use().
"""
def Create(self):
""" Create()
Create the native application object. When embedding visvis in an
application, call this method before instantiating the main window.
"""
# Make sure the app exists
self._GetNativeApp()
def ProcessEvents(self):
""" ProcessEvents()
Process all pending GUI events. This should be done regularly
to keep the visualization interactive and to keep the visvis
event system running.
When using IPython or IEP with the right settings, GUI events
will be processed automatically. However, in a running script,
        this is not the case; by regularly calling this method,
the figures will stay responsive.
"""
self._ProcessEvents()
def Run(self):
""" Run()
Enter the native GUI event loop.
"""
self._Run()
    # Implement these methods. By using this redirection scheme, we keep
# the documentation intact.
def _GetNativeApp(self):
        raise NotImplementedError()
def _ProcessEvents(self):
        raise NotImplementedError()
def _Run(self):
        raise NotImplementedError()
| bsd-3-clause |
sharoonthomas/fulfil-python-api | fulfil_client/signals.py | 2 | 1553 | # -*- coding: utf-8 -*-
"""
flask.signals
~~~~~~~~~~~~~
Implements signals based on blinker if available, otherwise
falls silently back to a noop.
:copyright: (c) 2018 Fulfil.IO Inc.
The blinker fallback code is inspired by Armin's implementation
on Flask.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
signals_available = False
try:
from blinker import Namespace
signals_available = True
except ImportError:
class Namespace(object):
def signal(self, name, doc=None):
return _FakeSignal(name, doc)
class _FakeSignal(object):
"""If blinker is unavailable, create a fake class with the same
interface that allows sending of signals but will fail with an
        error on anything else. Instead of doing anything on send, it
        will just ignore the arguments and do nothing.
"""
def __init__(self, name, doc=None):
self.name = name
self.__doc__ = doc
def _fail(self, *args, **kwargs):
raise RuntimeError('signalling support is unavailable '
'because the blinker library is '
'not installed.')
send = lambda *a, **kw: None
connect = disconnect = has_receivers_for = receivers_for = \
temporarily_connected_to = connected_to = _fail
del _fail
# Namespace for signals
_signals = Namespace()
response_received = _signals.signal('response-received')
| isc |
tximikel/kuma | vendor/packages/logilab/common/deprecation.py | 92 | 7417 | # copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Deprecation utilities."""
__docformat__ = "restructuredtext en"
import sys
from warnings import warn
from logilab.common.changelog import Version
class DeprecationWrapper(object):
"""proxy to print a warning on access to any attribute of the wrapped object
"""
def __init__(self, proxied, msg=None):
self._proxied = proxied
self._msg = msg
def __getattr__(self, attr):
warn(self._msg, DeprecationWarning, stacklevel=2)
return getattr(self._proxied, attr)
def __setattr__(self, attr, value):
if attr in ('_proxied', '_msg'):
self.__dict__[attr] = value
else:
warn(self._msg, DeprecationWarning, stacklevel=2)
setattr(self._proxied, attr, value)
class DeprecationManager(object):
"""Manage the deprecation message handling. Messages are dropped for
versions more recent than the 'compatible' version. Example::
deprecator = deprecation.DeprecationManager("module_name")
deprecator.compatibility('1.3')
deprecator.warn('1.2', "message.")
@deprecator.deprecated('1.2', 'Message')
def any_func():
pass
class AnyClass(object):
__metaclass__ = deprecator.class_deprecated('1.2')
"""
def __init__(self, module_name=None):
"""
"""
self.module_name = module_name
self.compatible_version = None
def compatibility(self, compatible_version):
"""Set the compatible version.
"""
self.compatible_version = Version(compatible_version)
def deprecated(self, version=None, reason=None, stacklevel=2, name=None, doc=None):
"""Display a deprecation message only if the version is older than the
compatible version.
"""
def decorator(func):
message = reason or 'The function "%s" is deprecated'
if '%s' in message:
message %= func.__name__
def wrapped(*args, **kwargs):
self.warn(version, message, stacklevel+1)
return func(*args, **kwargs)
return wrapped
return decorator
def class_deprecated(self, version=None):
class metaclass(type):
"""metaclass to print a warning on instantiation of a deprecated class"""
def __call__(cls, *args, **kwargs):
msg = getattr(cls, "__deprecation_warning__",
"%(cls)s is deprecated") % {'cls': cls.__name__}
self.warn(version, msg, stacklevel=3)
return type.__call__(cls, *args, **kwargs)
return metaclass
def moved(self, version, modpath, objname):
"""use to tell that a callable has been moved to a new module.
It returns a callable wrapper, so that when its called a warning is printed
telling where the object can be found, import is done (and not before) and
the actual object is called.
NOTE: the usage is somewhat limited on classes since it will fail if the
        wrapper is used in a class's ancestors list; use the `class_moved` function
instead (which has no lazy import feature though).
"""
def callnew(*args, **kwargs):
from logilab.common.modutils import load_module_from_name
message = "object %s has been moved to module %s" % (objname, modpath)
self.warn(version, message)
m = load_module_from_name(modpath)
return getattr(m, objname)(*args, **kwargs)
return callnew
def class_renamed(self, version, old_name, new_class, message=None):
clsdict = {}
if message is None:
message = '%s is deprecated, use %s' % (old_name, new_class.__name__)
clsdict['__deprecation_warning__'] = message
try:
# new-style class
return self.class_deprecated(version)(old_name, (new_class,), clsdict)
except (NameError, TypeError):
# old-style class
warn = self.warn
class DeprecatedClass(new_class):
"""FIXME: There might be a better way to handle old/new-style class
"""
def __init__(self, *args, **kwargs):
warn(version, message, stacklevel=3)
new_class.__init__(self, *args, **kwargs)
return DeprecatedClass
def class_moved(self, version, new_class, old_name=None, message=None):
"""nice wrapper around class_renamed when a class has been moved into
another module
"""
if old_name is None:
old_name = new_class.__name__
if message is None:
message = 'class %s is now available as %s.%s' % (
old_name, new_class.__module__, new_class.__name__)
return self.class_renamed(version, old_name, new_class, message)
def warn(self, version=None, reason="", stacklevel=2):
"""Display a deprecation message only if the version is older than the
compatible version.
"""
if (self.compatible_version is None
or version is None
or Version(version) < self.compatible_version):
if self.module_name and version:
reason = '[%s %s] %s' % (self.module_name, version, reason)
elif self.module_name:
reason = '[%s] %s' % (self.module_name, reason)
elif version:
reason = '[%s] %s' % (version, reason)
warn(reason, DeprecationWarning, stacklevel=stacklevel)
_defaultdeprecator = DeprecationManager()
def deprecated(reason=None, stacklevel=2, name=None, doc=None):
return _defaultdeprecator.deprecated(None, reason, stacklevel, name, doc)
class_deprecated = _defaultdeprecator.class_deprecated()
def moved(modpath, objname):
return _defaultdeprecator.moved(None, modpath, objname)
moved.__doc__ = _defaultdeprecator.moved.__doc__
def class_renamed(old_name, new_class, message=None):
"""automatically creates a class which fires a DeprecationWarning
when instantiated.
>>> Set = class_renamed('Set', set, 'Set is now replaced by set')
>>> s = Set()
sample.py:57: DeprecationWarning: Set is now replaced by set
s = Set()
>>>
"""
return _defaultdeprecator.class_renamed(None, old_name, new_class, message)
def class_moved(new_class, old_name=None, message=None):
return _defaultdeprecator.class_moved(None, new_class, old_name, message)
class_moved.__doc__ = _defaultdeprecator.class_moved.__doc__
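# Hedged usage sketch (illustrative): the module-level helpers delegate to a
# default DeprecationManager that does no version filtering.
#
#   @deprecated('use new_func() instead')
#   def old_func():
#       pass
#
#   old_func()  # emits a DeprecationWarning pointing at the caller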
| mpl-2.0 |
sbalun/namebench | nb_third_party/dns/tsig.py | 215 | 7851 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS TSIG support."""
import hmac
import struct
import dns.exception
import dns.rdataclass
import dns.name
class BadTime(dns.exception.DNSException):
"""Raised if the current time is not within the TSIG's validity time."""
pass
class BadSignature(dns.exception.DNSException):
"""Raised if the TSIG signature fails to verify."""
pass
class PeerError(dns.exception.DNSException):
"""Base class for all TSIG errors generated by the remote peer"""
pass
class PeerBadKey(PeerError):
"""Raised if the peer didn't know the key we used"""
pass
class PeerBadSignature(PeerError):
"""Raised if the peer didn't like the signature we sent"""
pass
class PeerBadTime(PeerError):
"""Raised if the peer didn't like the time we sent"""
pass
class PeerBadTruncation(PeerError):
"""Raised if the peer didn't like amount of truncation in the TSIG we sent"""
pass
default_algorithm = "HMAC-MD5.SIG-ALG.REG.INT"
BADSIG = 16
BADKEY = 17
BADTIME = 18
BADTRUNC = 22
def sign(wire, keyname, secret, time, fudge, original_id, error,
other_data, request_mac, ctx=None, multi=False, first=True,
algorithm=default_algorithm):
"""Return a (tsig_rdata, mac, ctx) tuple containing the HMAC TSIG rdata
for the input parameters, the HMAC MAC calculated by applying the
TSIG signature algorithm, and the TSIG digest context.
@rtype: (string, string, hmac.HMAC object)
@raises ValueError: I{other_data} is too long
@raises NotImplementedError: I{algorithm} is not supported
"""
(algorithm_name, digestmod) = get_algorithm(algorithm)
if first:
ctx = hmac.new(secret, digestmod=digestmod)
ml = len(request_mac)
if ml > 0:
ctx.update(struct.pack('!H', ml))
ctx.update(request_mac)
id = struct.pack('!H', original_id)
ctx.update(id)
ctx.update(wire[2:])
if first:
ctx.update(keyname.to_digestable())
ctx.update(struct.pack('!H', dns.rdataclass.ANY))
ctx.update(struct.pack('!I', 0))
long_time = time + 0L
upper_time = (long_time >> 32) & 0xffffL
lower_time = long_time & 0xffffffffL
time_mac = struct.pack('!HIH', upper_time, lower_time, fudge)
pre_mac = algorithm_name + time_mac
ol = len(other_data)
if ol > 65535:
raise ValueError('TSIG Other Data is > 65535 bytes')
post_mac = struct.pack('!HH', error, ol) + other_data
if first:
ctx.update(pre_mac)
ctx.update(post_mac)
else:
ctx.update(time_mac)
mac = ctx.digest()
mpack = struct.pack('!H', len(mac))
tsig_rdata = pre_mac + mpack + mac + id + post_mac
if multi:
ctx = hmac.new(secret)
ml = len(mac)
ctx.update(struct.pack('!H', ml))
ctx.update(mac)
else:
ctx = None
return (tsig_rdata, mac, ctx)
def hmac_md5(wire, keyname, secret, time, fudge, original_id, error,
other_data, request_mac, ctx=None, multi=False, first=True,
algorithm=default_algorithm):
return sign(wire, keyname, secret, time, fudge, original_id, error,
other_data, request_mac, ctx, multi, first, algorithm)
def validate(wire, keyname, secret, now, request_mac, tsig_start, tsig_rdata,
tsig_rdlen, ctx=None, multi=False, first=True):
"""Validate the specified TSIG rdata against the other input parameters.
@raises FormError: The TSIG is badly formed.
@raises BadTime: There is too much time skew between the client and the
server.
@raises BadSignature: The TSIG signature did not validate
@rtype: hmac.HMAC object"""
(adcount,) = struct.unpack("!H", wire[10:12])
if adcount == 0:
raise dns.exception.FormError
adcount -= 1
new_wire = wire[0:10] + struct.pack("!H", adcount) + wire[12:tsig_start]
current = tsig_rdata
(aname, used) = dns.name.from_wire(wire, current)
current = current + used
(upper_time, lower_time, fudge, mac_size) = \
struct.unpack("!HIHH", wire[current:current + 10])
time = ((upper_time + 0L) << 32) + (lower_time + 0L)
current += 10
mac = wire[current:current + mac_size]
current += mac_size
(original_id, error, other_size) = \
struct.unpack("!HHH", wire[current:current + 6])
current += 6
other_data = wire[current:current + other_size]
current += other_size
if current != tsig_rdata + tsig_rdlen:
raise dns.exception.FormError
if error != 0:
if error == BADSIG:
raise PeerBadSignature
elif error == BADKEY:
raise PeerBadKey
elif error == BADTIME:
raise PeerBadTime
elif error == BADTRUNC:
raise PeerBadTruncation
else:
raise PeerError('unknown TSIG error code %d' % error)
time_low = time - fudge
time_high = time + fudge
if now < time_low or now > time_high:
raise BadTime
(junk, our_mac, ctx) = sign(new_wire, keyname, secret, time, fudge,
original_id, error, other_data,
request_mac, ctx, multi, first, aname)
if (our_mac != mac):
raise BadSignature
return ctx
def get_algorithm(algorithm):
"""Returns the wire format string and the hash module to use for the
specified TSIG algorithm
@rtype: (string, hash constructor)
@raises NotImplementedError: I{algorithm} is not supported
"""
hashes = {}
try:
import hashlib
hashes[dns.name.from_text('hmac-sha224')] = hashlib.sha224
hashes[dns.name.from_text('hmac-sha256')] = hashlib.sha256
hashes[dns.name.from_text('hmac-sha384')] = hashlib.sha384
hashes[dns.name.from_text('hmac-sha512')] = hashlib.sha512
hashes[dns.name.from_text('hmac-sha1')] = hashlib.sha1
hashes[dns.name.from_text('HMAC-MD5.SIG-ALG.REG.INT')] = hashlib.md5
import sys
if sys.hexversion < 0x02050000:
# hashlib doesn't conform to PEP 247: API for
# Cryptographic Hash Functions, which hmac before python
# 2.5 requires, so add the necessary items.
class HashlibWrapper:
def __init__(self, basehash):
self.basehash = basehash
self.digest_size = self.basehash().digest_size
def new(self, *args, **kwargs):
return self.basehash(*args, **kwargs)
for name in hashes:
hashes[name] = HashlibWrapper(hashes[name])
except ImportError:
import md5, sha
hashes[dns.name.from_text('HMAC-MD5.SIG-ALG.REG.INT')] = md5.md5
hashes[dns.name.from_text('hmac-sha1')] = sha.sha
if isinstance(algorithm, (str, unicode)):
algorithm = dns.name.from_text(algorithm)
if algorithm in hashes:
return (algorithm.to_digestable(), hashes[algorithm])
raise NotImplementedError("TSIG algorithm " + str(algorithm) +
" is not supported")
| apache-2.0 |
adamchainz/ansible | lib/ansible/modules/network/dellos10/dellos10_command.py | 46 | 7522 | #!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <[email protected]>
#
# Copyright (c) 2017 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos10_command
version_added: "2.2"
author: "Senthil Kumar Ganesan (@skg-net)"
short_description: Run commands on remote devices running Dell OS10
description:
- Sends arbitrary commands to a Dell OS10 node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(dellos10_config) to configure Dell OS10 devices.
extends_documentation_fragment: dellos10
options:
commands:
description:
- List of commands to send to the remote dellos10 device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of I(retries), the task fails.
See examples.
required: false
default: null
retries:
description:
      - Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
tasks:
- name: run show version on remote devices
dellos10_command:
commands: show version
provider: "{{ cli }}"
- name: run show version and check to see if output contains OS10
dellos10_command:
commands: show version
wait_for: result[0] contains OS10
provider: "{{ cli }}"
- name: run multiple commands on remote nodes
dellos10_command:
commands:
- show version
- show interface
provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
dellos10_command:
commands:
- show version
- show interface
wait_for:
- result[0] contains OS10
- result[1] contains Ethernet
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.dellos10 import run_commands
from ansible.module_utils.dellos10 import dellos10_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.netcli import Conditional
def to_lines(stdout):
for item in stdout:
if isinstance(item, basestring):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
elif item['command'].startswith('conf'):
module.fail_json(
msg='dellos10_command does not support running config mode '
'commands. Please use dellos10_config instead'
)
return commands
def main():
"""main entry point for module execution
"""
argument_spec = dict(
# { command: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(dellos10_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result = {
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
}
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
crdrost/fm-demo | accumulators.py | 1 | 3054 | import itertools
'''
An `accumulator` adds some push-based stream semantics to Python iterators,
which are pull-based (they do stuff when you ask for values, not when data is
available). To do this we define an accumulator as a function from iterators to
iterators, where the output iterator will make exactly one request to the input
iterator before yielding. This allows us to run multiple accumulators on one
input stream without having to buffer the whole list in memory, which is the
`batch` function below: all of the accumulators are run one step, then the cycle
continues again. It also lets us filter the calls to one accumulator based on a
predicate function while still ensuring that it does not get ahead of the batch,
this is the `prefilter` function. Finally `premap` takes an accumulator and
returns another accumulator which transforms the input stream by mapping it
before acting on it.
'''
def batch(*accs):
'''Batch together all of the accumulators `*accs` so that they all act on a
single iterator.'''
def out(iterator):
copies = itertools.tee(iterator, len(accs) + 1)
result_streams = tuple(a(c) for (a, c) in itertools.izip(accs, copies))
for i in copies[-1]:
yield tuple(stream.next() for stream in result_streams)
return out
def prefilter(predicate, acc):
'''Filter the input to an accumulator based on whether it satisfies the
predicate.'''
def out(iterator):
(i1, i2) = itertools.tee(iterator, 2)
output = acc(itertools.ifilter(predicate, i2))
last = None
for i in i1:
# There's a way to write this without duplicating the check but this
# just looks so much cleaner.
if predicate(i):
last = output.next()
yield last
return out
def premap(fn, acc):
'''Map the inputs to this accumulator with the given function before applying
it to them.'''
return lambda iterator: acc(itertools.imap(fn, iterator))
def count(iterator):
'''Count the number of items in the iterator.'''
c = 0
for item in iterator:
c += 1
yield c
def unique(iterator):
'''Count the number of unique items in the iterator.'''
s = set()
for item in iterator:
s.add(item)
yield len(s)
def grouped_count(iterator):
'''Create a dictionary of the counts of how many times each value is seen in the iterator.'''
d = dict()
for item in iterator:
d[item] = d.get(item, 0) + 1
yield d
def scanning(fn):
'''Scan across the iterator with the given function. The output stream will
    be `b[0] = a[0]`, `b[1] = fn(b[0], a[1])`, `b[2] = fn(b[1], a[2])`, and so
forth. In other words, this is `scanl1` from Haskell.'''
def out(iterator):
is_set = False
value = None
for item in iterator:
if not is_set:
value = item
is_set = True
else:
value = fn(value, item)
yield value
return out
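# Hedged usage sketch (not part of the original module; the data and lambdas
# are illustrative). It shows how `batch`, `prefilter` and `premap` compose
# over a single input stream without buffering it:
def _example_usage():
    data = iter([1, 2, 3, 4, 5])
    combined = batch(
        count,                                   # running count of all items
        prefilter(lambda x: x % 2 == 0, count),  # running count of even items
        premap(lambda x: x * x, scanning(lambda a, b: a + b)),  # sum of squares
    )
    for step in combined(data):
        print step
    # prints (1, None, 1), (2, 1, 5), (3, 1, 14), (4, 2, 30), (5, 2, 55)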
| mpl-2.0 |
vied12/superdesk | server/superdesk/commands/rebuild_elastic_index.py | 1 | 1856 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from superdesk.utils import get_random_string
from elasticsearch.helpers import reindex
from eve_elastic import get_es, get_indices
import elasticsearch
import superdesk
class RebuildElasticIndex(superdesk.Command):
"""
    Rebuild the elastic index from existing data: create a new index, put the
    new mapping, reindex the data into it, delete the old index and alias the
    new index under the configured name.
"""
def run(self):
index_name = superdesk.app.config['ELASTICSEARCH_INDEX']
print('Starting index rebuilding for index: ', index_name)
try:
es = get_es(superdesk.app.config['ELASTICSEARCH_URL'])
clone_name = index_name + '-' + get_random_string()
print('Creating index: ', clone_name)
get_indices(es).create(clone_name)
print('Putting mapping for index: ', clone_name)
superdesk.app.data.elastic.put_mapping(superdesk.app, clone_name)
print('Starting index rebuilding.')
reindex(es, index_name, clone_name)
print('Finished index rebuilding.')
print('Deleting index: ', index_name)
get_indices(es).delete(index_name)
print('Creating alias: ', index_name)
get_indices(es).put_alias(index_name, clone_name)
print('Alias created.')
except elasticsearch.exceptions.NotFoundError as nfe:
print(nfe)
print('Index {0} rebuilt successfully.'.format(index_name))
superdesk.command('app:rebuild_elastic_index', RebuildElasticIndex())
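# Hedged usage note (illustrative): the command registered above is meant to
# be invoked through superdesk's command runner, e.g. something along the
# lines of `python manage.py app:rebuild_elastic_index`, depending on setup.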
| agpl-3.0 |
kanagasabapathi/python-for-android | python-modules/twisted/twisted/conch/test/test_openssh_compat.py | 60 | 3381 | # Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.openssh_compat}.
"""
import os
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.python.compat import set
try:
import Crypto.Cipher.DES3
import pyasn1
except ImportError:
OpenSSHFactory = None
else:
from twisted.conch.openssh_compat.factory import OpenSSHFactory
from twisted.conch.test import keydata
from twisted.test.test_process import MockOS
class OpenSSHFactoryTests(TestCase):
"""
Tests for L{OpenSSHFactory}.
"""
if getattr(os, "geteuid", None) is None:
skip = "geteuid/seteuid not available"
elif OpenSSHFactory is None:
skip = "Cannot run without PyCrypto or PyASN1"
def setUp(self):
self.factory = OpenSSHFactory()
self.keysDir = FilePath(self.mktemp())
self.keysDir.makedirs()
self.factory.dataRoot = self.keysDir.path
self.keysDir.child("ssh_host_foo").setContent("foo")
self.keysDir.child("bar_key").setContent("foo")
self.keysDir.child("ssh_host_one_key").setContent(
keydata.privateRSA_openssh)
self.keysDir.child("ssh_host_two_key").setContent(
keydata.privateDSA_openssh)
self.keysDir.child("ssh_host_three_key").setContent(
"not a key content")
self.keysDir.child("ssh_host_one_key.pub").setContent(
keydata.publicRSA_openssh)
self.mockos = MockOS()
self.patch(os, "seteuid", self.mockos.seteuid)
self.patch(os, "setegid", self.mockos.setegid)
def test_getPublicKeys(self):
"""
L{OpenSSHFactory.getPublicKeys} should return the available public keys
in the data directory
"""
keys = self.factory.getPublicKeys()
self.assertEquals(len(keys), 1)
keyTypes = keys.keys()
self.assertEqual(keyTypes, ['ssh-rsa'])
def test_getPrivateKeys(self):
"""
L{OpenSSHFactory.getPrivateKeys} should return the available private
keys in the data directory.
"""
keys = self.factory.getPrivateKeys()
self.assertEquals(len(keys), 2)
keyTypes = keys.keys()
self.assertEqual(set(keyTypes), set(['ssh-rsa', 'ssh-dss']))
self.assertEquals(self.mockos.seteuidCalls, [])
self.assertEquals(self.mockos.setegidCalls, [])
def test_getPrivateKeysAsRoot(self):
"""
L{OpenSSHFactory.getPrivateKeys} should switch to root if the keys
aren't readable by the current user.
"""
keyFile = self.keysDir.child("ssh_host_two_key")
# Fake permission error by changing the mode
keyFile.chmod(0000)
self.addCleanup(keyFile.chmod, 0777)
# And restore the right mode when seteuid is called
savedSeteuid = os.seteuid
def seteuid(euid):
keyFile.chmod(0777)
return savedSeteuid(euid)
self.patch(os, "seteuid", seteuid)
keys = self.factory.getPrivateKeys()
self.assertEquals(len(keys), 2)
keyTypes = keys.keys()
self.assertEqual(set(keyTypes), set(['ssh-rsa', 'ssh-dss']))
self.assertEquals(self.mockos.seteuidCalls, [0, os.geteuid()])
self.assertEquals(self.mockos.setegidCalls, [0, os.getegid()])
| apache-2.0 |
kostyll/usb-flash-network-monitor | server/indexpage.py | 2 | 23167 | import html
from html import *
from copy import deepcopy
import web_face_gen_templatete
from web_face_gen_templatete import render_html
from utils import _
DEBUG = web_face_gen_templatete.DEBUG
class rendered(object):
def __call__(self,func):
def wrapper(class_instance,ctx):
context = deepcopy(class_instance.ctx)
context.update(ctx)
ctx = context
html.context = html.StrContext()
            # Bind the decorated page instance (not this decorator) as `self`
            curried_func = lambda *args: func(class_instance, args[0])
            res = str(render_html(ctx, curried_func))
# print (res)
return res
return wrapper
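# Minimal sketch of how the decorator above is meant to be used (hedged;
# `SomePage` and `hello` are hypothetical names, not part of this module):
#
#     class SomePage(object):
#         def __init__(self):
#             self.ctx = {'user': 'anonymous'}
#
#         @rendered()
#         def hello(self, ctx=None):
#             return H1(ctx['user'])
#
#     SomePage().hello({'user': 'bob'})  # -> rendered HTML string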
class IndexPage(object):
def __init__(self,ctx=None):
if ctx is None:
ctx = {}
self.ctx = ctx
self.pages = {}
@rendered()
def index(self,ctx=None):
        print("ctx=", ctx)
machines_table = "machines_table"
general_serials_table = "general_serials_table"
get_action_name = lambda x: _(" ".join(x.split('_')))
get_button_name = lambda action: action +'_button'
get_action_class = lambda x: "class_"+x
action_remove="machine_remove"
caption_machine_remove = get_action_name(action_remove)
caption_machine_ip=_("Machine IP")
caption_machine_desc=_("Machine description")
caption_machine_actions=_("Actions")
caption_special_serials=_("Special serials")
machines_columns = [
("",dict(key="state",data_checkbox="true",align="center")),
(caption_machine_ip,dict(align="center",key="ip_addr")),
(caption_machine_desc,dict(align="left",key="description")),
# (caption_special_serials,dict(align="left",key="special_serial_numbers")),
(caption_machine_actions,{'align':"center",'key':"actions",'data-formatter':'actions_formatter'}),
# (caption_machine_actions,dict(align="center",href="#",action=[action_remove]))
]
caption_general_serial_number = _("Serial number")
remove_general_serial_action = "remove_general_serial"
add_new_general_serial_action = "add_new_general_serial"
general_serials_columns = [
("",dict(key="state",data_checkbox="true",align="center")),
(caption_general_serial_number,dict(align="left",key="number")),
]
history_columns = [
# (_('Index'),dict(align="center",key="index")),
(_('Date'),dict(align="center",key="date")),
(_('Time'),dict(align="center",key="time")),
(_('Source'),dict(align="center",key="source")),
(_('Action'),dict(align="center",key="action")),
(_('Description'),dict(align="center",key="description")),
]
get_field_name = lambda x: ''.join(x.split(' ')).lower()
with DIV.container_fluid as out:
with DIV.row_fluid:
with DIV(id_="notify"):
pass
with UL.nav.nav_tabs:
with LI:
A(_("Machines"),class_="active",href="#machines",data_toggle="tab")
with LI:
A(_("General serials"),href="#general",data_toggle="tab")
with LI:
A(_("New machine"),href="#new_machine",data_toggle="tab")
with LI:
A(_("History"),href="#history",data_toggle="tab")
with DIV.tab_content:
with DIV(id_="machines").tab_pane.active:
with DIV.row_fluid:
H4(_("Installed machines:"),align="center")
with DIV.row_fluid:
with DIV.span12:
with DIV(id_="custom_machines_toolbar"):
BUTTON(_(get_action_name(action_remove)),
type="submit",
class_="btn btn-primary",
id_="remove_machine_button",
data_method="remove",
)
with TABLE(
id_=machines_table,
data_sort_name="sheduled",
data_sort_order="asc",
data_toggle="table",
width="100%",
align="center",
pagination="true",
data_search="true",
# data_show_refresh="true",
data_show_toggle="true",
data_show_columns="true",
data_toolbar="#custom_machines_toolbar",
striped=True,
# data_url='/ip',
data='get_machines()',
):
with THEAD:
with TR:
for column in machines_columns:
TH(
column[0],
data_field=column[1].get('key',None),
data_sortable="true",
data_align=column[1]['align'],
data_checkbox="true" if column[1].get('data_checkbox',None) == "true" else "false",
data_formatter=column[1].get('data-formatter',''),
)
# with TBODY:
# for client in Client.select():
# with TR:
# for column in machines_columns:
# if column[1].has_key("key"):
# TD(getattr(client,column[1]['key']))
# else:
# with TD:
# for action in column[1]['action']:
# A(
# get_action_name(action),
# class_="btn btn-success "+get_action_class(action),
# )
with DIV(id_="general").tab_pane:
with DIV.row_fluid:
H4(_("General registered serial numbers"))
with DIV.row_fluid:
with DIV.span12:
with DIV(id_="custom_general_serials_toolbar"):
with DIV(role="form").form_inline:
BUTTON(_(get_action_name(remove_general_serial_action)),
type="submit",
class_="btn btn-primary",
id_=get_button_name(remove_general_serial_action),
data_method="remove",
)
INPUT(
id_="general_serial_number",
type="text",
                                        placeholder=_("type a new serial number here"),
class_="form-control",
)
BUTTON(
_(get_action_name(add_new_general_serial_action)),
id_=get_button_name(add_new_general_serial_action),
type="submit",
class_="btn btn-primary",
)
with TABLE(
id_=general_serials_table,
data_sort_name="sheduled",
data_sort_order="asc",
data_toggle="table",
width="100%",
align="center",
pagination="true",
data_search="true",
# data_show_refresh="true",
data_show_toggle="true",
data_show_columns="true",
data_toolbar="#custom_general_serials_toolbar",
# striped=True,
data_url='/general'
):
with THEAD:
with TR:
for column in general_serials_columns:
TH(
column[0],
data_field=column[1].get('key',None),
data_sortable="true",
data_align=column[1]['align'],
data_checkbox="true" if column[1].get('data_checkbox',None) == "true" else "false",
)
with DIV(id_="new_machine").tab_pane:
with DIV.row_fluid:
H4(_("Add new machine"),align="center")
with DIV.row_fluid:
with DIV.span4:
with FORM(role="form",action="#"):
with DIV.form_group:
LABEL(_("IP address"),for_="ip_address")
INPUT(
type="text",
id_="machine_ip_address",
placeholder=_("ip address"),
class_="form-control"
)
with DIV.form_group:
LABEL(_("Description"),for_="description")
INPUT(
type="text",
id_="machine_description",
placeholder=_("IP description"),
class_="form-control"
)
A(
_("Add new IP"),
id_="machine_button",
type="submit",
class_="btn btn-primary",
)
with DIV(id_="history").tab_pane:
with DIV.row_fluid:
H4(_("History"),align="center")
with DIV.row_fluid:
with DIV.span10.offset1:
with TABLE(
id="history_table",
data_sort_name="sheduled",
data_sort_order="asc",
data_toggle="table",
width="100%",
align="center",
pagination="true",
data_search="true",
# data_show_refresh="true",
data_show_toggle="true",
data_show_columns="true",
# data_toolbar="#custom_machines_toolbar",
striped=True,
# data_url='/ip',
data='get_history()',
):
with THEAD:
with TR:
for column in history_columns:
TH(
column[0],
data_field=column[1].get('key',None),
data_sortable="true",
data_align=column[1]['align'],
data_checkbox="true" if column[1].get('data_checkbox',None) == "true" else "false",
data_formatter=column[1].get('data-formatter',''),
)
with DIV(id_="edit_machine_modal_form",tabindex="-1", role="dialog",aria_labelledby="edit_machine_modal_form",aria_hidden="true").modal.fade:
with DIV.modal_dialog:
with DIV.modal_content:
with DIV.modal_header:
# List special serial numbers
with TABLE(
id_="edit_machine_table",
data_toggle="table",
):
with THEAD:
with TR:
TH(
_("Serial number"),
data_field="serial_number",
data_align="left",
)
TH(
_("Actions"),
data_field="actions",
data_align="center",
data_formatter='unregister_serial_formatter',
)
with DIV(id_="add_serial_number_for_machine",tabindex="-1", role="dialog",aria_labelledby="add_serial_number_for_machine",aria_hidden="true").modal.fade:
with DIV.modal_dialog:
with DIV.modal_content:
with DIV.modal_header:
H4("Add new serial number")
with FORM(role="form",action="#"):
with DIV.form_group:
LABEL(_("Serial number"),for_="new_registered_serial_number")
INPUT(
type="text",
id_="new_registered_serial_number",
                                id_="new_registered_serial_number",
                                placeholder=_("type the serial number here"),
class_="form-control",
)
A(
_("Register serial number"),
id_="button_register_special_serial_number",
type="submit",
class_="btn btn-primary",
)
return out
def get(self,ctx=None):
ctx_hash = hash(str(ctx))
if self.pages.has_key(ctx_hash):
return self.pages[ctx_hash]
else:
result = self.index(ctx)
self.pages[ctx_hash] = result
return result
# <section id="login">
# <div class="container">
# <div class="row">
# <div class="col-xs-12">
# <div class="form-wrap">
# <h1>Log in with your email account</h1>
# <form role="form" action="javascript:;" method="post" id="login-form" autocomplete="off">
# <div class="form-group">
# <label for="email" class="sr-only">Email</label>
# <input type="email" name="email" id="email" class="form-control" placeholder="[email protected]">
# </div>
# <div class="form-group">
# <label for="key" class="sr-only">Password</label>
# <input type="password" name="key" id="key" class="form-control" placeholder="Password">
# </div>
# <div class="checkbox">
# <span class="character-checkbox" onclick="showPassword()"></span>
# <span class="label">Show password</span>
# </div>
# <input type="submit" id="btn-login" class="btn btn-custom btn-lg btn-block" value="Log in">
# </form>
# <a href="javascript:;" class="forget" data-toggle="modal" data-target=".forget-modal">Forgot your password?</a>
# <hr>
# </div>
# </div> <!-- /.col-xs-12 -->
# </div> <!-- /.row -->
# </div> <!-- /.container -->
# </section>
class COFFEE(html.TAG):
name = "script"
attrs = { 'type': 'text/coffeescript'}
def __init__(self,src):
super(COFFEE,self).__init__('', src=src)
class LoginPage(object):
def get(self):
html.context = html.StrContext()
with HTML5 as out:
with HEAD:
if not DEBUG:
CSS(href='http://maxcdn.bootstrapcdn.com/bootstrap/2.3.2/css/bootstrap.min.css')
# CSS(href='static/custom.css')
JS(src='http://code.jquery.com/jquery-git2.js')
JS(src='http://maxcdn.bootstrapcdn.com/bootstrap/2.3.2/js/bootstrap.min.js')
else:
CSS(href='static/bootstrap.css')
# CSS(href='static/custom.css')
JS(src='static/jquery-git2.js')
JS(src='static/bootstrap.js')
CSS(href="static/login.css")
# JS(src="static/coffee-script.js")
JS(src="static/login.js")
with BODY:
with SECTION(id_="login"):
with DIV.container:
with DIV.row:
# with DIV.center:
with DIV.form_wrap:
H1(_("Login with your email account"))
with FORM(role="form",action="javascript:;",method="post",id_="login-form",autocomplete="off"):
with DIV.form_group:
with LABEL(for_="email").sr_only:
out << _("Email")
INPUT(type="email",name="email",id_="email",class_="form-control",placeholder=_("[email protected]"))
with DIV.form_group:
with LABEL(for_="key").sr_only:
out << _("Password")
INPUT(type="password",name="key",id_="key",class_="form-control",placeholder=_("Password"))
with DIV.checkbox:
with SPAN(onclick="showPassword()").character_checkbox:
pass
with SPAN.label:
out << _("Show password")
INPUT(type="submit",id_="btn-login", class_="btn btn-custom btn-lg btn-block", value=_("Log in"))
# with A(href="javascript:;",):
# out << _("Forgot your password")
return str(out)
return """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Bootstrap, from Twitter</title>
<meta name="description" content="">
<meta name="author" content="">
<!-- Le styles -->
<link href="static/bootstrap.css" rel="stylesheet">
<style type="text/css">
/* Override some defaults */
html, body {
background-color: #eee;
}
body {
padding-top: 40px;
}
.container {
width: 300px;
}
/* The white background content wrapper */
.container > .content {
background-color: #fff;
padding: 20px;
margin: 0 -20px;
-webkit-border-radius: 10px 10px 10px 10px;
-moz-border-radius: 10px 10px 10px 10px;
border-radius: 10px 10px 10px 10px;
-webkit-box-shadow: 0 1px 2px rgba(0,0,0,.15);
-moz-box-shadow: 0 1px 2px rgba(0,0,0,.15);
box-shadow: 0 1px 2px rgba(0,0,0,.15);
}
.login-form {
margin-left: 65px;
}
legend {
margin-right: -50px;
font-weight: bold;
color: #404040;
}
</style>
<script src="static/login.js">
</head>
<body>
<div class="container">
<div class="content">
<div class="row">
<div class="login-form">
<h2>Login</h2>
<form action="">
<fieldset>
<div class="clearfix">
<input name="username" type="text" placeholder="Username">
</div>
<div class="clearfix">
<input name="password" type="password" placeholder="Password">
</div>
<button id="login_button" class="btn primary" type="submit">Sign in</button>
</fieldset>
</form>
</div>
</div>
</div>
</div> <!-- /container -->
</body>
</html>"""
| mit |
jchanvfx/bpNodeGraph | NodeGraphQt/widgets/properties_bin.py | 1 | 6926 | #!/usr/bin/python
from NodeGraphQt import QtWidgets, QtCore, QtGui, QtCompat
from NodeGraphQt.widgets.properties import NodePropWidget
class PropertiesDelegate(QtWidgets.QStyledItemDelegate):
def paint(self, painter, option, index):
"""
Args:
painter (QtGui.QPainter):
option (QtGui.QStyleOptionViewItem):
index (QtCore.QModelIndex):
"""
painter.save()
painter.setRenderHint(QtGui.QPainter.Antialiasing, False)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(option.palette.midlight())
painter.drawRect(option.rect)
if option.state & QtWidgets.QStyle.State_Selected:
bdr_clr = option.palette.highlight().color()
painter.setPen(QtGui.QPen(bdr_clr, 1.5))
else:
bdr_clr = option.palette.alternateBase().color()
painter.setPen(QtGui.QPen(bdr_clr, 1))
painter.setBrush(QtCore.Qt.NoBrush)
painter.drawRect(QtCore.QRect(option.rect.x() + 1,
option.rect.y() + 1,
option.rect.width() - 2,
option.rect.height() - 2))
painter.restore()
class PropertiesList(QtWidgets.QTableWidget):
def __init__(self, parent=None):
super(PropertiesList, self).__init__(parent)
self.setItemDelegate(PropertiesDelegate())
self.setColumnCount(1)
self.setShowGrid(False)
QtCompat.QHeaderView.setSectionResizeMode(
self.verticalHeader(), QtWidgets.QHeaderView.ResizeToContents)
self.verticalHeader().hide()
QtCompat.QHeaderView.setSectionResizeMode(
self.horizontalHeader(), 0, QtWidgets.QHeaderView.Stretch)
self.horizontalHeader().hide()
class PropertiesBinWidget(QtWidgets.QWidget):
#: Signal emitted (node_id, prop_name, prop_value)
property_changed = QtCore.Signal(str, str, object)
def __init__(self, parent=None):
super(PropertiesBinWidget, self).__init__(parent)
self.setWindowTitle('Properties Bin')
self._prop_list = PropertiesList()
self._limit = QtWidgets.QSpinBox()
        self._limit.setToolTip('Set the maximum number of nodes to display.')
self._limit.setMaximum(10)
self._limit.setMinimum(0)
self._limit.setValue(10)
self._limit.valueChanged.connect(self.__on_limit_changed)
self.resize(400, 400)
btn_clr = QtWidgets.QPushButton('clear')
btn_clr.setToolTip('Clear the properties bin.')
btn_clr.clicked.connect(self.clear_bin)
top_layout = QtWidgets.QHBoxLayout()
top_layout.addWidget(self._limit)
top_layout.addStretch(1)
top_layout.addWidget(btn_clr)
layout = QtWidgets.QVBoxLayout(self)
layout.addLayout(top_layout)
layout.addWidget(self._prop_list, 1)
def __on_prop_close(self, node_id):
items = self._prop_list.findItems(node_id, QtCore.Qt.MatchExactly)
[self._prop_list.removeRow(i.row()) for i in items]
def __on_limit_changed(self, value):
rows = self._prop_list.rowCount()
if rows > value:
self._prop_list.removeRow(rows - 1)
def limit(self):
"""
Returns the limit for how many nodes can be loaded into the bin.
Returns:
int: node limit.
"""
return int(self._limit.value())
def add_node(self, node):
"""
Add node to the properties bin.
Args:
node (NodeGraphQt.Node): node object.
"""
if self.limit() == 0:
return
rows = self._prop_list.rowCount()
if rows >= self.limit():
self._prop_list.removeRow(rows - 1)
itm_find = self._prop_list.findItems(node.id, QtCore.Qt.MatchExactly)
if itm_find:
self._prop_list.removeRow(itm_find[0].row())
self._prop_list.insertRow(0)
prop_widget = NodePropWidget(node=node)
prop_widget.property_changed.connect(self.property_changed.emit)
prop_widget.property_closed.connect(self.__on_prop_close)
self._prop_list.setCellWidget(0, 0, prop_widget)
item = QtWidgets.QTableWidgetItem(node.id)
self._prop_list.setItem(0, 0, item)
self._prop_list.selectRow(0)
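    # Illustrative behaviour of add_node (hedged; not part of the original
    # widget): with limit() == 2, adding nodes A, B, then C evicts the oldest
    # row, and re-adding an already listed node moves it back to row 0.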
def remove_node(self, node):
"""
Remove node from the properties bin.
Args:
node (NodeGraphQt.Node): node object.
"""
self.__on_prop_close(node.id)
def clear_bin(self):
"""
Clear the properties bin.
"""
self._prop_list.setRowCount(0)
def prop_widget(self, node):
"""
Returns the node property widget.
Args:
node (NodeGraphQt.Node): node object.
Returns:
NodePropWidget: node property widget.
"""
itm_find = self._prop_list.findItems(node.id, QtCore.Qt.MatchExactly)
if itm_find:
item = itm_find[0]
return self._prop_list.cellWidget(item.row(), 0)
if __name__ == '__main__':
import sys
from NodeGraphQt import Node, NodeGraph
from NodeGraphQt.constants import (NODE_PROP_QLABEL,
NODE_PROP_QLINEEDIT,
NODE_PROP_QCOMBO,
NODE_PROP_QSPINBOX,
NODE_PROP_COLORPICKER,
NODE_PROP_SLIDER)
class TestNode(Node):
NODE_NAME = 'test node'
def __init__(self):
super(TestNode, self).__init__()
self.create_property('label_test', 'foo bar',
widget_type=NODE_PROP_QLABEL)
self.create_property('text_edit', 'hello',
widget_type=NODE_PROP_QLINEEDIT)
self.create_property('color_picker', (0, 0, 255),
widget_type=NODE_PROP_COLORPICKER)
self.create_property('integer', 10,
widget_type=NODE_PROP_QSPINBOX)
self.create_property('list', 'foo',
items=['foo', 'bar'],
widget_type=NODE_PROP_QCOMBO)
self.create_property('range', 50,
range=(45, 55),
widget_type=NODE_PROP_SLIDER)
def prop_changed(node_id, prop_name, prop_value):
print('-'*100)
print(node_id, prop_name, prop_value)
app = QtWidgets.QApplication(sys.argv)
graph = NodeGraph()
graph.register_node(TestNode)
prop_bin = PropertiesBinWidget()
prop_bin.property_changed.connect(prop_changed)
node = graph.create_node('nodeGraphQt.nodes.TestNode')
prop_bin.add_node(node)
prop_bin.show()
app.exec_()
| mit |
aabadie/scikit-learn | sklearn/utils/tests/test_testing.py | 24 | 7902 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message,
ignore_warnings)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
try:
from nose.tools import assert_less
def test_assert_less():
        # Check that the nose implementation of assert_less gives the
        # same result as scikit-learn's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same result as scikit-learn's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
def test_ignore_warning():
    # This checks that the ignore_warnings decorator and context manager work
    # as expected
def _warning_function():
warnings.warn("deprecation warning", DeprecationWarning)
def _multiple_warning_function():
warnings.warn("deprecation warning", DeprecationWarning)
warnings.warn("deprecation warning")
# Check the function directly
assert_no_warnings(ignore_warnings(_warning_function))
assert_no_warnings(ignore_warnings(_warning_function,
category=DeprecationWarning))
assert_warns(DeprecationWarning, ignore_warnings(_warning_function,
category=UserWarning))
assert_warns(UserWarning,
ignore_warnings(_multiple_warning_function,
category=DeprecationWarning))
assert_warns(DeprecationWarning,
ignore_warnings(_multiple_warning_function,
category=UserWarning))
assert_no_warnings(ignore_warnings(_warning_function,
category=(DeprecationWarning,
UserWarning)))
# Check the decorator
@ignore_warnings
def decorator_no_warning():
_warning_function()
_multiple_warning_function()
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def decorator_no_warning_multiple():
_multiple_warning_function()
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_warning():
_warning_function()
@ignore_warnings(category=UserWarning)
def decorator_no_user_warning():
_warning_function()
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_multiple_warning():
_multiple_warning_function()
@ignore_warnings(category=UserWarning)
def decorator_no_user_multiple_warning():
_multiple_warning_function()
assert_no_warnings(decorator_no_warning)
assert_no_warnings(decorator_no_warning_multiple)
assert_no_warnings(decorator_no_deprecation_warning)
assert_warns(DeprecationWarning, decorator_no_user_warning)
assert_warns(UserWarning, decorator_no_deprecation_multiple_warning)
assert_warns(DeprecationWarning, decorator_no_user_multiple_warning)
# Check the context manager
def context_manager_no_warning():
with ignore_warnings():
_warning_function()
def context_manager_no_warning_multiple():
with ignore_warnings(category=(DeprecationWarning, UserWarning)):
_multiple_warning_function()
def context_manager_no_deprecation_warning():
with ignore_warnings(category=DeprecationWarning):
_warning_function()
def context_manager_no_user_warning():
with ignore_warnings(category=UserWarning):
_warning_function()
def context_manager_no_deprecation_multiple_warning():
with ignore_warnings(category=DeprecationWarning):
_multiple_warning_function()
def context_manager_no_user_multiple_warning():
with ignore_warnings(category=UserWarning):
_multiple_warning_function()
assert_no_warnings(context_manager_no_warning)
assert_no_warnings(context_manager_no_warning_multiple)
assert_no_warnings(context_manager_no_deprecation_warning)
assert_warns(DeprecationWarning, context_manager_no_user_warning)
assert_warns(UserWarning, context_manager_no_deprecation_multiple_warning)
assert_warns(DeprecationWarning, context_manager_no_user_multiple_warning)
# This class is inspired by numpy 1.7's, with an alteration to check
# that the warning filters are reset after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
msegado/edx-platform | lms/djangoapps/instructor/tests/utils.py | 121 | 2732 | """
Utilities for instructor unit tests
"""
import datetime
import json
import random
from django.utils.timezone import utc
from util.date_utils import get_default_time_display
class FakeInfo(object):
"""Parent class for faking objects used in tests"""
FEATURES = []
def __init__(self):
for feature in self.FEATURES:
setattr(self, feature, u'expected')
def to_dict(self):
""" Returns a dict representation of the object """
return {key: getattr(self, key) for key in self.FEATURES}
class FakeContentTask(FakeInfo):
""" Fake task info needed for email content list """
FEATURES = [
'task_input',
'task_output',
'requester',
]
def __init__(self, email_id, num_sent, num_failed, sent_to):
super(FakeContentTask, self).__init__()
self.task_input = {'email_id': email_id, 'to_option': sent_to}
self.task_input = json.dumps(self.task_input)
self.task_output = {'succeeded': num_sent, 'failed': num_failed}
self.task_output = json.dumps(self.task_output)
self.requester = 'expected'
def make_invalid_input(self):
"""Corrupt the task input field to test errors"""
self.task_input = "THIS IS INVALID JSON"
class FakeEmail(FakeInfo):
""" Corresponding fake email for a fake task """
FEATURES = [
'subject',
'html_message',
'id',
'created',
]
def __init__(self, email_id):
super(FakeEmail, self).__init__()
self.id = unicode(email_id) # pylint: disable=invalid-name
        # Select a random date for the created field
year = random.randint(1950, 2000)
month = random.randint(1, 12)
day = random.randint(1, 28)
hour = random.randint(0, 23)
minute = random.randint(0, 59)
self.created = datetime.datetime(year, month, day, hour, minute, tzinfo=utc)
class FakeEmailInfo(FakeInfo):
""" Fake email information object """
FEATURES = [
u'created',
u'sent_to',
u'email',
u'number_sent',
u'requester',
]
EMAIL_FEATURES = [
u'subject',
u'html_message',
u'id'
]
def __init__(self, fake_email, num_sent, num_failed):
super(FakeEmailInfo, self).__init__()
self.created = get_default_time_display(fake_email.created)
number_sent = str(num_sent) + ' sent'
if num_failed > 0:
number_sent += ', ' + str(num_failed) + " failed"
self.number_sent = number_sent
fake_email_dict = fake_email.to_dict()
self.email = {feature: fake_email_dict[feature] for feature in self.EMAIL_FEATURES}
self.requester = u'expected'
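# Minimal illustrative usage of the fakes above (hedged; the numbers are
# arbitrary):
#
#     email = FakeEmail(7)
#     info = FakeEmailInfo(email, num_sent=10, num_failed=2)
#     info.to_dict()['number_sent']  # -> '10 sent, 2 failed'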
| agpl-3.0 |
ak2703/edx-platform | lms/djangoapps/instructor/features/common.py | 63 | 4609 | """
Define common steps for instructor dashboard acceptance tests.
"""
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from __future__ import absolute_import
from lettuce import world, step
from mock import patch
from nose.tools import assert_in # pylint: disable=no-name-in-module
from courseware.tests.factories import StaffFactory, InstructorFactory
@step(u'Given I am "([^"]*)" for a very large course')
def make_staff_or_instructor_for_large_course(step, role):
make_large_course(step, role)
@patch.dict('courseware.access.settings.FEATURES', {"MAX_ENROLLMENT_INSTR_BUTTONS": 0})
def make_large_course(step, role):
i_am_staff_or_instructor(step, role)
@step(u'Given I am "([^"]*)" for a course')
def i_am_staff_or_instructor(step, role): # pylint: disable=unused-argument
## In summary: makes a test course, makes a new Staff or Instructor user
## (depending on `role`), and logs that user in to the course
    # Validate the requested role
assert_in(role, ['instructor', 'staff'])
# Clear existing courses to avoid conflicts
world.clear_courses()
# Create a new course
course = world.CourseFactory.create(
org='edx',
number='999',
display_name='Test Course'
)
world.course_key = course.id
world.role = 'instructor'
# Log in as the an instructor or staff for the course
if role == 'instructor':
# Make & register an instructor for the course
world.instructor = InstructorFactory(course_key=world.course_key)
world.enroll_user(world.instructor, world.course_key)
world.log_in(
username=world.instructor.username,
password='test',
email=world.instructor.email,
name=world.instructor.profile.name
)
else:
world.role = 'staff'
# Make & register a staff member
world.staff = StaffFactory(course_key=world.course_key)
world.enroll_user(world.staff, world.course_key)
world.log_in(
username=world.staff.username,
password='test',
email=world.staff.email,
name=world.staff.profile.name
)
def go_to_section(section_name):
# section name should be one of
# course_info, membership, student_admin, data_download, analytics, send_email
world.visit(u'/courses/{}'.format(world.course_key))
world.css_click(u'a[href="/courses/{}/instructor"]'.format(world.course_key))
world.css_click('a[data-section="{0}"]'.format(section_name))
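# e.g. go_to_section('data_download') loads the course, opens the instructor
# dashboard, and clicks the Data Download tab (illustrative; valid section
# names are listed in the comment above).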
@step(u'I click "([^"]*)"')
def click_a_button(step, button): # pylint: disable=unused-argument
if button == "Generate Grade Report":
# Go to the data download section of the instructor dash
go_to_section("data_download")
# Click generate grade report button
world.css_click('input[name="calculate-grades-csv"]')
# Expect to see a message that grade report is being generated
expected_msg = "The grade report is being created." \
" To view the status of the report, see" \
" Pending Instructor Tasks below."
world.wait_for_visible('#report-request-response')
assert_in(
expected_msg, world.css_text('#report-request-response'),
msg="Could not find grade report generation success message."
)
elif button == "Grading Configuration":
# Go to the data download section of the instructor dash
go_to_section("data_download")
world.css_click('input[name="dump-gradeconf"]')
elif button == "List enrolled students' profile information":
# Go to the data download section of the instructor dash
go_to_section("data_download")
world.css_click('input[name="list-profiles"]')
elif button == "Download profile information as a CSV":
# Go to the data download section of the instructor dash
go_to_section("data_download")
world.css_click('input[name="list-profiles-csv"]')
else:
raise ValueError("Unrecognized button option " + button)
@step(u'I visit the "([^"]*)" tab')
def visit_a_tab(step, tab_name):  # pylint: disable=unused-argument
# course_info, membership, student_admin, data_download, analytics, send_email
tab_name_dict = {
'Course Info': 'course_info',
'Membership': 'membership',
'Student Admin': 'student_admin',
'Data Download': 'data_download',
'Analytics': 'analytics',
'Email': 'send_email',
}
go_to_section(tab_name_dict[tab_name])
| agpl-3.0 |
matthiasdiener/spack | var/spack/repos/builtin/packages/r-desolve/package.py | 5 | 1817 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RDesolve(RPackage):
"""Functions that solve initial value problems of a system of first-order
ordinary differential equations ('ODE'), of partial differential
equations ('PDE'), of differential algebraic equations ('DAE'), and of
delay differential equations."""
homepage = "https://cran.r-project.org/package=deSolve"
url = "https://cran.r-project.org/src/contrib/deSolve_1.20.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/deSolve"
version('1.20', '85c6a2d8568944ae8eef27ac7c35fb25')
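# Illustrative install command (hedged; standard Spack CLI, not part of the
# package definition):
#
#     spack install r-desolve@1.20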
| lgpl-2.1 |
Khan/agar | lib/unittest2/test/test_functiontestcase.py | 122 | 5570 | import unittest2
from unittest2.test.support import LoggingResult
class Test_FunctionTestCase(unittest2.TestCase):
    # "Return the number of tests represented by this test object. For
# unittest2.TestCase instances, this will always be 1"
def test_countTestCases(self):
test = unittest2.FunctionTestCase(lambda: None)
self.assertEqual(test.countTestCases(), 1)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
raise RuntimeError('raised by setUp')
def test():
events.append('test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'addError', 'stopTest']
unittest2.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
raise RuntimeError('raised by test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'test', 'addError', 'tearDown',
'stopTest']
unittest2.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
self.fail('raised by test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'test', 'addFailure', 'tearDown',
'stopTest']
unittest2.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
def tearDown():
events.append('tearDown')
raise RuntimeError('raised by tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
unittest2.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
test = unittest2.FunctionTestCase(lambda: None)
self.assertIsInstance(test.id(), basestring)
# "Returns a one-line description of the test, or None if no description
# has been provided. The default implementation of this method returns
# the first line of the test method's docstring, if available, or None."
def test_shortDescription__no_docstring(self):
test = unittest2.FunctionTestCase(lambda: None)
self.assertEqual(test.shortDescription(), None)
# "Returns a one-line description of the test, or None if no description
# has been provided. The default implementation of this method returns
# the first line of the test method's docstring, if available, or None."
def test_shortDescription__singleline_docstring(self):
desc = "this tests foo"
test = unittest2.FunctionTestCase(lambda: None, description=desc)
self.assertEqual(test.shortDescription(), "this tests foo")
if __name__ == '__main__':
unittest2.main()
| mit |
pheelee/tinynfogen | libs/enzyme/mkv.py | 163 | 30439 | # -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <[email protected]>
# Copyright 2003-2006 Thomas Schueppel <[email protected]>
# Copyright 2003-2006 Dirk Meyer <[email protected]>
# Copyright 2003-2006 Jason Tackaberry <[email protected]>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from exceptions import ParseError
from struct import unpack
import core
import logging
import re
__all__ = ['Parser']
# get logging object
log = logging.getLogger(__name__)
# Main IDs for the Matroska streams
MATROSKA_VIDEO_TRACK = 0x01
MATROSKA_AUDIO_TRACK = 0x02
MATROSKA_SUBTITLES_TRACK = 0x11
MATROSKA_HEADER_ID = 0x1A45DFA3
MATROSKA_TRACKS_ID = 0x1654AE6B
MATROSKA_CUES_ID = 0x1C53BB6B
MATROSKA_SEGMENT_ID = 0x18538067
MATROSKA_SEGMENT_INFO_ID = 0x1549A966
MATROSKA_CLUSTER_ID = 0x1F43B675
MATROSKA_VOID_ID = 0xEC
MATROSKA_CRC_ID = 0xBF
MATROSKA_TIMECODESCALE_ID = 0x2AD7B1
MATROSKA_DURATION_ID = 0x4489
MATROSKA_CRC32_ID = 0xBF
MATROSKA_TIMECODESCALE_ID = 0x2AD7B1
MATROSKA_MUXING_APP_ID = 0x4D80
MATROSKA_WRITING_APP_ID = 0x5741
MATROSKA_CODEC_ID = 0x86
MATROSKA_CODEC_PRIVATE_ID = 0x63A2
MATROSKA_FRAME_DURATION_ID = 0x23E383
MATROSKA_VIDEO_SETTINGS_ID = 0xE0
MATROSKA_VIDEO_WIDTH_ID = 0xB0
MATROSKA_VIDEO_HEIGHT_ID = 0xBA
MATROSKA_VIDEO_INTERLACED_ID = 0x9A
MATROSKA_VIDEO_DISPLAY_WIDTH_ID = 0x54B0
MATROSKA_VIDEO_DISPLAY_HEIGHT_ID = 0x54BA
MATROSKA_AUDIO_SETTINGS_ID = 0xE1
MATROSKA_AUDIO_SAMPLERATE_ID = 0xB5
MATROSKA_AUDIO_CHANNELS_ID = 0x9F
MATROSKA_TRACK_UID_ID = 0x73C5
MATROSKA_TRACK_NUMBER_ID = 0xD7
MATROSKA_TRACK_TYPE_ID = 0x83
MATROSKA_TRACK_LANGUAGE_ID = 0x22B59C
MATROSKA_TRACK_OFFSET = 0x537F
MATROSKA_TRACK_FLAG_DEFAULT_ID = 0x88
MATROSKA_TRACK_FLAG_ENABLED_ID = 0xB9
MATROSKA_TITLE_ID = 0x7BA9
MATROSKA_DATE_UTC_ID = 0x4461
MATROSKA_NAME_ID = 0x536E
MATROSKA_CHAPTERS_ID = 0x1043A770
MATROSKA_CHAPTER_UID_ID = 0x73C4
MATROSKA_EDITION_ENTRY_ID = 0x45B9
MATROSKA_CHAPTER_ATOM_ID = 0xB6
MATROSKA_CHAPTER_TIME_START_ID = 0x91
MATROSKA_CHAPTER_TIME_END_ID = 0x92
MATROSKA_CHAPTER_FLAG_ENABLED_ID = 0x4598
MATROSKA_CHAPTER_DISPLAY_ID = 0x80
MATROSKA_CHAPTER_LANGUAGE_ID = 0x437C
MATROSKA_CHAPTER_STRING_ID = 0x85
MATROSKA_ATTACHMENTS_ID = 0x1941A469
MATROSKA_ATTACHED_FILE_ID = 0x61A7
MATROSKA_FILE_DESC_ID = 0x467E
MATROSKA_FILE_NAME_ID = 0x466E
MATROSKA_FILE_MIME_TYPE_ID = 0x4660
MATROSKA_FILE_DATA_ID = 0x465C
MATROSKA_SEEKHEAD_ID = 0x114D9B74
MATROSKA_SEEK_ID = 0x4DBB
MATROSKA_SEEKID_ID = 0x53AB
MATROSKA_SEEK_POSITION_ID = 0x53AC
MATROSKA_TAGS_ID = 0x1254C367
MATROSKA_TAG_ID = 0x7373
MATROSKA_TARGETS_ID = 0x63C0
MATROSKA_TARGET_TYPE_VALUE_ID = 0x68CA
MATROSKA_TARGET_TYPE_ID = 0x63CA
MATRSOKA_TAGS_TRACK_UID_ID = 0x63C5
MATRSOKA_TAGS_EDITION_UID_ID = 0x63C9
MATRSOKA_TAGS_CHAPTER_UID_ID = 0x63C4
MATRSOKA_TAGS_ATTACHMENT_UID_ID = 0x63C6
MATROSKA_SIMPLE_TAG_ID = 0x67C8
MATROSKA_TAG_NAME_ID = 0x45A3
MATROSKA_TAG_LANGUAGE_ID = 0x447A
MATROSKA_TAG_STRING_ID = 0x4487
MATROSKA_TAG_BINARY_ID = 0x4485
# See mkv spec for details:
# http://www.matroska.org/technical/specs/index.html
# Map to convert to well known codes
# http://haali.cs.msu.ru/mkv/codecs.pdf
FOURCCMap = {
'V_THEORA': 'THEO',
'V_SNOW': 'SNOW',
'V_MPEG4/ISO/ASP': 'MP4V',
'V_MPEG4/ISO/AVC': 'AVC1',
'A_AC3': 0x2000,
'A_MPEG/L3': 0x0055,
'A_MPEG/L2': 0x0050,
'A_MPEG/L1': 0x0050,
'A_DTS': 0x2001,
'A_PCM/INT/LIT': 0x0001,
'A_PCM/FLOAT/IEEE': 0x003,
'A_TTA1': 0x77a1,
'A_WAVPACK4': 0x5756,
'A_VORBIS': 0x6750,
'A_FLAC': 0xF1AC,
'A_AAC': 0x00ff,
'A_AAC/': 0x00ff
}
def matroska_date_to_datetime(date):
"""
Converts a date in Matroska's date format to a python datetime object.
Returns the given date string if it could not be converted.
"""
# From the specs:
# The fields with dates should have the following format: YYYY-MM-DD
# HH:MM:SS.MSS [...] To store less accuracy, you remove items starting
# from the right. To store only the year, you would use, "2004". To store
# a specific day such as May 1st, 2003, you would use "2003-05-01".
format = re.split(r'([-:. ])', '%Y-%m-%d %H:%M:%S.%f')
while format:
try:
return datetime.strptime(date, ''.join(format))
except ValueError:
format = format[:-2]
return date
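# Worked example (illustrative): matroska_date_to_datetime('2003-05-01')
# fails against '%Y-%m-%d %H:%M:%S.%f', so the format list is trimmed from
# the right until '%Y-%m-%d' matches, yielding datetime(2003, 5, 1, 0, 0).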
def matroska_bps_to_bitrate(bps):
"""
Tries to convert a free-form bps string into a bitrate (bits per second).
"""
    m = re.search(r'([\d.]+)\s*(\D.*)', bps)
if m:
bps, suffix = m.groups()
if 'kbit' in suffix:
return float(bps) * 1024
elif 'kbyte' in suffix:
return float(bps) * 1024 * 8
elif 'byte' in suffix:
return float(bps) * 8
elif 'bps' in suffix or 'bit' in suffix:
return float(bps)
if bps.replace('.', '').isdigit():
if float(bps) < 30000:
# Assume kilobits and convert to bps
return float(bps) * 1024
return float(bps)
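# Worked examples (illustrative): '128 kbit/s' matches the suffix branch and
# returns 128 * 1024 = 131072.0, while a bare '192' has no non-digit suffix,
# falls through to the isdigit branch, is below 30000 and is therefore
# assumed to be kilobits, returning 192 * 1024 = 196608.0.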
# Used to convert the official matroska tag names (only lower-cased) to core
# attributes. tag name -> attr, filter
TAGS_MAP = {
# From Media core
u'title': ('title', None),
u'subtitle': ('caption', None),
u'comment': ('comment', None),
u'url': ('url', None),
u'artist': ('artist', None),
u'keywords': ('keywords', lambda s: [word.strip() for word in s.split(',')]),
u'composer_nationality': ('country', None),
u'date_released': ('datetime', None),
u'date_recorded': ('datetime', None),
u'date_written': ('datetime', None),
# From Video core
u'encoder': ('encoder', None),
u'bps': ('bitrate', matroska_bps_to_bitrate),
u'part_number': ('trackno', int),
u'total_parts': ('trackof', int),
u'copyright': ('copyright', None),
u'genre': ('genre', None),
u'actor': ('actors', None),
u'written_by': ('writer', None),
u'producer': ('producer', None),
u'production_studio': ('studio', None),
u'law_rating': ('rating', None),
u'summary': ('summary', None),
u'synopsis': ('synopsis', None),
}
class EbmlEntity:
"""
This is class that is responsible to handle one Ebml entity as described in
the Matroska/Ebml spec
"""
def __init__(self, inbuf):
# Compute the EBML id
# Set the CRC len to zero
self.crc_len = 0
# Now loop until we find an entity without CRC
try:
self.build_entity(inbuf)
except IndexError:
raise ParseError()
while self.get_id() == MATROSKA_CRC32_ID:
self.crc_len += self.get_total_len()
inbuf = inbuf[self.get_total_len():]
self.build_entity(inbuf)
def build_entity(self, inbuf):
self.compute_id(inbuf)
if self.id_len == 0:
log.error(u'EBML entity not found, bad file format')
raise ParseError()
self.entity_len, self.len_size = self.compute_len(inbuf[self.id_len:])
self.entity_data = inbuf[self.get_header_len() : self.get_total_len()]
self.ebml_length = self.entity_len
self.entity_len = min(len(self.entity_data), self.entity_len)
# if the data size is 8 or less, it could be a numeric value
self.value = 0
if self.entity_len <= 8:
for pos, shift in zip(range(self.entity_len), range((self.entity_len - 1) * 8, -1, -8)):
self.value |= ord(self.entity_data[pos]) << shift
def add_data(self, data):
maxlen = self.ebml_length - len(self.entity_data)
if maxlen <= 0:
return
self.entity_data += data[:maxlen]
self.entity_len = len(self.entity_data)
def compute_id(self, inbuf):
self.id_len = 0
if len(inbuf) < 1:
return 0
first = ord(inbuf[0])
if first & 0x80:
self.id_len = 1
self.entity_id = first
elif first & 0x40:
if len(inbuf) < 2:
return 0
self.id_len = 2
self.entity_id = ord(inbuf[0]) << 8 | ord(inbuf[1])
elif first & 0x20:
if len(inbuf) < 3:
return 0
self.id_len = 3
self.entity_id = (ord(inbuf[0]) << 16) | (ord(inbuf[1]) << 8) | \
(ord(inbuf[2]))
elif first & 0x10:
if len(inbuf) < 4:
return 0
self.id_len = 4
self.entity_id = (ord(inbuf[0]) << 24) | (ord(inbuf[1]) << 16) | \
(ord(inbuf[2]) << 8) | (ord(inbuf[3]))
self.entity_str = inbuf[0:self.id_len]
def compute_len(self, inbuf):
if not inbuf:
return 0, 0
i = num_ffs = 0
len_mask = 0x80
len = ord(inbuf[0])
while not len & len_mask:
i += 1
len_mask >>= 1
if i >= 8:
return 0, 0
len &= len_mask - 1
if len == len_mask - 1:
num_ffs += 1
for p in range(i):
len = (len << 8) | ord(inbuf[p + 1])
if len & 0xff == 0xff:
num_ffs += 1
if num_ffs == i + 1:
len = 0
return len, i + 1
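    # Worked example for the variable-length coding above (illustrative):
    # for input bytes 0x41 0x23 the first byte is 0b01000001, so one shift
    # of len_mask gives i == 1 (a 2-byte field); masking off the marker bit
    # leaves 0x01, and folding in the next byte gives
    # (0x01 << 8) | 0x23 == 0x0123 == 291, returned as (291, 2).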
def get_crc_len(self):
return self.crc_len
def get_value(self):
return self.value
def get_float_value(self):
if len(self.entity_data) == 4:
return unpack('!f', self.entity_data)[0]
elif len(self.entity_data) == 8:
return unpack('!d', self.entity_data)[0]
return 0.0
def get_data(self):
return self.entity_data
def get_utf8(self):
return unicode(self.entity_data, 'utf-8', 'replace')
def get_str(self):
return unicode(self.entity_data, 'ascii', 'replace')
def get_id(self):
return self.entity_id
def get_str_id(self):
return self.entity_str
def get_len(self):
return self.entity_len
def get_total_len(self):
return self.entity_len + self.id_len + self.len_size
def get_header_len(self):
return self.id_len + self.len_size
class Matroska(core.AVContainer):
"""
Matroska video and audio parser. If at least one video stream is
detected it will set the type to MEDIA_AV.
"""
def __init__(self, file):
core.AVContainer.__init__(self)
self.samplerate = 1
self.file = file
# Read enough that we're likely to get the full seekhead (FIXME: kludge)
buffer = file.read(2000)
if len(buffer) == 0:
# Regular File end
raise ParseError()
# Check the Matroska header
header = EbmlEntity(buffer)
if header.get_id() != MATROSKA_HEADER_ID:
raise ParseError()
log.debug(u'HEADER ID found %08X' % header.get_id())
self.mime = 'video/x-matroska'
self.type = 'Matroska'
self.has_idx = False
self.objects_by_uid = {}
# Now get the segment
self.segment = segment = EbmlEntity(buffer[header.get_total_len():])
# Record file offset of segment data for seekheads
self.segment.offset = header.get_total_len() + segment.get_header_len()
if segment.get_id() != MATROSKA_SEGMENT_ID:
log.debug(u'SEGMENT ID not found %08X' % segment.get_id())
return
log.debug(u'SEGMENT ID found %08X' % segment.get_id())
try:
for elem in self.process_one_level(segment):
if elem.get_id() == MATROSKA_SEEKHEAD_ID:
self.process_elem(elem)
except ParseError:
pass
if not self.has_idx:
log.warning(u'File has no index')
self._set('corrupt', True)
def process_elem(self, elem):
elem_id = elem.get_id()
log.debug(u'BEGIN: process element %r' % hex(elem_id))
if elem_id == MATROSKA_SEGMENT_INFO_ID:
duration = 0
scalecode = 1000000.0
for ielem in self.process_one_level(elem):
ielem_id = ielem.get_id()
if ielem_id == MATROSKA_TIMECODESCALE_ID:
scalecode = ielem.get_value()
elif ielem_id == MATROSKA_DURATION_ID:
duration = ielem.get_float_value()
elif ielem_id == MATROSKA_TITLE_ID:
self.title = ielem.get_utf8()
elif ielem_id == MATROSKA_DATE_UTC_ID:
timestamp = unpack('!q', ielem.get_data())[0] / 10.0 ** 9
# Date is offset 2001-01-01 00:00:00 (timestamp 978307200.0)
self.timestamp = int(timestamp + 978307200)
self.length = duration * scalecode / 1000000000.0
elif elem_id == MATROSKA_TRACKS_ID:
self.process_tracks(elem)
elif elem_id == MATROSKA_CHAPTERS_ID:
self.process_chapters(elem)
elif elem_id == MATROSKA_ATTACHMENTS_ID:
self.process_attachments(elem)
elif elem_id == MATROSKA_SEEKHEAD_ID:
self.process_seekhead(elem)
elif elem_id == MATROSKA_TAGS_ID:
self.process_tags(elem)
elif elem_id == MATROSKA_CUES_ID:
self.has_idx = True
log.debug(u'END: process element %r' % hex(elem_id))
return True
def process_seekhead(self, elem):
for seek_elem in self.process_one_level(elem):
if seek_elem.get_id() != MATROSKA_SEEK_ID:
continue
for sub_elem in self.process_one_level(seek_elem):
if sub_elem.get_id() == MATROSKA_SEEKID_ID:
if sub_elem.get_value() == MATROSKA_CLUSTER_ID:
# Not interested in these.
return
elif sub_elem.get_id() == MATROSKA_SEEK_POSITION_ID:
self.file.seek(self.segment.offset + sub_elem.get_value())
buffer = self.file.read(100)
try:
elem = EbmlEntity(buffer)
except ParseError:
continue
# Fetch all data necessary for this element.
elem.add_data(self.file.read(elem.ebml_length))
self.process_elem(elem)
def process_tracks(self, tracks):
tracksbuf = tracks.get_data()
index = 0
while index < tracks.get_len():
trackelem = EbmlEntity(tracksbuf[index:])
log.debug (u'ELEMENT %X found' % trackelem.get_id())
self.process_track(trackelem)
index += trackelem.get_total_len() + trackelem.get_crc_len()
def process_one_level(self, item):
buf = item.get_data()
index = 0
while index < item.get_len():
if len(buf[index:]) == 0:
break
elem = EbmlEntity(buf[index:])
yield elem
index += elem.get_total_len() + elem.get_crc_len()
def set_track_defaults(self, track):
track.language = 'eng'
def process_track(self, track):
# Collapse generator into a list since we need to iterate over it
# twice.
elements = [x for x in self.process_one_level(track)]
track_type = [x.get_value() for x in elements if x.get_id() == MATROSKA_TRACK_TYPE_ID]
if not track_type:
log.debug(u'Bad track: no type id found')
return
track_type = track_type[0]
track = None
if track_type == MATROSKA_VIDEO_TRACK:
log.debug(u'Video track found')
track = self.process_video_track(elements)
elif track_type == MATROSKA_AUDIO_TRACK:
log.debug(u'Audio track found')
track = self.process_audio_track(elements)
elif track_type == MATROSKA_SUBTITLES_TRACK:
log.debug(u'Subtitle track found')
track = core.Subtitle()
self.set_track_defaults(track)
track.id = len(self.subtitles)
self.subtitles.append(track)
for elem in elements:
self.process_track_common(elem, track)
def process_track_common(self, elem, track):
elem_id = elem.get_id()
if elem_id == MATROSKA_TRACK_LANGUAGE_ID:
track.language = elem.get_str()
log.debug(u'Track language found: %r' % track.language)
elif elem_id == MATROSKA_NAME_ID:
track.title = elem.get_utf8()
elif elem_id == MATROSKA_TRACK_NUMBER_ID:
track.trackno = elem.get_value()
elif elem_id == MATROSKA_TRACK_FLAG_ENABLED_ID:
track.enabled = bool(elem.get_value())
elif elem_id == MATROSKA_TRACK_FLAG_DEFAULT_ID:
track.default = bool(elem.get_value())
elif elem_id == MATROSKA_CODEC_ID:
track.codec = elem.get_str()
elif elem_id == MATROSKA_CODEC_PRIVATE_ID:
track.codec_private = elem.get_data()
elif elem_id == MATROSKA_TRACK_UID_ID:
self.objects_by_uid[elem.get_value()] = track
def process_video_track(self, elements):
track = core.VideoStream()
# Defaults
track.codec = u'Unknown'
track.fps = 0
self.set_track_defaults(track)
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_CODEC_ID:
track.codec = elem.get_str()
elif elem_id == MATROSKA_FRAME_DURATION_ID:
try:
track.fps = 1 / (pow(10, -9) * (elem.get_value()))
except ZeroDivisionError:
pass
elif elem_id == MATROSKA_VIDEO_SETTINGS_ID:
d_width = d_height = None
for settings_elem in self.process_one_level(elem):
settings_elem_id = settings_elem.get_id()
if settings_elem_id == MATROSKA_VIDEO_WIDTH_ID:
track.width = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_HEIGHT_ID:
track.height = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_DISPLAY_WIDTH_ID:
d_width = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_DISPLAY_HEIGHT_ID:
d_height = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_INTERLACED_ID:
value = int(settings_elem.get_value())
self._set('interlaced', value)
if None not in [d_width, d_height]:
track.aspect = float(d_width) / d_height
else:
self.process_track_common(elem, track)
# convert codec information
# http://haali.cs.msu.ru/mkv/codecs.pdf
if track.codec in FOURCCMap:
track.codec = FOURCCMap[track.codec]
elif '/' in track.codec and track.codec.split('/')[0] + '/' in FOURCCMap:
track.codec = FOURCCMap[track.codec.split('/')[0] + '/']
elif track.codec.endswith('FOURCC') and len(track.codec_private or '') == 40:
track.codec = track.codec_private[16:20]
elif track.codec.startswith('V_REAL/'):
track.codec = track.codec[7:]
elif track.codec.startswith('V_'):
# FIXME: add more video codecs here
track.codec = track.codec[2:]
track.id = len(self.video)
self.video.append(track)
return track
def process_audio_track(self, elements):
track = core.AudioStream()
track.codec = u'Unknown'
self.set_track_defaults(track)
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_CODEC_ID:
track.codec = elem.get_str()
elif elem_id == MATROSKA_AUDIO_SETTINGS_ID:
for settings_elem in self.process_one_level(elem):
settings_elem_id = settings_elem.get_id()
if settings_elem_id == MATROSKA_AUDIO_SAMPLERATE_ID:
track.samplerate = settings_elem.get_float_value()
elif settings_elem_id == MATROSKA_AUDIO_CHANNELS_ID:
track.channels = settings_elem.get_value()
else:
self.process_track_common(elem, track)
if track.codec in FOURCCMap:
track.codec = FOURCCMap[track.codec]
elif '/' in track.codec and track.codec.split('/')[0] + '/' in FOURCCMap:
track.codec = FOURCCMap[track.codec.split('/')[0] + '/']
elif track.codec.startswith('A_'):
track.codec = track.codec[2:]
track.id = len(self.audio)
self.audio.append(track)
return track
def process_chapters(self, chapters):
elements = self.process_one_level(chapters)
for elem in elements:
if elem.get_id() == MATROSKA_EDITION_ENTRY_ID:
buf = elem.get_data()
index = 0
while index < elem.get_len():
sub_elem = EbmlEntity(buf[index:])
if sub_elem.get_id() == MATROSKA_CHAPTER_ATOM_ID:
self.process_chapter_atom(sub_elem)
index += sub_elem.get_total_len() + sub_elem.get_crc_len()
def process_chapter_atom(self, atom):
elements = self.process_one_level(atom)
chap = core.Chapter()
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_CHAPTER_TIME_START_ID:
# Scale timecode to seconds (float)
chap.pos = elem.get_value() / 1000000 / 1000.0
elif elem_id == MATROSKA_CHAPTER_FLAG_ENABLED_ID:
chap.enabled = elem.get_value()
elif elem_id == MATROSKA_CHAPTER_DISPLAY_ID:
# Matroska supports multiple (chapter name, language) pairs for
# each chapter, so chapter names can be internationalized. This
# logic will only take the last one in the list.
for display_elem in self.process_one_level(elem):
if display_elem.get_id() == MATROSKA_CHAPTER_STRING_ID:
chap.name = display_elem.get_utf8()
elif elem_id == MATROSKA_CHAPTER_UID_ID:
self.objects_by_uid[elem.get_value()] = chap
log.debug(u'Chapter %r found', chap.name)
chap.id = len(self.chapters)
self.chapters.append(chap)
def process_attachments(self, attachments):
buf = attachments.get_data()
index = 0
while index < attachments.get_len():
elem = EbmlEntity(buf[index:])
if elem.get_id() == MATROSKA_ATTACHED_FILE_ID:
self.process_attachment(elem)
index += elem.get_total_len() + elem.get_crc_len()
def process_attachment(self, attachment):
elements = self.process_one_level(attachment)
name = desc = mimetype = ""
data = None
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_FILE_NAME_ID:
name = elem.get_utf8()
elif elem_id == MATROSKA_FILE_DESC_ID:
desc = elem.get_utf8()
elif elem_id == MATROSKA_FILE_MIME_TYPE_ID:
mimetype = elem.get_data()
elif elem_id == MATROSKA_FILE_DATA_ID:
data = elem.get_data()
# Right now we only support attachments that could be cover images.
# Make a guess to see if this attachment is a cover image.
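        # e.g. an attachment named "cover.jpg" with mimetype "image/jpeg" and
        # non-empty data qualifies; a "subtitles.srt" attachment would not.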
if mimetype.startswith("image/") and u"cover" in (name + desc).lower() and data:
self.thumbnail = data
        log.debug(u'Attachment %r found', name)
def process_tags(self, tags):
# Tags spec: http://www.matroska.org/technical/specs/tagging/index.html
        # Iterate over the Tags children. Each child is a Tag element (whose
        # children are SimpleTags) plus a Targets element whose children
        # specify which objects the tags apply to.
for tag_elem in self.process_one_level(tags):
# Start a new dict to hold all SimpleTag elements.
tags_dict = core.Tags()
            # A list of target uids this tags dict applies to. If empty,
            # the tags are global.
targets = []
for sub_elem in self.process_one_level(tag_elem):
if sub_elem.get_id() == MATROSKA_SIMPLE_TAG_ID:
self.process_simple_tag(sub_elem, tags_dict)
elif sub_elem.get_id() == MATROSKA_TARGETS_ID:
# Targets element: if there is no uid child (track uid,
# chapter uid, etc.) then the tags dict applies to the
# whole file (top-level Media object).
for target_elem in self.process_one_level(sub_elem):
target_elem_id = target_elem.get_id()
if target_elem_id in (MATRSOKA_TAGS_TRACK_UID_ID, MATRSOKA_TAGS_EDITION_UID_ID,
MATRSOKA_TAGS_CHAPTER_UID_ID, MATRSOKA_TAGS_ATTACHMENT_UID_ID):
targets.append(target_elem.get_value())
elif target_elem_id == MATROSKA_TARGET_TYPE_VALUE_ID:
# Target types not supported for now. (Unclear how this
# would fit with kaa.metadata.)
pass
if targets:
# Assign tags to all listed uids
for target in targets:
try:
self.objects_by_uid[target].tags.update(tags_dict)
self.tags_to_attributes(self.objects_by_uid[target], tags_dict)
except KeyError:
log.warning(u'Tags assigned to unknown/unsupported target uid %d', target)
else:
self.tags.update(tags_dict)
self.tags_to_attributes(self, tags_dict)
def process_simple_tag(self, simple_tag_elem, tags_dict):
"""
Returns a dict representing the Tag element.
"""
name = lang = value = children = None
binary = False
for elem in self.process_one_level(simple_tag_elem):
elem_id = elem.get_id()
if elem_id == MATROSKA_TAG_NAME_ID:
name = elem.get_utf8().lower()
elif elem_id == MATROSKA_TAG_STRING_ID:
value = elem.get_utf8()
elif elem_id == MATROSKA_TAG_BINARY_ID:
value = elem.get_data()
binary = True
elif elem_id == MATROSKA_TAG_LANGUAGE_ID:
lang = elem.get_utf8()
elif elem_id == MATROSKA_SIMPLE_TAG_ID:
if children is None:
children = core.Tags()
self.process_simple_tag(elem, children)
if children:
# Convert ourselves to a Tags object.
children.value = value
children.langcode = lang
value = children
else:
if name.startswith('date_'):
# Try to convert date to a datetime object.
value = matroska_date_to_datetime(value)
value = core.Tag(value, lang, binary)
if name in tags_dict:
# Multiple items of this tag name.
if not isinstance(tags_dict[name], list):
# Convert to a list
tags_dict[name] = [tags_dict[name]]
# Append to list
tags_dict[name].append(value)
else:
tags_dict[name] = value
def tags_to_attributes(self, obj, tags):
# Convert tags to core attributes.
for name, tag in tags.items():
if isinstance(tag, dict):
# Nested tags dict, recurse.
self.tags_to_attributes(obj, tag)
continue
elif name not in TAGS_MAP:
continue
attr, filter = TAGS_MAP[name]
if attr not in obj._keys and attr not in self._keys:
# Tag is not in any core attribute for this object or global,
# so skip.
continue
# Pull value out of Tag object or list of Tag objects.
value = [item.value for item in tag] if isinstance(tag, list) else tag.value
if filter:
try:
value = [filter(item) for item in value] if isinstance(value, list) else filter(value)
except Exception, e:
log.warning(u'Failed to convert tag to core attribute: %r', e)
# Special handling for tv series recordings. The 'title' tag
# can be used for both the series and the episode name. The
# same is true for trackno which may refer to the season
# and the episode number. Therefore, if we find these
# attributes already set we try some guessing.
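            # e.g. if trackno was already set to 2 and another trackno-mapped
            # tag with value 5 arrives, it is reinterpreted as season 2,
            # episode 5.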
if attr == 'trackno' and getattr(self, attr) is not None:
# delete trackno and save season and episode
self.season = self.trackno
self.episode = value
self.trackno = None
continue
if attr == 'title' and getattr(self, attr) is not None:
                # Store the current value of title as the series name; the
                # new tag value then becomes the title (assigned below).
self.series = self.title
if attr in obj._keys:
setattr(obj, attr, value)
else:
setattr(self, attr, value)
Parser = Matroska
| gpl-2.0 |
TaiwanStat/real.taiwanstat.com | wind-weather/update_data10hpa.py | 1 | 2850 | import sys
import subprocess
import datetime
'''
http://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_1p00.pl?dir=%2Fgfs.2015051406
http://www.nco.ncep.noaa.gov/pmb/products/gfs/gfs.t00z.pgrbf00.grib2.shtml
'''
import os
base_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(base_dir)
i = (datetime.datetime.now() - datetime.timedelta(hours=12))
date = i.strftime('%Y%m%d')
gfs = ''
h = i.hour/6*6
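# Floor the hour to the most recent 6-hourly GFS cycle (00/06/12/18 UTC);
# e.g. an hour value of 14 maps to the 12z cycle.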
date += '%02d' % h
gfs = 'gfs.t%02dz.pgrb2.1p00.anl' % h
print date
def update_data(url, path):
subprocess.call(['curl', url, '-o', 'tmp.grib2'])
subprocess.call(['../grib2json-0.8.0-SNAPSHOT/bin/grib2json', \
'-d', '-n', '-o', path, 'tmp.grib2'])
data = ''
f = open(path, 'r')
data = f.read().replace(' ', '').replace('\n', '')
f.close()
f = open(path, 'w')
f.write(data)
f.close()
subprocess.call(['rm', 'tmp.grib2'])
temp_url = 'http://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_1p00.pl?file=' + gfs + '&lev_10_mb=on&var_TMP=on&leftlon=0&rightlon=360&toplat=90&bottomlat=-90&dir=%2Fgfs.' + date
path = './data/weather/current/current-temp-isobaric-10hPa-gfs-1.0.json'
update_data(temp_url, path)
wind_url = 'http://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_1p00.pl?file=' + gfs + '&lev_10_mb=on&var_UGRD=on&var_VGRD=on&leftlon=0&rightlon=360&toplat=90&bottomlat=-90&dir=%2Fgfs.' + date
path = './data/weather/current/current-wind-isobaric-10hPa-gfs-1.0.json'
update_data(wind_url, path)
rh_url = 'http://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_1p00.pl?file=' + gfs + '&lev_10_mb=on&var_RH=on&leftlon=0&rightlon=360&toplat=90&bottomlat=-90&dir=%2Fgfs.' + date
path = './data/weather/current/current-relative_humidity-isobaric-10hPa-gfs-1.0.json'
update_data(rh_url, path)
'''pwat_url = 'http://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_1p00.pl?file=' + gfs + '&all_lev=on&var_PWAT=on&leftlon=0&rightlon=360&toplat=90&bottomlat=-90&dir=%2Fgfs.' + date
path = './data/weather/current/current-total_precipitable_water-10hPa-gfs-1.0.json'
update_data(pwat_url, path)
cwat_url = 'http://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_1p00.pl?file=' + gfs + '&all_lev=on&var_CWAT=on&leftlon=0&rightlon=360&toplat=90&bottomlat=-90&dir=%2Fgfs.' + date
path = './data/weather/current/current-total_cloud_water-gfs-1.0.json'
update_data(cwat_url, path)
mslet_url = 'http://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_0p50.pl?file=gfs.t06z.pgrb2full.0p50.f000&lev_mean_sea_level=on&var_MSLET=on&leftlon=0&rightlon=360&toplat=90&bottomlat=-90&dir=%2Fgfs.' + date
path = './data/weather/current/current-mean_sea_level_pressure-gfs-1.0.json'
update_data(mslet_url, path)'''
| mit |
njwilson23/rasterio | examples/total.py | 7 | 1277 | import numpy
import rasterio
import subprocess
with rasterio.drivers(CPL_DEBUG=True):
# Read raster bands directly to Numpy arrays.
with rasterio.open('tests/data/RGB.byte.tif') as src:
r, g, b = src.read()
# Combine arrays using the 'iadd' ufunc. Expecting that the sum will
# exceed the 8-bit integer range, initialize it as 16-bit. Adding other
# arrays to it in-place converts those arrays up and preserves the type
# of the total array.
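    # Illustrative dtype behaviour of the in-place add (values hypothetical):
    #   acc = numpy.zeros(3, dtype=numpy.uint16)
    #   acc += numpy.array([200, 200, 200], dtype=numpy.uint8)
    #   acc += numpy.array([200, 200, 200], dtype=numpy.uint8)
    #   # acc is now array([400, 400, 400], dtype=uint16)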
total = numpy.zeros(r.shape, dtype=rasterio.uint16)
for band in (r, g, b):
total += band
total /= 3
# Write the product as a raster band to a new 8-bit file. For keyword
# arguments, we start with the meta attributes of the source file, but
# then change the band count to 1, set the dtype to uint8, and specify
# LZW compression.
kwargs = src.meta
kwargs.update(
dtype=rasterio.uint8,
count=1,
compress='lzw')
with rasterio.open('example-total.tif', 'w', **kwargs) as dst:
dst.write_band(1, total.astype(rasterio.uint8))
# Dump out gdalinfo's report card and open the image.
info = subprocess.check_output(
['gdalinfo', '-stats', 'example-total.tif'])
print(info)
subprocess.call(['open', 'example-total.tif'])
| bsd-3-clause |
scriptnull/coala | coalib/misc/StringConverter.py | 2 | 4772 | from collections import Iterable, OrderedDict
import re
from coalib.misc import Constants
from coalib.parsing.StringProcessing import (unescaped_split,
unescaped_strip,
unescape)
class StringConverter:
"""
Converts strings to other things as needed. If you need some kind of string
conversion that is not implemented here, consider adding it so everyone
gets something out of it.
"""
def __init__(self,
value,
strip_whitespaces=True,
list_delimiters=None,
dict_delimiter=":",
remove_empty_iter_elements=True):
if list_delimiters is None:
list_delimiters = [",", ";"]
if not isinstance(list_delimiters, Iterable):
raise TypeError("list_delimiters has to be an Iterable.")
if not isinstance(strip_whitespaces, bool):
raise TypeError("strip_whitespaces has to be a bool parameter")
self.__strip_whitespaces = strip_whitespaces
self.__list_delimiters = list_delimiters
self.__dict_delimiter = dict_delimiter
self.__remove_empty_iter_elements = remove_empty_iter_elements
self.__escaped_list = None
self.__unescaped_list = None
self.__dict = None
self.value = value
def __str__(self):
return unescape(self.value)
def __bool__(self):
if str(self).lower() in Constants.TRUE_STRINGS:
return True
if str(self).lower() in Constants.FALSE_STRINGS:
return False
raise ValueError
def __len__(self):
return len(str(self))
def __int__(self):
return int(str(self))
def __float__(self):
return float(str(self))
def __iter__(self, remove_backslashes=True):
"""
Converts the value to a list using the delimiters given at construction
time.
Note that escaped values will be unescaped and escaped list delimiters
will be allowed in values. If you need the escapes you should not
use this routine.
:param remove_backslashes: Whether or not to remove the backslashes
after conversion.
:return: An iterator over all values.
"""
if remove_backslashes:
return iter(self.__unescaped_list)
else:
return iter(self.__escaped_list)
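    # Illustrative behaviour (hypothetical input): with the default
    # delimiters "," and ";", an escaped delimiter stays inside a value:
    #   list(StringConverter("a, b\\, c; d")) -> ["a", "b, c", "d"]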
def __getitem__(self, item):
return self.__dict.__getitem__(item)
def keys(self):
return self.__dict.keys()
def __get_raw_list(self):
pattern = ("(?:" +
"|".join(re.escape(v) for v in self.__list_delimiters) +
")")
return list(unescaped_split(pattern,
self.value,
use_regex=True))
def __prepare_list(self):
self.__escaped_list = self.__get_raw_list()
if self.__strip_whitespaces:
self.__escaped_list = [unescaped_strip(elem)
for elem in self.__escaped_list]
self.__unescaped_list = [unescape(elem)
for elem in self.__escaped_list]
if self.__remove_empty_iter_elements:
            # Need to do this after stripping; can't use the built-in
            # functionality of split.
while "" in self.__unescaped_list:
self.__unescaped_list.remove("")
while "" in self.__escaped_list:
self.__escaped_list.remove("")
def __prepare_dict(self):
# We must keep order here, user can drop it later.
self.__dict = OrderedDict()
for elem in self.__get_raw_list():
key_val = unescaped_split(self.__dict_delimiter, elem, max_split=1)
if self.__strip_whitespaces:
key_val = [unescaped_strip(item) for item in key_val]
key_val = [unescape(item) for item in key_val]
if not any(item != "" for item in key_val):
continue
if len(key_val) < 2:
self.__dict[key_val[0]] = ""
else:
self.__dict[key_val[0]] = key_val[1]
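    # Illustrative behaviour (hypothetical input), using the default ":"
    # dict delimiter:
    #   conv = StringConverter("a: 1, b: 2")
    #   conv["a"] -> "1";  list(conv.keys()) -> ["a", "b"]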
@property
def value(self):
return self.__value
@value.setter
def value(self, newval):
self.__value = str(newval)
if self.__strip_whitespaces:
self.__value = unescaped_strip(self.__value)
self.__prepare_list()
self.__prepare_dict()
def __eq__(self, other):
return isinstance(other, StringConverter) and self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
| agpl-3.0 |
maclandrol/ete | ete2/nexml/_nexml.py | 3 | 749187 | # #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
#   ETE: a python Environment for Tree Exploration. BMC Bioinformatics
#   2010, 11:24. doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: [email protected]
#
#
# #END_LICENSE#############################################################
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Mon Jun 27 10:13:44 2011 by generateDS.py version 2.5b.
#
import sys
import getopt
import re as re_
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")')
return input_data
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'utf-8'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
STRING_CLEANUP_PAT = re_.compile(r"[\n\r\s]+")
#
# Support/utility functions.
#
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
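# Illustrative results of the escaping above (hypothetical inputs):
#   quote_attrib('a & b')            -> '"a &amp; b"'
#   quote_attrib('say "hi"')         -> '\'say "hi"\''   (single-quoted instead)
#   quote_attrib('"both" \'kinds\'') -> '"&quot;both&quot; \'kinds\'"'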
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
# First try with no namespace.
value = attrs.get(attr_name)
if value is None:
# Now try the other possible namespaces.
namespaces = node.nsmap.itervalues()
for namespace in namespaces:
value = attrs.get('{%s}%s' % (namespace, attr_name, ))
if value is not None:
break
return value
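# e.g. (illustrative) find_attr_value_('href', node) first looks for a plain
# "href" attribute, then falls back to namespace-qualified forms such as
# "{http://www.w3.org/1999/xlink}href" for each namespace in node.nsmap.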
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace,name)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \
(self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class Base(GeneratedsSuper):
"""The base type for all complexType definitions in the nexml schema.
This type allows a number of special attributes: xml:lang - for
languages codes xml:base - see http://www.w3.org/TR/xmlbase/
xml:id - see http://www.w3.org/TR/xml-id/ xml:space - for
whitespace handling xlink:href - for links Also see
http://www.w3.org/2001/xml.xsd for more information on the xml
and xlink attributes."""
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.anyAttributes_ = {}
def factory(*args_, **kwargs_):
if Base.subclass:
return Base.subclass(*args_, **kwargs_)
else:
return Base(*args_, **kwargs_)
factory = staticmethod(factory)
def get_anyAttributes_(self): return self.anyAttributes_
def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
def export(self, outfile, level, namespace_='', name_='Base', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Base')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Base'):
for name, value in self.anyAttributes_.items():
xsinamespaceprefix = 'xsi'
xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
xsinamespace2 = '{%s}' % (xsinamespace1, )
if name.startswith(xsinamespace2):
name1 = name[len(xsinamespace2):]
name2 = '%s:%s' % (xsinamespaceprefix, name1, )
if name2 not in already_processed:
already_processed.append(name2)
outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
else:
if name not in already_processed:
already_processed.append(name)
outfile.write(' %s=%s' % (name, quote_attrib(value), ))
pass
def exportChildren(self, outfile, level, namespace_='', name_='Base', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Base'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
for name, value in self.anyAttributes_.items():
showIndent(outfile, level)
outfile.write('%s = "%s",\n' % (name, value,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
self.anyAttributes_ = {}
for name, value in attrs.items():
if name not in already_processed:
self.anyAttributes_[name] = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Base
class Meta(Base):
subclass = None
superclass = Base
def __init__(self, valueOf_=None):
super(Meta, self).__init__()
pass
def factory(*args_, **kwargs_):
if Meta.subclass:
return Meta.subclass(*args_, **kwargs_)
else:
return Meta(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='Meta', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Meta')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="Meta"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Meta'):
super(Meta, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Meta')
def exportChildren(self, outfile, level, namespace_='', name_='Meta', fromsubclass_=False):
super(Meta, self).exportChildren(outfile, level, namespace_, name_, True)
pass
def hasContent_(self):
if (
super(Meta, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Meta'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(Meta, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Meta, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(Meta, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(Meta, self).buildChildren(child_, node, nodeName_, True)
pass
# end class Meta
class ResourceMeta(Meta):
"""Metadata annotations in which the object is a resource. If this
element contains meta elements as children, then the object of
this annotation is a "blank node"."""
subclass = None
superclass = Meta
def __init__(self, href=None, rel=None, meta=None, valueOf_=None):
super(ResourceMeta, self).__init__()
self.href = _cast(None, href)
self.rel = _cast(None, rel)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if ResourceMeta.subclass:
return ResourceMeta.subclass(*args_, **kwargs_)
else:
return ResourceMeta(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_href(self): return self.href
def set_href(self, href): self.href = href
def get_rel(self): return self.rel
def set_rel(self, rel): self.rel = rel
def export(self, outfile, level, namespace_='', name_='ResourceMeta', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ResourceMeta')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="ResourceMeta"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ResourceMeta'):
super(ResourceMeta, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ResourceMeta')
if self.href is not None and 'href' not in already_processed:
already_processed.append('href')
outfile.write(' href=%s' % (quote_attrib(self.href), ))
if self.rel is not None and 'rel' not in already_processed:
already_processed.append('rel')
outfile.write(' rel=%s' % (quote_attrib(self.rel), ))
def exportChildren(self, outfile, level, namespace_='', name_='ResourceMeta', fromsubclass_=False):
super(ResourceMeta, self).exportChildren(outfile, level, namespace_, name_, True)
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(ResourceMeta, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ResourceMeta'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.href is not None and 'href' not in already_processed:
already_processed.append('href')
showIndent(outfile, level)
outfile.write('href = %s,\n' % (self.href,))
if self.rel is not None and 'rel' not in already_processed:
already_processed.append('rel')
showIndent(outfile, level)
outfile.write('rel = %s,\n' % (self.rel,))
super(ResourceMeta, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ResourceMeta, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('href', node)
if value is not None and 'href' not in already_processed:
already_processed.append('href')
self.href = value
value = find_attr_value_('rel', node)
if value is not None and 'rel' not in already_processed:
already_processed.append('rel')
self.rel = value
super(ResourceMeta, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
super(ResourceMeta, self).buildChildren(child_, node, nodeName_, True)
# end class ResourceMeta
class LiteralMeta(Meta):
"""Metadata annotations in which the object is a literal value. If the
@content attribute is used, then the element should contain no
children."""
subclass = None
superclass = Meta
def __init__(self, datatype=None, content=None, property=None, valueOf_=None):
super(LiteralMeta, self).__init__()
self.datatype = _cast(None, datatype)
self.content = _cast(None, content)
self.property = _cast(None, property)
pass
def factory(*args_, **kwargs_):
if LiteralMeta.subclass:
return LiteralMeta.subclass(*args_, **kwargs_)
else:
return LiteralMeta(*args_, **kwargs_)
factory = staticmethod(factory)
def get_datatype(self): return self.datatype
def set_datatype(self, datatype): self.datatype = datatype
def get_content(self): return self.content
def set_content(self, content): self.content = content
def get_property(self): return self.property
def set_property(self, property): self.property = property
def export(self, outfile, level, namespace_='', name_='LiteralMeta', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='LiteralMeta')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="LiteralMeta"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='LiteralMeta'):
super(LiteralMeta, self).exportAttributes(outfile, level, already_processed, namespace_, name_='LiteralMeta')
if self.datatype is not None and 'datatype' not in already_processed:
already_processed.append('datatype')
outfile.write(' datatype=%s' % (quote_attrib(self.datatype), ))
if self.content is not None and 'content' not in already_processed:
already_processed.append('content')
outfile.write(' content=%s' % (self.gds_format_string(quote_attrib(self.content).encode(ExternalEncoding), input_name='content'), ))
if self.property is not None and 'property' not in already_processed:
already_processed.append('property')
outfile.write(' property=%s' % (quote_attrib(self.property), ))
def exportChildren(self, outfile, level, namespace_='', name_='LiteralMeta', fromsubclass_=False):
super(LiteralMeta, self).exportChildren(outfile, level, namespace_, name_, True)
pass
def hasContent_(self):
if (
super(LiteralMeta, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='LiteralMeta'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.datatype is not None and 'datatype' not in already_processed:
already_processed.append('datatype')
showIndent(outfile, level)
outfile.write('datatype = %s,\n' % (self.datatype,))
if self.content is not None and 'content' not in already_processed:
already_processed.append('content')
showIndent(outfile, level)
outfile.write('content = "%s",\n' % (self.content,))
if self.property is not None and 'property' not in already_processed:
already_processed.append('property')
showIndent(outfile, level)
outfile.write('property = %s,\n' % (self.property,))
super(LiteralMeta, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(LiteralMeta, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('datatype', node)
if value is not None and 'datatype' not in already_processed:
already_processed.append('datatype')
self.datatype = value
value = find_attr_value_('content', node)
if value is not None and 'content' not in already_processed:
already_processed.append('content')
self.content = value
value = find_attr_value_('property', node)
if value is not None and 'property' not in already_processed:
already_processed.append('property')
self.property = value
super(LiteralMeta, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(LiteralMeta, self).buildChildren(child_, node, nodeName_, True)
pass
# end class LiteralMeta
class attrExtensions(GeneratedsSuper):
"""This element is for use in WSDL 1.1 only. It does not apply to WSDL
2.0 documents. Use in WSDL 2.0 documents is invalid."""
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.anyAttributes_ = {}
def factory(*args_, **kwargs_):
if attrExtensions.subclass:
return attrExtensions.subclass(*args_, **kwargs_)
else:
return attrExtensions(*args_, **kwargs_)
factory = staticmethod(factory)
def get_anyAttributes_(self): return self.anyAttributes_
def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
def export(self, outfile, level, namespace_='', name_='attrExtensions', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='attrExtensions')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='attrExtensions'):
for name, value in self.anyAttributes_.items():
xsinamespaceprefix = 'xsi'
xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
xsinamespace2 = '{%s}' % (xsinamespace1, )
if name.startswith(xsinamespace2):
name1 = name[len(xsinamespace2):]
name2 = '%s:%s' % (xsinamespaceprefix, name1, )
if name2 not in already_processed:
already_processed.append(name2)
outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
else:
if name not in already_processed:
already_processed.append(name)
outfile.write(' %s=%s' % (name, quote_attrib(value), ))
pass
def exportChildren(self, outfile, level, namespace_='', name_='attrExtensions', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='attrExtensions'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
for name, value in self.anyAttributes_.items():
showIndent(outfile, level)
outfile.write('%s = "%s",\n' % (name, value,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
self.anyAttributes_ = {}
for name, value in attrs.items():
if name not in already_processed:
self.anyAttributes_[name] = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class attrExtensions
class AbstractMapping(Base):
"""The AbstractMapping type is the superclass for an ambiguity mapping.
In an instance document, a subclass of this type will look like
<member state="st1"/>, i.e. an element called "member" with an
attribute called "state" whose value is an id reference that
refers to an element that subclasses AbstractState. The purpose
of AbstractMapping is to specify which other states may be
implied, e.g. a nucleotide symbol "N" would have mappings to
"A", "C", "G" and "T"."""
subclass = None
superclass = Base
def __init__(self, state=None, valueOf_=None):
super(AbstractMapping, self).__init__()
self.state = _cast(None, state)
pass
def factory(*args_, **kwargs_):
if AbstractMapping.subclass:
return AbstractMapping.subclass(*args_, **kwargs_)
else:
return AbstractMapping(*args_, **kwargs_)
factory = staticmethod(factory)
def get_state(self): return self.state
def set_state(self, state): self.state = state
def export(self, outfile, level, namespace_='', name_='AbstractMapping', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractMapping')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractMapping"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractMapping'):
super(AbstractMapping, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractMapping')
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
outfile.write(' state=%s' % (self.gds_format_string(quote_attrib(self.state).encode(ExternalEncoding), input_name='state'), ))
def exportChildren(self, outfile, level, namespace_='', name_='AbstractMapping', fromsubclass_=False):
super(AbstractMapping, self).exportChildren(outfile, level, namespace_, name_, True)
pass
def hasContent_(self):
if (
super(AbstractMapping, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractMapping'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
showIndent(outfile, level)
outfile.write('state = "%s",\n' % (self.state,))
super(AbstractMapping, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractMapping, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('state', node)
if value is not None and 'state' not in already_processed:
already_processed.append('state')
self.state = value
super(AbstractMapping, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(AbstractMapping, self).buildChildren(child_, node, nodeName_, True)
pass
# end class AbstractMapping
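# An illustrative nexml fragment for such a mapping (ids are hypothetical),
# where an uncertain nucleotide symbol "N" implies A, C, G and T:
#   <uncertain_state_set id="sN" symbol="N">
#     <member state="sA"/><member state="sC"/>
#     <member state="sG"/><member state="sT"/>
#   </uncertain_state_set>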
class DNAMapping(AbstractMapping):
"""An IUPAC ambiguity mapping."""
subclass = None
superclass = AbstractMapping
def __init__(self, state=None, valueOf_=None):
super(DNAMapping, self).__init__(state, )
pass
def factory(*args_, **kwargs_):
if DNAMapping.subclass:
return DNAMapping.subclass(*args_, **kwargs_)
else:
return DNAMapping(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='DNAMapping', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DNAMapping')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DNAMapping'):
super(DNAMapping, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DNAMapping')
def exportChildren(self, outfile, level, namespace_='', name_='DNAMapping', fromsubclass_=False):
pass
def hasContent_(self):
if (
super(DNAMapping, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DNAMapping'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(DNAMapping, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DNAMapping, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(DNAMapping, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class DNAMapping
class AAMapping(AbstractMapping):
"""An IUPAC ambiguity mapping."""
subclass = None
superclass = AbstractMapping
def __init__(self, state=None, valueOf_=None):
super(AAMapping, self).__init__(state, )
pass
def factory(*args_, **kwargs_):
if AAMapping.subclass:
return AAMapping.subclass(*args_, **kwargs_)
else:
return AAMapping(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='AAMapping', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AAMapping')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AAMapping'):
super(AAMapping, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AAMapping')
def exportChildren(self, outfile, level, namespace_='', name_='AAMapping', fromsubclass_=False):
pass
def hasContent_(self):
if (
super(AAMapping, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AAMapping'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AAMapping, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AAMapping, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AAMapping, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class AAMapping
class RNAMapping(AbstractMapping):
"""An IUPAC RNA ambiguity mapping."""
subclass = None
superclass = AbstractMapping
def __init__(self, state=None, valueOf_=None):
super(RNAMapping, self).__init__(state, )
pass
def factory(*args_, **kwargs_):
if RNAMapping.subclass:
return RNAMapping.subclass(*args_, **kwargs_)
else:
return RNAMapping(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='RNAMapping', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RNAMapping')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RNAMapping'):
super(RNAMapping, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RNAMapping')
def exportChildren(self, outfile, level, namespace_='', name_='RNAMapping', fromsubclass_=False):
pass
def hasContent_(self):
if (
super(RNAMapping, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RNAMapping'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RNAMapping, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RNAMapping, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RNAMapping, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class RNAMapping
class StandardMapping(AbstractMapping):
"""A standard character ambiguity mapping."""
subclass = None
superclass = AbstractMapping
def __init__(self, state=None, valueOf_=None):
super(StandardMapping, self).__init__(state, )
pass
def factory(*args_, **kwargs_):
if StandardMapping.subclass:
return StandardMapping.subclass(*args_, **kwargs_)
else:
return StandardMapping(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='StandardMapping', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardMapping')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardMapping'):
super(StandardMapping, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardMapping')
def exportChildren(self, outfile, level, namespace_='', name_='StandardMapping', fromsubclass_=False):
pass
def hasContent_(self):
if (
super(StandardMapping, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StandardMapping'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(StandardMapping, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(StandardMapping, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(StandardMapping, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class StandardMapping
class Annotated(Base):
"""The Annotated complexType is a super class for objects that
optionally have metadata annotations of type Meta."""
subclass = None
superclass = Base
def __init__(self, about=None, meta=None, valueOf_=None):
super(Annotated, self).__init__()
self.about = _cast(None, about)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if Annotated.subclass:
return Annotated.subclass(*args_, **kwargs_)
else:
return Annotated(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_about(self): return self.about
def set_about(self, about): self.about = about
def export(self, outfile, level, namespace_='', name_='Annotated', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Annotated')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="Annotated"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Annotated'):
super(Annotated, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Annotated')
if self.about is not None and 'about' not in already_processed:
already_processed.append('about')
outfile.write(' about=%s' % (quote_attrib(self.about), ))
def exportChildren(self, outfile, level, namespace_='', name_='Annotated', fromsubclass_=False):
super(Annotated, self).exportChildren(outfile, level, namespace_, name_, True)
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(Annotated, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Annotated'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.about is not None and 'about' not in already_processed:
already_processed.append('about')
showIndent(outfile, level)
outfile.write('about = %s,\n' % (self.about,))
super(Annotated, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Annotated, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('about', node)
if value is not None and 'about' not in already_processed:
already_processed.append('about')
self.about = value
super(Annotated, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
super(Annotated, self).buildChildren(child_, node, nodeName_, True)
# end class Annotated
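# Illustrative usage sketch (not generated code): the about/meta accessors
# above follow the usual generateDS pattern. Assuming a Meta class is
# generated elsewhere in this module, an annotation can be attached and the
# element serialized like so:
#
#     import sys
#     ann = Annotated.factory()
#     ann.set_about('#otu1')
#     ann.add_meta(Meta.factory())   # Meta assumed to exist in this module
#     ann.export(sys.stdout, 0, name_='annotated')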
class Nexml(Annotated):
"""The root element for nexml."""
subclass = None
superclass = Annotated
def __init__(self, about=None, meta=None, version=None, generator=None, otus=None, characters=None, trees=None, valueOf_=None):
super(Nexml, self).__init__(about, meta, )
self.version = _cast(None, version)
self.generator = _cast(None, generator)
if otus is None:
self.otus = []
else:
self.otus = otus
if characters is None:
self.characters = []
else:
self.characters = characters
if trees is None:
self.trees = []
else:
self.trees = trees
def factory(*args_, **kwargs_):
if Nexml.subclass:
return Nexml.subclass(*args_, **kwargs_)
else:
return Nexml(*args_, **kwargs_)
factory = staticmethod(factory)
def get_otus(self): return self.otus
def set_otus(self, otus): self.otus = otus
def add_otus(self, value): self.otus.append(value)
def insert_otus(self, index, value): self.otus[index] = value
def get_characters(self): return self.characters
def set_characters(self, characters): self.characters = characters
def add_characters(self, value): self.characters.append(value)
def insert_characters(self, index, value): self.characters[index] = value
def get_trees(self): return self.trees
def set_trees(self, trees): self.trees = trees
def add_trees(self, value): self.trees.append(value)
def insert_trees(self, index, value): self.trees[index] = value
def get_version(self): return self.version
def set_version(self, version): self.version = version
def validate_Nexml1_0(self, value):
# Validate type Nexml1_0, a restriction on xs:decimal.
pass
def get_generator(self): return self.generator
def set_generator(self, generator): self.generator = generator
def export(self, outfile, level, namespace_='', name_='Nexml', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Nexml')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="Nexml"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Nexml'):
super(Nexml, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Nexml')
if self.version is not None and 'version' not in already_processed:
already_processed.append('version')
outfile.write(' version=%s' % (quote_attrib(self.version), ))
if self.generator is not None and 'generator' not in already_processed:
already_processed.append('generator')
outfile.write(' generator=%s' % (self.gds_format_string(quote_attrib(self.generator).encode(ExternalEncoding), input_name='generator'), ))
def exportChildren(self, outfile, level, namespace_='', name_='Nexml', fromsubclass_=False):
super(Nexml, self).exportChildren(outfile, level, namespace_, name_, True)
for otus_ in self.otus:
otus_.export(outfile, level, namespace_, name_='otus')
for characters_ in self.get_characters():
characters_.export(outfile, level, namespace_, name_='characters')
for trees_ in self.trees:
trees_.export(outfile, level, namespace_, name_='trees')
def hasContent_(self):
if (
self.otus or
self.characters or
self.trees or
super(Nexml, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Nexml'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.version is not None and 'version' not in already_processed:
already_processed.append('version')
showIndent(outfile, level)
outfile.write('version = %f,\n' % (self.version,))
if self.generator is not None and 'generator' not in already_processed:
already_processed.append('generator')
showIndent(outfile, level)
outfile.write('generator = "%s",\n' % (self.generator,))
super(Nexml, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Nexml, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('otus=[\n')
level += 1
for otus_ in self.otus:
showIndent(outfile, level)
outfile.write('model_.Taxa(\n')
otus_.exportLiteral(outfile, level, name_='Taxa')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('characters=[\n')
level += 1
for characters_ in self.characters:
showIndent(outfile, level)
outfile.write('model_.AbstractBlock(\n')
characters_.exportLiteral(outfile, level, name_='AbstractBlock')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('trees=[\n')
level += 1
for trees_ in self.trees:
showIndent(outfile, level)
outfile.write('model_.Trees(\n')
trees_.exportLiteral(outfile, level, name_='Trees')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('version', node)
if value is not None and 'version' not in already_processed:
already_processed.append('version')
try:
self.version = float(value)
            except ValueError as exp:
raise ValueError('Bad float/double attribute (version): %s' % exp)
self.validate_Nexml1_0(self.version) # validate type Nexml1_0
value = find_attr_value_('generator', node)
if value is not None and 'generator' not in already_processed:
already_processed.append('generator')
self.generator = value
super(Nexml, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'otus':
obj_ = Taxa.factory()
obj_.build(child_)
self.otus.append(obj_)
elif nodeName_ == 'characters':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <characters> element')
self.characters.append(obj_)
elif nodeName_ == 'trees':
obj_ = Trees.factory()
obj_.build(child_)
self.trees.append(obj_)
super(Nexml, self).buildChildren(child_, node, nodeName_, True)
# end class Nexml
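# Usage sketch (illustrative, with assumptions): Nexml.build() walks an
# ElementTree node, so a document can be round-tripped roughly as below.
# Tag_pattern_, Taxa, Trees and friends are assumed to be defined earlier
# in this generated module; 'example.xml' is a placeholder path.
#
#     import sys
#     from xml.etree import ElementTree as etree
#     root = etree.parse('example.xml').getroot()
#     doc = Nexml.factory()
#     doc.build(root)
#     doc.export(sys.stdout, 0, name_='nexml')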
class AbstractObsMatrix(Annotated):
"""The AbstractObsMatrix super class is the abstract type for a
<matrix> element that contains rows which hold granular state
observations."""
subclass = None
superclass = Annotated
def __init__(self, about=None, meta=None, row=None, set=None, valueOf_=None):
super(AbstractObsMatrix, self).__init__(about, meta, )
if row is None:
self.row = []
else:
self.row = row
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if AbstractObsMatrix.subclass:
return AbstractObsMatrix.subclass(*args_, **kwargs_)
else:
return AbstractObsMatrix(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def add_row(self, value): self.row.append(value)
def insert_row(self, index, value): self.row[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='AbstractObsMatrix', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractObsMatrix')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractObsMatrix"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractObsMatrix'):
super(AbstractObsMatrix, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractObsMatrix')
def exportChildren(self, outfile, level, namespace_='', name_='AbstractObsMatrix', fromsubclass_=False):
super(AbstractObsMatrix, self).exportChildren(outfile, level, namespace_, name_, True)
for row_ in self.get_row():
row_.export(outfile, level, namespace_, name_='row')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.row or
self.set or
super(AbstractObsMatrix, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractObsMatrix'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AbstractObsMatrix, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractObsMatrix, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('row=[\n')
level += 1
for row_ in self.row:
showIndent(outfile, level)
outfile.write('model_.AbstractObsRow(\n')
row_.exportLiteral(outfile, level, name_='AbstractObsRow')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.RowSet(\n')
set_.exportLiteral(outfile, level, name_='RowSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AbstractObsMatrix, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'row':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <row> element')
self.row.append(obj_)
elif nodeName_ == 'set':
obj_ = RowSet.factory()
obj_.build(child_)
self.set.append(obj_)
super(AbstractObsMatrix, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractObsMatrix
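# The <row> handling in buildChildren above is polymorphic: it reads the
# child's xsi:type attribute (falling back to a plain 'type' attribute),
# strips any namespace prefix, and resolves the concrete row class through
# globals(). A hand-written equivalent of that lookup would be:
#
#     type_name = child.attrib.get(
#         '{http://www.w3.org/2001/XMLSchema-instance}type',
#         child.attrib.get('type'))
#     row_class = globals()[type_name.split(':')[-1]]
#     row = row_class.factory()
#     row.build(child)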
class AbstractSeqMatrix(Annotated):
"""The AbstractSeqMatrix super class is the abstract type for a
<matrix> element that contains rows which hold raw character
sequences."""
subclass = None
superclass = Annotated
def __init__(self, about=None, meta=None, row=None, set=None, valueOf_=None):
super(AbstractSeqMatrix, self).__init__(about, meta, )
if row is None:
self.row = []
else:
self.row = row
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if AbstractSeqMatrix.subclass:
return AbstractSeqMatrix.subclass(*args_, **kwargs_)
else:
return AbstractSeqMatrix(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def add_row(self, value): self.row.append(value)
def insert_row(self, index, value): self.row[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='AbstractSeqMatrix', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractSeqMatrix')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractSeqMatrix"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractSeqMatrix'):
super(AbstractSeqMatrix, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractSeqMatrix')
def exportChildren(self, outfile, level, namespace_='', name_='AbstractSeqMatrix', fromsubclass_=False):
super(AbstractSeqMatrix, self).exportChildren(outfile, level, namespace_, name_, True)
for row_ in self.get_row():
row_.export(outfile, level, namespace_, name_='row')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.row or
self.set or
super(AbstractSeqMatrix, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractSeqMatrix'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AbstractSeqMatrix, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractSeqMatrix, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('row=[\n')
level += 1
for row_ in self.row:
showIndent(outfile, level)
outfile.write('model_.AbstractSeqRow(\n')
row_.exportLiteral(outfile, level, name_='AbstractSeqRow')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.RowSet(\n')
set_.exportLiteral(outfile, level, name_='RowSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AbstractSeqMatrix, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'row':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <row> element')
self.row.append(obj_)
elif nodeName_ == 'set':
obj_ = RowSet.factory()
obj_.build(child_)
self.set.append(obj_)
super(AbstractSeqMatrix, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractSeqMatrix
class AbstractFormat(Annotated):
"""The AbstractFormat type is the superclass for the element that
defines the allowed characters and states in a matrix, and their
ambiguity mapping. It may enclose AbstractStates elements that
define states and their mappings, and AbstractChar elements that
specify which AbstractStates apply to which matrix columns."""
subclass = None
superclass = Annotated
def __init__(self, about=None, meta=None, states=None, char=None, set=None, valueOf_=None):
super(AbstractFormat, self).__init__(about, meta, )
if states is None:
self.states = []
else:
self.states = states
if char is None:
self.char = []
else:
self.char = char
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if AbstractFormat.subclass:
return AbstractFormat.subclass(*args_, **kwargs_)
else:
return AbstractFormat(*args_, **kwargs_)
factory = staticmethod(factory)
def get_states(self): return self.states
def set_states(self, states): self.states = states
def add_states(self, value): self.states.append(value)
def insert_states(self, index, value): self.states[index] = value
def get_char(self): return self.char
def set_char(self, char): self.char = char
def add_char(self, value): self.char.append(value)
def insert_char(self, index, value): self.char[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='AbstractFormat', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractFormat')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractFormat"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractFormat'):
super(AbstractFormat, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractFormat')
def exportChildren(self, outfile, level, namespace_='', name_='AbstractFormat', fromsubclass_=False):
super(AbstractFormat, self).exportChildren(outfile, level, namespace_, name_, True)
for states_ in self.get_states():
states_.export(outfile, level, namespace_, name_='states')
for char_ in self.get_char():
char_.export(outfile, level, namespace_, name_='char')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.states or
self.char or
self.set or
super(AbstractFormat, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractFormat'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AbstractFormat, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractFormat, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('states=[\n')
level += 1
for states_ in self.states:
showIndent(outfile, level)
outfile.write('model_.AbstractStates(\n')
states_.exportLiteral(outfile, level, name_='AbstractStates')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('char=[\n')
level += 1
for char_ in self.char:
showIndent(outfile, level)
outfile.write('model_.AbstractChar(\n')
char_.exportLiteral(outfile, level, name_='AbstractChar')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.CharSet(\n')
set_.exportLiteral(outfile, level, name_='CharSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AbstractFormat, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'states':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <states> element')
self.states.append(obj_)
elif nodeName_ == 'char':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <char> element')
self.char.append(obj_)
elif nodeName_ == 'set':
obj_ = CharSet.factory()
obj_.build(child_)
self.set.append(obj_)
super(AbstractFormat, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractFormat
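# Sketch (illustrative): a format block aggregates state definitions
# (<states>), column definitions (<char>) and character sets (<set>).
# Using the concrete classes referenced later in this module
# (StandardStates, StandardChar, CharSet), a format could be assembled as:
#
#     fmt = StandardFormat.factory()
#     fmt.add_states(StandardStates.factory())
#     fmt.add_char(StandardChar.factory())
#     fmt.add_set(CharSet.factory())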
class ContinuousObsMatrix(AbstractObsMatrix):
"""A matrix of rows with single character observations."""
subclass = None
superclass = AbstractObsMatrix
def __init__(self, about=None, meta=None, row=None, set=None, valueOf_=None):
super(ContinuousObsMatrix, self).__init__(about, meta, row, set, )
if row is None:
self.row = []
else:
self.row = row
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if ContinuousObsMatrix.subclass:
return ContinuousObsMatrix.subclass(*args_, **kwargs_)
else:
return ContinuousObsMatrix(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def add_row(self, value): self.row.append(value)
def insert_row(self, index, value): self.row[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='ContinuousObsMatrix', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousObsMatrix')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ContinuousObsMatrix'):
super(ContinuousObsMatrix, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousObsMatrix')
def exportChildren(self, outfile, level, namespace_='', name_='ContinuousObsMatrix', fromsubclass_=False):
for row_ in self.row:
row_.export(outfile, level, namespace_, name_='row')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.row or
self.set or
super(ContinuousObsMatrix, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ContinuousObsMatrix'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ContinuousObsMatrix, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ContinuousObsMatrix, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('row=[\n')
level += 1
for row_ in self.row:
showIndent(outfile, level)
outfile.write('model_.ContinuousMatrixObsRow(\n')
row_.exportLiteral(outfile, level, name_='ContinuousMatrixObsRow')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.RowSet(\n')
set_.exportLiteral(outfile, level, name_='RowSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ContinuousObsMatrix, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'row':
obj_ = ContinuousMatrixObsRow.factory()
obj_.build(child_)
self.row.append(obj_)
elif nodeName_ == 'set':
obj_ = RowSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class ContinuousObsMatrix
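# Note the contrast with AbstractObsMatrix: this concrete subclass binds
# <row> directly to ContinuousMatrixObsRow and <set> to RowSet, with no
# xsi:type dispatch. A parsing sketch (matrix_xml is a placeholder string
# holding a <matrix> element):
#
#     from xml.etree import ElementTree as etree
#     node = etree.fromstring(matrix_xml)
#     m = ContinuousObsMatrix.factory()
#     m.build(node)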
class ContinuousSeqMatrix(AbstractSeqMatrix):
"""A matrix of rows with seq strings of type continuous."""
subclass = None
superclass = AbstractSeqMatrix
def __init__(self, about=None, meta=None, row=None, set=None, valueOf_=None):
super(ContinuousSeqMatrix, self).__init__(about, meta, row, set, )
if row is None:
self.row = []
else:
self.row = row
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if ContinuousSeqMatrix.subclass:
return ContinuousSeqMatrix.subclass(*args_, **kwargs_)
else:
return ContinuousSeqMatrix(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def add_row(self, value): self.row.append(value)
def insert_row(self, index, value): self.row[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='ContinuousSeqMatrix', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousSeqMatrix')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ContinuousSeqMatrix'):
super(ContinuousSeqMatrix, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousSeqMatrix')
def exportChildren(self, outfile, level, namespace_='', name_='ContinuousSeqMatrix', fromsubclass_=False):
for row_ in self.row:
row_.export(outfile, level, namespace_, name_='row')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.row or
self.set or
super(ContinuousSeqMatrix, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ContinuousSeqMatrix'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ContinuousSeqMatrix, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ContinuousSeqMatrix, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('row=[\n')
level += 1
for row_ in self.row:
showIndent(outfile, level)
outfile.write('model_.ContinuousMatrixSeqRow(\n')
row_.exportLiteral(outfile, level, name_='ContinuousMatrixSeqRow')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.RowSet(\n')
set_.exportLiteral(outfile, level, name_='RowSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ContinuousSeqMatrix, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'row':
obj_ = ContinuousMatrixSeqRow.factory()
obj_.build(child_)
self.row.append(obj_)
elif nodeName_ == 'set':
obj_ = RowSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class ContinuousSeqMatrix
class ContinuousFormat(AbstractFormat):
"""The ContinuousFormat class is the container of continuous column
definitions."""
subclass = None
superclass = AbstractFormat
def __init__(self, about=None, meta=None, states=None, char=None, set=None, valueOf_=None):
super(ContinuousFormat, self).__init__(about, meta, states, char, set, )
if char is None:
self.char = []
else:
self.char = char
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if ContinuousFormat.subclass:
return ContinuousFormat.subclass(*args_, **kwargs_)
else:
return ContinuousFormat(*args_, **kwargs_)
factory = staticmethod(factory)
def get_char(self): return self.char
def set_char(self, char): self.char = char
def add_char(self, value): self.char.append(value)
def insert_char(self, index, value): self.char[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='ContinuousFormat', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousFormat')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ContinuousFormat'):
super(ContinuousFormat, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousFormat')
def exportChildren(self, outfile, level, namespace_='', name_='ContinuousFormat', fromsubclass_=False):
for char_ in self.char:
char_.export(outfile, level, namespace_, name_='char')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.char or
self.set or
super(ContinuousFormat, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ContinuousFormat'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ContinuousFormat, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ContinuousFormat, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('char=[\n')
level += 1
for char_ in self.char:
showIndent(outfile, level)
outfile.write('model_.ContinuousChar(\n')
char_.exportLiteral(outfile, level, name_='ContinuousChar')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.CharSet(\n')
set_.exportLiteral(outfile, level, name_='CharSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ContinuousFormat, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'char':
obj_ = ContinuousChar.factory()
obj_.build(child_)
self.char.append(obj_)
elif nodeName_ == 'set':
obj_ = CharSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class ContinuousFormat
class Labelled(Annotated):
"""The Labelled complexType is a super class for objects that
optionally have label attributes to use as a (non-unique) name
of type xs:string."""
subclass = None
superclass = Annotated
def __init__(self, about=None, meta=None, label=None, valueOf_=None):
super(Labelled, self).__init__(about, meta, )
        self.label = _cast(None, label)
def factory(*args_, **kwargs_):
if Labelled.subclass:
return Labelled.subclass(*args_, **kwargs_)
else:
return Labelled(*args_, **kwargs_)
factory = staticmethod(factory)
def get_label(self): return self.label
def set_label(self, label): self.label = label
def export(self, outfile, level, namespace_='', name_='Labelled', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Labelled')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="Labelled"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Labelled'):
super(Labelled, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Labelled')
if self.label is not None and 'label' not in already_processed:
already_processed.append('label')
outfile.write(' label=%s' % (self.gds_format_string(quote_attrib(self.label).encode(ExternalEncoding), input_name='label'), ))
def exportChildren(self, outfile, level, namespace_='', name_='Labelled', fromsubclass_=False):
super(Labelled, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(Labelled, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Labelled'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.label is not None and 'label' not in already_processed:
already_processed.append('label')
showIndent(outfile, level)
outfile.write('label = "%s",\n' % (self.label,))
super(Labelled, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Labelled, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('label', node)
if value is not None and 'label' not in already_processed:
already_processed.append('label')
self.label = value
super(Labelled, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        super(Labelled, self).buildChildren(child_, node, nodeName_, True)
# end class Labelled
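# Sketch: Labelled only layers an optional, non-unique label attribute on
# top of Annotated's about/meta handling, e.g.:
#
#     import sys
#     lbl = Labelled.factory()
#     lbl.set_label('my taxon')
#     lbl.export(sys.stdout, 0, name_='labelled')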
class StandardObsMatrix(AbstractObsMatrix):
"""A matrix of rows with single character observations."""
subclass = None
superclass = AbstractObsMatrix
def __init__(self, about=None, meta=None, row=None, set=None, valueOf_=None):
super(StandardObsMatrix, self).__init__(about, meta, row, set, )
if row is None:
self.row = []
else:
self.row = row
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if StandardObsMatrix.subclass:
return StandardObsMatrix.subclass(*args_, **kwargs_)
else:
return StandardObsMatrix(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def add_row(self, value): self.row.append(value)
def insert_row(self, index, value): self.row[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='StandardObsMatrix', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardObsMatrix')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardObsMatrix'):
super(StandardObsMatrix, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardObsMatrix')
def exportChildren(self, outfile, level, namespace_='', name_='StandardObsMatrix', fromsubclass_=False):
for row_ in self.row:
row_.export(outfile, level, namespace_, name_='row')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.row or
self.set or
super(StandardObsMatrix, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StandardObsMatrix'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(StandardObsMatrix, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(StandardObsMatrix, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('row=[\n')
level += 1
for row_ in self.row:
showIndent(outfile, level)
outfile.write('model_.StandardMatrixObsRow(\n')
row_.exportLiteral(outfile, level, name_='StandardMatrixObsRow')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.RowSet(\n')
set_.exportLiteral(outfile, level, name_='RowSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(StandardObsMatrix, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'row':
obj_ = StandardMatrixObsRow.factory()
obj_.build(child_)
self.row.append(obj_)
elif nodeName_ == 'set':
obj_ = RowSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class StandardObsMatrix
class StandardSeqMatrix(AbstractSeqMatrix):
"""A matrix of rows with seq strings of type standard."""
subclass = None
superclass = AbstractSeqMatrix
def __init__(self, about=None, meta=None, row=None, set=None, valueOf_=None):
super(StandardSeqMatrix, self).__init__(about, meta, row, set, )
if row is None:
self.row = []
else:
self.row = row
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if StandardSeqMatrix.subclass:
return StandardSeqMatrix.subclass(*args_, **kwargs_)
else:
return StandardSeqMatrix(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def add_row(self, value): self.row.append(value)
def insert_row(self, index, value): self.row[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='StandardSeqMatrix', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardSeqMatrix')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardSeqMatrix'):
super(StandardSeqMatrix, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardSeqMatrix')
def exportChildren(self, outfile, level, namespace_='', name_='StandardSeqMatrix', fromsubclass_=False):
for row_ in self.row:
row_.export(outfile, level, namespace_, name_='row')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.row or
self.set or
super(StandardSeqMatrix, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StandardSeqMatrix'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(StandardSeqMatrix, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(StandardSeqMatrix, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('row=[\n')
level += 1
for row_ in self.row:
showIndent(outfile, level)
outfile.write('model_.StandardMatrixSeqRow(\n')
row_.exportLiteral(outfile, level, name_='StandardMatrixSeqRow')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.RowSet(\n')
set_.exportLiteral(outfile, level, name_='RowSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(StandardSeqMatrix, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'row':
obj_ = StandardMatrixSeqRow.factory()
obj_.build(child_)
self.row.append(obj_)
elif nodeName_ == 'set':
obj_ = RowSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class StandardSeqMatrix
class StandardFormat(AbstractFormat):
"""The StandardFormat class is the container of standard column
definitions."""
subclass = None
superclass = AbstractFormat
def __init__(self, about=None, meta=None, states=None, char=None, set=None, valueOf_=None):
super(StandardFormat, self).__init__(about, meta, states, char, set, )
if states is None:
self.states = []
else:
self.states = states
if char is None:
self.char = []
else:
self.char = char
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if StandardFormat.subclass:
return StandardFormat.subclass(*args_, **kwargs_)
else:
return StandardFormat(*args_, **kwargs_)
factory = staticmethod(factory)
def get_states(self): return self.states
def set_states(self, states): self.states = states
def add_states(self, value): self.states.append(value)
def insert_states(self, index, value): self.states[index] = value
def get_char(self): return self.char
def set_char(self, char): self.char = char
def add_char(self, value): self.char.append(value)
def insert_char(self, index, value): self.char[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='StandardFormat', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardFormat')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardFormat'):
super(StandardFormat, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardFormat')
def exportChildren(self, outfile, level, namespace_='', name_='StandardFormat', fromsubclass_=False):
for states_ in self.states:
states_.export(outfile, level, namespace_, name_='states')
for char_ in self.char:
char_.export(outfile, level, namespace_, name_='char')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.states or
self.char or
self.set or
super(StandardFormat, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StandardFormat'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(StandardFormat, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(StandardFormat, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('states=[\n')
level += 1
for states_ in self.states:
showIndent(outfile, level)
outfile.write('model_.StandardStates(\n')
states_.exportLiteral(outfile, level, name_='StandardStates')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('char=[\n')
level += 1
for char_ in self.char:
showIndent(outfile, level)
outfile.write('model_.StandardChar(\n')
char_.exportLiteral(outfile, level, name_='StandardChar')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.CharSet(\n')
set_.exportLiteral(outfile, level, name_='CharSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(StandardFormat, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'states':
obj_ = StandardStates.factory()
obj_.build(child_)
self.states.append(obj_)
elif nodeName_ == 'char':
obj_ = StandardChar.factory()
obj_.build(child_)
self.char.append(obj_)
elif nodeName_ == 'set':
obj_ = CharSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class StandardFormat
class RNAObsMatrix(AbstractObsMatrix):
"""A matrix of rows with single character observations."""
subclass = None
superclass = AbstractObsMatrix
def __init__(self, about=None, meta=None, row=None, set=None, valueOf_=None):
super(RNAObsMatrix, self).__init__(about, meta, row, set, )
if row is None:
self.row = []
else:
self.row = row
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if RNAObsMatrix.subclass:
return RNAObsMatrix.subclass(*args_, **kwargs_)
else:
return RNAObsMatrix(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def add_row(self, value): self.row.append(value)
def insert_row(self, index, value): self.row[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='RNAObsMatrix', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RNAObsMatrix')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RNAObsMatrix'):
super(RNAObsMatrix, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RNAObsMatrix')
def exportChildren(self, outfile, level, namespace_='', name_='RNAObsMatrix', fromsubclass_=False):
for row_ in self.row:
row_.export(outfile, level, namespace_, name_='row')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.row or
self.set or
super(RNAObsMatrix, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RNAObsMatrix'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RNAObsMatrix, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RNAObsMatrix, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('row=[\n')
level += 1
for row_ in self.row:
showIndent(outfile, level)
outfile.write('model_.RNAMatrixObsRow(\n')
row_.exportLiteral(outfile, level, name_='RNAMatrixObsRow')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.RowSet(\n')
set_.exportLiteral(outfile, level, name_='RowSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RNAObsMatrix, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'row':
obj_ = RNAMatrixObsRow.factory()
obj_.build(child_)
self.row.append(obj_)
elif nodeName_ == 'set':
obj_ = RowSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class RNAObsMatrix
class RNASeqMatrix(AbstractSeqMatrix):
"""A matrix of rows with seq strings of type RNA."""
subclass = None
superclass = AbstractSeqMatrix
def __init__(self, about=None, meta=None, row=None, set=None, valueOf_=None):
super(RNASeqMatrix, self).__init__(about, meta, row, set, )
if row is None:
self.row = []
else:
self.row = row
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if RNASeqMatrix.subclass:
return RNASeqMatrix.subclass(*args_, **kwargs_)
else:
return RNASeqMatrix(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def add_row(self, value): self.row.append(value)
def insert_row(self, index, value): self.row[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='RNASeqMatrix', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RNASeqMatrix')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RNASeqMatrix'):
super(RNASeqMatrix, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RNASeqMatrix')
def exportChildren(self, outfile, level, namespace_='', name_='RNASeqMatrix', fromsubclass_=False):
for row_ in self.row:
row_.export(outfile, level, namespace_, name_='row')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.row or
self.set or
super(RNASeqMatrix, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RNASeqMatrix'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RNASeqMatrix, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RNASeqMatrix, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('row=[\n')
level += 1
for row_ in self.row:
showIndent(outfile, level)
outfile.write('model_.RNAMatrixSeqRow(\n')
row_.exportLiteral(outfile, level, name_='RNAMatrixSeqRow')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.RowSet(\n')
set_.exportLiteral(outfile, level, name_='RowSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RNASeqMatrix, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'row':
obj_ = RNAMatrixSeqRow.factory()
obj_.build(child_)
self.row.append(obj_)
elif nodeName_ == 'set':
obj_ = RowSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class RNASeqMatrix
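# NOTE (hand-added): despite the name, the generated insert_row()/insert_set()
# accessors assign self.row[index] = value, replacing the element at `index`
# rather than inserting before it; the same holds for every insert_*() method
# in this module. A minimal sketch of the difference, using placeholder row
# values for brevity:
#
#   m = RNASeqMatrix(row=['a', 'b', 'c'])
#   m.insert_row(1, 'x')            # row is now ['a', 'x', 'c']
#   m.get_row().insert(1, 'y')      # true insertion: ['a', 'y', 'x', 'c']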
class RNAFormat(AbstractFormat):
"""The RNAFormat class is the container of RNA column definitions."""
subclass = None
superclass = AbstractFormat
def __init__(self, about=None, meta=None, states=None, char=None, set=None, valueOf_=None):
super(RNAFormat, self).__init__(about, meta, states, char, set, )
if states is None:
self.states = []
else:
self.states = states
if char is None:
self.char = []
else:
self.char = char
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if RNAFormat.subclass:
return RNAFormat.subclass(*args_, **kwargs_)
else:
return RNAFormat(*args_, **kwargs_)
factory = staticmethod(factory)
def get_states(self): return self.states
def set_states(self, states): self.states = states
def add_states(self, value): self.states.append(value)
def insert_states(self, index, value): self.states[index] = value
def get_char(self): return self.char
def set_char(self, char): self.char = char
def add_char(self, value): self.char.append(value)
def insert_char(self, index, value): self.char[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='RNAFormat', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RNAFormat')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RNAFormat'):
super(RNAFormat, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RNAFormat')
def exportChildren(self, outfile, level, namespace_='', name_='RNAFormat', fromsubclass_=False):
for states_ in self.states:
states_.export(outfile, level, namespace_, name_='states')
for char_ in self.char:
char_.export(outfile, level, namespace_, name_='char')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.states or
self.char or
self.set or
super(RNAFormat, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RNAFormat'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RNAFormat, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RNAFormat, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('states=[\n')
level += 1
for states_ in self.states:
showIndent(outfile, level)
outfile.write('model_.RNAStates(\n')
states_.exportLiteral(outfile, level, name_='RNAStates')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('char=[\n')
level += 1
for char_ in self.char:
showIndent(outfile, level)
outfile.write('model_.RNAChar(\n')
char_.exportLiteral(outfile, level, name_='RNAChar')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.CharSet(\n')
set_.exportLiteral(outfile, level, name_='CharSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RNAFormat, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'states':
obj_ = RNAStates.factory()
obj_.build(child_)
self.states.append(obj_)
elif nodeName_ == 'char':
obj_ = RNAChar.factory()
obj_.build(child_)
self.char.append(obj_)
elif nodeName_ == 'set':
obj_ = CharSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class RNAFormat
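# NOTE (hand-added): the build() methods consume ElementTree nodes, so a
# format block can be parsed without the module-level parse helpers. A
# minimal sketch, assuming XML without a namespace prefix (Tag_pattern_
# strips one if present):
#
#   from xml.etree import ElementTree as etree
#   node = etree.fromstring('<format><char id="c1"/><char id="c2"/></format>')
#   fmt = RNAFormat.factory()
#   fmt.build(node)
#   assert len(fmt.get_char()) == 2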
class RestrictionObsMatrix(AbstractObsMatrix):
"""A matrix of rows with single character observations."""
subclass = None
superclass = AbstractObsMatrix
def __init__(self, about=None, meta=None, row=None, set=None, valueOf_=None):
super(RestrictionObsMatrix, self).__init__(about, meta, row, set, )
if row is None:
self.row = []
else:
self.row = row
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if RestrictionObsMatrix.subclass:
return RestrictionObsMatrix.subclass(*args_, **kwargs_)
else:
return RestrictionObsMatrix(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def add_row(self, value): self.row.append(value)
def insert_row(self, index, value): self.row[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='RestrictionObsMatrix', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionObsMatrix')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RestrictionObsMatrix'):
super(RestrictionObsMatrix, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionObsMatrix')
def exportChildren(self, outfile, level, namespace_='', name_='RestrictionObsMatrix', fromsubclass_=False):
for row_ in self.row:
row_.export(outfile, level, namespace_, name_='row')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.row or
self.set or
super(RestrictionObsMatrix, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RestrictionObsMatrix'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RestrictionObsMatrix, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RestrictionObsMatrix, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('row=[\n')
level += 1
for row_ in self.row:
showIndent(outfile, level)
outfile.write('model_.RestrictionMatrixObsRow(\n')
row_.exportLiteral(outfile, level, name_='RestrictionMatrixObsRow')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.RowSet(\n')
set_.exportLiteral(outfile, level, name_='RowSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RestrictionObsMatrix, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'row':
obj_ = RestrictionMatrixObsRow.factory()
obj_.build(child_)
self.row.append(obj_)
elif nodeName_ == 'set':
obj_ = RowSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class RestrictionObsMatrix
class RestrictionSeqMatrix(AbstractSeqMatrix):
"""A matrix of rows with seq strings of type restriction."""
subclass = None
superclass = AbstractSeqMatrix
def __init__(self, about=None, meta=None, row=None, set=None, valueOf_=None):
super(RestrictionSeqMatrix, self).__init__(about, meta, row, set, )
if row is None:
self.row = []
else:
self.row = row
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if RestrictionSeqMatrix.subclass:
return RestrictionSeqMatrix.subclass(*args_, **kwargs_)
else:
return RestrictionSeqMatrix(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def add_row(self, value): self.row.append(value)
def insert_row(self, index, value): self.row[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='RestrictionSeqMatrix', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionSeqMatrix')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RestrictionSeqMatrix'):
super(RestrictionSeqMatrix, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionSeqMatrix')
def exportChildren(self, outfile, level, namespace_='', name_='RestrictionSeqMatrix', fromsubclass_=False):
for row_ in self.row:
row_.export(outfile, level, namespace_, name_='row')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.row or
self.set or
super(RestrictionSeqMatrix, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RestrictionSeqMatrix'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RestrictionSeqMatrix, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RestrictionSeqMatrix, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('row=[\n')
level += 1
for row_ in self.row:
showIndent(outfile, level)
outfile.write('model_.RestrictionMatrixSeqRow(\n')
row_.exportLiteral(outfile, level, name_='RestrictionMatrixSeqRow')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.RowSet(\n')
set_.exportLiteral(outfile, level, name_='RowSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RestrictionSeqMatrix, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'row':
obj_ = RestrictionMatrixSeqRow.factory()
obj_.build(child_)
self.row.append(obj_)
elif nodeName_ == 'set':
obj_ = RowSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class RestrictionSeqMatrix
class RestrictionFormat(AbstractFormat):
"""The RestrictionFormat class is the container of restriction column
definitions."""
subclass = None
superclass = AbstractFormat
def __init__(self, about=None, meta=None, states=None, char=None, set=None, valueOf_=None):
super(RestrictionFormat, self).__init__(about, meta, states, char, set, )
if states is None:
self.states = []
else:
self.states = states
if char is None:
self.char = []
else:
self.char = char
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if RestrictionFormat.subclass:
return RestrictionFormat.subclass(*args_, **kwargs_)
else:
return RestrictionFormat(*args_, **kwargs_)
factory = staticmethod(factory)
def get_states(self): return self.states
def set_states(self, states): self.states = states
def add_states(self, value): self.states.append(value)
def insert_states(self, index, value): self.states[index] = value
def get_char(self): return self.char
def set_char(self, char): self.char = char
def add_char(self, value): self.char.append(value)
def insert_char(self, index, value): self.char[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='RestrictionFormat', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionFormat')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RestrictionFormat'):
super(RestrictionFormat, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionFormat')
def exportChildren(self, outfile, level, namespace_='', name_='RestrictionFormat', fromsubclass_=False):
for states_ in self.states:
states_.export(outfile, level, namespace_, name_='states')
for char_ in self.char:
char_.export(outfile, level, namespace_, name_='char')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.states or
self.char or
self.set or
super(RestrictionFormat, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RestrictionFormat'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RestrictionFormat, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RestrictionFormat, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('states=[\n')
level += 1
for states_ in self.states:
showIndent(outfile, level)
outfile.write('model_.RestrictionStates(\n')
states_.exportLiteral(outfile, level, name_='RestrictionStates')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('char=[\n')
level += 1
for char_ in self.char:
showIndent(outfile, level)
outfile.write('model_.RestrictionChar(\n')
char_.exportLiteral(outfile, level, name_='RestrictionChar')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.CharSet(\n')
set_.exportLiteral(outfile, level, name_='CharSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RestrictionFormat, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'states':
obj_ = RestrictionStates.factory()
obj_.build(child_)
self.states.append(obj_)
elif nodeName_ == 'char':
obj_ = RestrictionChar.factory()
obj_.build(child_)
self.char.append(obj_)
elif nodeName_ == 'set':
obj_ = CharSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class RestrictionFormat
class AAObsMatrix(AbstractObsMatrix):
"""A matrix of rows with single character observations."""
subclass = None
superclass = AbstractObsMatrix
def __init__(self, about=None, meta=None, row=None, set=None, valueOf_=None):
super(AAObsMatrix, self).__init__(about, meta, row, set, )
if row is None:
self.row = []
else:
self.row = row
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if AAObsMatrix.subclass:
return AAObsMatrix.subclass(*args_, **kwargs_)
else:
return AAObsMatrix(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def add_row(self, value): self.row.append(value)
def insert_row(self, index, value): self.row[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='AAObsMatrix', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AAObsMatrix')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AAObsMatrix'):
super(AAObsMatrix, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AAObsMatrix')
def exportChildren(self, outfile, level, namespace_='', name_='AAObsMatrix', fromsubclass_=False):
for row_ in self.row:
row_.export(outfile, level, namespace_, name_='row')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.row or
self.set or
super(AAObsMatrix, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AAObsMatrix'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AAObsMatrix, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AAObsMatrix, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('row=[\n')
level += 1
for row_ in self.row:
showIndent(outfile, level)
outfile.write('model_.AAMatrixObsRow(\n')
row_.exportLiteral(outfile, level, name_='AAMatrixObsRow')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.RowSet(\n')
set_.exportLiteral(outfile, level, name_='RowSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AAObsMatrix, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'row':
obj_ = AAMatrixObsRow.factory()
obj_.build(child_)
self.row.append(obj_)
elif nodeName_ == 'set':
obj_ = RowSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class AAObsMatrix
class AASeqMatrix(AbstractSeqMatrix):
"""A matrix of rows with amino acid data as sequence strings."""
subclass = None
superclass = AbstractSeqMatrix
def __init__(self, about=None, meta=None, row=None, set=None, valueOf_=None):
super(AASeqMatrix, self).__init__(about, meta, row, set, )
if row is None:
self.row = []
else:
self.row = row
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if AASeqMatrix.subclass:
return AASeqMatrix.subclass(*args_, **kwargs_)
else:
return AASeqMatrix(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def add_row(self, value): self.row.append(value)
def insert_row(self, index, value): self.row[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='AASeqMatrix', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AASeqMatrix')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AASeqMatrix'):
super(AASeqMatrix, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AASeqMatrix')
def exportChildren(self, outfile, level, namespace_='', name_='AASeqMatrix', fromsubclass_=False):
for row_ in self.row:
row_.export(outfile, level, namespace_, name_='row')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.row or
self.set or
super(AASeqMatrix, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AASeqMatrix'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AASeqMatrix, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AASeqMatrix, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('row=[\n')
level += 1
for row_ in self.row:
showIndent(outfile, level)
outfile.write('model_.AAMatrixSeqRow(\n')
row_.exportLiteral(outfile, level, name_='AAMatrixSeqRow')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.RowSet(\n')
set_.exportLiteral(outfile, level, name_='RowSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AASeqMatrix, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'row':
obj_ = AAMatrixSeqRow.factory()
obj_.build(child_)
self.row.append(obj_)
elif nodeName_ == 'set':
obj_ = RowSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class AASeqMatrix
class AAFormat(AbstractFormat):
"""The AAFormat class is the container of amino acid column
definitions."""
subclass = None
superclass = AbstractFormat
def __init__(self, about=None, meta=None, states=None, char=None, set=None, valueOf_=None):
super(AAFormat, self).__init__(about, meta, states, char, set, )
if states is None:
self.states = []
else:
self.states = states
if char is None:
self.char = []
else:
self.char = char
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if AAFormat.subclass:
return AAFormat.subclass(*args_, **kwargs_)
else:
return AAFormat(*args_, **kwargs_)
factory = staticmethod(factory)
def get_states(self): return self.states
def set_states(self, states): self.states = states
def add_states(self, value): self.states.append(value)
def insert_states(self, index, value): self.states[index] = value
def get_char(self): return self.char
def set_char(self, char): self.char = char
def add_char(self, value): self.char.append(value)
def insert_char(self, index, value): self.char[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='AAFormat', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AAFormat')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AAFormat'):
super(AAFormat, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AAFormat')
def exportChildren(self, outfile, level, namespace_='', name_='AAFormat', fromsubclass_=False):
for states_ in self.states:
states_.export(outfile, level, namespace_, name_='states')
for char_ in self.char:
char_.export(outfile, level, namespace_, name_='char')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.states or
self.char or
self.set or
super(AAFormat, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AAFormat'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AAFormat, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AAFormat, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('states=[\n')
level += 1
for states_ in self.states:
showIndent(outfile, level)
outfile.write('model_.AAStates(\n')
states_.exportLiteral(outfile, level, name_='AAStates')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('char=[\n')
level += 1
for char_ in self.char:
showIndent(outfile, level)
outfile.write('model_.AAChar(\n')
char_.exportLiteral(outfile, level, name_='AAChar')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.CharSet(\n')
set_.exportLiteral(outfile, level, name_='CharSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AAFormat, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'states':
obj_ = AAStates.factory()
obj_.build(child_)
self.states.append(obj_)
elif nodeName_ == 'char':
obj_ = AAChar.factory()
obj_.build(child_)
self.char.append(obj_)
elif nodeName_ == 'set':
obj_ = CharSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class AAFormat
class DNAObsMatrix(AbstractObsMatrix):
"""A matrix of rows with single character observations."""
subclass = None
superclass = AbstractObsMatrix
def __init__(self, about=None, meta=None, row=None, set=None, valueOf_=None):
super(DNAObsMatrix, self).__init__(about, meta, row, set, )
if row is None:
self.row = []
else:
self.row = row
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if DNAObsMatrix.subclass:
return DNAObsMatrix.subclass(*args_, **kwargs_)
else:
return DNAObsMatrix(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def add_row(self, value): self.row.append(value)
def insert_row(self, index, value): self.row[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='DNAObsMatrix', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DNAObsMatrix')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DNAObsMatrix'):
super(DNAObsMatrix, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DNAObsMatrix')
def exportChildren(self, outfile, level, namespace_='', name_='DNAObsMatrix', fromsubclass_=False):
for row_ in self.row:
row_.export(outfile, level, namespace_, name_='row')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.row or
self.set or
super(DNAObsMatrix, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DNAObsMatrix'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(DNAObsMatrix, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DNAObsMatrix, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('row=[\n')
level += 1
for row_ in self.row:
showIndent(outfile, level)
outfile.write('model_.DNAMatrixObsRow(\n')
row_.exportLiteral(outfile, level, name_='DNAMatrixObsRow')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.RowSet(\n')
set_.exportLiteral(outfile, level, name_='RowSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(DNAObsMatrix, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'row':
obj_ = DNAMatrixObsRow.factory()
obj_.build(child_)
self.row.append(obj_)
elif nodeName_ == 'set':
obj_ = RowSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class DNAObsMatrix
class DNASeqMatrix(AbstractSeqMatrix):
"""A matrix of rows with seq strings of type DNA."""
subclass = None
superclass = AbstractSeqMatrix
def __init__(self, about=None, meta=None, row=None, set=None, valueOf_=None):
super(DNASeqMatrix, self).__init__(about, meta, row, set, )
if row is None:
self.row = []
else:
self.row = row
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if DNASeqMatrix.subclass:
return DNASeqMatrix.subclass(*args_, **kwargs_)
else:
return DNASeqMatrix(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def add_row(self, value): self.row.append(value)
def insert_row(self, index, value): self.row[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='DNASeqMatrix', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DNASeqMatrix')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DNASeqMatrix'):
super(DNASeqMatrix, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DNASeqMatrix')
def exportChildren(self, outfile, level, namespace_='', name_='DNASeqMatrix', fromsubclass_=False):
for row_ in self.row:
row_.export(outfile, level, namespace_, name_='row')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.row or
self.set or
super(DNASeqMatrix, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DNASeqMatrix'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(DNASeqMatrix, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DNASeqMatrix, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('row=[\n')
level += 1
for row_ in self.row:
showIndent(outfile, level)
outfile.write('model_.DNAMatrixSeqRow(\n')
row_.exportLiteral(outfile, level, name_='DNAMatrixSeqRow')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.RowSet(\n')
set_.exportLiteral(outfile, level, name_='RowSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(DNASeqMatrix, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'row':
obj_ = DNAMatrixSeqRow.factory()
obj_.build(child_)
self.row.append(obj_)
elif nodeName_ == 'set':
obj_ = RowSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class DNASeqMatrix
class DNAFormat(AbstractFormat):
"""The DNAFormat class is the container of DNA column definitions."""
subclass = None
superclass = AbstractFormat
def __init__(self, about=None, meta=None, states=None, char=None, set=None, valueOf_=None):
super(DNAFormat, self).__init__(about, meta, states, char, set, )
if states is None:
self.states = []
else:
self.states = states
if char is None:
self.char = []
else:
self.char = char
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if DNAFormat.subclass:
return DNAFormat.subclass(*args_, **kwargs_)
else:
return DNAFormat(*args_, **kwargs_)
factory = staticmethod(factory)
def get_states(self): return self.states
def set_states(self, states): self.states = states
def add_states(self, value): self.states.append(value)
def insert_states(self, index, value): self.states[index] = value
def get_char(self): return self.char
def set_char(self, char): self.char = char
def add_char(self, value): self.char.append(value)
def insert_char(self, index, value): self.char[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='DNAFormat', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DNAFormat')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DNAFormat'):
super(DNAFormat, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DNAFormat')
def exportChildren(self, outfile, level, namespace_='', name_='DNAFormat', fromsubclass_=False):
for states_ in self.states:
states_.export(outfile, level, namespace_, name_='states')
for char_ in self.char:
char_.export(outfile, level, namespace_, name_='char')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.states or
self.char or
self.set or
super(DNAFormat, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DNAFormat'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(DNAFormat, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DNAFormat, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('states=[\n')
level += 1
for states_ in self.states:
showIndent(outfile, level)
outfile.write('model_.DNAStates(\n')
states_.exportLiteral(outfile, level, name_='DNAStates')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('char=[\n')
level += 1
for char_ in self.char:
showIndent(outfile, level)
outfile.write('model_.DNAChar(\n')
char_.exportLiteral(outfile, level, name_='DNAChar')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.CharSet(\n')
set_.exportLiteral(outfile, level, name_='CharSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(DNAFormat, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'states':
obj_ = DNAStates.factory()
obj_.build(child_)
self.states.append(obj_)
elif nodeName_ == 'char':
obj_ = DNAChar.factory()
obj_.build(child_)
self.char.append(obj_)
elif nodeName_ == 'set':
obj_ = CharSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class DNAFormat
class AbstractObs(Labelled):
"""The AbstractObs type is the superclass for single observations, i.e.
cells in a matrix. A concrete instance of AbstractObs has a
"char" attribute that refers to an explicitly defined character
(e.g. in categorical matrices), and a "state" attribute that
either holds a reference to an explicitly defined state, or a
raw state value (a continuous value)."""
subclass = None
superclass = Labelled
def __init__(self, about=None, meta=None, label=None, char=None, state=None, valueOf_=None):
super(AbstractObs, self).__init__(about, meta, label, )
self.char = _cast(None, char)
self.state = _cast(None, state)
pass
def factory(*args_, **kwargs_):
if AbstractObs.subclass:
return AbstractObs.subclass(*args_, **kwargs_)
else:
return AbstractObs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_char(self): return self.char
def set_char(self, char): self.char = char
def get_state(self): return self.state
def set_state(self, state): self.state = state
def export(self, outfile, level, namespace_='', name_='AbstractObs', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractObs')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractObs"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractObs'):
super(AbstractObs, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractObs')
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
outfile.write(' char=%s' % (quote_attrib(self.char), ))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
outfile.write(' state=%s' % (quote_attrib(self.state), ))
def exportChildren(self, outfile, level, namespace_='', name_='AbstractObs', fromsubclass_=False):
super(AbstractObs, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(AbstractObs, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractObs'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
showIndent(outfile, level)
outfile.write('char = %s,\n' % (self.char,))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
showIndent(outfile, level)
outfile.write('state = %s,\n' % (self.state,))
super(AbstractObs, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractObs, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('char', node)
if value is not None and 'char' not in already_processed:
already_processed.append('char')
self.char = value
value = find_attr_value_('state', node)
if value is not None and 'state' not in already_processed:
already_processed.append('state')
self.state = value
super(AbstractObs, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(AbstractObs, self).buildChildren(child_, node, nodeName_, True)
pass
# end class AbstractObs
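# NOTE (hand-added): AbstractObs serializes its char and state attributes
# directly and writes an explicit xsi:type, so concrete subclasses can be
# recovered on parse. A minimal sketch of serializing one cell to stdout
# (output shown roughly, with the xsi namespace URI abbreviated):
#
#   import sys
#   obs = AbstractObs(char='c1', state='s3')
#   obs.export(sys.stdout, 1, name_='cell')
#   # -> <cell char="c1" state="s3" xmlns:xsi="..." xsi:type="AbstractObs"/>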
class ContinuousObs(AbstractObs):
"""This is a single cell in a matrix containing a continuous
observation."""
subclass = None
superclass = AbstractObs
def __init__(self, about=None, meta=None, label=None, char=None, state=None, valueOf_=None):
super(ContinuousObs, self).__init__(about, meta, label, char, state, )
self.char = _cast(None, char)
self.state = _cast(None, state)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if ContinuousObs.subclass:
return ContinuousObs.subclass(*args_, **kwargs_)
else:
return ContinuousObs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_char(self): return self.char
def set_char(self, char): self.char = char
def get_state(self): return self.state
def set_state(self, state): self.state = state
def validate_ContinuousToken(self, value):
# Validate type ContinuousToken, a restriction on xs:double.
pass
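    # NOTE (hand-added): generateDS emits validators like the one above as
    # empty stubs; range or facet checks for ContinuousToken would go here if
    # the schema constrained it beyond xs:double.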
def export(self, outfile, level, namespace_='', name_='ContinuousObs', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousObs')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ContinuousObs'):
super(ContinuousObs, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousObs')
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
outfile.write(' char=%s' % (self.gds_format_string(quote_attrib(self.char).encode(ExternalEncoding), input_name='char'), ))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
outfile.write(' state=%s' % (quote_attrib(self.state), ))
def exportChildren(self, outfile, level, namespace_='', name_='ContinuousObs', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(ContinuousObs, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ContinuousObs'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
showIndent(outfile, level)
outfile.write('char = "%s",\n' % (self.char,))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
showIndent(outfile, level)
outfile.write('state = %e,\n' % (self.state,))
super(ContinuousObs, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ContinuousObs, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('char', node)
if value is not None and 'char' not in already_processed:
already_processed.append('char')
self.char = value
value = find_attr_value_('state', node)
if value is not None and 'state' not in already_processed:
already_processed.append('state')
try:
self.state = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (state): %s' % exp)
self.validate_ContinuousToken(self.state) # validate type ContinuousToken
super(ContinuousObs, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class ContinuousObs
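# NOTE (hand-added): unlike AbstractObs, ContinuousObs coerces the state
# attribute to float while parsing (see buildAttributes above), raising
# ValueError for non-numeric input. A minimal sketch:
#
#   from xml.etree import ElementTree as etree
#   cell = ContinuousObs.factory()
#   cell.build(etree.fromstring('<cell char="c1" state="-1.5"/>'))
#   assert cell.get_state() == -1.5 and isinstance(cell.get_state(), float)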
class IDTagged(Labelled):
"""The IDTagged complexType is a super class for objects that require
unique id attributes of type xs:ID. The id must be unique within
the XML document."""
subclass = None
superclass = Labelled
def __init__(self, about=None, meta=None, label=None, id=None, valueOf_=None):
super(IDTagged, self).__init__(about, meta, label, )
self.id = _cast(None, id)
pass
def factory(*args_, **kwargs_):
if IDTagged.subclass:
return IDTagged.subclass(*args_, **kwargs_)
else:
return IDTagged(*args_, **kwargs_)
factory = staticmethod(factory)
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='IDTagged', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IDTagged')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="IDTagged"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IDTagged'):
super(IDTagged, self).exportAttributes(outfile, level, already_processed, namespace_, name_='IDTagged')
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='IDTagged', fromsubclass_=False):
super(IDTagged, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(IDTagged, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='IDTagged'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
showIndent(outfile, level)
outfile.write('id = "%s",\n' % (self.id,))
super(IDTagged, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(IDTagged, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
super(IDTagged, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(IDTagged, self).buildChildren(child_, node, nodeName_, True)
pass
# end class IDTagged
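# NOTE (hand-added): the id attribute is declared xs:ID, so it must be unique
# within the XML document, but the generated bindings do not enforce that
# themselves; uniqueness is left to schema validation of the serialized
# output. A minimal round-trip sketch:
#
#   from xml.etree import ElementTree as etree
#   obj = IDTagged.factory()
#   obj.build(etree.fromstring('<thing id="x1"/>'))
#   assert obj.get_id() == 'x1'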
class Taxa(IDTagged):
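    """The Taxa class is the container of otu (Taxon) elements and of the
    taxon sets that group them."""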
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, set=None, valueOf_=None):
super(Taxa, self).__init__(about, meta, label, id, )
if otu is None:
self.otu = []
else:
self.otu = otu
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if Taxa.subclass:
return Taxa.subclass(*args_, **kwargs_)
else:
return Taxa(*args_, **kwargs_)
factory = staticmethod(factory)
def get_otu(self): return self.otu
def set_otu(self, otu): self.otu = otu
def add_otu(self, value): self.otu.append(value)
def insert_otu(self, index, value): self.otu[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='Taxa', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Taxa')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="Taxa"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Taxa'):
super(Taxa, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Taxa')
def exportChildren(self, outfile, level, namespace_='', name_='Taxa', fromsubclass_=False):
super(Taxa, self).exportChildren(outfile, level, namespace_, name_, True)
for otu_ in self.otu:
otu_.export(outfile, level, namespace_, name_='otu')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.otu or
self.set or
super(Taxa, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Taxa'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(Taxa, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Taxa, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('otu=[\n')
level += 1
for otu_ in self.otu:
showIndent(outfile, level)
outfile.write('model_.Taxon(\n')
otu_.exportLiteral(outfile, level, name_='Taxon')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.TaxonSet(\n')
set_.exportLiteral(outfile, level, name_='TaxonSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(Taxa, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'otu':
obj_ = Taxon.factory()
obj_.build(child_)
self.otu.append(obj_)
elif nodeName_ == 'set':
obj_ = TaxonSet.factory()
obj_.build(child_)
self.set.append(obj_)
super(Taxa, self).buildChildren(child_, node, nodeName_, True)
# end class Taxa
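# Editor's note: a hedged usage sketch, not generated code. It shows how a
# Taxa block is typically assembled and serialised with the accessors defined
# above. The element name 'otus' is an assumption based on NeXML-style
# documents, where a taxa block is written as an <otus> element.
def _example_export_taxa():
    import sys
    taxa = Taxa(id='taxa1', label='example taxa block')
    taxa.add_otu(Taxon(id='otu1', label='Homo sapiens'))
    taxa.add_otu(Taxon(id='otu2', label='Pan troglodytes'))
    # level=0 starts indentation at the left margin.
    taxa.export(sys.stdout, 0, namespace_='', name_='otus')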
class Taxon(IDTagged):
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, valueOf_=None):
super(Taxon, self).__init__(about, meta, label, id, )
pass
def factory(*args_, **kwargs_):
if Taxon.subclass:
return Taxon.subclass(*args_, **kwargs_)
else:
return Taxon(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='Taxon', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Taxon')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="Taxon"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Taxon'):
super(Taxon, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Taxon')
def exportChildren(self, outfile, level, namespace_='', name_='Taxon', fromsubclass_=False):
super(Taxon, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(Taxon, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Taxon'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(Taxon, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Taxon, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(Taxon, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(Taxon, self).buildChildren(child_, node, nodeName_, True)
pass
# end class Taxon
class AbstractTrees(IDTagged):
"""The AbstractTrees superclass is what concrete trees inherit from."""
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, network=None, tree=None, set=None, valueOf_=None):
super(AbstractTrees, self).__init__(about, meta, label, id, )
if network is None:
self.network = []
else:
self.network = network
if tree is None:
self.tree = []
else:
self.tree = tree
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if AbstractTrees.subclass:
return AbstractTrees.subclass(*args_, **kwargs_)
else:
return AbstractTrees(*args_, **kwargs_)
factory = staticmethod(factory)
def get_network(self): return self.network
def set_network(self, network): self.network = network
def add_network(self, value): self.network.append(value)
def insert_network(self, index, value): self.network[index] = value
def get_tree(self): return self.tree
def set_tree(self, tree): self.tree = tree
def add_tree(self, value): self.tree.append(value)
def insert_tree(self, index, value): self.tree[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='AbstractTrees', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractTrees')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractTrees"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractTrees'):
super(AbstractTrees, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractTrees')
def exportChildren(self, outfile, level, namespace_='', name_='AbstractTrees', fromsubclass_=False):
super(AbstractTrees, self).exportChildren(outfile, level, namespace_, name_, True)
for network_ in self.get_network():
network_.export(outfile, level, namespace_, name_='network')
for tree_ in self.get_tree():
tree_.export(outfile, level, namespace_, name_='tree')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.network or
self.tree or
self.set or
super(AbstractTrees, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractTrees'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AbstractTrees, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractTrees, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('network=[\n')
level += 1
for network_ in self.network:
showIndent(outfile, level)
outfile.write('model_.AbstractNetwork(\n')
network_.exportLiteral(outfile, level, name_='AbstractNetwork')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('tree=[\n')
level += 1
for tree_ in self.tree:
showIndent(outfile, level)
outfile.write('model_.AbstractTree(\n')
tree_.exportLiteral(outfile, level, name_='AbstractTree')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.TreeAndNetworkSet(\n')
set_.exportLiteral(outfile, level, name_='TreeAndNetworkSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AbstractTrees, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'network':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <network> element')
self.network.append(obj_)
elif nodeName_ == 'tree':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <tree> element')
self.tree.append(obj_)
elif nodeName_ == 'set':
obj_ = TreeAndNetworkSet.factory()
obj_.build(child_)
self.set.append(obj_)
super(AbstractTrees, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractTrees
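# Editor's note: sketch (an illustration, not generated code) of the xsi:type
# dispatch used by AbstractTrees.buildChildren above. An element such as
# <tree xsi:type="nex:FloatTree"/> has its type attribute split on ':' and
# the local name looked up in this module's globals(), so parsing
# instantiates the matching concrete class (e.g. FloatTree) at runtime.
def _example_xsi_type_dispatch(type_attr):
    type_names = type_attr.split(':')
    if len(type_names) == 1:
        local_name = type_names[0]
    else:
        local_name = type_names[1]
    # e.g. _example_xsi_type_dispatch('nex:FloatTree') returns FloatTree
    return globals()[local_name]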
class AbstractNetwork(IDTagged):
"""The AbstractNetwork superclass is what a concrete network inherits
from."""
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, node=None, edge=None, set=None, valueOf_=None):
super(AbstractNetwork, self).__init__(about, meta, label, id, )
if node is None:
self.node = []
else:
self.node = node
if edge is None:
self.edge = []
else:
self.edge = edge
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if AbstractNetwork.subclass:
return AbstractNetwork.subclass(*args_, **kwargs_)
else:
return AbstractNetwork(*args_, **kwargs_)
factory = staticmethod(factory)
def get_node(self): return self.node
def set_node(self, node): self.node = node
def add_node(self, value): self.node.append(value)
def insert_node(self, index, value): self.node[index] = value
def get_edge(self): return self.edge
def set_edge(self, edge): self.edge = edge
def add_edge(self, value): self.edge.append(value)
def insert_edge(self, index, value): self.edge[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='AbstractNetwork', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractNetwork')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractNetwork"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractNetwork'):
super(AbstractNetwork, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractNetwork')
def exportChildren(self, outfile, level, namespace_='', name_='AbstractNetwork', fromsubclass_=False):
super(AbstractNetwork, self).exportChildren(outfile, level, namespace_, name_, True)
for node_ in self.get_node():
node_.export(outfile, level, namespace_, name_='node')
for edge_ in self.get_edge():
edge_.export(outfile, level, namespace_, name_='edge')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.node or
self.edge or
self.set or
super(AbstractNetwork, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractNetwork'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AbstractNetwork, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractNetwork, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('node=[\n')
level += 1
for node_ in self.node:
showIndent(outfile, level)
outfile.write('model_.AbstractNode(\n')
node_.exportLiteral(outfile, level, name_='AbstractNode')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('edge=[\n')
level += 1
for edge_ in self.edge:
showIndent(outfile, level)
outfile.write('model_.AbstractEdge(\n')
edge_.exportLiteral(outfile, level, name_='AbstractEdge')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.NodeAndRootEdgeAndEdgeSet(\n')
set_.exportLiteral(outfile, level, name_='NodeAndRootEdgeAndEdgeSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AbstractNetwork, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'node':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <node> element')
self.node.append(obj_)
elif nodeName_ == 'edge':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <edge> element')
self.edge.append(obj_)
elif nodeName_ == 'set':
obj_ = NodeAndRootEdgeAndEdgeSet.factory()
obj_.build(child_)
self.set.append(obj_)
super(AbstractNetwork, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractNetwork
class AbstractTree(IDTagged):
"""The AbstractTree superclass is what a concrete tree inherits from."""
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, node=None, rootedge=None, edge=None, set=None, valueOf_=None):
super(AbstractTree, self).__init__(about, meta, label, id, )
if node is None:
self.node = []
else:
self.node = node
self.rootedge = rootedge
if edge is None:
self.edge = []
else:
self.edge = edge
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if AbstractTree.subclass:
return AbstractTree.subclass(*args_, **kwargs_)
else:
return AbstractTree(*args_, **kwargs_)
factory = staticmethod(factory)
def get_node(self): return self.node
def set_node(self, node): self.node = node
def add_node(self, value): self.node.append(value)
def insert_node(self, index, value): self.node[index] = value
def get_rootedge(self): return self.rootedge
def set_rootedge(self, rootedge): self.rootedge = rootedge
def get_edge(self): return self.edge
def set_edge(self, edge): self.edge = edge
def add_edge(self, value): self.edge.append(value)
def insert_edge(self, index, value): self.edge[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='AbstractTree', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractTree')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractTree"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractTree'):
super(AbstractTree, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractTree')
def exportChildren(self, outfile, level, namespace_='', name_='AbstractTree', fromsubclass_=False):
super(AbstractTree, self).exportChildren(outfile, level, namespace_, name_, True)
for node_ in self.get_node():
node_.export(outfile, level, namespace_, name_='node')
        if self.rootedge:
            self.rootedge.export(outfile, level, namespace_, name_='rootedge')
for edge_ in self.get_edge():
edge_.export(outfile, level, namespace_, name_='edge')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.node or
self.rootedge is not None or
self.edge or
self.set or
super(AbstractTree, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractTree'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AbstractTree, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractTree, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('node=[\n')
level += 1
for node_ in self.node:
showIndent(outfile, level)
outfile.write('model_.AbstractNode(\n')
node_.exportLiteral(outfile, level, name_='AbstractNode')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
        if self.rootedge is not None:
            showIndent(outfile, level)
            outfile.write('rootedge=model_.AbstractRootEdge(\n')
            self.rootedge.exportLiteral(outfile, level, name_='rootedge')
            showIndent(outfile, level)
            outfile.write('),\n')
showIndent(outfile, level)
outfile.write('edge=[\n')
level += 1
for edge_ in self.edge:
showIndent(outfile, level)
outfile.write('model_.AbstractEdge(\n')
edge_.exportLiteral(outfile, level, name_='AbstractEdge')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.NodeAndRootEdgeAndEdgeSet(\n')
set_.exportLiteral(outfile, level, name_='NodeAndRootEdgeAndEdgeSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AbstractTree, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'node':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <node> element')
self.node.append(obj_)
elif nodeName_ == 'rootedge':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <rootedge> element')
self.set_rootedge(obj_)
elif nodeName_ == 'edge':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <edge> element')
self.edge.append(obj_)
elif nodeName_ == 'set':
obj_ = NodeAndRootEdgeAndEdgeSet.factory()
obj_.build(child_)
self.set.append(obj_)
super(AbstractTree, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractTree
class AbstractRootEdge(IDTagged):
"""The AbstractRootEdge complex type is a superclass for the edge that
leads into a root, i.e. an edge with only a target attribute,
but no source attribute. This type of edge is used for
coalescent trees, where the initial lineage has a certain length
before things start splitting up."""
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, length=None, target=None, valueOf_=None):
super(AbstractRootEdge, self).__init__(about, meta, label, id, )
self.length = _cast(None, length)
self.target = _cast(None, target)
pass
def factory(*args_, **kwargs_):
if AbstractRootEdge.subclass:
return AbstractRootEdge.subclass(*args_, **kwargs_)
else:
return AbstractRootEdge(*args_, **kwargs_)
factory = staticmethod(factory)
def get_length(self): return self.length
def set_length(self, length): self.length = length
def get_target(self): return self.target
def set_target(self, target): self.target = target
def export(self, outfile, level, namespace_='', name_='AbstractRootEdge', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractRootEdge')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractRootEdge"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractRootEdge'):
super(AbstractRootEdge, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractRootEdge')
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
outfile.write(' length=%s' % (quote_attrib(self.length), ))
if self.target is not None and 'target' not in already_processed:
already_processed.append('target')
outfile.write(' target=%s' % (self.gds_format_string(quote_attrib(self.target).encode(ExternalEncoding), input_name='target'), ))
def exportChildren(self, outfile, level, namespace_='', name_='AbstractRootEdge', fromsubclass_=False):
super(AbstractRootEdge, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(AbstractRootEdge, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractRootEdge'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
showIndent(outfile, level)
outfile.write('length = %s,\n' % (self.length,))
if self.target is not None and 'target' not in already_processed:
already_processed.append('target')
showIndent(outfile, level)
outfile.write('target = "%s",\n' % (self.target,))
super(AbstractRootEdge, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractRootEdge, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('length', node)
if value is not None and 'length' not in already_processed:
already_processed.append('length')
self.length = value
value = find_attr_value_('target', node)
if value is not None and 'target' not in already_processed:
already_processed.append('target')
self.target = value
super(AbstractRootEdge, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(AbstractRootEdge, self).buildChildren(child_, node, nodeName_, True)
pass
# end class AbstractRootEdge
class AbstractEdge(IDTagged):
"""The AbstractEdge superclass is what concrete edges inherit from by
restriction. It represents an edge element much like that of
GraphML, i.e. an element that connects node elements."""
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, source=None, length=None, target=None, valueOf_=None):
super(AbstractEdge, self).__init__(about, meta, label, id, )
self.source = _cast(None, source)
self.length = _cast(None, length)
self.target = _cast(None, target)
pass
def factory(*args_, **kwargs_):
if AbstractEdge.subclass:
return AbstractEdge.subclass(*args_, **kwargs_)
else:
return AbstractEdge(*args_, **kwargs_)
factory = staticmethod(factory)
def get_source(self): return self.source
def set_source(self, source): self.source = source
def get_length(self): return self.length
def set_length(self, length): self.length = length
def get_target(self): return self.target
def set_target(self, target): self.target = target
def export(self, outfile, level, namespace_='', name_='AbstractEdge', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractEdge')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractEdge"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractEdge'):
super(AbstractEdge, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractEdge')
if self.source is not None and 'source' not in already_processed:
already_processed.append('source')
outfile.write(' source=%s' % (self.gds_format_string(quote_attrib(self.source).encode(ExternalEncoding), input_name='source'), ))
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
outfile.write(' length=%s' % (quote_attrib(self.length), ))
if self.target is not None and 'target' not in already_processed:
already_processed.append('target')
outfile.write(' target=%s' % (self.gds_format_string(quote_attrib(self.target).encode(ExternalEncoding), input_name='target'), ))
def exportChildren(self, outfile, level, namespace_='', name_='AbstractEdge', fromsubclass_=False):
super(AbstractEdge, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(AbstractEdge, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractEdge'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.source is not None and 'source' not in already_processed:
already_processed.append('source')
showIndent(outfile, level)
outfile.write('source = "%s",\n' % (self.source,))
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
showIndent(outfile, level)
outfile.write('length = %s,\n' % (self.length,))
if self.target is not None and 'target' not in already_processed:
already_processed.append('target')
showIndent(outfile, level)
outfile.write('target = "%s",\n' % (self.target,))
super(AbstractEdge, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractEdge, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('source', node)
if value is not None and 'source' not in already_processed:
already_processed.append('source')
self.source = value
value = find_attr_value_('length', node)
if value is not None and 'length' not in already_processed:
already_processed.append('length')
self.length = value
value = find_attr_value_('target', node)
if value is not None and 'target' not in already_processed:
already_processed.append('target')
self.target = value
super(AbstractEdge, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(AbstractEdge, self).buildChildren(child_, node, nodeName_, True)
pass
# end class AbstractEdge
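# Editor's note: hedged sketch of a subtlety in the hierarchy above, assuming
# the module's standard _cast helper (return the value unchanged when the
# type is None, else coerce): AbstractEdge keeps `length` as the raw
# attribute string, while the concrete subclasses further below (TreeIntEdge,
# TreeFloatEdge) re-cast it to int or float.
def _example_length_coercion():
    e = AbstractEdge(length='2.5')       # base class: _cast(None, ...) keeps the string
    f = TreeFloatEdge(length='2.5')      # concrete class: _cast(float, ...) coerces
    return e.get_length(), f.get_length()    # ('2.5', 2.5)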
class IntTree(AbstractTree):
"""A concrete tree implementation, with integer edge lengths."""
subclass = None
superclass = AbstractTree
def __init__(self, about=None, meta=None, label=None, id=None, node=None, rootedge=None, edge=None, set=None, valueOf_=None):
super(IntTree, self).__init__(about, meta, label, id, node, rootedge, edge, set, )
if meta is None:
self.meta = []
else:
self.meta = meta
if node is None:
self.node = []
else:
self.node = node
self.rootedge = rootedge
if edge is None:
self.edge = []
else:
self.edge = edge
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if IntTree.subclass:
return IntTree.subclass(*args_, **kwargs_)
else:
return IntTree(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_node(self): return self.node
def set_node(self, node): self.node = node
def add_node(self, value): self.node.append(value)
def insert_node(self, index, value): self.node[index] = value
def get_rootedge(self): return self.rootedge
def set_rootedge(self, rootedge): self.rootedge = rootedge
def get_edge(self): return self.edge
def set_edge(self, edge): self.edge = edge
def add_edge(self, value): self.edge.append(value)
def insert_edge(self, index, value): self.edge[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='IntTree', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IntTree')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IntTree'):
super(IntTree, self).exportAttributes(outfile, level, already_processed, namespace_, name_='IntTree')
def exportChildren(self, outfile, level, namespace_='', name_='IntTree', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
for node_ in self.node:
node_.export(outfile, level, namespace_, name_='node')
if self.rootedge:
self.rootedge.export(outfile, level, namespace_, name_='rootedge')
for edge_ in self.edge:
edge_.export(outfile, level, namespace_, name_='edge')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.meta or
self.node or
self.rootedge is not None or
self.edge or
self.set or
super(IntTree, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='IntTree'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(IntTree, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(IntTree, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('node=[\n')
level += 1
for node_ in self.node:
showIndent(outfile, level)
outfile.write('model_.TreeNode(\n')
node_.exportLiteral(outfile, level, name_='TreeNode')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.rootedge is not None:
showIndent(outfile, level)
outfile.write('rootedge=model_.TreeIntRootEdge(\n')
self.rootedge.exportLiteral(outfile, level, name_='rootedge')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('edge=[\n')
level += 1
for edge_ in self.edge:
showIndent(outfile, level)
outfile.write('model_.TreeIntEdge(\n')
edge_.exportLiteral(outfile, level, name_='TreeIntEdge')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.NodeAndRootEdgeAndEdgeSet(\n')
set_.exportLiteral(outfile, level, name_='NodeAndRootEdgeAndEdgeSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(IntTree, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'node':
obj_ = TreeNode.factory()
obj_.build(child_)
self.node.append(obj_)
elif nodeName_ == 'rootedge':
obj_ = TreeIntRootEdge.factory()
obj_.build(child_)
self.set_rootedge(obj_)
elif nodeName_ == 'edge':
obj_ = TreeIntEdge.factory()
obj_.build(child_)
self.edge.append(obj_)
elif nodeName_ == 'set':
obj_ = NodeAndRootEdgeAndEdgeSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class IntTree
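# Editor's note: hedged usage sketch, not generated code. It assumes TreeNode
# is the node class generated elsewhere in this module (it is the type that
# IntTree.buildChildren instantiates for <node> children) and that it accepts
# the common id keyword like the classes above.
def _example_build_int_tree():
    import sys
    tree = IntTree(id='tree1', label='example tree')
    tree.add_node(TreeNode(id='n1'))
    tree.add_node(TreeNode(id='n2'))
    tree.add_edge(TreeIntEdge(id='e1', source='n1', target='n2', length=3))
    tree.export(sys.stdout, 1, namespace_='', name_='tree')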
class FloatTree(AbstractTree):
"""A concrete tree implementation, with floating point edge lengths."""
subclass = None
superclass = AbstractTree
def __init__(self, about=None, meta=None, label=None, id=None, node=None, rootedge=None, edge=None, set=None, valueOf_=None):
super(FloatTree, self).__init__(about, meta, label, id, node, rootedge, edge, set, )
if meta is None:
self.meta = []
else:
self.meta = meta
if node is None:
self.node = []
else:
self.node = node
self.rootedge = rootedge
if edge is None:
self.edge = []
else:
self.edge = edge
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if FloatTree.subclass:
return FloatTree.subclass(*args_, **kwargs_)
else:
return FloatTree(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_node(self): return self.node
def set_node(self, node): self.node = node
def add_node(self, value): self.node.append(value)
def insert_node(self, index, value): self.node[index] = value
def get_rootedge(self): return self.rootedge
def set_rootedge(self, rootedge): self.rootedge = rootedge
def get_edge(self): return self.edge
def set_edge(self, edge): self.edge = edge
def add_edge(self, value): self.edge.append(value)
def insert_edge(self, index, value): self.edge[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='FloatTree', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='FloatTree')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='FloatTree'):
super(FloatTree, self).exportAttributes(outfile, level, already_processed, namespace_, name_='FloatTree')
def exportChildren(self, outfile, level, namespace_='', name_='FloatTree', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
for node_ in self.node:
node_.export(outfile, level, namespace_, name_='node')
if self.rootedge:
self.rootedge.export(outfile, level, namespace_, name_='rootedge')
for edge_ in self.edge:
edge_.export(outfile, level, namespace_, name_='edge')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.meta or
self.node or
self.rootedge is not None or
self.edge or
self.set or
super(FloatTree, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='FloatTree'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(FloatTree, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(FloatTree, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('node=[\n')
level += 1
for node_ in self.node:
showIndent(outfile, level)
outfile.write('model_.TreeNode(\n')
node_.exportLiteral(outfile, level, name_='TreeNode')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.rootedge is not None:
showIndent(outfile, level)
outfile.write('rootedge=model_.TreeFloatRootEdge(\n')
self.rootedge.exportLiteral(outfile, level, name_='rootedge')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('edge=[\n')
level += 1
for edge_ in self.edge:
showIndent(outfile, level)
outfile.write('model_.TreeFloatEdge(\n')
edge_.exportLiteral(outfile, level, name_='TreeFloatEdge')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.NodeAndRootEdgeAndEdgeSet(\n')
set_.exportLiteral(outfile, level, name_='NodeAndRootEdgeAndEdgeSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(FloatTree, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'node':
obj_ = TreeNode.factory()
obj_.build(child_)
self.node.append(obj_)
elif nodeName_ == 'rootedge':
obj_ = TreeFloatRootEdge.factory()
obj_.build(child_)
self.set_rootedge(obj_)
elif nodeName_ == 'edge':
obj_ = TreeFloatEdge.factory()
obj_.build(child_)
self.edge.append(obj_)
elif nodeName_ == 'set':
obj_ = NodeAndRootEdgeAndEdgeSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class FloatTree
class TreeIntRootEdge(AbstractRootEdge):
"""A concrete root edge implementation, with int length."""
subclass = None
superclass = AbstractRootEdge
def __init__(self, about=None, meta=None, label=None, id=None, length=None, target=None, valueOf_=None):
super(TreeIntRootEdge, self).__init__(about, meta, label, id, length, target, )
self.length = _cast(int, length)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if TreeIntRootEdge.subclass:
return TreeIntRootEdge.subclass(*args_, **kwargs_)
else:
return TreeIntRootEdge(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_length(self): return self.length
def set_length(self, length): self.length = length
def export(self, outfile, level, namespace_='', name_='TreeIntRootEdge', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TreeIntRootEdge')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TreeIntRootEdge'):
super(TreeIntRootEdge, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TreeIntRootEdge')
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
outfile.write(' length="%s"' % self.gds_format_integer(self.length, input_name='length'))
def exportChildren(self, outfile, level, namespace_='', name_='TreeIntRootEdge', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(TreeIntRootEdge, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='TreeIntRootEdge'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
showIndent(outfile, level)
outfile.write('length = %d,\n' % (self.length,))
super(TreeIntRootEdge, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(TreeIntRootEdge, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('length', node)
if value is not None and 'length' not in already_processed:
already_processed.append('length')
try:
self.length = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
super(TreeIntRootEdge, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class TreeIntRootEdge
class TreeIntEdge(AbstractEdge):
"""A concrete edge implementation, with int length."""
subclass = None
superclass = AbstractEdge
def __init__(self, about=None, meta=None, label=None, id=None, source=None, length=None, target=None, valueOf_=None):
super(TreeIntEdge, self).__init__(about, meta, label, id, source, length, target, )
self.length = _cast(int, length)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if TreeIntEdge.subclass:
return TreeIntEdge.subclass(*args_, **kwargs_)
else:
return TreeIntEdge(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_length(self): return self.length
def set_length(self, length): self.length = length
def export(self, outfile, level, namespace_='', name_='TreeIntEdge', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TreeIntEdge')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TreeIntEdge'):
super(TreeIntEdge, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TreeIntEdge')
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
outfile.write(' length="%s"' % self.gds_format_integer(self.length, input_name='length'))
def exportChildren(self, outfile, level, namespace_='', name_='TreeIntEdge', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(TreeIntEdge, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='TreeIntEdge'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
showIndent(outfile, level)
outfile.write('length = %d,\n' % (self.length,))
super(TreeIntEdge, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(TreeIntEdge, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('length', node)
if value is not None and 'length' not in already_processed:
already_processed.append('length')
try:
self.length = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
super(TreeIntEdge, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class TreeIntEdge
class TreeFloatRootEdge(AbstractRootEdge):
"""A concrete root edge implementation, with float length."""
subclass = None
superclass = AbstractRootEdge
def __init__(self, about=None, meta=None, label=None, id=None, length=None, target=None, valueOf_=None):
super(TreeFloatRootEdge, self).__init__(about, meta, label, id, length, target, )
self.length = _cast(float, length)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if TreeFloatRootEdge.subclass:
return TreeFloatRootEdge.subclass(*args_, **kwargs_)
else:
return TreeFloatRootEdge(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_length(self): return self.length
def set_length(self, length): self.length = length
def export(self, outfile, level, namespace_='', name_='TreeFloatRootEdge', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TreeFloatRootEdge')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TreeFloatRootEdge'):
super(TreeFloatRootEdge, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TreeFloatRootEdge')
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
outfile.write(' length="%s"' % self.gds_format_double(self.length, input_name='length'))
def exportChildren(self, outfile, level, namespace_='', name_='TreeFloatRootEdge', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(TreeFloatRootEdge, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='TreeFloatRootEdge'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
showIndent(outfile, level)
outfile.write('length = %e,\n' % (self.length,))
super(TreeFloatRootEdge, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(TreeFloatRootEdge, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('length', node)
if value is not None and 'length' not in already_processed:
already_processed.append('length')
try:
self.length = float(value)
except ValueError, exp:
                raise_parse_error(node, 'Bad float/double attribute (length): %s' % exp)
super(TreeFloatRootEdge, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class TreeFloatRootEdge
class TreeFloatEdge(AbstractEdge):
"""A concrete edge implementation, with float length."""
subclass = None
superclass = AbstractEdge
def __init__(self, about=None, meta=None, label=None, id=None, source=None, length=None, target=None, valueOf_=None):
super(TreeFloatEdge, self).__init__(about, meta, label, id, source, length, target, )
self.length = _cast(float, length)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if TreeFloatEdge.subclass:
return TreeFloatEdge.subclass(*args_, **kwargs_)
else:
return TreeFloatEdge(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_length(self): return self.length
def set_length(self, length): self.length = length
def export(self, outfile, level, namespace_='', name_='TreeFloatEdge', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TreeFloatEdge')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TreeFloatEdge'):
super(TreeFloatEdge, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TreeFloatEdge')
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
outfile.write(' length="%s"' % self.gds_format_double(self.length, input_name='length'))
def exportChildren(self, outfile, level, namespace_='', name_='TreeFloatEdge', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(TreeFloatEdge, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='TreeFloatEdge'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
showIndent(outfile, level)
outfile.write('length = %e,\n' % (self.length,))
super(TreeFloatEdge, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(TreeFloatEdge, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('length', node)
if value is not None and 'length' not in already_processed:
already_processed.append('length')
try:
self.length = float(value)
except ValueError, exp:
                raise_parse_error(node, 'Bad float/double attribute (length): %s' % exp)
super(TreeFloatEdge, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class TreeFloatEdge
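# Usage sketch (illustrative only, not part of the generated bindings):
# constructing a TreeFloatEdge by hand and serializing it. The ids below
# are made-up sample values; in a real NeXML document source/target
# would reference actual node ids. TreeFloatRootEdge is used the same
# way, except that root edges carry no source attribute.
if __name__ == "__main__":
    import sys
    _edge = TreeFloatEdge(id='edge1', source='node1', target='node2',
                          length=0.42)
    # With no meta children the element is written self-closing.
    _edge.export(sys.stdout, 0, name_='edge')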
class StandardObs(AbstractObs):
"""This is a single cell in a matrix containing a standard observation."""
subclass = None
superclass = AbstractObs
def __init__(self, about=None, meta=None, label=None, char=None, state=None, valueOf_=None):
super(StandardObs, self).__init__(about, meta, label, char, state, )
self.char = _cast(None, char)
self.state = _cast(None, state)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if StandardObs.subclass:
return StandardObs.subclass(*args_, **kwargs_)
else:
return StandardObs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_char(self): return self.char
def set_char(self, char): self.char = char
def get_state(self): return self.state
def set_state(self, state): self.state = state
def export(self, outfile, level, namespace_='', name_='StandardObs', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardObs')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardObs'):
super(StandardObs, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardObs')
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
outfile.write(' char=%s' % (self.gds_format_string(quote_attrib(self.char).encode(ExternalEncoding), input_name='char'), ))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
outfile.write(' state=%s' % (self.gds_format_string(quote_attrib(self.state).encode(ExternalEncoding), input_name='state'), ))
def exportChildren(self, outfile, level, namespace_='', name_='StandardObs', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(StandardObs, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StandardObs'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
showIndent(outfile, level)
outfile.write('char = "%s",\n' % (self.char,))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
showIndent(outfile, level)
outfile.write('state = "%s",\n' % (self.state,))
super(StandardObs, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(StandardObs, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('char', node)
if value is not None and 'char' not in already_processed:
already_processed.append('char')
self.char = value
value = find_attr_value_('state', node)
if value is not None and 'state' not in already_processed:
already_processed.append('state')
self.state = value
super(StandardObs, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class StandardObs
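# Usage sketch (illustrative only): a StandardObs cell as it would sit
# in a categorical matrix row. 'c1' and 's1' are hypothetical ids that
# would normally reference a char (column) definition and a state
# definition declared elsewhere in the document.
if __name__ == "__main__":
    import sys
    _cell = StandardObs(char='c1', state='s1')
    _cell.export(sys.stdout, 0, name_='cell')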
class RNAObs(AbstractObs):
"""This is a single cell in a matrix containing an RNA nucleotide
observation."""
subclass = None
superclass = AbstractObs
def __init__(self, about=None, meta=None, label=None, char=None, state=None, valueOf_=None):
super(RNAObs, self).__init__(about, meta, label, char, state, )
self.char = _cast(None, char)
self.state = _cast(None, state)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if RNAObs.subclass:
return RNAObs.subclass(*args_, **kwargs_)
else:
return RNAObs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_char(self): return self.char
def set_char(self, char): self.char = char
def get_state(self): return self.state
def set_state(self, state): self.state = state
def export(self, outfile, level, namespace_='', name_='RNAObs', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RNAObs')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RNAObs'):
super(RNAObs, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RNAObs')
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
outfile.write(' char=%s' % (self.gds_format_string(quote_attrib(self.char).encode(ExternalEncoding), input_name='char'), ))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
outfile.write(' state=%s' % (self.gds_format_string(quote_attrib(self.state).encode(ExternalEncoding), input_name='state'), ))
def exportChildren(self, outfile, level, namespace_='', name_='RNAObs', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(RNAObs, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RNAObs'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
showIndent(outfile, level)
outfile.write('char = "%s",\n' % (self.char,))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
showIndent(outfile, level)
outfile.write('state = "%s",\n' % (self.state,))
super(RNAObs, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RNAObs, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('char', node)
if value is not None and 'char' not in already_processed:
already_processed.append('char')
self.char = value
value = find_attr_value_('state', node)
if value is not None and 'state' not in already_processed:
already_processed.append('state')
self.state = value
super(RNAObs, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class RNAObs
class RestrictionObs(AbstractObs):
"""This is a single cell in a matrix containing a restriction site
observation."""
subclass = None
superclass = AbstractObs
def __init__(self, about=None, meta=None, label=None, char=None, state=None, valueOf_=None):
super(RestrictionObs, self).__init__(about, meta, label, char, state, )
self.char = _cast(None, char)
self.state = _cast(None, state)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if RestrictionObs.subclass:
return RestrictionObs.subclass(*args_, **kwargs_)
else:
return RestrictionObs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_char(self): return self.char
def set_char(self, char): self.char = char
def get_state(self): return self.state
def set_state(self, state): self.state = state
def export(self, outfile, level, namespace_='', name_='RestrictionObs', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionObs')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RestrictionObs'):
super(RestrictionObs, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionObs')
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
outfile.write(' char=%s' % (self.gds_format_string(quote_attrib(self.char).encode(ExternalEncoding), input_name='char'), ))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
outfile.write(' state=%s' % (self.gds_format_string(quote_attrib(self.state).encode(ExternalEncoding), input_name='state'), ))
def exportChildren(self, outfile, level, namespace_='', name_='RestrictionObs', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(RestrictionObs, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RestrictionObs'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
showIndent(outfile, level)
outfile.write('char = "%s",\n' % (self.char,))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
showIndent(outfile, level)
outfile.write('state = "%s",\n' % (self.state,))
super(RestrictionObs, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RestrictionObs, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('char', node)
if value is not None and 'char' not in already_processed:
already_processed.append('char')
self.char = value
value = find_attr_value_('state', node)
if value is not None and 'state' not in already_processed:
already_processed.append('state')
self.state = value
super(RestrictionObs, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class RestrictionObs
class AAObs(AbstractObs):
"""This is a single cell in a matrix containing an amino acid
observation."""
subclass = None
superclass = AbstractObs
def __init__(self, about=None, meta=None, label=None, char=None, state=None, valueOf_=None):
super(AAObs, self).__init__(about, meta, label, char, state, )
self.char = _cast(None, char)
self.state = _cast(None, state)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if AAObs.subclass:
return AAObs.subclass(*args_, **kwargs_)
else:
return AAObs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_char(self): return self.char
def set_char(self, char): self.char = char
def get_state(self): return self.state
def set_state(self, state): self.state = state
def export(self, outfile, level, namespace_='', name_='AAObs', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AAObs')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AAObs'):
super(AAObs, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AAObs')
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
outfile.write(' char=%s' % (self.gds_format_string(quote_attrib(self.char).encode(ExternalEncoding), input_name='char'), ))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
outfile.write(' state=%s' % (self.gds_format_string(quote_attrib(self.state).encode(ExternalEncoding), input_name='state'), ))
def exportChildren(self, outfile, level, namespace_='', name_='AAObs', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(AAObs, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AAObs'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
showIndent(outfile, level)
outfile.write('char = "%s",\n' % (self.char,))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
showIndent(outfile, level)
outfile.write('state = "%s",\n' % (self.state,))
super(AAObs, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AAObs, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('char', node)
if value is not None and 'char' not in already_processed:
already_processed.append('char')
self.char = value
value = find_attr_value_('state', node)
if value is not None and 'state' not in already_processed:
already_processed.append('state')
self.state = value
super(AAObs, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class AAObs
class DNAObs(AbstractObs):
"""This is a single cell in a matrix containing a nucleotide
observation."""
subclass = None
superclass = AbstractObs
def __init__(self, about=None, meta=None, label=None, char=None, state=None, valueOf_=None):
super(DNAObs, self).__init__(about, meta, label, char, state, )
self.char = _cast(None, char)
self.state = _cast(None, state)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if DNAObs.subclass:
return DNAObs.subclass(*args_, **kwargs_)
else:
return DNAObs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_char(self): return self.char
def set_char(self, char): self.char = char
def get_state(self): return self.state
def set_state(self, state): self.state = state
def export(self, outfile, level, namespace_='', name_='DNAObs', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DNAObs')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DNAObs'):
super(DNAObs, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DNAObs')
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
outfile.write(' char=%s' % (self.gds_format_string(quote_attrib(self.char).encode(ExternalEncoding), input_name='char'), ))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
outfile.write(' state=%s' % (self.gds_format_string(quote_attrib(self.state).encode(ExternalEncoding), input_name='state'), ))
def exportChildren(self, outfile, level, namespace_='', name_='DNAObs', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(DNAObs, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DNAObs'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
showIndent(outfile, level)
outfile.write('char = "%s",\n' % (self.char,))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
showIndent(outfile, level)
outfile.write('state = "%s",\n' % (self.state,))
super(DNAObs, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DNAObs, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('char', node)
if value is not None and 'char' not in already_processed:
already_processed.append('char')
self.char = value
value = find_attr_value_('state', node)
if value is not None and 'state' not in already_processed:
already_processed.append('state')
self.state = value
super(DNAObs, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class DNAObs
class AbstractChar(IDTagged):
"""The AbstractChar type is the superclass for a column definition,
which may have a "states" attribute that refers to an
AbstractStates element, a codon attribute of type CodonPosition
and an id attribute that may be an actual id (e.g. for
categorical matrices where observations explicitly refer to a
column definition) or an integer for sequence matrices."""
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, tokens=None, states=None, codon=None, valueOf_=None):
super(AbstractChar, self).__init__(about, meta, label, id, )
self.tokens = _cast(None, tokens)
self.states = _cast(None, states)
self.codon = _cast(None, codon)
pass
def factory(*args_, **kwargs_):
if AbstractChar.subclass:
return AbstractChar.subclass(*args_, **kwargs_)
else:
return AbstractChar(*args_, **kwargs_)
factory = staticmethod(factory)
def get_tokens(self): return self.tokens
def set_tokens(self, tokens): self.tokens = tokens
def validate_MSTokenLength(self, value):
# Validate type MSTokenLength, a restriction on xs:positiveInteger.
pass
def get_states(self): return self.states
def set_states(self, states): self.states = states
def get_codon(self): return self.codon
def set_codon(self, codon): self.codon = codon
def validate_CodonPosition(self, value):
# Validate type CodonPosition, a restriction on xs:nonNegativeInteger.
pass
def export(self, outfile, level, namespace_='', name_='AbstractChar', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractChar')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractChar"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractChar'):
super(AbstractChar, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractChar')
if self.tokens is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
outfile.write(' tokens=%s' % (quote_attrib(self.tokens), ))
if self.states is not None and 'states' not in already_processed:
already_processed.append('states')
outfile.write(' states=%s' % (self.gds_format_string(quote_attrib(self.states).encode(ExternalEncoding), input_name='states'), ))
if self.codon is not None and 'codon' not in already_processed:
already_processed.append('codon')
outfile.write(' codon=%s' % (quote_attrib(self.codon), ))
def exportChildren(self, outfile, level, namespace_='', name_='AbstractChar', fromsubclass_=False):
super(AbstractChar, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(AbstractChar, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractChar'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.tokens is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
showIndent(outfile, level)
outfile.write('tokens = %d,\n' % (self.tokens,))
if self.states is not None and 'states' not in already_processed:
already_processed.append('states')
showIndent(outfile, level)
outfile.write('states = "%s",\n' % (self.states,))
if self.codon is not None and 'codon' not in already_processed:
already_processed.append('codon')
showIndent(outfile, level)
outfile.write('codon = %d,\n' % (self.codon,))
super(AbstractChar, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractChar, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('tokens', node)
if value is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
try:
self.tokens = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.tokens <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
self.validate_MSTokenLength(self.tokens) # validate type MSTokenLength
value = find_attr_value_('states', node)
if value is not None and 'states' not in already_processed:
already_processed.append('states')
self.states = value
value = find_attr_value_('codon', node)
if value is not None and 'codon' not in already_processed:
already_processed.append('codon')
try:
self.codon = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.codon < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
self.validate_CodonPosition(self.codon) # validate type CodonPosition
super(AbstractChar, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(AbstractChar, self).buildChildren(child_, node, nodeName_, True)
pass
# end class AbstractChar
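# Usage sketch (illustrative only): an AbstractChar column definition.
# tokens must be a positive integer and codon a non-negative integer
# (enforced in buildAttributes above when parsing); 'states1' is a
# hypothetical id referencing an AbstractStates element.
if __name__ == "__main__":
    import sys
    _col = AbstractChar(id='c1', tokens=3, states='states1', codon=1)
    _col.export(sys.stdout, 0, name_='char')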
class AbstractStates(IDTagged):
"""A container for a set of AbstractState elements."""
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, state=None, polymorphic_state_set=None, uncertain_state_set=None, set=None, valueOf_=None):
super(AbstractStates, self).__init__(about, meta, label, id, )
if state is None:
self.state = []
else:
self.state = state
if polymorphic_state_set is None:
self.polymorphic_state_set = []
else:
self.polymorphic_state_set = polymorphic_state_set
if uncertain_state_set is None:
self.uncertain_state_set = []
else:
self.uncertain_state_set = uncertain_state_set
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if AbstractStates.subclass:
return AbstractStates.subclass(*args_, **kwargs_)
else:
return AbstractStates(*args_, **kwargs_)
factory = staticmethod(factory)
def get_state(self): return self.state
def set_state(self, state): self.state = state
def add_state(self, value): self.state.append(value)
def insert_state(self, index, value): self.state[index] = value
def get_polymorphic_state_set(self): return self.polymorphic_state_set
def set_polymorphic_state_set(self, polymorphic_state_set): self.polymorphic_state_set = polymorphic_state_set
def add_polymorphic_state_set(self, value): self.polymorphic_state_set.append(value)
def insert_polymorphic_state_set(self, index, value): self.polymorphic_state_set[index] = value
def get_uncertain_state_set(self): return self.uncertain_state_set
def set_uncertain_state_set(self, uncertain_state_set): self.uncertain_state_set = uncertain_state_set
def add_uncertain_state_set(self, value): self.uncertain_state_set.append(value)
def insert_uncertain_state_set(self, index, value): self.uncertain_state_set[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='AbstractStates', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractStates')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractStates"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractStates'):
super(AbstractStates, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractStates')
def exportChildren(self, outfile, level, namespace_='', name_='AbstractStates', fromsubclass_=False):
super(AbstractStates, self).exportChildren(outfile, level, namespace_, name_, True)
for state_ in self.get_state():
state_.export(outfile, level, namespace_, name_='state')
for polymorphic_state_set_ in self.get_polymorphic_state_set():
polymorphic_state_set_.export(outfile, level, namespace_, name_='polymorphic_state_set')
for uncertain_state_set_ in self.get_uncertain_state_set():
uncertain_state_set_.export(outfile, level, namespace_, name_='uncertain_state_set')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.state or
self.polymorphic_state_set or
self.uncertain_state_set or
self.set or
super(AbstractStates, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractStates'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AbstractStates, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractStates, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('state=[\n')
level += 1
for state_ in self.state:
showIndent(outfile, level)
outfile.write('model_.AbstractState(\n')
state_.exportLiteral(outfile, level, name_='AbstractState')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('polymorphic_state_set=[\n')
level += 1
for polymorphic_state_set_ in self.polymorphic_state_set:
showIndent(outfile, level)
outfile.write('model_.AbstractPolymorphicStateSet(\n')
polymorphic_state_set_.exportLiteral(outfile, level, name_='AbstractPolymorphicStateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('uncertain_state_set=[\n')
level += 1
for uncertain_state_set_ in self.uncertain_state_set:
showIndent(outfile, level)
outfile.write('model_.AbstractUncertainStateSet(\n')
uncertain_state_set_.exportLiteral(outfile, level, name_='AbstractUncertainStateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.StateSet(\n')
set_.exportLiteral(outfile, level, name_='StateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AbstractStates, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'state':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <state> element')
self.state.append(obj_)
elif nodeName_ == 'polymorphic_state_set':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <polymorphic_state_set> element')
self.polymorphic_state_set.append(obj_)
elif nodeName_ == 'uncertain_state_set':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <uncertain_state_set> element')
self.uncertain_state_set.append(obj_)
elif nodeName_ == 'set':
obj_ = StateSet.factory()
obj_.build(child_)
self.set.append(obj_)
super(AbstractStates, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractStates
class AbstractState(IDTagged):
"""The AbstractState type is the super-class for a state definition.
The element has a required symbol attribute that in restricted
concrete subclasses must be of a sensible type such as a single
IUPAC character. It may enclose zero or more AbstractMapping
elements to resolve ambiguities."""
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, valueOf_=None):
super(AbstractState, self).__init__(about, meta, label, id, )
self.symbol = _cast(None, symbol)
pass
def factory(*args_, **kwargs_):
if AbstractState.subclass:
return AbstractState.subclass(*args_, **kwargs_)
else:
return AbstractState(*args_, **kwargs_)
factory = staticmethod(factory)
def get_symbol(self): return self.symbol
def set_symbol(self, symbol): self.symbol = symbol
def export(self, outfile, level, namespace_='', name_='AbstractState', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractState')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractState"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractState'):
super(AbstractState, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractState')
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
outfile.write(' symbol=%s' % (quote_attrib(self.symbol), ))
def exportChildren(self, outfile, level, namespace_='', name_='AbstractState', fromsubclass_=False):
super(AbstractState, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(AbstractState, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractState'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
showIndent(outfile, level)
outfile.write('symbol = %s,\n' % (self.symbol,))
super(AbstractState, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractState, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('symbol', node)
if value is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
self.symbol = value
super(AbstractState, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(AbstractState, self).buildChildren(child_, node, nodeName_, True)
pass
# end class AbstractState
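# Usage sketch (illustrative only): an AbstractStates container holding
# two AbstractState definitions. The ids and symbols are sample values;
# restricted concrete subclasses would constrain symbol further (e.g. to
# a single IUPAC character).
if __name__ == "__main__":
    import sys
    _states = AbstractStates(
        id='states1',
        state=[AbstractState(id='s1', symbol='0'),
               AbstractState(id='s2', symbol='1')])
    _states.export(sys.stdout, 0, name_='states')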
class ContinuousChar(AbstractChar):
"""A concrete implementation of the char element, which requires only
an id attribute."""
subclass = None
superclass = AbstractChar
def __init__(self, about=None, meta=None, label=None, id=None, tokens=None, states=None, codon=None, valueOf_=None):
super(ContinuousChar, self).__init__(about, meta, label, id, tokens, states, codon, )
self.states = _cast(None, states)
self.tokens = _cast(None, tokens)
self.codon = _cast(None, codon)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if ContinuousChar.subclass:
return ContinuousChar.subclass(*args_, **kwargs_)
else:
return ContinuousChar(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_states(self): return self.states
def set_states(self, states): self.states = states
def get_tokens(self): return self.tokens
def set_tokens(self, tokens): self.tokens = tokens
def validate_MSTokenLength(self, value):
# Validate type MSTokenLength, a restriction on xs:positiveInteger.
pass
def get_codon(self): return self.codon
def set_codon(self, codon): self.codon = codon
def validate_CodonPosition(self, value):
# Validate type CodonPosition, a restriction on xs:nonNegativeInteger.
pass
def export(self, outfile, level, namespace_='', name_='ContinuousChar', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousChar')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ContinuousChar'):
super(ContinuousChar, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousChar')
if self.states is not None and 'states' not in already_processed:
already_processed.append('states')
outfile.write(' states=%s' % (self.gds_format_string(quote_attrib(self.states).encode(ExternalEncoding), input_name='states'), ))
if self.tokens is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
outfile.write(' tokens=%s' % (quote_attrib(self.tokens), ))
if self.codon is not None and 'codon' not in already_processed:
already_processed.append('codon')
outfile.write(' codon=%s' % (quote_attrib(self.codon), ))
def exportChildren(self, outfile, level, namespace_='', name_='ContinuousChar', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(ContinuousChar, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ContinuousChar'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.states is not None and 'states' not in already_processed:
already_processed.append('states')
showIndent(outfile, level)
outfile.write('states = "%s",\n' % (self.states,))
if self.tokens is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
showIndent(outfile, level)
outfile.write('tokens = %d,\n' % (self.tokens,))
if self.codon is not None and 'codon' not in already_processed:
already_processed.append('codon')
showIndent(outfile, level)
outfile.write('codon = %d,\n' % (self.codon,))
super(ContinuousChar, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ContinuousChar, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('states', node)
if value is not None and 'states' not in already_processed:
already_processed.append('states')
self.states = value
value = find_attr_value_('tokens', node)
if value is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
try:
self.tokens = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.tokens <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
self.validate_MSTokenLength(self.tokens) # validate type MSTokenLength
value = find_attr_value_('codon', node)
if value is not None and 'codon' not in already_processed:
already_processed.append('codon')
try:
self.codon = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.codon < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
self.validate_CodonPosition(self.codon) # validate type CodonPosition
super(ContinuousChar, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class ContinuousChar
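# Usage sketch (illustrative only): round-tripping a ContinuousChar from
# a parsed element. The XML snippet is invented sample data; build()
# applies the checks seen above (tokens positive, codon non-negative)
# and raises a parse error for bad values.
if __name__ == "__main__":
    import sys
    from xml.etree import ElementTree as _ET
    _node = _ET.fromstring('<char id="cc1" tokens="3" codon="2"/>')
    _cchar = ContinuousChar.factory()
    _cchar.build(_node)
    _cchar.export(sys.stdout, 0, name_='char')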
class AbstractSet(IDTagged):
    """A superclass for "set" elements, which group other elements in the
    enclosing container by id reference."""
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, valueOf_=None):
super(AbstractSet, self).__init__(about, meta, label, id, )
pass
def factory(*args_, **kwargs_):
if AbstractSet.subclass:
return AbstractSet.subclass(*args_, **kwargs_)
else:
return AbstractSet(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='AbstractSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractSet')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractSet"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractSet'):
super(AbstractSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractSet')
def exportChildren(self, outfile, level, namespace_='', name_='AbstractSet', fromsubclass_=False):
super(AbstractSet, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(AbstractSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AbstractSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractSet, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AbstractSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(AbstractSet, self).buildChildren(child_, node, nodeName_, True)
pass
# end class AbstractSet
class TaxaLinked(IDTagged):
"""The TaxaLinked complexType is a super class for objects that that
require an otus id reference."""
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, valueOf_=None):
super(TaxaLinked, self).__init__(about, meta, label, id, )
self.otus = _cast(None, otus)
pass
def factory(*args_, **kwargs_):
if TaxaLinked.subclass:
return TaxaLinked.subclass(*args_, **kwargs_)
else:
return TaxaLinked(*args_, **kwargs_)
factory = staticmethod(factory)
def get_otus(self): return self.otus
def set_otus(self, otus): self.otus = otus
def export(self, outfile, level, namespace_='', name_='TaxaLinked', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TaxaLinked')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="TaxaLinked"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TaxaLinked'):
super(TaxaLinked, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TaxaLinked')
if self.otus is not None and 'otus' not in already_processed:
already_processed.append('otus')
outfile.write(' otus=%s' % (self.gds_format_string(quote_attrib(self.otus).encode(ExternalEncoding), input_name='otus'), ))
def exportChildren(self, outfile, level, namespace_='', name_='TaxaLinked', fromsubclass_=False):
super(TaxaLinked, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(TaxaLinked, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='TaxaLinked'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.otus is not None and 'otus' not in already_processed:
already_processed.append('otus')
showIndent(outfile, level)
outfile.write('otus = "%s",\n' % (self.otus,))
super(TaxaLinked, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(TaxaLinked, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('otus', node)
if value is not None and 'otus' not in already_processed:
already_processed.append('otus')
self.otus = value
super(TaxaLinked, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(TaxaLinked, self).buildChildren(child_, node, nodeName_, True)
pass
# end class TaxaLinked
class OptionalTaxonLinked(IDTagged):
"""The OptionalOTULinked complexType is a super class for objects that
that optionally have an otu id reference."""
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, valueOf_=None):
super(OptionalTaxonLinked, self).__init__(about, meta, label, id, )
self.otu = _cast(None, otu)
pass
def factory(*args_, **kwargs_):
if OptionalTaxonLinked.subclass:
return OptionalTaxonLinked.subclass(*args_, **kwargs_)
else:
return OptionalTaxonLinked(*args_, **kwargs_)
factory = staticmethod(factory)
def get_otu(self): return self.otu
def set_otu(self, otu): self.otu = otu
def export(self, outfile, level, namespace_='', name_='OptionalTaxonLinked', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='OptionalTaxonLinked')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="OptionalTaxonLinked"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='OptionalTaxonLinked'):
super(OptionalTaxonLinked, self).exportAttributes(outfile, level, already_processed, namespace_, name_='OptionalTaxonLinked')
if self.otu is not None and 'otu' not in already_processed:
already_processed.append('otu')
outfile.write(' otu=%s' % (self.gds_format_string(quote_attrib(self.otu).encode(ExternalEncoding), input_name='otu'), ))
def exportChildren(self, outfile, level, namespace_='', name_='OptionalTaxonLinked', fromsubclass_=False):
super(OptionalTaxonLinked, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(OptionalTaxonLinked, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='OptionalTaxonLinked'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.otu is not None and 'otu' not in already_processed:
already_processed.append('otu')
showIndent(outfile, level)
outfile.write('otu = "%s",\n' % (self.otu,))
super(OptionalTaxonLinked, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(OptionalTaxonLinked, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('otu', node)
if value is not None and 'otu' not in already_processed:
already_processed.append('otu')
self.otu = value
super(OptionalTaxonLinked, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(OptionalTaxonLinked, self).buildChildren(child_, node, nodeName_, True)
pass
# end class OptionalTaxonLinked
class TaxonLinked(IDTagged):
"""The TaxonLinked complexType is a super class for objects that
require a taxon id reference."""
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, valueOf_=None):
super(TaxonLinked, self).__init__(about, meta, label, id, )
self.otu = _cast(None, otu)
pass
def factory(*args_, **kwargs_):
if TaxonLinked.subclass:
return TaxonLinked.subclass(*args_, **kwargs_)
else:
return TaxonLinked(*args_, **kwargs_)
factory = staticmethod(factory)
def get_otu(self): return self.otu
def set_otu(self, otu): self.otu = otu
def export(self, outfile, level, namespace_='', name_='TaxonLinked', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TaxonLinked')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="TaxonLinked"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TaxonLinked'):
super(TaxonLinked, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TaxonLinked')
if self.otu is not None and 'otu' not in already_processed:
already_processed.append('otu')
outfile.write(' otu=%s' % (self.gds_format_string(quote_attrib(self.otu).encode(ExternalEncoding), input_name='otu'), ))
def exportChildren(self, outfile, level, namespace_='', name_='TaxonLinked', fromsubclass_=False):
super(TaxonLinked, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(TaxonLinked, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='TaxonLinked'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.otu is not None and 'otu' not in already_processed:
already_processed.append('otu')
showIndent(outfile, level)
outfile.write('otu = "%s",\n' % (self.otu,))
super(TaxonLinked, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(TaxonLinked, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('otu', node)
if value is not None and 'otu' not in already_processed:
already_processed.append('otu')
self.otu = value
super(TaxonLinked, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(TaxonLinked, self).buildChildren(child_, node, nodeName_, True)
pass
# end class TaxonLinked
class IntNetwork(AbstractNetwork):
"""A concrete network implementation, with integer edge lengths."""
subclass = None
superclass = AbstractNetwork
def __init__(self, about=None, meta=None, label=None, id=None, node=None, edge=None, set=None, valueOf_=None):
super(IntNetwork, self).__init__(about, meta, label, id, node, edge, set, )
if node is None:
self.node = []
else:
self.node = node
if edge is None:
self.edge = []
else:
self.edge = edge
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if IntNetwork.subclass:
return IntNetwork.subclass(*args_, **kwargs_)
else:
return IntNetwork(*args_, **kwargs_)
factory = staticmethod(factory)
def get_node(self): return self.node
def set_node(self, node): self.node = node
def add_node(self, value): self.node.append(value)
def insert_node(self, index, value): self.node[index] = value
def get_edge(self): return self.edge
def set_edge(self, edge): self.edge = edge
def add_edge(self, value): self.edge.append(value)
def insert_edge(self, index, value): self.edge[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='IntNetwork', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IntNetwork')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IntNetwork'):
super(IntNetwork, self).exportAttributes(outfile, level, already_processed, namespace_, name_='IntNetwork')
def exportChildren(self, outfile, level, namespace_='', name_='IntNetwork', fromsubclass_=False):
for node_ in self.node:
node_.export(outfile, level, namespace_, name_='node')
for edge_ in self.edge:
edge_.export(outfile, level, namespace_, name_='edge')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.node or
self.edge or
self.set or
super(IntNetwork, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='IntNetwork'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(IntNetwork, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(IntNetwork, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('node=[\n')
level += 1
for node_ in self.node:
showIndent(outfile, level)
outfile.write('model_.NetworkNode(\n')
node_.exportLiteral(outfile, level, name_='NetworkNode')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('edge=[\n')
level += 1
for edge_ in self.edge:
showIndent(outfile, level)
outfile.write('model_.NetworkIntEdge(\n')
edge_.exportLiteral(outfile, level, name_='NetworkIntEdge')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.NodeAndRootEdgeAndEdgeSet(\n')
set_.exportLiteral(outfile, level, name_='NodeAndRootEdgeAndEdgeSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(IntNetwork, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'node':
obj_ = NetworkNode.factory()
obj_.build(child_)
self.node.append(obj_)
elif nodeName_ == 'edge':
obj_ = NetworkIntEdge.factory()
obj_.build(child_)
self.edge.append(obj_)
elif nodeName_ == 'set':
obj_ = NodeAndRootEdgeAndEdgeSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class IntNetwork
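# Usage sketch (illustrative only, not part of the generated bindings):
# build an IntNetwork in memory and serialize it. The identifiers
# ('network1', 'n1', ...) are placeholders, and NetworkNode -- generated
# elsewhere in this module -- is assumed to accept the usual id keyword
# like the other generated classes.
def _example_build_int_network(outfile=None):
    import sys
    if outfile is None:
        outfile = sys.stdout
    network = IntNetwork(id='network1', label='example network')
    network.add_node(NetworkNode(id='n1'))
    network.add_node(NetworkNode(id='n2'))
    # NetworkIntEdge casts length with int(), so edge lengths stay integral.
    network.add_edge(NetworkIntEdge(id='e1', source='n1', target='n2', length=3))
    network.export(outfile, 0, name_='network')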
class FloatNetwork(AbstractNetwork):
"""A concrete network implementation, with floating point edge lengths."""
subclass = None
superclass = AbstractNetwork
def __init__(self, about=None, meta=None, label=None, id=None, node=None, edge=None, set=None, valueOf_=None):
super(FloatNetwork, self).__init__(about, meta, label, id, node, edge, set, )
if node is None:
self.node = []
else:
self.node = node
if edge is None:
self.edge = []
else:
self.edge = edge
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if FloatNetwork.subclass:
return FloatNetwork.subclass(*args_, **kwargs_)
else:
return FloatNetwork(*args_, **kwargs_)
factory = staticmethod(factory)
def get_node(self): return self.node
def set_node(self, node): self.node = node
def add_node(self, value): self.node.append(value)
def insert_node(self, index, value): self.node[index] = value
def get_edge(self): return self.edge
def set_edge(self, edge): self.edge = edge
def add_edge(self, value): self.edge.append(value)
def insert_edge(self, index, value): self.edge[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='FloatNetwork', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='FloatNetwork')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='FloatNetwork'):
super(FloatNetwork, self).exportAttributes(outfile, level, already_processed, namespace_, name_='FloatNetwork')
def exportChildren(self, outfile, level, namespace_='', name_='FloatNetwork', fromsubclass_=False):
for node_ in self.node:
node_.export(outfile, level, namespace_, name_='node')
for edge_ in self.edge:
edge_.export(outfile, level, namespace_, name_='edge')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.node or
self.edge or
self.set or
super(FloatNetwork, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='FloatNetwork'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(FloatNetwork, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(FloatNetwork, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('node=[\n')
level += 1
for node_ in self.node:
showIndent(outfile, level)
outfile.write('model_.NetworkNode(\n')
node_.exportLiteral(outfile, level, name_='NetworkNode')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('edge=[\n')
level += 1
for edge_ in self.edge:
showIndent(outfile, level)
outfile.write('model_.NetworkFloatEdge(\n')
edge_.exportLiteral(outfile, level, name_='NetworkFloatEdge')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.NodeAndRootEdgeAndEdgeSet(\n')
set_.exportLiteral(outfile, level, name_='NodeAndRootEdgeAndEdgeSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(FloatNetwork, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'node':
obj_ = NetworkNode.factory()
obj_.build(child_)
self.node.append(obj_)
elif nodeName_ == 'edge':
obj_ = NetworkFloatEdge.factory()
obj_.build(child_)
self.edge.append(obj_)
elif nodeName_ == 'set':
obj_ = NodeAndRootEdgeAndEdgeSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class FloatNetwork
class NetworkIntEdge(AbstractEdge):
"""A concrete network edge implementation, with int edge."""
subclass = None
superclass = AbstractEdge
def __init__(self, about=None, meta=None, label=None, id=None, source=None, length=None, target=None, valueOf_=None):
super(NetworkIntEdge, self).__init__(about, meta, label, id, source, length, target, )
self.length = _cast(int, length)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if NetworkIntEdge.subclass:
return NetworkIntEdge.subclass(*args_, **kwargs_)
else:
return NetworkIntEdge(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_length(self): return self.length
def set_length(self, length): self.length = length
def export(self, outfile, level, namespace_='', name_='NetworkIntEdge', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='NetworkIntEdge')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='NetworkIntEdge'):
super(NetworkIntEdge, self).exportAttributes(outfile, level, already_processed, namespace_, name_='NetworkIntEdge')
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
outfile.write(' length="%s"' % self.gds_format_integer(self.length, input_name='length'))
def exportChildren(self, outfile, level, namespace_='', name_='NetworkIntEdge', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(NetworkIntEdge, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='NetworkIntEdge'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
showIndent(outfile, level)
outfile.write('length = %d,\n' % (self.length,))
super(NetworkIntEdge, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(NetworkIntEdge, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('length', node)
if value is not None and 'length' not in already_processed:
already_processed.append('length')
try:
self.length = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
super(NetworkIntEdge, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class NetworkIntEdge
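# Usage sketch (illustrative only): round-trip a single edge element through
# build(). buildAttributes() converts the length attribute with int() and
# reports malformed values through raise_parse_error(), assuming the
# module's usual ElementTree-compatible helpers (find_attr_value_,
# Tag_pattern_).
def _example_parse_int_edge():
    from xml.etree import ElementTree
    xml = '<edge id="e1" source="n1" target="n2" length="7"/>'
    edge = NetworkIntEdge.factory()
    edge.build(ElementTree.fromstring(xml))
    return edge.get_length()  # -> 7, parsed as a Python int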
class NetworkFloatEdge(AbstractEdge):
"""A concrete network edge implementation, with float edge."""
subclass = None
superclass = AbstractEdge
def __init__(self, about=None, meta=None, label=None, id=None, source=None, length=None, target=None, valueOf_=None):
super(NetworkFloatEdge, self).__init__(about, meta, label, id, source, length, target, )
self.length = _cast(float, length)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if NetworkFloatEdge.subclass:
return NetworkFloatEdge.subclass(*args_, **kwargs_)
else:
return NetworkFloatEdge(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_length(self): return self.length
def set_length(self, length): self.length = length
def export(self, outfile, level, namespace_='', name_='NetworkFloatEdge', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='NetworkFloatEdge')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='NetworkFloatEdge'):
super(NetworkFloatEdge, self).exportAttributes(outfile, level, already_processed, namespace_, name_='NetworkFloatEdge')
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
outfile.write(' length="%s"' % self.gds_format_double(self.length, input_name='length'))
def exportChildren(self, outfile, level, namespace_='', name_='NetworkFloatEdge', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(NetworkFloatEdge, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='NetworkFloatEdge'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
showIndent(outfile, level)
outfile.write('length = %e,\n' % (self.length,))
super(NetworkFloatEdge, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(NetworkFloatEdge, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('length', node)
if value is not None and 'length' not in already_processed:
already_processed.append('length')
try:
self.length = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (length): %s' % exp)
super(NetworkFloatEdge, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class NetworkFloatEdge
class AbstractNode(OptionalTaxonLinked):
"""The AbstractNode superclass is what concrete nodes inherit from by
restriction. It represents a node element much like that of
GraphML, i.e. an element that is connected into a tree by edge
elements."""
subclass = None
superclass = OptionalTaxonLinked
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, root=False, valueOf_=None):
super(AbstractNode, self).__init__(about, meta, label, id, otu, )
self.root = _cast(bool, root)
pass
def factory(*args_, **kwargs_):
if AbstractNode.subclass:
return AbstractNode.subclass(*args_, **kwargs_)
else:
return AbstractNode(*args_, **kwargs_)
factory = staticmethod(factory)
def get_root(self): return self.root
def set_root(self, root): self.root = root
def export(self, outfile, level, namespace_='', name_='AbstractNode', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractNode')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractNode"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractNode'):
super(AbstractNode, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractNode')
if self.root is not None and 'root' not in already_processed:
already_processed.append('root')
outfile.write(' root="%s"' % self.gds_format_boolean(self.gds_str_lower(str(self.root)), input_name='root'))
def exportChildren(self, outfile, level, namespace_='', name_='AbstractNode', fromsubclass_=False):
super(AbstractNode, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(AbstractNode, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractNode'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.root is not None and 'root' not in already_processed:
already_processed.append('root')
showIndent(outfile, level)
outfile.write('root = %s,\n' % (self.root,))
super(AbstractNode, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractNode, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('root', node)
if value is not None and 'root' not in already_processed:
already_processed.append('root')
if value in ('true', '1'):
self.root = True
elif value in ('false', '0'):
self.root = False
else:
raise_parse_error(node, 'Bad boolean attribute')
super(AbstractNode, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(AbstractNode, self).buildChildren(child_, node, nodeName_, True)
pass
# end class AbstractNode
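# Usage sketch (illustrative only): the root attribute accepts the XML
# Schema boolean lexical forms 'true'/'1' and 'false'/'0'; anything else is
# rejected through raise_parse_error().
def _example_parse_root_flag():
    from xml.etree import ElementTree
    node = AbstractNode.factory()
    node.build(ElementTree.fromstring('<node id="n1" root="1"/>'))
    return node.get_root()  # -> True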
class TreeNode(AbstractNode):
"""A concrete node implementation."""
subclass = None
superclass = AbstractNode
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, root=False, valueOf_=None):
super(TreeNode, self).__init__(about, meta, label, id, otu, root, )
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if TreeNode.subclass:
return TreeNode.subclass(*args_, **kwargs_)
else:
return TreeNode(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def export(self, outfile, level, namespace_='', name_='TreeNode', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TreeNode')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TreeNode'):
super(TreeNode, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TreeNode')
def exportChildren(self, outfile, level, namespace_='', name_='TreeNode', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(TreeNode, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='TreeNode'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(TreeNode, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(TreeNode, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(TreeNode, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class TreeNode
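# Usage sketch (illustrative only): a TreeNode that marks the root of a
# tree and points at an otu record; 'n1' and 't1' are placeholder ids.
def _example_export_tree_node(outfile=None):
    import sys
    if outfile is None:
        outfile = sys.stdout
    node = TreeNode(id='n1', label='root node', otu='t1', root=True)
    node.export(outfile, 0, name_='node')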
class Trees(TaxaLinked):
"""A concrete container for tree objects."""
subclass = None
superclass = TaxaLinked
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, network=None, tree=None, set=None, valueOf_=None):
super(Trees, self).__init__(about, meta, label, id, otus, )
if network is None:
self.network = []
else:
self.network = network
if tree is None:
self.tree = []
else:
self.tree = tree
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if Trees.subclass:
return Trees.subclass(*args_, **kwargs_)
else:
return Trees(*args_, **kwargs_)
factory = staticmethod(factory)
def get_network(self): return self.network
def set_network(self, network): self.network = network
def add_network(self, value): self.network.append(value)
def insert_network(self, index, value): self.network[index] = value
def get_tree(self): return self.tree
def set_tree(self, tree): self.tree = tree
def add_tree(self, value): self.tree.append(value)
def insert_tree(self, index, value): self.tree[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='Trees', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Trees')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="Trees"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Trees'):
super(Trees, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Trees')
def exportChildren(self, outfile, level, namespace_='', name_='Trees', fromsubclass_=False):
super(Trees, self).exportChildren(outfile, level, namespace_, name_, True)
for network_ in self.get_network():
network_.export(outfile, level, namespace_, name_='network')
for tree_ in self.get_tree():
tree_.export(outfile, level, namespace_, name_='tree')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.network or
self.tree or
self.set or
super(Trees, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Trees'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(Trees, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Trees, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('network=[\n')
level += 1
for network_ in self.network:
showIndent(outfile, level)
outfile.write('model_.AbstractNetwork(\n')
network_.exportLiteral(outfile, level, name_='AbstractNetwork')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('tree=[\n')
level += 1
for tree_ in self.tree:
showIndent(outfile, level)
outfile.write('model_.AbstractTree(\n')
tree_.exportLiteral(outfile, level, name_='AbstractTree')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.TreeAndNetworkSet(\n')
set_.exportLiteral(outfile, level, name_='TreeAndNetworkSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(Trees, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'network':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <network> element')
self.network.append(obj_)
elif nodeName_ == 'tree':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <tree> element')
self.tree.append(obj_)
elif nodeName_ == 'set':
obj_ = TreeAndNetworkSet.factory()
obj_.build(child_)
self.set.append(obj_)
super(Trees, self).buildChildren(child_, node, nodeName_, True)
# end class Trees
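# Usage sketch (illustrative only): Trees is the container element. Its
# otus attribute (inherited from TaxaLinked) must reference an otus block,
# and it can hold any mix of tree and network children. On parse,
# buildChildren() dispatches on each child's xsi:type attribute to pick the
# concrete generated class out of globals(). All ids here are placeholders.
def _example_build_trees_block(outfile=None):
    import sys
    if outfile is None:
        outfile = sys.stdout
    trees = Trees(id='trees1', otus='taxa1', label='example trees block')
    trees.add_network(FloatNetwork(id='network1'))
    trees.export(outfile, 0, name_='trees')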
class StandardChar(AbstractChar):
"""A concrete implementation of the char element, which requires a
states attribute to refer to a set of defined states"""
subclass = None
superclass = AbstractChar
def __init__(self, about=None, meta=None, label=None, id=None, tokens=None, states=None, codon=None, valueOf_=None):
super(StandardChar, self).__init__(about, meta, label, id, tokens, states, codon, )
self.tokens = _cast(None, tokens)
self.states = _cast(None, states)
self.codon = _cast(None, codon)
self.id = _cast(None, id)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if StandardChar.subclass:
return StandardChar.subclass(*args_, **kwargs_)
else:
return StandardChar(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_tokens(self): return self.tokens
def set_tokens(self, tokens): self.tokens = tokens
def validate_MSTokenLength(self, value):
# Validate type MSTokenLength, a restriction on xs:positiveInteger.
pass
def get_states(self): return self.states
def set_states(self, states): self.states = states
def get_codon(self): return self.codon
def set_codon(self, codon): self.codon = codon
def validate_CodonPosition(self, value):
# Validate type CodonPosition, a restriction on xs:nonNegativeInteger.
pass
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='StandardChar', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardChar')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardChar'):
super(StandardChar, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardChar')
if self.tokens is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
outfile.write(' tokens=%s' % (quote_attrib(self.tokens), ))
if self.states is not None and 'states' not in already_processed:
already_processed.append('states')
outfile.write(' states=%s' % (self.gds_format_string(quote_attrib(self.states).encode(ExternalEncoding), input_name='states'), ))
if self.codon is not None and 'codon' not in already_processed:
already_processed.append('codon')
outfile.write(' codon=%s' % (quote_attrib(self.codon), ))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='StandardChar', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(StandardChar, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StandardChar'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.tokens is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
showIndent(outfile, level)
outfile.write('tokens = %d,\n' % (self.tokens,))
if self.states is not None and 'states' not in already_processed:
already_processed.append('states')
showIndent(outfile, level)
outfile.write('states = "%s",\n' % (self.states,))
if self.codon is not None and 'codon' not in already_processed:
already_processed.append('codon')
showIndent(outfile, level)
outfile.write('codon = %d,\n' % (self.codon,))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
showIndent(outfile, level)
outfile.write('id = "%s",\n' % (self.id,))
super(StandardChar, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(StandardChar, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('tokens', node)
if value is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
try:
self.tokens = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.tokens <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
self.validate_MSTokenLength(self.tokens) # validate type MSTokenLength
value = find_attr_value_('states', node)
if value is not None and 'states' not in already_processed:
already_processed.append('states')
self.states = value
value = find_attr_value_('codon', node)
if value is not None and 'codon' not in already_processed:
already_processed.append('codon')
try:
self.codon = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.codon < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
self.validate_CodonPosition(self.codon) # validate type CodonPosition
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
super(StandardChar, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class StandardChar
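# Usage sketch (illustrative only): a StandardChar column read from XML.
# On parse, tokens must be a positive integer and codon non-negative before
# the (currently empty) validate_* hooks are invoked; 'c1' and 'states1'
# are placeholder ids.
def _example_parse_standard_char():
    from xml.etree import ElementTree
    xml = '<char id="c1" states="states1" tokens="2"/>'
    char = StandardChar.factory()
    char.build(ElementTree.fromstring(xml))
    return char.get_states(), char.get_tokens()  # -> ('states1', 2)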
class StandardStates(AbstractStates):
"""A container for a set of states."""
subclass = None
superclass = AbstractStates
def __init__(self, about=None, meta=None, label=None, id=None, state=None, polymorphic_state_set=None, uncertain_state_set=None, set=None, valueOf_=None):
super(StandardStates, self).__init__(about, meta, label, id, state, polymorphic_state_set, uncertain_state_set, set, )
if meta is None:
self.meta = []
else:
self.meta = meta
if state is None:
self.state = []
else:
self.state = state
if polymorphic_state_set is None:
self.polymorphic_state_set = []
else:
self.polymorphic_state_set = polymorphic_state_set
if uncertain_state_set is None:
self.uncertain_state_set = []
else:
self.uncertain_state_set = uncertain_state_set
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if StandardStates.subclass:
return StandardStates.subclass(*args_, **kwargs_)
else:
return StandardStates(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_state(self): return self.state
def set_state(self, state): self.state = state
def add_state(self, value): self.state.append(value)
def insert_state(self, index, value): self.state[index] = value
def get_polymorphic_state_set(self): return self.polymorphic_state_set
def set_polymorphic_state_set(self, polymorphic_state_set): self.polymorphic_state_set = polymorphic_state_set
def add_polymorphic_state_set(self, value): self.polymorphic_state_set.append(value)
def insert_polymorphic_state_set(self, index, value): self.polymorphic_state_set[index] = value
def get_uncertain_state_set(self): return self.uncertain_state_set
def set_uncertain_state_set(self, uncertain_state_set): self.uncertain_state_set = uncertain_state_set
def add_uncertain_state_set(self, value): self.uncertain_state_set.append(value)
def insert_uncertain_state_set(self, index, value): self.uncertain_state_set[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='StandardStates', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardStates')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardStates'):
super(StandardStates, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardStates')
def exportChildren(self, outfile, level, namespace_='', name_='StandardStates', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
for state_ in self.state:
state_.export(outfile, level, namespace_, name_='state')
for polymorphic_state_set_ in self.polymorphic_state_set:
polymorphic_state_set_.export(outfile, level, namespace_, name_='polymorphic_state_set')
for uncertain_state_set_ in self.uncertain_state_set:
uncertain_state_set_.export(outfile, level, namespace_, name_='uncertain_state_set')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.meta or
self.state or
self.polymorphic_state_set or
self.uncertain_state_set or
self.set or
super(StandardStates, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StandardStates'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(StandardStates, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(StandardStates, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('state=[\n')
level += 1
for state_ in self.state:
showIndent(outfile, level)
outfile.write('model_.StandardState(\n')
state_.exportLiteral(outfile, level, name_='StandardState')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('polymorphic_state_set=[\n')
level += 1
for polymorphic_state_set_ in self.polymorphic_state_set:
showIndent(outfile, level)
outfile.write('model_.StandardPolymorphicStateSet(\n')
polymorphic_state_set_.exportLiteral(outfile, level, name_='StandardPolymorphicStateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('uncertain_state_set=[\n')
level += 1
for uncertain_state_set_ in self.uncertain_state_set:
showIndent(outfile, level)
outfile.write('model_.StandardUncertainStateSet(\n')
uncertain_state_set_.exportLiteral(outfile, level, name_='StandardUncertainStateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.StateSet(\n')
set_.exportLiteral(outfile, level, name_='StateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(StandardStates, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'state':
obj_ = StandardState.factory()
obj_.build(child_)
self.state.append(obj_)
elif nodeName_ == 'polymorphic_state_set':
obj_ = StandardPolymorphicStateSet.factory()
obj_.build(child_)
self.polymorphic_state_set.append(obj_)
elif nodeName_ == 'uncertain_state_set':
obj_ = StandardUncertainStateSet.factory()
obj_.build(child_)
self.uncertain_state_set.append(obj_)
elif nodeName_ == 'set':
obj_ = StateSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class StandardStates
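# Usage sketch (illustrative only): a states block grouping two
# StandardState definitions. StandardState symbols are integers, unlike the
# single-character symbols used by the molecular state classes elsewhere in
# this module; the ids shown are placeholders.
def _example_build_standard_states(outfile=None):
    import sys
    if outfile is None:
        outfile = sys.stdout
    states = StandardStates(id='states1')
    states.add_state(StandardState(id='s1', symbol=1))
    states.add_state(StandardState(id='s2', symbol=2))
    states.export(outfile, 0, name_='states')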
class StandardState(AbstractState):
"""This is a concrete implementation of the state element, which
requires a symbol element, in this case restricted to integers,
and optional mapping elements to refer to other states."""
subclass = None
superclass = AbstractState
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, valueOf_=None):
super(StandardState, self).__init__(about, meta, label, id, symbol, )
self.symbol = _cast(None, symbol)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if StandardState.subclass:
return StandardState.subclass(*args_, **kwargs_)
else:
return StandardState(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_symbol(self): return self.symbol
def set_symbol(self, symbol): self.symbol = symbol
def validate_StandardToken(self, value):
# Validate type StandardToken, a restriction on xs:integer.
pass
def export(self, outfile, level, namespace_='', name_='StandardState', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardState')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardState'):
super(StandardState, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardState')
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
outfile.write(' symbol=%s' % (quote_attrib(self.symbol), ))
def exportChildren(self, outfile, level, namespace_='', name_='StandardState', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(StandardState, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StandardState'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
showIndent(outfile, level)
outfile.write('symbol = %d,\n' % (self.symbol,))
super(StandardState, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(StandardState, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('symbol', node)
if value is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
try:
self.symbol = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
self.validate_StandardToken(self.symbol) # validate type StandardToken
super(StandardState, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'meta':
            # <meta> is polymorphic: look up the concrete class named by the
            # instance document's xsi:type attribute in the module namespace.
            type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class StandardState
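# --- Illustrative usage (editor's sketch, not part of the generated code) ---
# A minimal example of building a StandardState by hand and serializing it
# with export(). It relies only on names defined in this module (showIndent
# and quote_attrib are the generated helpers export() already uses); the
# helper name below is invented for illustration and is never called at
# import time.
def _example_export_standard_state(outfile=None):
    import sys
    if outfile is None:
        outfile = sys.stdout
    state = StandardState(id='s1', label='state one', symbol=1)
    # level=0 starts at column zero; name_ selects the element tag.
    state.export(outfile, 0, name_='state')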
class RNAChar(AbstractChar):
"""A concrete implementation of the AbstractChar element, i.e. a single
column in an alignment."""
subclass = None
superclass = AbstractChar
def __init__(self, about=None, meta=None, label=None, id=None, tokens=None, states=None, codon=None, valueOf_=None):
super(RNAChar, self).__init__(about, meta, label, id, tokens, states, codon, )
self.tokens = _cast(None, tokens)
self.states = _cast(None, states)
self.codon = _cast(None, codon)
self.id = _cast(None, id)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if RNAChar.subclass:
return RNAChar.subclass(*args_, **kwargs_)
else:
return RNAChar(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_tokens(self): return self.tokens
def set_tokens(self, tokens): self.tokens = tokens
def validate_MSTokenLength(self, value):
# Validate type MSTokenLength, a restriction on xs:positiveInteger.
pass
def get_states(self): return self.states
def set_states(self, states): self.states = states
def get_codon(self): return self.codon
def set_codon(self, codon): self.codon = codon
def validate_CodonPosition(self, value):
# Validate type CodonPosition, a restriction on xs:nonNegativeInteger.
pass
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='RNAChar', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RNAChar')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RNAChar'):
super(RNAChar, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RNAChar')
if self.tokens is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
outfile.write(' tokens=%s' % (quote_attrib(self.tokens), ))
if self.states is not None and 'states' not in already_processed:
already_processed.append('states')
outfile.write(' states=%s' % (self.gds_format_string(quote_attrib(self.states).encode(ExternalEncoding), input_name='states'), ))
if self.codon is not None and 'codon' not in already_processed:
already_processed.append('codon')
outfile.write(' codon=%s' % (quote_attrib(self.codon), ))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='RNAChar', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(RNAChar, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RNAChar'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.tokens is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
showIndent(outfile, level)
outfile.write('tokens = %d,\n' % (self.tokens,))
if self.states is not None and 'states' not in already_processed:
already_processed.append('states')
showIndent(outfile, level)
outfile.write('states = "%s",\n' % (self.states,))
if self.codon is not None and 'codon' not in already_processed:
already_processed.append('codon')
showIndent(outfile, level)
outfile.write('codon = %d,\n' % (self.codon,))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
showIndent(outfile, level)
outfile.write('id = "%s",\n' % (self.id,))
super(RNAChar, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RNAChar, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('tokens', node)
if value is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
try:
self.tokens = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.tokens <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
self.validate_MSTokenLength(self.tokens) # validate type MSTokenLength
value = find_attr_value_('states', node)
if value is not None and 'states' not in already_processed:
already_processed.append('states')
self.states = value
value = find_attr_value_('codon', node)
if value is not None and 'codon' not in already_processed:
already_processed.append('codon')
try:
self.codon = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.codon < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
self.validate_CodonPosition(self.codon) # validate type CodonPosition
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
super(RNAChar, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class RNAChar
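# --- Illustrative usage (editor's sketch, not part of the generated code) ---
# Populating an RNAChar from a parsed element. build() accepts an
# ElementTree-style node and goes through the same find_attr_value_ helper
# the rest of the parse machinery uses. The XML literal and helper name are
# invented for illustration.
def _example_build_rna_char():
    from xml.etree import ElementTree as etree
    node = etree.fromstring('<char id="c1" states="rnastates1" codon="2"/>')
    char = RNAChar.factory()
    char.build(node)
    # codon was validated as a non-negative integer during buildAttributes
    return char.get_id(), char.get_states(), char.get_codon()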
class RNAStates(AbstractStates):
"""A container for a set of states."""
subclass = None
superclass = AbstractStates
def __init__(self, about=None, meta=None, label=None, id=None, state=None, polymorphic_state_set=None, uncertain_state_set=None, set=None, valueOf_=None):
super(RNAStates, self).__init__(about, meta, label, id, state, polymorphic_state_set, uncertain_state_set, set, )
if meta is None:
self.meta = []
else:
self.meta = meta
if state is None:
self.state = []
else:
self.state = state
if polymorphic_state_set is None:
self.polymorphic_state_set = []
else:
self.polymorphic_state_set = polymorphic_state_set
if uncertain_state_set is None:
self.uncertain_state_set = []
else:
self.uncertain_state_set = uncertain_state_set
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if RNAStates.subclass:
return RNAStates.subclass(*args_, **kwargs_)
else:
return RNAStates(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_state(self): return self.state
def set_state(self, state): self.state = state
def add_state(self, value): self.state.append(value)
def insert_state(self, index, value): self.state[index] = value
def get_polymorphic_state_set(self): return self.polymorphic_state_set
def set_polymorphic_state_set(self, polymorphic_state_set): self.polymorphic_state_set = polymorphic_state_set
def add_polymorphic_state_set(self, value): self.polymorphic_state_set.append(value)
def insert_polymorphic_state_set(self, index, value): self.polymorphic_state_set[index] = value
def get_uncertain_state_set(self): return self.uncertain_state_set
def set_uncertain_state_set(self, uncertain_state_set): self.uncertain_state_set = uncertain_state_set
def add_uncertain_state_set(self, value): self.uncertain_state_set.append(value)
def insert_uncertain_state_set(self, index, value): self.uncertain_state_set[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='RNAStates', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RNAStates')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RNAStates'):
super(RNAStates, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RNAStates')
def exportChildren(self, outfile, level, namespace_='', name_='RNAStates', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
for state_ in self.state:
state_.export(outfile, level, namespace_, name_='state')
for polymorphic_state_set_ in self.polymorphic_state_set:
polymorphic_state_set_.export(outfile, level, namespace_, name_='polymorphic_state_set')
for uncertain_state_set_ in self.uncertain_state_set:
uncertain_state_set_.export(outfile, level, namespace_, name_='uncertain_state_set')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.meta or
self.state or
self.polymorphic_state_set or
self.uncertain_state_set or
self.set or
super(RNAStates, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RNAStates'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RNAStates, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RNAStates, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('state=[\n')
level += 1
for state_ in self.state:
showIndent(outfile, level)
outfile.write('model_.RNAState(\n')
state_.exportLiteral(outfile, level, name_='RNAState')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('polymorphic_state_set=[\n')
level += 1
for polymorphic_state_set_ in self.polymorphic_state_set:
showIndent(outfile, level)
outfile.write('model_.RNAPolymorphicStateSet(\n')
polymorphic_state_set_.exportLiteral(outfile, level, name_='RNAPolymorphicStateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('uncertain_state_set=[\n')
level += 1
for uncertain_state_set_ in self.uncertain_state_set:
showIndent(outfile, level)
outfile.write('model_.RNAUncertainStateSet(\n')
uncertain_state_set_.exportLiteral(outfile, level, name_='RNAUncertainStateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.StateSet(\n')
set_.exportLiteral(outfile, level, name_='StateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RNAStates, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'state':
obj_ = RNAState.factory()
obj_.build(child_)
self.state.append(obj_)
elif nodeName_ == 'polymorphic_state_set':
obj_ = RNAPolymorphicStateSet.factory()
obj_.build(child_)
self.polymorphic_state_set.append(obj_)
elif nodeName_ == 'uncertain_state_set':
obj_ = RNAUncertainStateSet.factory()
obj_.build(child_)
self.uncertain_state_set.append(obj_)
elif nodeName_ == 'set':
obj_ = StateSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class RNAStates
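# --- Illustrative usage (editor's sketch, not part of the generated code) ---
# Every generated class exposes a `subclass` hook: assigning a class to it
# makes factory() -- and therefore buildChildren() during parsing --
# instantiate that class instead of the generated one. The subclass below is
# invented for illustration; the assignment is left commented out so module
# behaviour is unchanged.
class _ExampleRNAStates(RNAStates):
    def state_symbols(self):
        # convenience accessor layered on top of the generated getters
        return [state_.get_symbol() for state_ in self.get_state()]
# RNAStates.subclass = _ExampleRNAStates  # route factory() to the subclass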
class RNAState(AbstractState):
"""This is a concrete implementation of the state element, which
requires a symbol attribute, in this case restricted to
RNAToken, i.e. a single IUPAC nucleotide symbol, and optional
mapping elements to refer to other states."""
subclass = None
superclass = AbstractState
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, valueOf_=None):
super(RNAState, self).__init__(about, meta, label, id, symbol, )
self.symbol = _cast(None, symbol)
pass
def factory(*args_, **kwargs_):
if RNAState.subclass:
return RNAState.subclass(*args_, **kwargs_)
else:
return RNAState(*args_, **kwargs_)
factory = staticmethod(factory)
def get_symbol(self): return self.symbol
def set_symbol(self, symbol): self.symbol = symbol
def validate_RNAToken(self, value):
# Validate type RNAToken, a restriction on AbstractSymbol.
pass
def export(self, outfile, level, namespace_='', name_='RNAState', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RNAState')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RNAState'):
super(RNAState, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RNAState')
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
outfile.write(' symbol=%s' % (quote_attrib(self.symbol), ))
def exportChildren(self, outfile, level, namespace_='', name_='RNAState', fromsubclass_=False):
pass
def hasContent_(self):
if (
super(RNAState, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RNAState'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
showIndent(outfile, level)
outfile.write('symbol = "%s",\n' % (self.symbol,))
super(RNAState, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RNAState, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('symbol', node)
if value is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
self.symbol = value
self.validate_RNAToken(self.symbol) # validate type RNAToken
super(RNAState, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class RNAState
class RestrictionChar(AbstractChar):
"""A concrete implementation of the char element, which requires a
unique identifier and a state set reference."""
subclass = None
superclass = AbstractChar
def __init__(self, about=None, meta=None, label=None, id=None, tokens=None, states=None, codon=None, valueOf_=None):
super(RestrictionChar, self).__init__(about, meta, label, id, tokens, states, codon, )
self.tokens = _cast(None, tokens)
self.states = _cast(None, states)
self.codon = _cast(None, codon)
self.id = _cast(None, id)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if RestrictionChar.subclass:
return RestrictionChar.subclass(*args_, **kwargs_)
else:
return RestrictionChar(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_tokens(self): return self.tokens
def set_tokens(self, tokens): self.tokens = tokens
def validate_MSTokenLength(self, value):
# Validate type MSTokenLength, a restriction on xs:positiveInteger.
pass
def get_states(self): return self.states
def set_states(self, states): self.states = states
def get_codon(self): return self.codon
def set_codon(self, codon): self.codon = codon
def validate_CodonPosition(self, value):
# Validate type CodonPosition, a restriction on xs:nonNegativeInteger.
pass
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='RestrictionChar', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionChar')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RestrictionChar'):
super(RestrictionChar, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionChar')
if self.tokens is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
outfile.write(' tokens=%s' % (quote_attrib(self.tokens), ))
if self.states is not None and 'states' not in already_processed:
already_processed.append('states')
outfile.write(' states=%s' % (self.gds_format_string(quote_attrib(self.states).encode(ExternalEncoding), input_name='states'), ))
if self.codon is not None and 'codon' not in already_processed:
already_processed.append('codon')
outfile.write(' codon=%s' % (quote_attrib(self.codon), ))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='RestrictionChar', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(RestrictionChar, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RestrictionChar'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.tokens is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
showIndent(outfile, level)
outfile.write('tokens = %d,\n' % (self.tokens,))
if self.states is not None and 'states' not in already_processed:
already_processed.append('states')
showIndent(outfile, level)
outfile.write('states = "%s",\n' % (self.states,))
if self.codon is not None and 'codon' not in already_processed:
already_processed.append('codon')
showIndent(outfile, level)
outfile.write('codon = %d,\n' % (self.codon,))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
showIndent(outfile, level)
outfile.write('id = "%s",\n' % (self.id,))
super(RestrictionChar, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RestrictionChar, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('tokens', node)
if value is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
try:
self.tokens = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.tokens <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
self.validate_MSTokenLength(self.tokens) # validate type MSTokenLength
value = find_attr_value_('states', node)
if value is not None and 'states' not in already_processed:
already_processed.append('states')
self.states = value
value = find_attr_value_('codon', node)
if value is not None and 'codon' not in already_processed:
already_processed.append('codon')
try:
self.codon = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.codon < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
self.validate_CodonPosition(self.codon) # validate type CodonPosition
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
super(RestrictionChar, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class RestrictionChar
class RestrictionStates(AbstractStates):
"""A container for a set of states."""
subclass = None
superclass = AbstractStates
def __init__(self, about=None, meta=None, label=None, id=None, state=None, polymorphic_state_set=None, uncertain_state_set=None, set=None, valueOf_=None):
super(RestrictionStates, self).__init__(about, meta, label, id, state, polymorphic_state_set, uncertain_state_set, set, )
if meta is None:
self.meta = []
else:
self.meta = meta
if state is None:
self.state = []
else:
self.state = state
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if RestrictionStates.subclass:
return RestrictionStates.subclass(*args_, **kwargs_)
else:
return RestrictionStates(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_state(self): return self.state
def set_state(self, state): self.state = state
def add_state(self, value): self.state.append(value)
def insert_state(self, index, value): self.state[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='RestrictionStates', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionStates')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RestrictionStates'):
super(RestrictionStates, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionStates')
def exportChildren(self, outfile, level, namespace_='', name_='RestrictionStates', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
for state_ in self.state:
state_.export(outfile, level, namespace_, name_='state')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.meta or
self.state or
self.set or
super(RestrictionStates, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RestrictionStates'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RestrictionStates, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RestrictionStates, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('state=[\n')
level += 1
for state_ in self.state:
showIndent(outfile, level)
outfile.write('model_.RestrictionState(\n')
state_.exportLiteral(outfile, level, name_='RestrictionState')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.StateSet(\n')
set_.exportLiteral(outfile, level, name_='StateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RestrictionStates, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'state':
obj_ = RestrictionState.factory()
obj_.build(child_)
self.state.append(obj_)
elif nodeName_ == 'set':
obj_ = StateSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class RestrictionStates
class RestrictionState(AbstractState):
"""This is a concrete implementation of the state element, which
requires a symbol element, in this case restricted to 0/1."""
subclass = None
superclass = AbstractState
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, valueOf_=None):
super(RestrictionState, self).__init__(about, meta, label, id, symbol, )
self.symbol = _cast(None, symbol)
pass
def factory(*args_, **kwargs_):
if RestrictionState.subclass:
return RestrictionState.subclass(*args_, **kwargs_)
else:
return RestrictionState(*args_, **kwargs_)
factory = staticmethod(factory)
def get_symbol(self): return self.symbol
def set_symbol(self, symbol): self.symbol = symbol
def validate_RestrictionToken(self, value):
# Validate type RestrictionToken, a restriction on xs:integer.
pass
def export(self, outfile, level, namespace_='', name_='RestrictionState', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionState')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RestrictionState'):
super(RestrictionState, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionState')
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
outfile.write(' symbol=%s' % (quote_attrib(self.symbol), ))
def exportChildren(self, outfile, level, namespace_='', name_='RestrictionState', fromsubclass_=False):
pass
def hasContent_(self):
if (
super(RestrictionState, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RestrictionState'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
showIndent(outfile, level)
outfile.write('symbol = %d,\n' % (self.symbol,))
super(RestrictionState, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RestrictionState, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('symbol', node)
if value is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
try:
self.symbol = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
self.validate_RestrictionToken(self.symbol) # validate type RestrictionToken
super(RestrictionState, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class RestrictionState
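# --- Illustrative usage (editor's sketch, not part of the generated code) ---
# buildAttributes() parses the symbol attribute as an integer and reports
# malformed input through raise_parse_error. A small round-trip exercises
# both paths; the XML literals and helper name are invented for
# illustration.
def _example_restriction_state_parsing():
    from xml.etree import ElementTree as etree
    good = RestrictionState.factory()
    good.build(etree.fromstring('<state id="rs0" symbol="0"/>'))
    assert good.get_symbol() == 0
    bad = RestrictionState.factory()
    try:
        bad.build(etree.fromstring('<state id="rs1" symbol="one"/>'))
    except Exception:
        pass  # raise_parse_error signalled the bad integer attribute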
class AAState(AbstractState):
"""This is a concrete implementation of the state element, which
requires a symbol element, in this case restricted to AAToken,
i.e. a single IUPAC amino acid symbol, and optional mapping
elements to refer to other states."""
subclass = None
superclass = AbstractState
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, valueOf_=None):
super(AAState, self).__init__(about, meta, label, id, symbol, )
self.symbol = _cast(None, symbol)
pass
def factory(*args_, **kwargs_):
if AAState.subclass:
return AAState.subclass(*args_, **kwargs_)
else:
return AAState(*args_, **kwargs_)
factory = staticmethod(factory)
def get_symbol(self): return self.symbol
def set_symbol(self, symbol): self.symbol = symbol
def validate_AAToken(self, value):
# Validate type AAToken, a restriction on AbstractSymbol.
pass
def export(self, outfile, level, namespace_='', name_='AAState', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AAState')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AAState'):
super(AAState, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AAState')
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
outfile.write(' symbol=%s' % (quote_attrib(self.symbol), ))
def exportChildren(self, outfile, level, namespace_='', name_='AAState', fromsubclass_=False):
pass
def hasContent_(self):
if (
super(AAState, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AAState'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
showIndent(outfile, level)
outfile.write('symbol = "%s",\n' % (self.symbol,))
super(AAState, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AAState, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('symbol', node)
if value is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
self.symbol = value
self.validate_AAToken(self.symbol) # validate type AAToken
super(AAState, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class AAState
class AAStates(AbstractStates):
"""A container for a set of states."""
subclass = None
superclass = AbstractStates
def __init__(self, about=None, meta=None, label=None, id=None, state=None, polymorphic_state_set=None, uncertain_state_set=None, set=None, valueOf_=None):
super(AAStates, self).__init__(about, meta, label, id, state, polymorphic_state_set, uncertain_state_set, set, )
if meta is None:
self.meta = []
else:
self.meta = meta
if state is None:
self.state = []
else:
self.state = state
if polymorphic_state_set is None:
self.polymorphic_state_set = []
else:
self.polymorphic_state_set = polymorphic_state_set
if uncertain_state_set is None:
self.uncertain_state_set = []
else:
self.uncertain_state_set = uncertain_state_set
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if AAStates.subclass:
return AAStates.subclass(*args_, **kwargs_)
else:
return AAStates(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_state(self): return self.state
def set_state(self, state): self.state = state
def add_state(self, value): self.state.append(value)
def insert_state(self, index, value): self.state[index] = value
def get_polymorphic_state_set(self): return self.polymorphic_state_set
def set_polymorphic_state_set(self, polymorphic_state_set): self.polymorphic_state_set = polymorphic_state_set
def add_polymorphic_state_set(self, value): self.polymorphic_state_set.append(value)
def insert_polymorphic_state_set(self, index, value): self.polymorphic_state_set[index] = value
def get_uncertain_state_set(self): return self.uncertain_state_set
def set_uncertain_state_set(self, uncertain_state_set): self.uncertain_state_set = uncertain_state_set
def add_uncertain_state_set(self, value): self.uncertain_state_set.append(value)
def insert_uncertain_state_set(self, index, value): self.uncertain_state_set[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='AAStates', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AAStates')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AAStates'):
super(AAStates, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AAStates')
def exportChildren(self, outfile, level, namespace_='', name_='AAStates', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
for state_ in self.state:
state_.export(outfile, level, namespace_, name_='state')
for polymorphic_state_set_ in self.polymorphic_state_set:
polymorphic_state_set_.export(outfile, level, namespace_, name_='polymorphic_state_set')
for uncertain_state_set_ in self.uncertain_state_set:
uncertain_state_set_.export(outfile, level, namespace_, name_='uncertain_state_set')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.meta or
self.state or
self.polymorphic_state_set or
self.uncertain_state_set or
self.set or
super(AAStates, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AAStates'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AAStates, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AAStates, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('state=[\n')
level += 1
for state_ in self.state:
showIndent(outfile, level)
outfile.write('model_.AAState(\n')
state_.exportLiteral(outfile, level, name_='AAState')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('polymorphic_state_set=[\n')
level += 1
for polymorphic_state_set_ in self.polymorphic_state_set:
showIndent(outfile, level)
outfile.write('model_.AAPolymorphicStateSet(\n')
polymorphic_state_set_.exportLiteral(outfile, level, name_='AAPolymorphicStateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('uncertain_state_set=[\n')
level += 1
for uncertain_state_set_ in self.uncertain_state_set:
showIndent(outfile, level)
outfile.write('model_.AAUncertainStateSet(\n')
uncertain_state_set_.exportLiteral(outfile, level, name_='AAUncertainStateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.StateSet(\n')
set_.exportLiteral(outfile, level, name_='StateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AAStates, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'state':
obj_ = AAState.factory()
obj_.build(child_)
self.state.append(obj_)
elif nodeName_ == 'polymorphic_state_set':
obj_ = AAPolymorphicStateSet.factory()
obj_.build(child_)
self.polymorphic_state_set.append(obj_)
elif nodeName_ == 'uncertain_state_set':
obj_ = AAUncertainStateSet.factory()
obj_.build(child_)
self.uncertain_state_set.append(obj_)
elif nodeName_ == 'set':
obj_ = StateSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class AAStates
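# --- Illustrative usage (editor's sketch, not part of the generated code) ---
# Containers expose add_* mutators; assembling an AAStates block by hand and
# exporting it shows the nesting export() produces. Identifiers and symbols
# are invented for illustration.
def _example_aa_states_roundtrip():
    try:
        from StringIO import StringIO  # Python 2
    except ImportError:
        from io import StringIO       # Python 3
    states = AAStates(id='aa_states1')
    states.add_state(AAState(id='aa_s1', symbol='M'))
    states.add_state(AAState(id='aa_s2', symbol='W'))
    buf = StringIO()
    states.export(buf, 0, name_='states')
    return buf.getvalue()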
class AAChar(AbstractChar):
"""A concrete implementation of the AbstractChar element."""
subclass = None
superclass = AbstractChar
def __init__(self, about=None, meta=None, label=None, id=None, tokens=None, states=None, codon=None, valueOf_=None):
super(AAChar, self).__init__(about, meta, label, id, tokens, states, codon, )
self.tokens = _cast(None, tokens)
self.states = _cast(None, states)
self.codon = _cast(None, codon)
self.id = _cast(None, id)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if AAChar.subclass:
return AAChar.subclass(*args_, **kwargs_)
else:
return AAChar(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_tokens(self): return self.tokens
def set_tokens(self, tokens): self.tokens = tokens
def validate_MSTokenLength(self, value):
# Validate type MSTokenLength, a restriction on xs:positiveInteger.
pass
def get_states(self): return self.states
def set_states(self, states): self.states = states
def get_codon(self): return self.codon
def set_codon(self, codon): self.codon = codon
def validate_CodonPosition(self, value):
# Validate type CodonPosition, a restriction on xs:nonNegativeInteger.
pass
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='AAChar', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AAChar')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AAChar'):
super(AAChar, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AAChar')
if self.tokens is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
outfile.write(' tokens=%s' % (quote_attrib(self.tokens), ))
if self.states is not None and 'states' not in already_processed:
already_processed.append('states')
outfile.write(' states=%s' % (self.gds_format_string(quote_attrib(self.states).encode(ExternalEncoding), input_name='states'), ))
if self.codon is not None and 'codon' not in already_processed:
already_processed.append('codon')
outfile.write(' codon=%s' % (quote_attrib(self.codon), ))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='AAChar', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(AAChar, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AAChar'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.tokens is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
showIndent(outfile, level)
outfile.write('tokens = %d,\n' % (self.tokens,))
if self.states is not None and 'states' not in already_processed:
already_processed.append('states')
showIndent(outfile, level)
outfile.write('states = "%s",\n' % (self.states,))
if self.codon is not None and 'codon' not in already_processed:
already_processed.append('codon')
showIndent(outfile, level)
outfile.write('codon = %d,\n' % (self.codon,))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
showIndent(outfile, level)
outfile.write('id = "%s",\n' % (self.id,))
super(AAChar, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AAChar, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('tokens', node)
if value is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
try:
self.tokens = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.tokens <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
self.validate_MSTokenLength(self.tokens) # validate type MSTokenLength
value = find_attr_value_('states', node)
if value is not None and 'states' not in already_processed:
already_processed.append('states')
self.states = value
value = find_attr_value_('codon', node)
if value is not None and 'codon' not in already_processed:
already_processed.append('codon')
try:
self.codon = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.codon < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
self.validate_CodonPosition(self.codon) # validate type CodonPosition
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
super(AAChar, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class AAChar
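# --- Illustrative usage (editor's sketch, not part of the generated code) ---
# exportLiteral() renders an instance as Python-literal-style text, which is
# convenient for eyeballing parsed data. The attribute values are invented
# for illustration.
def _example_aa_char_literal():
    try:
        from StringIO import StringIO  # Python 2
    except ImportError:
        from io import StringIO       # Python 3
    buf = StringIO()
    char = AAChar(id='aa_c1', tokens=1, codon=1)
    char.exportLiteral(buf, 0)
    return buf.getvalue()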
class DNAChar(AbstractChar):
"""A concrete implementation of the AbstractChar element."""
subclass = None
superclass = AbstractChar
def __init__(self, about=None, meta=None, label=None, id=None, tokens=None, states=None, codon=None, valueOf_=None):
super(DNAChar, self).__init__(about, meta, label, id, tokens, states, codon, )
self.tokens = _cast(None, tokens)
self.states = _cast(None, states)
self.codon = _cast(None, codon)
self.id = _cast(None, id)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if DNAChar.subclass:
return DNAChar.subclass(*args_, **kwargs_)
else:
return DNAChar(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_tokens(self): return self.tokens
def set_tokens(self, tokens): self.tokens = tokens
def validate_MSTokenLength(self, value):
# Validate type MSTokenLength, a restriction on xs:positiveInteger.
pass
def get_states(self): return self.states
def set_states(self, states): self.states = states
def get_codon(self): return self.codon
def set_codon(self, codon): self.codon = codon
def validate_CodonPosition(self, value):
# Validate type CodonPosition, a restriction on xs:nonNegativeInteger.
pass
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='DNAChar', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DNAChar')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DNAChar'):
super(DNAChar, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DNAChar')
if self.tokens is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
outfile.write(' tokens=%s' % (quote_attrib(self.tokens), ))
if self.states is not None and 'states' not in already_processed:
already_processed.append('states')
outfile.write(' states=%s' % (self.gds_format_string(quote_attrib(self.states).encode(ExternalEncoding), input_name='states'), ))
if self.codon is not None and 'codon' not in already_processed:
already_processed.append('codon')
outfile.write(' codon=%s' % (quote_attrib(self.codon), ))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='DNAChar', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(DNAChar, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DNAChar'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.tokens is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
showIndent(outfile, level)
outfile.write('tokens = %d,\n' % (self.tokens,))
if self.states is not None and 'states' not in already_processed:
already_processed.append('states')
showIndent(outfile, level)
outfile.write('states = "%s",\n' % (self.states,))
if self.codon is not None and 'codon' not in already_processed:
already_processed.append('codon')
showIndent(outfile, level)
outfile.write('codon = %d,\n' % (self.codon,))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
showIndent(outfile, level)
outfile.write('id = "%s",\n' % (self.id,))
super(DNAChar, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DNAChar, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('tokens', node)
if value is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
try:
self.tokens = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.tokens <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
self.validate_MSTokenLength(self.tokens) # validate type MSTokenLength
value = find_attr_value_('states', node)
if value is not None and 'states' not in already_processed:
already_processed.append('states')
self.states = value
value = find_attr_value_('codon', node)
if value is not None and 'codon' not in already_processed:
already_processed.append('codon')
try:
self.codon = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.codon < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
self.validate_CodonPosition(self.codon) # validate type CodonPosition
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
super(DNAChar, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class DNAChar
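# Hedged usage sketch (defined but never called): how DNAChar's
# attribute validation above behaves when built from a parsed element.
# Assumes xml.etree-style nodes, which the Tag_pattern_ and
# find_attr_value_ helpers earlier in this module accept.
def _example_build_dna_char():
    from xml.etree import ElementTree as etree
    # tokens must parse as a positive integer and codon as a
    # non-negative integer, or buildAttributes raises via
    # raise_parse_error.
    node = etree.fromstring('<char id="c1" tokens="1" codon="2"/>')
    char_ = DNAChar.factory()
    char_.build(node)
    return char_.tokens, char_.codon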
class DNAStates(AbstractStates):
"""A container for a set of states."""
subclass = None
superclass = AbstractStates
def __init__(self, about=None, meta=None, label=None, id=None, state=None, polymorphic_state_set=None, uncertain_state_set=None, set=None, valueOf_=None):
super(DNAStates, self).__init__(about, meta, label, id, state, polymorphic_state_set, uncertain_state_set, set, )
if meta is None:
self.meta = []
else:
self.meta = meta
if state is None:
self.state = []
else:
self.state = state
if polymorphic_state_set is None:
self.polymorphic_state_set = []
else:
self.polymorphic_state_set = polymorphic_state_set
if uncertain_state_set is None:
self.uncertain_state_set = []
else:
self.uncertain_state_set = uncertain_state_set
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if DNAStates.subclass:
return DNAStates.subclass(*args_, **kwargs_)
else:
return DNAStates(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_state(self): return self.state
def set_state(self, state): self.state = state
def add_state(self, value): self.state.append(value)
def insert_state(self, index, value): self.state[index] = value
def get_polymorphic_state_set(self): return self.polymorphic_state_set
def set_polymorphic_state_set(self, polymorphic_state_set): self.polymorphic_state_set = polymorphic_state_set
def add_polymorphic_state_set(self, value): self.polymorphic_state_set.append(value)
def insert_polymorphic_state_set(self, index, value): self.polymorphic_state_set[index] = value
def get_uncertain_state_set(self): return self.uncertain_state_set
def set_uncertain_state_set(self, uncertain_state_set): self.uncertain_state_set = uncertain_state_set
def add_uncertain_state_set(self, value): self.uncertain_state_set.append(value)
def insert_uncertain_state_set(self, index, value): self.uncertain_state_set[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='DNAStates', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DNAStates')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DNAStates'):
super(DNAStates, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DNAStates')
def exportChildren(self, outfile, level, namespace_='', name_='DNAStates', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
for state_ in self.state:
state_.export(outfile, level, namespace_, name_='state')
for polymorphic_state_set_ in self.polymorphic_state_set:
polymorphic_state_set_.export(outfile, level, namespace_, name_='polymorphic_state_set')
for uncertain_state_set_ in self.uncertain_state_set:
uncertain_state_set_.export(outfile, level, namespace_, name_='uncertain_state_set')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.meta or
self.state or
self.polymorphic_state_set or
self.uncertain_state_set or
self.set or
super(DNAStates, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DNAStates'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(DNAStates, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DNAStates, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('state=[\n')
level += 1
for state_ in self.state:
showIndent(outfile, level)
outfile.write('model_.DNAState(\n')
state_.exportLiteral(outfile, level, name_='DNAState')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('polymorphic_state_set=[\n')
level += 1
for polymorphic_state_set_ in self.polymorphic_state_set:
showIndent(outfile, level)
outfile.write('model_.DNAPolymorphicStateSet(\n')
polymorphic_state_set_.exportLiteral(outfile, level, name_='DNAPolymorphicStateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('uncertain_state_set=[\n')
level += 1
for uncertain_state_set_ in self.uncertain_state_set:
showIndent(outfile, level)
outfile.write('model_.DNAUncertainStateSet(\n')
uncertain_state_set_.exportLiteral(outfile, level, name_='DNAUncertainStateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.StateSet(\n')
set_.exportLiteral(outfile, level, name_='StateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(DNAStates, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'state':
obj_ = DNAState.factory()
obj_.build(child_)
self.state.append(obj_)
elif nodeName_ == 'polymorphic_state_set':
obj_ = DNAPolymorphicStateSet.factory()
obj_.build(child_)
self.polymorphic_state_set.append(obj_)
elif nodeName_ == 'uncertain_state_set':
obj_ = DNAUncertainStateSet.factory()
obj_.build(child_)
self.uncertain_state_set.append(obj_)
elif nodeName_ == 'set':
obj_ = StateSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class DNAStates
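# Hedged sketch (never invoked): assemble a DNAStates container in
# memory and serialize it with export(). DNAState is defined just
# below; the forward reference resolves at call time, e.g.
# _example_export_dna_states(sys.stdout).
def _example_export_dna_states(outfile):
    states_ = DNAStates(id='states1', label='DNA states')
    states_.add_state(DNAState(id='s1', symbol='A'))
    states_.add_state(DNAState(id='s2', symbol='C'))
    states_.export(outfile, 0, name_='states')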
class DNAState(AbstractState):
"""This is a concrete implementation of the state element, which
requires a symbol element, in this case restricted to DNAToken,
i.e. a single IUPAC nucleotide symbol, and optional mapping
elements to refer to other states."""
subclass = None
superclass = AbstractState
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, valueOf_=None):
super(DNAState, self).__init__(about, meta, label, id, symbol, )
self.symbol = _cast(None, symbol)
pass
def factory(*args_, **kwargs_):
if DNAState.subclass:
return DNAState.subclass(*args_, **kwargs_)
else:
return DNAState(*args_, **kwargs_)
factory = staticmethod(factory)
def get_symbol(self): return self.symbol
def set_symbol(self, symbol): self.symbol = symbol
def validate_DNAToken(self, value):
# Validate type DNAToken, a restriction on AbstractSymbol.
pass
def export(self, outfile, level, namespace_='', name_='DNAState', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DNAState')
if self.hasContent_():
outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DNAState'):
super(DNAState, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DNAState')
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
outfile.write(' symbol=%s' % (quote_attrib(self.symbol), ))
def exportChildren(self, outfile, level, namespace_='', name_='DNAState', fromsubclass_=False):
pass
def hasContent_(self):
if (
super(DNAState, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DNAState'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
showIndent(outfile, level)
outfile.write('symbol = "%s",\n' % (self.symbol,))
super(DNAState, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DNAState, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('symbol', node)
if value is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
self.symbol = value
self.validate_DNAToken(self.symbol) # validate type DNAToken
super(DNAState, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class DNAState
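# Hedged round-trip sketch for DNAState (not executed at import time):
# parse a <state> element, then write it back out. Assumes xml.etree
# nodes and the module-level quote_attrib/ExternalEncoding helpers.
def _example_round_trip_dna_state(outfile):
    from xml.etree import ElementTree as etree
    node = etree.fromstring('<state id="s1" label="adenine" symbol="A"/>')
    state_ = DNAState.factory()
    state_.build(node)  # validate_DNAToken is currently a no-op
    state_.export(outfile, 0, name_='state')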
class AbstractBlock(TaxaLinked):
"""The AbstractBlock is the superclass for blocks that contain an
element structure of type AbstractFormat."""
subclass = None
superclass = TaxaLinked
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, format=None, valueOf_=None):
super(AbstractBlock, self).__init__(about, meta, label, id, otus, )
self.format = format
def factory(*args_, **kwargs_):
if AbstractBlock.subclass:
return AbstractBlock.subclass(*args_, **kwargs_)
else:
return AbstractBlock(*args_, **kwargs_)
factory = staticmethod(factory)
def get_format(self): return self.format
def set_format(self, format): self.format = format
def export(self, outfile, level, namespace_='', name_='AbstractBlock', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractBlock')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractBlock"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractBlock'):
super(AbstractBlock, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractBlock')
def exportChildren(self, outfile, level, namespace_='', name_='AbstractBlock', fromsubclass_=False):
super(AbstractBlock, self).exportChildren(outfile, level, namespace_, name_, True)
        if self.format is not None:
            self.format.export(outfile, level, namespace_, name_='format')
def hasContent_(self):
if (
self.format is not None or
super(AbstractBlock, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractBlock'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AbstractBlock, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractBlock, self).exportLiteralChildren(outfile, level, name_)
        if self.format is not None:
            showIndent(outfile, level)
            outfile.write('format=model_.AbstractFormat(\n')
            self.format.exportLiteral(outfile, level, name_='format')
            showIndent(outfile, level)
            outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AbstractBlock, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'format':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <format> element')
self.set_format(obj_)
super(AbstractBlock, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractBlock
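# Hedged sketch of the xsi:type dispatch performed by
# AbstractBlock.buildChildren (and its siblings in this module): the
# possibly-prefixed type name is reduced to its local part and looked
# up in globals() to find the generated class, e.g.
# _example_xsi_type_dispatch('nex:DNAChar') yields a DNAChar instance.
def _example_xsi_type_dispatch(type_name_):
    local_name_ = type_name_.split(':')[-1]
    return globals()[local_name_].factory()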
class AbstractObsRow(TaxonLinked):
"""The AbstractObsRow represents a single row in a matrix. The row must
refer to a previously declared otu element by its id attribute
(and must have an id itself, may have a label, and may have meta
attachments). The row contains multiple cell elements."""
subclass = None
superclass = TaxonLinked
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, cell=None, set=None, valueOf_=None):
super(AbstractObsRow, self).__init__(about, meta, label, id, otu, )
if cell is None:
self.cell = []
else:
self.cell = cell
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if AbstractObsRow.subclass:
return AbstractObsRow.subclass(*args_, **kwargs_)
else:
return AbstractObsRow(*args_, **kwargs_)
factory = staticmethod(factory)
def get_cell(self): return self.cell
def set_cell(self, cell): self.cell = cell
def add_cell(self, value): self.cell.append(value)
def insert_cell(self, index, value): self.cell[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='AbstractObsRow', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractObsRow')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractObsRow"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractObsRow'):
super(AbstractObsRow, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractObsRow')
def exportChildren(self, outfile, level, namespace_='', name_='AbstractObsRow', fromsubclass_=False):
super(AbstractObsRow, self).exportChildren(outfile, level, namespace_, name_, True)
for cell_ in self.get_cell():
cell_.export(outfile, level, namespace_, name_='cell')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.cell or
self.set or
super(AbstractObsRow, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractObsRow'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AbstractObsRow, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractObsRow, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('cell=[\n')
level += 1
for cell_ in self.cell:
showIndent(outfile, level)
outfile.write('model_.AbstractObs(\n')
cell_.exportLiteral(outfile, level, name_='AbstractObs')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.CellSet(\n')
set_.exportLiteral(outfile, level, name_='CellSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AbstractObsRow, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'cell':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <cell> element')
self.cell.append(obj_)
elif nodeName_ == 'set':
obj_ = CellSet.factory()
obj_.build(child_)
self.set.append(obj_)
super(AbstractObsRow, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractObsRow
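# Hedged sketch (never called): an AbstractObsRow refers to a declared
# otu by id and carries cell observations and set groupings. Concrete
# subclasses are the normal entry point; the base class is used here
# only for illustration. CellSet is defined later in this module, so
# the forward reference resolves at call time.
def _example_obs_row_sets(outfile):
    row_ = AbstractObsRow(id='row1', otu='otu1')
    row_.add_set(CellSet(id='cs1', cell='c1 c2'))
    row_.export(outfile, 0, name_='row')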
class AbstractSeqRow(TaxonLinked):
"""The AbstractSeqRow represents a single row in a matrix. The row must
refer to a previously declared otu element by its id attribute
(and must have an id itself, may have a label, and may have meta
attachments). The row contains a single seq element with raw
character data."""
subclass = None
superclass = TaxonLinked
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, seq=None, valueOf_=None):
super(AbstractSeqRow, self).__init__(about, meta, label, id, otu, )
self.seq = seq
def factory(*args_, **kwargs_):
if AbstractSeqRow.subclass:
return AbstractSeqRow.subclass(*args_, **kwargs_)
else:
return AbstractSeqRow(*args_, **kwargs_)
factory = staticmethod(factory)
def get_seq(self): return self.seq
def set_seq(self, seq): self.seq = seq
def export(self, outfile, level, namespace_='', name_='AbstractSeqRow', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractSeqRow')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractSeqRow"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractSeqRow'):
super(AbstractSeqRow, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractSeqRow')
def exportChildren(self, outfile, level, namespace_='', name_='AbstractSeqRow', fromsubclass_=False):
super(AbstractSeqRow, self).exportChildren(outfile, level, namespace_, name_, True)
if self.seq:
self.seq.export(outfile, level, namespace_, name_='seq', )
def hasContent_(self):
if (
self.seq is not None or
super(AbstractSeqRow, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractSeqRow'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AbstractSeqRow, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractSeqRow, self).exportLiteralChildren(outfile, level, name_)
if self.seq is not None:
showIndent(outfile, level)
outfile.write('seq=model_.xs_anySimpleType(\n')
self.seq.exportLiteral(outfile, level, name_='seq')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AbstractSeqRow, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'seq':
obj_ = xs_anySimpleType.factory()
obj_.build(child_)
self.set_seq(obj_)
super(AbstractSeqRow, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractSeqRow
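# Hedged parse sketch for AbstractSeqRow (defined but unused): the
# single <seq> child is wrapped in xs_anySimpleType by buildChildren
# above. Assumes xml.etree-style nodes.
def _example_parse_seq_row():
    from xml.etree import ElementTree as etree
    node = etree.fromstring('<row id="r1" otu="otu1"><seq>ACGT</seq></row>')
    row_ = AbstractSeqRow.factory()
    row_.build(node)
    return row_.get_seq()  # an xs_anySimpleType instance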
class AbstractUncertainStateSet(AbstractState):
"""The AbstractUncertainStateSet type is the super-class for an
uncertain state set definition. The element has a required
AbstractSymbol attribute that in restricted concrete subclasses
must be of a sensible type such as a single IUPAC character. It
may enclose zero or more AbstractMapping elements to resolve
ambiguities."""
subclass = None
superclass = AbstractState
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, member=None, valueOf_=None):
super(AbstractUncertainStateSet, self).__init__(about, meta, label, id, symbol, )
if member is None:
self.member = []
else:
self.member = member
def factory(*args_, **kwargs_):
if AbstractUncertainStateSet.subclass:
return AbstractUncertainStateSet.subclass(*args_, **kwargs_)
else:
return AbstractUncertainStateSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
def export(self, outfile, level, namespace_='', name_='AbstractUncertainStateSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractUncertainStateSet')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractUncertainStateSet"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractUncertainStateSet'):
super(AbstractUncertainStateSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractUncertainStateSet')
def exportChildren(self, outfile, level, namespace_='', name_='AbstractUncertainStateSet', fromsubclass_=False):
super(AbstractUncertainStateSet, self).exportChildren(outfile, level, namespace_, name_, True)
for member_ in self.get_member():
member_.export(outfile, level, namespace_, name_='member')
def hasContent_(self):
if (
self.member or
super(AbstractUncertainStateSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractUncertainStateSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AbstractUncertainStateSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractUncertainStateSet, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('member=[\n')
level += 1
for member_ in self.member:
showIndent(outfile, level)
outfile.write('model_.AbstractMapping(\n')
member_.exportLiteral(outfile, level, name_='AbstractMapping')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AbstractUncertainStateSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'member':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <member> element')
self.member.append(obj_)
super(AbstractUncertainStateSet, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractUncertainStateSet
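# Hedged sketch: an uncertain state set pairs one symbol with member
# mappings that resolve the ambiguity. AbstractMapping instances are
# assumed to come from elsewhere in this module (exportLiteralChildren
# above references them); only the accessor pattern is shown.
def _example_uncertain_state_set(members):
    uss_ = AbstractUncertainStateSet(id='u1', symbol='N')
    for member_ in members:  # e.g. AbstractMapping instances
        uss_.add_member(member_)
    return uss_.get_member()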
class ContinuousMatrixObsRow(AbstractObsRow):
"""This is a row in a matrix of continuous data as granular
    observations."""
subclass = None
superclass = AbstractObsRow
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, cell=None, set=None, valueOf_=None):
super(ContinuousMatrixObsRow, self).__init__(about, meta, label, id, otu, cell, set, )
if meta is None:
self.meta = []
else:
self.meta = meta
if cell is None:
self.cell = []
else:
self.cell = cell
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if ContinuousMatrixObsRow.subclass:
return ContinuousMatrixObsRow.subclass(*args_, **kwargs_)
else:
return ContinuousMatrixObsRow(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_cell(self): return self.cell
def set_cell(self, cell): self.cell = cell
def add_cell(self, value): self.cell.append(value)
def insert_cell(self, index, value): self.cell[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='ContinuousMatrixObsRow', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousMatrixObsRow')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ContinuousMatrixObsRow'):
super(ContinuousMatrixObsRow, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousMatrixObsRow')
def exportChildren(self, outfile, level, namespace_='', name_='ContinuousMatrixObsRow', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
for cell_ in self.cell:
cell_.export(outfile, level, namespace_, name_='cell')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.meta or
self.cell or
self.set or
super(ContinuousMatrixObsRow, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ContinuousMatrixObsRow'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ContinuousMatrixObsRow, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ContinuousMatrixObsRow, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('cell=[\n')
level += 1
for cell_ in self.cell:
showIndent(outfile, level)
outfile.write('model_.ContinuousObs(\n')
cell_.exportLiteral(outfile, level, name_='ContinuousObs')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.CellSet(\n')
set_.exportLiteral(outfile, level, name_='CellSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ContinuousMatrixObsRow, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'cell':
obj_ = ContinuousObs.factory()
obj_.build(child_)
self.cell.append(obj_)
elif nodeName_ == 'set':
obj_ = CellSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class ContinuousMatrixObsRow
class ContinuousMatrixSeqRow(AbstractSeqRow):
"""This is a row in a matrix of continuous data as character sequences."""
subclass = None
superclass = AbstractSeqRow
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, seq=None, valueOf_=None):
super(ContinuousMatrixSeqRow, self).__init__(about, meta, label, id, otu, seq, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.seq = seq
def factory(*args_, **kwargs_):
if ContinuousMatrixSeqRow.subclass:
return ContinuousMatrixSeqRow.subclass(*args_, **kwargs_)
else:
return ContinuousMatrixSeqRow(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_seq(self): return self.seq
def set_seq(self, seq): self.seq = seq
def validate_ContinuousSeq(self, value):
# Validate type ContinuousSeq, a restriction on AbstractTokenList.
pass
def export(self, outfile, level, namespace_='', name_='ContinuousMatrixSeqRow', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousMatrixSeqRow')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ContinuousMatrixSeqRow'):
super(ContinuousMatrixSeqRow, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousMatrixSeqRow')
def exportChildren(self, outfile, level, namespace_='', name_='ContinuousMatrixSeqRow', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.seq is not None:
showIndent(outfile, level)
outfile.write('<%sseq>%s</%sseq>\n' % (namespace_, self.gds_format_double_list(self.seq, input_name='seq'), namespace_))
def hasContent_(self):
if (
self.meta or
self.seq is not None or
super(ContinuousMatrixSeqRow, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ContinuousMatrixSeqRow'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ContinuousMatrixSeqRow, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ContinuousMatrixSeqRow, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.seq is not None:
showIndent(outfile, level)
            outfile.write('seq="%s",\n' % (self.seq,))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ContinuousMatrixSeqRow, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'seq':
seq_ = child_.text
seq_ = self.gds_validate_double_list(seq_, node, 'seq')
self.seq = seq_
self.validate_ContinuousSeq(self.seq) # validate type ContinuousSeq
# end class ContinuousMatrixSeqRow
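# Hedged sketch for continuous sequence rows (never invoked): the <seq>
# text is a whitespace-separated list of doubles, checked by
# gds_validate_double_list and re-emitted by gds_format_double_list.
def _example_parse_continuous_seq_row():
    from xml.etree import ElementTree as etree
    node = etree.fromstring('<row id="r1"><seq>1.25 -0.5 3.0</seq></row>')
    row_ = ContinuousMatrixSeqRow.factory()
    row_.build(node)
    return row_.get_seq()  # the validated token string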
class NodeAndRootEdgeAndEdgeSet(AbstractSet):
subclass = None
superclass = AbstractSet
def __init__(self, about=None, meta=None, label=None, id=None, node=None, edge=None, rootedge=None, valueOf_=None):
super(NodeAndRootEdgeAndEdgeSet, self).__init__(about, meta, label, id, )
self.node = _cast(None, node)
self.edge = _cast(None, edge)
self.rootedge = _cast(None, rootedge)
pass
def factory(*args_, **kwargs_):
if NodeAndRootEdgeAndEdgeSet.subclass:
return NodeAndRootEdgeAndEdgeSet.subclass(*args_, **kwargs_)
else:
return NodeAndRootEdgeAndEdgeSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_node(self): return self.node
def set_node(self, node): self.node = node
def get_edge(self): return self.edge
def set_edge(self, edge): self.edge = edge
def get_rootedge(self): return self.rootedge
def set_rootedge(self, rootedge): self.rootedge = rootedge
def export(self, outfile, level, namespace_='', name_='NodeAndRootEdgeAndEdgeSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='NodeAndRootEdgeAndEdgeSet')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="NodeAndRootEdgeAndEdgeSet"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='NodeAndRootEdgeAndEdgeSet'):
super(NodeAndRootEdgeAndEdgeSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='NodeAndRootEdgeAndEdgeSet')
if self.node is not None and 'node' not in already_processed:
already_processed.append('node')
outfile.write(' node=%s' % (self.gds_format_string(quote_attrib(self.node).encode(ExternalEncoding), input_name='node'), ))
if self.edge is not None and 'edge' not in already_processed:
already_processed.append('edge')
outfile.write(' edge=%s' % (self.gds_format_string(quote_attrib(self.edge).encode(ExternalEncoding), input_name='edge'), ))
if self.rootedge is not None and 'rootedge' not in already_processed:
already_processed.append('rootedge')
outfile.write(' rootedge=%s' % (self.gds_format_string(quote_attrib(self.rootedge).encode(ExternalEncoding), input_name='rootedge'), ))
def exportChildren(self, outfile, level, namespace_='', name_='NodeAndRootEdgeAndEdgeSet', fromsubclass_=False):
super(NodeAndRootEdgeAndEdgeSet, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(NodeAndRootEdgeAndEdgeSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='NodeAndRootEdgeAndEdgeSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.node is not None and 'node' not in already_processed:
already_processed.append('node')
showIndent(outfile, level)
outfile.write('node = "%s",\n' % (self.node,))
if self.edge is not None and 'edge' not in already_processed:
already_processed.append('edge')
showIndent(outfile, level)
outfile.write('edge = "%s",\n' % (self.edge,))
if self.rootedge is not None and 'rootedge' not in already_processed:
already_processed.append('rootedge')
showIndent(outfile, level)
outfile.write('rootedge = "%s",\n' % (self.rootedge,))
super(NodeAndRootEdgeAndEdgeSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(NodeAndRootEdgeAndEdgeSet, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('node', node)
if value is not None and 'node' not in already_processed:
already_processed.append('node')
self.node = value
value = find_attr_value_('edge', node)
if value is not None and 'edge' not in already_processed:
already_processed.append('edge')
self.edge = value
value = find_attr_value_('rootedge', node)
if value is not None and 'rootedge' not in already_processed:
already_processed.append('rootedge')
self.rootedge = value
super(NodeAndRootEdgeAndEdgeSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(NodeAndRootEdgeAndEdgeSet, self).buildChildren(child_, node, nodeName_, True)
pass
# end class NodeAndRootEdgeAndEdgeSet
class TreeAndNetworkSet(AbstractSet):
subclass = None
superclass = AbstractSet
def __init__(self, about=None, meta=None, label=None, id=None, tree=None, network=None, valueOf_=None):
super(TreeAndNetworkSet, self).__init__(about, meta, label, id, )
self.tree = _cast(None, tree)
self.network = _cast(None, network)
pass
def factory(*args_, **kwargs_):
if TreeAndNetworkSet.subclass:
return TreeAndNetworkSet.subclass(*args_, **kwargs_)
else:
return TreeAndNetworkSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_tree(self): return self.tree
def set_tree(self, tree): self.tree = tree
def get_network(self): return self.network
def set_network(self, network): self.network = network
def export(self, outfile, level, namespace_='', name_='TreeAndNetworkSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TreeAndNetworkSet')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="TreeAndNetworkSet"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TreeAndNetworkSet'):
super(TreeAndNetworkSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TreeAndNetworkSet')
if self.tree is not None and 'tree' not in already_processed:
already_processed.append('tree')
outfile.write(' tree=%s' % (self.gds_format_string(quote_attrib(self.tree).encode(ExternalEncoding), input_name='tree'), ))
if self.network is not None and 'network' not in already_processed:
already_processed.append('network')
outfile.write(' network=%s' % (self.gds_format_string(quote_attrib(self.network).encode(ExternalEncoding), input_name='network'), ))
def exportChildren(self, outfile, level, namespace_='', name_='TreeAndNetworkSet', fromsubclass_=False):
super(TreeAndNetworkSet, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(TreeAndNetworkSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='TreeAndNetworkSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.tree is not None and 'tree' not in already_processed:
already_processed.append('tree')
showIndent(outfile, level)
outfile.write('tree = "%s",\n' % (self.tree,))
if self.network is not None and 'network' not in already_processed:
already_processed.append('network')
showIndent(outfile, level)
outfile.write('network = "%s",\n' % (self.network,))
super(TreeAndNetworkSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(TreeAndNetworkSet, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('tree', node)
if value is not None and 'tree' not in already_processed:
already_processed.append('tree')
self.tree = value
value = find_attr_value_('network', node)
if value is not None and 'network' not in already_processed:
already_processed.append('network')
self.network = value
super(TreeAndNetworkSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(TreeAndNetworkSet, self).buildChildren(child_, node, nodeName_, True)
pass
# end class TreeAndNetworkSet
class CellSet(AbstractSet):
subclass = None
superclass = AbstractSet
def __init__(self, about=None, meta=None, label=None, id=None, cell=None, valueOf_=None):
super(CellSet, self).__init__(about, meta, label, id, )
self.cell = _cast(None, cell)
pass
def factory(*args_, **kwargs_):
if CellSet.subclass:
return CellSet.subclass(*args_, **kwargs_)
else:
return CellSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_cell(self): return self.cell
def set_cell(self, cell): self.cell = cell
def export(self, outfile, level, namespace_='', name_='CellSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='CellSet')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="CellSet"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='CellSet'):
super(CellSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='CellSet')
if self.cell is not None and 'cell' not in already_processed:
already_processed.append('cell')
outfile.write(' cell=%s' % (self.gds_format_string(quote_attrib(self.cell).encode(ExternalEncoding), input_name='cell'), ))
def exportChildren(self, outfile, level, namespace_='', name_='CellSet', fromsubclass_=False):
super(CellSet, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(CellSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='CellSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.cell is not None and 'cell' not in already_processed:
already_processed.append('cell')
showIndent(outfile, level)
outfile.write('cell = "%s",\n' % (self.cell,))
super(CellSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(CellSet, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('cell', node)
if value is not None and 'cell' not in already_processed:
already_processed.append('cell')
self.cell = value
super(CellSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(CellSet, self).buildChildren(child_, node, nodeName_, True)
pass
# end class CellSet
class RowSet(AbstractSet):
subclass = None
superclass = AbstractSet
def __init__(self, about=None, meta=None, label=None, id=None, row=None, valueOf_=None):
super(RowSet, self).__init__(about, meta, label, id, )
self.row = _cast(None, row)
pass
def factory(*args_, **kwargs_):
if RowSet.subclass:
return RowSet.subclass(*args_, **kwargs_)
else:
return RowSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def export(self, outfile, level, namespace_='', name_='RowSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RowSet')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="RowSet"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RowSet'):
super(RowSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RowSet')
if self.row is not None and 'row' not in already_processed:
already_processed.append('row')
outfile.write(' row=%s' % (self.gds_format_string(quote_attrib(self.row).encode(ExternalEncoding), input_name='row'), ))
def exportChildren(self, outfile, level, namespace_='', name_='RowSet', fromsubclass_=False):
super(RowSet, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(RowSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RowSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.row is not None and 'row' not in already_processed:
already_processed.append('row')
showIndent(outfile, level)
outfile.write('row = "%s",\n' % (self.row,))
super(RowSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RowSet, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('row', node)
if value is not None and 'row' not in already_processed:
already_processed.append('row')
self.row = value
super(RowSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(RowSet, self).buildChildren(child_, node, nodeName_, True)
pass
# end class RowSet
class CharSet(AbstractSet):
subclass = None
superclass = AbstractSet
def __init__(self, about=None, meta=None, label=None, id=None, char=None, valueOf_=None):
super(CharSet, self).__init__(about, meta, label, id, )
self.char = _cast(None, char)
pass
def factory(*args_, **kwargs_):
if CharSet.subclass:
return CharSet.subclass(*args_, **kwargs_)
else:
return CharSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_char(self): return self.char
def set_char(self, char): self.char = char
def export(self, outfile, level, namespace_='', name_='CharSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='CharSet')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="CharSet"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='CharSet'):
super(CharSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='CharSet')
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
outfile.write(' char=%s' % (self.gds_format_string(quote_attrib(self.char).encode(ExternalEncoding), input_name='char'), ))
def exportChildren(self, outfile, level, namespace_='', name_='CharSet', fromsubclass_=False):
super(CharSet, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(CharSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='CharSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.char is not None and 'char' not in already_processed:
already_processed.append('char')
showIndent(outfile, level)
outfile.write('char = "%s",\n' % (self.char,))
super(CharSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(CharSet, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('char', node)
if value is not None and 'char' not in already_processed:
already_processed.append('char')
self.char = value
super(CharSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(CharSet, self).buildChildren(child_, node, nodeName_, True)
pass
# end class CharSet
class StateSet(AbstractSet):
subclass = None
superclass = AbstractSet
def __init__(self, about=None, meta=None, label=None, id=None, uncertain_state_set=None, state=None, polymorphic_state_set=None, valueOf_=None):
super(StateSet, self).__init__(about, meta, label, id, )
self.uncertain_state_set = _cast(None, uncertain_state_set)
self.state = _cast(None, state)
self.polymorphic_state_set = _cast(None, polymorphic_state_set)
pass
def factory(*args_, **kwargs_):
if StateSet.subclass:
return StateSet.subclass(*args_, **kwargs_)
else:
return StateSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_uncertain_state_set(self): return self.uncertain_state_set
def set_uncertain_state_set(self, uncertain_state_set): self.uncertain_state_set = uncertain_state_set
def get_state(self): return self.state
def set_state(self, state): self.state = state
def get_polymorphic_state_set(self): return self.polymorphic_state_set
def set_polymorphic_state_set(self, polymorphic_state_set): self.polymorphic_state_set = polymorphic_state_set
def export(self, outfile, level, namespace_='', name_='StateSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='StateSet')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="StateSet"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StateSet'):
super(StateSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StateSet')
if self.uncertain_state_set is not None and 'uncertain_state_set' not in already_processed:
already_processed.append('uncertain_state_set')
outfile.write(' uncertain_state_set=%s' % (self.gds_format_string(quote_attrib(self.uncertain_state_set).encode(ExternalEncoding), input_name='uncertain_state_set'), ))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
outfile.write(' state=%s' % (self.gds_format_string(quote_attrib(self.state).encode(ExternalEncoding), input_name='state'), ))
if self.polymorphic_state_set is not None and 'polymorphic_state_set' not in already_processed:
already_processed.append('polymorphic_state_set')
outfile.write(' polymorphic_state_set=%s' % (self.gds_format_string(quote_attrib(self.polymorphic_state_set).encode(ExternalEncoding), input_name='polymorphic_state_set'), ))
def exportChildren(self, outfile, level, namespace_='', name_='StateSet', fromsubclass_=False):
super(StateSet, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(StateSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StateSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.uncertain_state_set is not None and 'uncertain_state_set' not in already_processed:
already_processed.append('uncertain_state_set')
showIndent(outfile, level)
outfile.write('uncertain_state_set = "%s",\n' % (self.uncertain_state_set,))
if self.state is not None and 'state' not in already_processed:
already_processed.append('state')
showIndent(outfile, level)
outfile.write('state = "%s",\n' % (self.state,))
if self.polymorphic_state_set is not None and 'polymorphic_state_set' not in already_processed:
already_processed.append('polymorphic_state_set')
showIndent(outfile, level)
outfile.write('polymorphic_state_set = "%s",\n' % (self.polymorphic_state_set,))
super(StateSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(StateSet, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('uncertain_state_set', node)
if value is not None and 'uncertain_state_set' not in already_processed:
already_processed.append('uncertain_state_set')
self.uncertain_state_set = value
value = find_attr_value_('state', node)
if value is not None and 'state' not in already_processed:
already_processed.append('state')
self.state = value
value = find_attr_value_('polymorphic_state_set', node)
if value is not None and 'polymorphic_state_set' not in already_processed:
already_processed.append('polymorphic_state_set')
self.polymorphic_state_set = value
super(StateSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(StateSet, self).buildChildren(child_, node, nodeName_, True)
pass
# end class StateSet
class TaxonSet(AbstractSet):
subclass = None
superclass = AbstractSet
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, valueOf_=None):
super(TaxonSet, self).__init__(about, meta, label, id, )
self.otu = _cast(None, otu)
pass
def factory(*args_, **kwargs_):
if TaxonSet.subclass:
return TaxonSet.subclass(*args_, **kwargs_)
else:
return TaxonSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_otu(self): return self.otu
def set_otu(self, otu): self.otu = otu
def export(self, outfile, level, namespace_='', name_='TaxonSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TaxonSet')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="TaxonSet"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TaxonSet'):
super(TaxonSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TaxonSet')
if self.otu is not None and 'otu' not in already_processed:
already_processed.append('otu')
outfile.write(' otu=%s' % (self.gds_format_string(quote_attrib(self.otu).encode(ExternalEncoding), input_name='otu'), ))
def exportChildren(self, outfile, level, namespace_='', name_='TaxonSet', fromsubclass_=False):
super(TaxonSet, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(TaxonSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='TaxonSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.otu is not None and 'otu' not in already_processed:
already_processed.append('otu')
showIndent(outfile, level)
outfile.write('otu = "%s",\n' % (self.otu,))
super(TaxonSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(TaxonSet, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('otu', node)
if value is not None and 'otu' not in already_processed:
already_processed.append('otu')
self.otu = value
super(TaxonSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(TaxonSet, self).buildChildren(child_, node, nodeName_, True)
pass
# end class TaxonSet
class NetworkNode(AbstractNode):
"""A concrete network node implementation."""
subclass = None
superclass = AbstractNode
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, root=False, valueOf_=None):
super(NetworkNode, self).__init__(about, meta, label, id, otu, root, )
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if NetworkNode.subclass:
return NetworkNode.subclass(*args_, **kwargs_)
else:
return NetworkNode(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def export(self, outfile, level, namespace_='', name_='NetworkNode', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='NetworkNode')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='NetworkNode'):
super(NetworkNode, self).exportAttributes(outfile, level, already_processed, namespace_, name_='NetworkNode')
def exportChildren(self, outfile, level, namespace_='', name_='NetworkNode', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
if (
self.meta or
super(NetworkNode, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='NetworkNode'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(NetworkNode, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(NetworkNode, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(NetworkNode, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
# end class NetworkNode
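
# Usage sketch (hand-written, not part of the generated bindings): <meta>
# children are dispatched on their xsi:type attribute in buildChildren above,
# so a parsed NetworkNode can carry a heterogeneous meta list. The default
# fragment below is minimal and namespace-free, which the Tag_pattern_ match
# tolerates; any meta children would need an xsi:type naming a Meta subclass
# defined elsewhere in this module.
def _example_networknode_build(xml_text='<node id="n1" otu="t1"/>'):
    from xml.etree import ElementTree as etree
    node = NetworkNode.factory()
    node.build(etree.fromstring(xml_text))
    return node
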
class StandardMatrixObsRow(AbstractObsRow):
"""This is a row in a matrix of standard data as granular obervations."""
subclass = None
superclass = AbstractObsRow
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, cell=None, set=None, valueOf_=None):
super(StandardMatrixObsRow, self).__init__(about, meta, label, id, otu, cell, set, )
if meta is None:
self.meta = []
else:
self.meta = meta
if cell is None:
self.cell = []
else:
self.cell = cell
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if StandardMatrixObsRow.subclass:
return StandardMatrixObsRow.subclass(*args_, **kwargs_)
else:
return StandardMatrixObsRow(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_cell(self): return self.cell
def set_cell(self, cell): self.cell = cell
def add_cell(self, value): self.cell.append(value)
def insert_cell(self, index, value): self.cell[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='StandardMatrixObsRow', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardMatrixObsRow')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardMatrixObsRow'):
super(StandardMatrixObsRow, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardMatrixObsRow')
def exportChildren(self, outfile, level, namespace_='', name_='StandardMatrixObsRow', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
for cell_ in self.cell:
cell_.export(outfile, level, namespace_, name_='cell')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.meta or
self.cell or
self.set or
super(StandardMatrixObsRow, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StandardMatrixObsRow'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(StandardMatrixObsRow, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(StandardMatrixObsRow, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('cell=[\n')
level += 1
for cell_ in self.cell:
showIndent(outfile, level)
outfile.write('model_.StandardObs(\n')
cell_.exportLiteral(outfile, level, name_='StandardObs')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.CellSet(\n')
set_.exportLiteral(outfile, level, name_='CellSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(StandardMatrixObsRow, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'cell':
obj_ = StandardObs.factory()
obj_.build(child_)
self.cell.append(obj_)
elif nodeName_ == 'set':
obj_ = CellSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class StandardMatrixObsRow
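
# Usage sketch (hand-written, not part of the generated bindings): an obs row
# aggregates one StandardObs cell per character, plus optional CellSet
# groupings, as the buildChildren dispatch above shows. StandardObs is assumed
# to accept char/state keywords mirroring the attributes it exports, and the
# ids are invented.
def _example_standard_obs_row(outfile):
    row = StandardMatrixObsRow.factory(id='row1', otu='otu1')
    row.add_cell(StandardObs.factory(char='c1', state='s1'))
    row.export(outfile, 0, name_='row')
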
class StandardMatrixSeqRow(AbstractSeqRow):
"""This is a row in a matrix of standard data as character sequences."""
subclass = None
superclass = AbstractSeqRow
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, seq=None, valueOf_=None):
super(StandardMatrixSeqRow, self).__init__(about, meta, label, id, otu, seq, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.seq = seq
def factory(*args_, **kwargs_):
if StandardMatrixSeqRow.subclass:
return StandardMatrixSeqRow.subclass(*args_, **kwargs_)
else:
return StandardMatrixSeqRow(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_seq(self): return self.seq
def set_seq(self, seq): self.seq = seq
def validate_StandardSeq(self, value):
# Validate type StandardSeq, a restriction on AbstractTokenList.
pass
def export(self, outfile, level, namespace_='', name_='StandardMatrixSeqRow', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardMatrixSeqRow')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardMatrixSeqRow'):
super(StandardMatrixSeqRow, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardMatrixSeqRow')
def exportChildren(self, outfile, level, namespace_='', name_='StandardMatrixSeqRow', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.seq is not None:
showIndent(outfile, level)
outfile.write('<%sseq>%s</%sseq>\n' % (namespace_, self.gds_format_double_list(self.seq, input_name='seq'), namespace_))
def hasContent_(self):
if (
self.meta or
self.seq is not None or
super(StandardMatrixSeqRow, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StandardMatrixSeqRow'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(StandardMatrixSeqRow, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(StandardMatrixSeqRow, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.seq is not None:
showIndent(outfile, level)
            outfile.write('seq=%s,\n' % (self.seq, ))  # self.seq is a double list, which %e cannot format
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(StandardMatrixSeqRow, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'seq':
seq_ = child_.text
seq_ = self.gds_validate_double_list(seq_, node, 'seq')
self.seq = seq_
self.validate_StandardSeq(self.seq) # validate type StandardSeq
# end class StandardMatrixSeqRow
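
# Usage sketch (hand-written, not part of the generated bindings): unlike the
# token-based rows, StandardMatrixSeqRow pushes self.seq through
# gds_format_double_list on export, so a plain Python list of numbers is the
# expected in-memory representation (inferred from exportChildren above).
def _example_standard_seq_row(outfile):
    row = StandardMatrixSeqRow.factory(id='row1', otu='otu1', seq=[1.0, 0.0, 2.0])
    row.export(outfile, 0, name_='row')
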
class StandardUncertainStateSet(AbstractUncertainStateSet):
"""The StandardUncertainStateSet type is a single uncertain ambiguity
mapping."""
subclass = None
superclass = AbstractUncertainStateSet
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, member=None, valueOf_=None):
super(StandardUncertainStateSet, self).__init__(about, meta, label, id, symbol, member, )
self.symbol = _cast(None, symbol)
if member is None:
self.member = []
else:
self.member = member
def factory(*args_, **kwargs_):
if StandardUncertainStateSet.subclass:
return StandardUncertainStateSet.subclass(*args_, **kwargs_)
else:
return StandardUncertainStateSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
def get_symbol(self): return self.symbol
def set_symbol(self, symbol): self.symbol = symbol
def export(self, outfile, level, namespace_='', name_='StandardUncertainStateSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardUncertainStateSet')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardUncertainStateSet'):
super(StandardUncertainStateSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardUncertainStateSet')
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
outfile.write(' symbol=%s' % (self.gds_format_string(quote_attrib(self.symbol).encode(ExternalEncoding), input_name='symbol'), ))
def exportChildren(self, outfile, level, namespace_='', name_='StandardUncertainStateSet', fromsubclass_=False):
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member')
def hasContent_(self):
if (
self.member or
super(StandardUncertainStateSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StandardUncertainStateSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
showIndent(outfile, level)
outfile.write('symbol = "%s",\n' % (self.symbol,))
super(StandardUncertainStateSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(StandardUncertainStateSet, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('member=[\n')
level += 1
for member_ in self.member:
showIndent(outfile, level)
outfile.write('model_.StandardMapping(\n')
member_.exportLiteral(outfile, level, name_='StandardMapping')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('symbol', node)
if value is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
self.symbol = value
super(StandardUncertainStateSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'member':
obj_ = StandardMapping.factory()
obj_.build(child_)
self.member.append(obj_)
# end class StandardUncertainStateSet
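
# Usage sketch (hand-written, not part of the generated bindings): an
# uncertain state set pairs a symbol with StandardMapping members, per the
# buildChildren dispatch above. StandardMapping is assumed to take a `state`
# keyword naming the member state; all ids and symbols are invented.
def _example_standard_uncertain_set(outfile):
    uss = StandardUncertainStateSet.factory(id='s4', symbol='4')
    uss.add_member(StandardMapping.factory(state='s1'))
    uss.add_member(StandardMapping.factory(state='s2'))
    uss.export(outfile, 0, name_='uncertain_state_set')
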
class RNAMatrixObsRow(AbstractObsRow):
"""This is a row in a matrix of RNA data containing granular
observations."""
subclass = None
superclass = AbstractObsRow
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, cell=None, set=None, valueOf_=None):
super(RNAMatrixObsRow, self).__init__(about, meta, label, id, otu, cell, set, )
if meta is None:
self.meta = []
else:
self.meta = meta
if cell is None:
self.cell = []
else:
self.cell = cell
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if RNAMatrixObsRow.subclass:
return RNAMatrixObsRow.subclass(*args_, **kwargs_)
else:
return RNAMatrixObsRow(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_cell(self): return self.cell
def set_cell(self, cell): self.cell = cell
def add_cell(self, value): self.cell.append(value)
def insert_cell(self, index, value): self.cell[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='RNAMatrixObsRow', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RNAMatrixObsRow')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RNAMatrixObsRow'):
super(RNAMatrixObsRow, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RNAMatrixObsRow')
def exportChildren(self, outfile, level, namespace_='', name_='RNAMatrixObsRow', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
for cell_ in self.cell:
cell_.export(outfile, level, namespace_, name_='cell')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.meta or
self.cell or
self.set or
super(RNAMatrixObsRow, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RNAMatrixObsRow'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RNAMatrixObsRow, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RNAMatrixObsRow, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('cell=[\n')
level += 1
for cell_ in self.cell:
showIndent(outfile, level)
outfile.write('model_.RNAObs(\n')
cell_.exportLiteral(outfile, level, name_='RNAObs')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.CellSet(\n')
set_.exportLiteral(outfile, level, name_='CellSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RNAMatrixObsRow, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'cell':
obj_ = RNAObs.factory()
obj_.build(child_)
self.cell.append(obj_)
elif nodeName_ == 'set':
obj_ = CellSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class RNAMatrixObsRow
class RNAMatrixSeqRow(AbstractSeqRow):
"""This is a row in a matrix of RNA data containing raw sequence data."""
subclass = None
superclass = AbstractSeqRow
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, seq=None, valueOf_=None):
super(RNAMatrixSeqRow, self).__init__(about, meta, label, id, otu, seq, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.seq = seq
def factory(*args_, **kwargs_):
if RNAMatrixSeqRow.subclass:
return RNAMatrixSeqRow.subclass(*args_, **kwargs_)
else:
return RNAMatrixSeqRow(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_seq(self): return self.seq
def set_seq(self, seq): self.seq = seq
def validate_RNASeq(self, value):
# Validate type RNASeq, a restriction on AbstractSeq.
pass
def export(self, outfile, level, namespace_='', name_='RNAMatrixSeqRow', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RNAMatrixSeqRow')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RNAMatrixSeqRow'):
super(RNAMatrixSeqRow, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RNAMatrixSeqRow')
def exportChildren(self, outfile, level, namespace_='', name_='RNAMatrixSeqRow', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.seq is not None:
showIndent(outfile, level)
outfile.write('<%sseq>%s</%sseq>\n' % (namespace_, self.gds_format_string(quote_xml(self.seq).encode(ExternalEncoding), input_name='seq'), namespace_))
def hasContent_(self):
if (
self.meta or
self.seq is not None or
super(RNAMatrixSeqRow, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RNAMatrixSeqRow'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RNAMatrixSeqRow, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RNAMatrixSeqRow, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.seq is not None:
showIndent(outfile, level)
outfile.write('seq=%s,\n' % quote_python(self.seq).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RNAMatrixSeqRow, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'seq':
seq_ = child_.text
seq_ = self.gds_validate_string(seq_, node, 'seq')
self.seq = seq_
self.validate_RNASeq(self.seq) # validate type RNASeq
# end class RNAMatrixSeqRow
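
# Usage sketch (hand-written, not part of the generated bindings): build()
# drives the whole parse, so deserializing a row only needs an ElementTree
# element. The default fragment is minimal and namespace-free, which the
# Tag_pattern_ match in build() tolerates.
def _example_parse_rna_seq_row(xml_text='<row id="r1" otu="t1"><seq>ACGU</seq></row>'):
    from xml.etree import ElementTree as etree
    row = RNAMatrixSeqRow.factory()
    row.build(etree.fromstring(xml_text))
    return row.get_seq()  # -> 'ACGU'
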
class RNAUncertainStateSet(AbstractUncertainStateSet):
"""The RNAUncertainStateSet describes a single uncertain IUPAC
ambiguity mapping."""
subclass = None
superclass = AbstractUncertainStateSet
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, member=None, valueOf_=None):
super(RNAUncertainStateSet, self).__init__(about, meta, label, id, symbol, member, )
self.symbol = _cast(None, symbol)
if member is None:
self.member = []
else:
self.member = member
def factory(*args_, **kwargs_):
if RNAUncertainStateSet.subclass:
return RNAUncertainStateSet.subclass(*args_, **kwargs_)
else:
return RNAUncertainStateSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
def get_symbol(self): return self.symbol
def set_symbol(self, symbol): self.symbol = symbol
def validate_RNAToken(self, value):
# Validate type RNAToken, a restriction on AbstractSymbol.
pass
def export(self, outfile, level, namespace_='', name_='RNAUncertainStateSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RNAUncertainStateSet')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RNAUncertainStateSet'):
super(RNAUncertainStateSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RNAUncertainStateSet')
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
outfile.write(' symbol=%s' % (quote_attrib(self.symbol), ))
def exportChildren(self, outfile, level, namespace_='', name_='RNAUncertainStateSet', fromsubclass_=False):
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member')
def hasContent_(self):
if (
self.member or
super(RNAUncertainStateSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RNAUncertainStateSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
showIndent(outfile, level)
outfile.write('symbol = "%s",\n' % (self.symbol,))
super(RNAUncertainStateSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RNAUncertainStateSet, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('member=[\n')
level += 1
for member_ in self.member:
showIndent(outfile, level)
outfile.write('model_.RNAMapping(\n')
member_.exportLiteral(outfile, level, name_='RNAMapping')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('symbol', node)
if value is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
self.symbol = value
self.validate_RNAToken(self.symbol) # validate type RNAToken
super(RNAUncertainStateSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'member':
obj_ = RNAMapping.factory()
obj_.build(child_)
self.member.append(obj_)
# end class RNAUncertainStateSet
class RestrictionMatrixObsRow(AbstractObsRow):
"""This is a row in a matrix of restriction site data as granular
obervations."""
subclass = None
superclass = AbstractObsRow
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, cell=None, set=None, valueOf_=None):
super(RestrictionMatrixObsRow, self).__init__(about, meta, label, id, otu, cell, set, )
if meta is None:
self.meta = []
else:
self.meta = meta
if cell is None:
self.cell = []
else:
self.cell = cell
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if RestrictionMatrixObsRow.subclass:
return RestrictionMatrixObsRow.subclass(*args_, **kwargs_)
else:
return RestrictionMatrixObsRow(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_cell(self): return self.cell
def set_cell(self, cell): self.cell = cell
def add_cell(self, value): self.cell.append(value)
def insert_cell(self, index, value): self.cell[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='RestrictionMatrixObsRow', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionMatrixObsRow')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RestrictionMatrixObsRow'):
super(RestrictionMatrixObsRow, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionMatrixObsRow')
def exportChildren(self, outfile, level, namespace_='', name_='RestrictionMatrixObsRow', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
for cell_ in self.cell:
cell_.export(outfile, level, namespace_, name_='cell')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.meta or
self.cell or
self.set or
super(RestrictionMatrixObsRow, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RestrictionMatrixObsRow'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RestrictionMatrixObsRow, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RestrictionMatrixObsRow, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('cell=[\n')
level += 1
for cell_ in self.cell:
showIndent(outfile, level)
outfile.write('model_.RestrictionObs(\n')
cell_.exportLiteral(outfile, level, name_='RestrictionObs')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.CellSet(\n')
set_.exportLiteral(outfile, level, name_='CellSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RestrictionMatrixObsRow, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'cell':
obj_ = RestrictionObs.factory()
obj_.build(child_)
self.cell.append(obj_)
elif nodeName_ == 'set':
obj_ = CellSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class RestrictionMatrixObsRow
class RestrictionMatrixSeqRow(AbstractSeqRow):
"""This is a row in a matrix of restriction site data as character
sequences."""
subclass = None
superclass = AbstractSeqRow
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, seq=None, valueOf_=None):
super(RestrictionMatrixSeqRow, self).__init__(about, meta, label, id, otu, seq, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.seq = seq
def factory(*args_, **kwargs_):
if RestrictionMatrixSeqRow.subclass:
return RestrictionMatrixSeqRow.subclass(*args_, **kwargs_)
else:
return RestrictionMatrixSeqRow(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_seq(self): return self.seq
def set_seq(self, seq): self.seq = seq
def validate_RestrictionSeq(self, value):
# Validate type RestrictionSeq, a restriction on xs:string.
pass
def export(self, outfile, level, namespace_='', name_='RestrictionMatrixSeqRow', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionMatrixSeqRow')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RestrictionMatrixSeqRow'):
super(RestrictionMatrixSeqRow, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionMatrixSeqRow')
def exportChildren(self, outfile, level, namespace_='', name_='RestrictionMatrixSeqRow', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.seq is not None:
showIndent(outfile, level)
outfile.write('<%sseq>%s</%sseq>\n' % (namespace_, self.gds_format_string(quote_xml(self.seq).encode(ExternalEncoding), input_name='seq'), namespace_))
def hasContent_(self):
if (
self.meta or
self.seq is not None or
super(RestrictionMatrixSeqRow, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RestrictionMatrixSeqRow'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RestrictionMatrixSeqRow, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RestrictionMatrixSeqRow, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.seq is not None:
showIndent(outfile, level)
outfile.write('seq=%s,\n' % quote_python(self.seq).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RestrictionMatrixSeqRow, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'seq':
seq_ = child_.text
seq_ = self.gds_validate_string(seq_, node, 'seq')
self.seq = seq_
self.validate_RestrictionSeq(self.seq) # validate type RestrictionSeq
# end class RestrictionMatrixSeqRow
class AAMatrixObsRow(AbstractObsRow):
"""This is a row in a matrix of amino acid data containing granular
observations."""
subclass = None
superclass = AbstractObsRow
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, cell=None, set=None, valueOf_=None):
super(AAMatrixObsRow, self).__init__(about, meta, label, id, otu, cell, set, )
if meta is None:
self.meta = []
else:
self.meta = meta
if cell is None:
self.cell = []
else:
self.cell = cell
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if AAMatrixObsRow.subclass:
return AAMatrixObsRow.subclass(*args_, **kwargs_)
else:
return AAMatrixObsRow(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_cell(self): return self.cell
def set_cell(self, cell): self.cell = cell
def add_cell(self, value): self.cell.append(value)
def insert_cell(self, index, value): self.cell[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='AAMatrixObsRow', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AAMatrixObsRow')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AAMatrixObsRow'):
super(AAMatrixObsRow, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AAMatrixObsRow')
def exportChildren(self, outfile, level, namespace_='', name_='AAMatrixObsRow', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
for cell_ in self.cell:
cell_.export(outfile, level, namespace_, name_='cell')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.meta or
self.cell or
self.set or
super(AAMatrixObsRow, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AAMatrixObsRow'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AAMatrixObsRow, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AAMatrixObsRow, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('cell=[\n')
level += 1
for cell_ in self.cell:
showIndent(outfile, level)
outfile.write('model_.AAObs(\n')
cell_.exportLiteral(outfile, level, name_='AAObs')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.CellSet(\n')
set_.exportLiteral(outfile, level, name_='CellSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AAMatrixObsRow, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'cell':
obj_ = AAObs.factory()
obj_.build(child_)
self.cell.append(obj_)
elif nodeName_ == 'set':
obj_ = CellSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class AAMatrixObsRow
class AAMatrixSeqRow(AbstractSeqRow):
"""This is a row in a matrix of amino acid data containing raw sequence
data."""
subclass = None
superclass = AbstractSeqRow
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, seq=None, valueOf_=None):
super(AAMatrixSeqRow, self).__init__(about, meta, label, id, otu, seq, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.seq = seq
def factory(*args_, **kwargs_):
if AAMatrixSeqRow.subclass:
return AAMatrixSeqRow.subclass(*args_, **kwargs_)
else:
return AAMatrixSeqRow(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_seq(self): return self.seq
def set_seq(self, seq): self.seq = seq
def validate_AASeq(self, value):
# Validate type AASeq, a restriction on AbstractSeq.
pass
def export(self, outfile, level, namespace_='', name_='AAMatrixSeqRow', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AAMatrixSeqRow')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AAMatrixSeqRow'):
super(AAMatrixSeqRow, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AAMatrixSeqRow')
def exportChildren(self, outfile, level, namespace_='', name_='AAMatrixSeqRow', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.seq is not None:
showIndent(outfile, level)
outfile.write('<%sseq>%s</%sseq>\n' % (namespace_, self.gds_format_string(quote_xml(self.seq).encode(ExternalEncoding), input_name='seq'), namespace_))
def hasContent_(self):
if (
self.meta or
self.seq is not None or
super(AAMatrixSeqRow, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AAMatrixSeqRow'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AAMatrixSeqRow, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AAMatrixSeqRow, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.seq is not None:
showIndent(outfile, level)
outfile.write('seq=%s,\n' % quote_python(self.seq).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AAMatrixSeqRow, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'seq':
seq_ = child_.text
seq_ = self.gds_validate_string(seq_, node, 'seq')
self.seq = seq_
self.validate_AASeq(self.seq) # validate type AASeq
# end class AAMatrixSeqRow
class AAUncertainStateSet(AbstractUncertainStateSet):
"""The AAUncertainStateSet defines an uncertain ambiguity mapping."""
subclass = None
superclass = AbstractUncertainStateSet
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, member=None, valueOf_=None):
super(AAUncertainStateSet, self).__init__(about, meta, label, id, symbol, member, )
self.symbol = _cast(None, symbol)
if member is None:
self.member = []
else:
self.member = member
def factory(*args_, **kwargs_):
if AAUncertainStateSet.subclass:
return AAUncertainStateSet.subclass(*args_, **kwargs_)
else:
return AAUncertainStateSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
def get_symbol(self): return self.symbol
def set_symbol(self, symbol): self.symbol = symbol
def validate_AAToken(self, value):
# Validate type AAToken, a restriction on AbstractSymbol.
pass
def export(self, outfile, level, namespace_='', name_='AAUncertainStateSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AAUncertainStateSet')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AAUncertainStateSet'):
super(AAUncertainStateSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AAUncertainStateSet')
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
outfile.write(' symbol=%s' % (quote_attrib(self.symbol), ))
def exportChildren(self, outfile, level, namespace_='', name_='AAUncertainStateSet', fromsubclass_=False):
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member')
def hasContent_(self):
if (
self.member or
super(AAUncertainStateSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AAUncertainStateSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
showIndent(outfile, level)
outfile.write('symbol = "%s",\n' % (self.symbol,))
super(AAUncertainStateSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AAUncertainStateSet, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('member=[\n')
level += 1
for member_ in self.member:
showIndent(outfile, level)
outfile.write('model_.AAMapping(\n')
member_.exportLiteral(outfile, level, name_='AAMapping')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('symbol', node)
if value is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
self.symbol = value
self.validate_AAToken(self.symbol) # validate type AAToken
super(AAUncertainStateSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'member':
obj_ = AAMapping.factory()
obj_.build(child_)
self.member.append(obj_)
# end class AAUncertainStateSet
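# Illustrative usage sketch (hypothetical): an uncertain amino-acid state
# set such as IUPAC 'B' (Asn or Asp). The 'state' keyword on AAMapping is
# assumed from the NeXML <member state="..."/> element; the state ids are
# placeholders.
def _example_aa_uncertain_state_set():
    import sys
    uss = AAUncertainStateSet.factory(id='sB', symbol='B')
    uss.add_member(AAMapping.factory(state='sN'))
    uss.add_member(AAMapping.factory(state='sD'))
    uss.export(sys.stdout, 0, name_='uncertain_state_set')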
class DNAMatrixObsRow(AbstractObsRow):
"""This is a row in a matrix of DNA data containing granular
observations."""
subclass = None
superclass = AbstractObsRow
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, cell=None, set=None, valueOf_=None):
super(DNAMatrixObsRow, self).__init__(about, meta, label, id, otu, cell, set, )
if meta is None:
self.meta = []
else:
self.meta = meta
if cell is None:
self.cell = []
else:
self.cell = cell
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if DNAMatrixObsRow.subclass:
return DNAMatrixObsRow.subclass(*args_, **kwargs_)
else:
return DNAMatrixObsRow(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_cell(self): return self.cell
def set_cell(self, cell): self.cell = cell
def add_cell(self, value): self.cell.append(value)
def insert_cell(self, index, value): self.cell[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='DNAMatrixObsRow', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DNAMatrixObsRow')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DNAMatrixObsRow'):
super(DNAMatrixObsRow, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DNAMatrixObsRow')
def exportChildren(self, outfile, level, namespace_='', name_='DNAMatrixObsRow', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
for cell_ in self.cell:
cell_.export(outfile, level, namespace_, name_='cell')
for set_ in self.set:
set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
if (
self.meta or
self.cell or
self.set or
super(DNAMatrixObsRow, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DNAMatrixObsRow'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(DNAMatrixObsRow, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DNAMatrixObsRow, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('cell=[\n')
level += 1
for cell_ in self.cell:
showIndent(outfile, level)
outfile.write('model_.DNAObs(\n')
cell_.exportLiteral(outfile, level, name_='DNAObs')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('set=[\n')
level += 1
for set_ in self.set:
showIndent(outfile, level)
outfile.write('model_.CellSet(\n')
set_.exportLiteral(outfile, level, name_='CellSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(DNAMatrixObsRow, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'cell':
obj_ = DNAObs.factory()
obj_.build(child_)
self.cell.append(obj_)
elif nodeName_ == 'set':
obj_ = CellSet.factory()
obj_.build(child_)
self.set.append(obj_)
# end class DNAMatrixObsRow
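# Illustrative usage sketch (hypothetical): a granular DNA observation row.
# Each cell pairs a character (column) id with a state id, mirroring
# <cell char="..." state="..."/> in NeXML; the char/state keywords on DNAObs
# are assumed from that element and all ids are placeholders.
def _example_dna_matrix_obs_row():
    import sys
    row = DNAMatrixObsRow.factory(id='row1', otu='otu1')
    row.add_cell(DNAObs.factory(char='c1', state='sA'))
    row.add_cell(DNAObs.factory(char='c2', state='sG'))
    row.export(sys.stdout, 0, name_='row')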
class DNAMatrixSeqRow(AbstractSeqRow):
"""This is a row in a matrix of DNA data containing raw sequence data."""
subclass = None
superclass = AbstractSeqRow
def __init__(self, about=None, meta=None, label=None, id=None, otu=None, seq=None, valueOf_=None):
super(DNAMatrixSeqRow, self).__init__(about, meta, label, id, otu, seq, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.seq = seq
def factory(*args_, **kwargs_):
if DNAMatrixSeqRow.subclass:
return DNAMatrixSeqRow.subclass(*args_, **kwargs_)
else:
return DNAMatrixSeqRow(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_seq(self): return self.seq
def set_seq(self, seq): self.seq = seq
def validate_DNASeq(self, value):
# Validate type DNASeq, a restriction on AbstractSeq.
pass
def export(self, outfile, level, namespace_='', name_='DNAMatrixSeqRow', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DNAMatrixSeqRow')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DNAMatrixSeqRow'):
super(DNAMatrixSeqRow, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DNAMatrixSeqRow')
def exportChildren(self, outfile, level, namespace_='', name_='DNAMatrixSeqRow', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.seq is not None:
showIndent(outfile, level)
outfile.write('<%sseq>%s</%sseq>\n' % (namespace_, self.gds_format_string(quote_xml(self.seq).encode(ExternalEncoding), input_name='seq'), namespace_))
def hasContent_(self):
if (
self.meta or
self.seq is not None or
super(DNAMatrixSeqRow, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DNAMatrixSeqRow'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(DNAMatrixSeqRow, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DNAMatrixSeqRow, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.seq is not None:
showIndent(outfile, level)
outfile.write('seq=%s,\n' % quote_python(self.seq).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(DNAMatrixSeqRow, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'seq':
seq_ = child_.text
seq_ = self.gds_validate_string(seq_, node, 'seq')
self.seq = seq_
self.validate_DNASeq(self.seq) # validate type DNASeq
# end class DNAMatrixSeqRow
class DNAUncertainStateSet(AbstractUncertainStateSet):
"""The DNAUncertainStateSet type defines an IUPAC ambiguity mapping. It
may enclose zero or more AbstractMapping elements to resolve
ambiguities."""
subclass = None
superclass = AbstractUncertainStateSet
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, member=None, valueOf_=None):
super(DNAUncertainStateSet, self).__init__(about, meta, label, id, symbol, member, )
self.symbol = _cast(None, symbol)
if member is None:
self.member = []
else:
self.member = member
def factory(*args_, **kwargs_):
if DNAUncertainStateSet.subclass:
return DNAUncertainStateSet.subclass(*args_, **kwargs_)
else:
return DNAUncertainStateSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
def get_symbol(self): return self.symbol
def set_symbol(self, symbol): self.symbol = symbol
def validate_DNAToken(self, value):
# Validate type DNAToken, a restriction on AbstractSymbol.
pass
def export(self, outfile, level, namespace_='', name_='DNAUncertainStateSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DNAUncertainStateSet')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DNAUncertainStateSet'):
super(DNAUncertainStateSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DNAUncertainStateSet')
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
outfile.write(' symbol=%s' % (quote_attrib(self.symbol), ))
def exportChildren(self, outfile, level, namespace_='', name_='DNAUncertainStateSet', fromsubclass_=False):
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member')
def hasContent_(self):
if (
self.member or
super(DNAUncertainStateSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DNAUncertainStateSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
showIndent(outfile, level)
outfile.write('symbol = "%s",\n' % (self.symbol,))
super(DNAUncertainStateSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DNAUncertainStateSet, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('member=[\n')
level += 1
for member_ in self.member:
showIndent(outfile, level)
outfile.write('model_.DNAMapping(\n')
member_.exportLiteral(outfile, level, name_='DNAMapping')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('symbol', node)
if value is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
self.symbol = value
self.validate_DNAToken(self.symbol) # validate type DNAToken
super(DNAUncertainStateSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'member':
obj_ = DNAMapping.factory()
obj_.build(child_)
self.member.append(obj_)
# end class DNAUncertainStateSet
class AbstractCells(AbstractBlock):
"""The AbstractSeqBlock type is the superclass for character blocks
that consist of granular character state observations."""
subclass = None
superclass = AbstractBlock
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, format=None, matrix=None, valueOf_=None):
super(AbstractCells, self).__init__(about, meta, label, id, otus, format, )
self.matrix = matrix
def factory(*args_, **kwargs_):
if AbstractCells.subclass:
return AbstractCells.subclass(*args_, **kwargs_)
else:
return AbstractCells(*args_, **kwargs_)
factory = staticmethod(factory)
def get_matrix(self): return self.matrix
def set_matrix(self, matrix): self.matrix = matrix
def export(self, outfile, level, namespace_='', name_='AbstractCells', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractCells')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractCells"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractCells'):
super(AbstractCells, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractCells')
def exportChildren(self, outfile, level, namespace_='', name_='AbstractCells', fromsubclass_=False):
super(AbstractCells, self).exportChildren(outfile, level, namespace_, name_, True)
        if self.matrix is not None:
            self.matrix.export(outfile, level, namespace_, name_='matrix')
def hasContent_(self):
if (
self.matrix is not None or
super(AbstractCells, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractCells'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AbstractCells, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractCells, self).exportLiteralChildren(outfile, level, name_)
        if self.matrix is not None:
            showIndent(outfile, level)
            outfile.write('matrix=model_.AbstractObsMatrix(\n')
            self.matrix.exportLiteral(outfile, level, name_='matrix')
            showIndent(outfile, level)
            outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AbstractCells, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'matrix':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <matrix> element')
self.set_matrix(obj_)
super(AbstractCells, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractCells
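# Note on the dispatch in buildChildren above: the concrete matrix class is
# resolved at runtime from the child element's xsi:type attribute via
# globals(), so e.g. <matrix xsi:type="nex:DNAObsMatrix"> yields a
# DNAObsMatrix instance. A minimal sketch of that lookup, with a
# hypothetical type token:
def _example_xsi_type_dispatch(type_name_='ContinuousObsMatrix'):
    type_names_ = type_name_.split(':')  # strip optional namespace prefix
    class_ = globals()[type_names_[-1]]
    return class_.factory()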
class AbstractSeqs(AbstractBlock):
"""The AbstractSeqBlock type is the superclass for character blocks
that consist of raw character sequences."""
subclass = None
superclass = AbstractBlock
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, format=None, matrix=None, valueOf_=None):
super(AbstractSeqs, self).__init__(about, meta, label, id, otus, format, )
self.matrix = matrix
def factory(*args_, **kwargs_):
if AbstractSeqs.subclass:
return AbstractSeqs.subclass(*args_, **kwargs_)
else:
return AbstractSeqs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_matrix(self): return self.matrix
def set_matrix(self, matrix): self.matrix = matrix
def export(self, outfile, level, namespace_='', name_='AbstractSeqs', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractSeqs')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractSeqs"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractSeqs'):
super(AbstractSeqs, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractSeqs')
def exportChildren(self, outfile, level, namespace_='', name_='AbstractSeqs', fromsubclass_=False):
super(AbstractSeqs, self).exportChildren(outfile, level, namespace_, name_, True)
        if self.matrix is not None:
            self.matrix.export(outfile, level, namespace_, name_='matrix')
def hasContent_(self):
if (
self.matrix is not None or
super(AbstractSeqs, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractSeqs'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AbstractSeqs, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractSeqs, self).exportLiteralChildren(outfile, level, name_)
        if self.matrix is not None:
            showIndent(outfile, level)
            outfile.write('matrix=model_.AbstractSeqMatrix(\n')
            self.matrix.exportLiteral(outfile, level, name_='matrix')
            showIndent(outfile, level)
            outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AbstractSeqs, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'matrix':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <matrix> element')
self.set_matrix(obj_)
super(AbstractSeqs, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractSeqs
class AbstractPolymorphicStateSet(AbstractUncertainStateSet):
"""The AbstractPolymorphicStateSet type is the super-class for a
polymorphic state set definition. The element has a required
AbstractSymbol attribute that in restricted concrete subclasses
must be of a sensible type such as a single IUPAC character. It
may enclose zero or more AbstractMapping elements to resolve
ambiguities."""
subclass = None
superclass = AbstractUncertainStateSet
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, member=None, uncertain_state_set=None, valueOf_=None):
super(AbstractPolymorphicStateSet, self).__init__(about, meta, label, id, symbol, member, )
if uncertain_state_set is None:
self.uncertain_state_set = []
else:
self.uncertain_state_set = uncertain_state_set
def factory(*args_, **kwargs_):
if AbstractPolymorphicStateSet.subclass:
return AbstractPolymorphicStateSet.subclass(*args_, **kwargs_)
else:
return AbstractPolymorphicStateSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_uncertain_state_set(self): return self.uncertain_state_set
def set_uncertain_state_set(self, uncertain_state_set): self.uncertain_state_set = uncertain_state_set
def add_uncertain_state_set(self, value): self.uncertain_state_set.append(value)
def insert_uncertain_state_set(self, index, value): self.uncertain_state_set[index] = value
def export(self, outfile, level, namespace_='', name_='AbstractPolymorphicStateSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractPolymorphicStateSet')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractPolymorphicStateSet"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractPolymorphicStateSet'):
super(AbstractPolymorphicStateSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractPolymorphicStateSet')
def exportChildren(self, outfile, level, namespace_='', name_='AbstractPolymorphicStateSet', fromsubclass_=False):
super(AbstractPolymorphicStateSet, self).exportChildren(outfile, level, namespace_, name_, True)
for uncertain_state_set_ in self.get_uncertain_state_set():
uncertain_state_set_.export(outfile, level, namespace_, name_='uncertain_state_set')
def hasContent_(self):
if (
self.uncertain_state_set or
super(AbstractPolymorphicStateSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractPolymorphicStateSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AbstractPolymorphicStateSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractPolymorphicStateSet, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('uncertain_state_set=[\n')
level += 1
for uncertain_state_set_ in self.uncertain_state_set:
showIndent(outfile, level)
outfile.write('model_.AbstractUncertainStateSet(\n')
uncertain_state_set_.exportLiteral(outfile, level, name_='AbstractUncertainStateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AbstractPolymorphicStateSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'uncertain_state_set':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <uncertain_state_set> element')
self.uncertain_state_set.append(obj_)
super(AbstractPolymorphicStateSet, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractPolymorphicStateSet
class ContinuousCells(AbstractCells):
"""A continuous characters block consisting of granular cells preceded
by metadata."""
subclass = None
superclass = AbstractCells
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, format=None, matrix=None, valueOf_=None):
super(ContinuousCells, self).__init__(about, meta, label, id, otus, format, matrix, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.format = format
self.matrix = matrix
def factory(*args_, **kwargs_):
if ContinuousCells.subclass:
return ContinuousCells.subclass(*args_, **kwargs_)
else:
return ContinuousCells(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_format(self): return self.format
def set_format(self, format): self.format = format
def get_matrix(self): return self.matrix
def set_matrix(self, matrix): self.matrix = matrix
def export(self, outfile, level, namespace_='', name_='ContinuousCells', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousCells')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ContinuousCells'):
super(ContinuousCells, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousCells')
def exportChildren(self, outfile, level, namespace_='', name_='ContinuousCells', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.format:
self.format.export(outfile, level, namespace_, name_='format', )
if self.matrix:
self.matrix.export(outfile, level, namespace_, name_='matrix', )
def hasContent_(self):
if (
self.meta or
self.format is not None or
self.matrix is not None or
super(ContinuousCells, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ContinuousCells'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ContinuousCells, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ContinuousCells, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.format is not None:
showIndent(outfile, level)
outfile.write('format=model_.ContinuousFormat(\n')
self.format.exportLiteral(outfile, level, name_='format')
showIndent(outfile, level)
outfile.write('),\n')
if self.matrix is not None:
showIndent(outfile, level)
outfile.write('matrix=model_.ContinuousObsMatrix(\n')
self.matrix.exportLiteral(outfile, level, name_='matrix')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ContinuousCells, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'format':
obj_ = ContinuousFormat.factory()
obj_.build(child_)
self.set_format(obj_)
elif nodeName_ == 'matrix':
obj_ = ContinuousObsMatrix.factory()
obj_.build(child_)
self.set_matrix(obj_)
# end class ContinuousCells
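# Illustrative usage sketch (hypothetical): assemble a ContinuousCells block
# from empty format and matrix children. The no-argument factory calls work
# because every constructor parameter in these bindings defaults to None;
# real data would be attached to the matrix rows before export.
def _example_continuous_cells():
    import sys
    block = ContinuousCells.factory(id='chars1', otus='otus1')
    block.set_format(ContinuousFormat.factory())
    block.set_matrix(ContinuousObsMatrix.factory())
    block.export(sys.stdout, 0, name_='characters')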
class ContinuousSeqs(AbstractSeqs):
"""A continuous characters block consisting of float sequences preceded
by metadata."""
subclass = None
superclass = AbstractSeqs
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, format=None, matrix=None, valueOf_=None):
super(ContinuousSeqs, self).__init__(about, meta, label, id, otus, format, matrix, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.format = format
self.matrix = matrix
def factory(*args_, **kwargs_):
if ContinuousSeqs.subclass:
return ContinuousSeqs.subclass(*args_, **kwargs_)
else:
return ContinuousSeqs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_format(self): return self.format
def set_format(self, format): self.format = format
def get_matrix(self): return self.matrix
def set_matrix(self, matrix): self.matrix = matrix
def export(self, outfile, level, namespace_='', name_='ContinuousSeqs', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousSeqs')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ContinuousSeqs'):
super(ContinuousSeqs, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousSeqs')
def exportChildren(self, outfile, level, namespace_='', name_='ContinuousSeqs', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.format:
self.format.export(outfile, level, namespace_, name_='format', )
if self.matrix:
self.matrix.export(outfile, level, namespace_, name_='matrix', )
def hasContent_(self):
if (
self.meta or
self.format is not None or
self.matrix is not None or
super(ContinuousSeqs, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ContinuousSeqs'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ContinuousSeqs, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ContinuousSeqs, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.format is not None:
showIndent(outfile, level)
outfile.write('format=model_.ContinuousFormat(\n')
self.format.exportLiteral(outfile, level, name_='format')
showIndent(outfile, level)
outfile.write('),\n')
if self.matrix is not None:
showIndent(outfile, level)
outfile.write('matrix=model_.ContinuousSeqMatrix(\n')
self.matrix.exportLiteral(outfile, level, name_='matrix')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ContinuousSeqs, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'format':
obj_ = ContinuousFormat.factory()
obj_.build(child_)
self.set_format(obj_)
elif nodeName_ == 'matrix':
obj_ = ContinuousSeqMatrix.factory()
obj_.build(child_)
self.set_matrix(obj_)
# end class ContinuousSeqs
class StandardCells(AbstractCells):
"""A standard characters block consisting of granular cells preceded by
metadata."""
subclass = None
superclass = AbstractCells
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, format=None, matrix=None, valueOf_=None):
super(StandardCells, self).__init__(about, meta, label, id, otus, format, matrix, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.format = format
self.matrix = matrix
def factory(*args_, **kwargs_):
if StandardCells.subclass:
return StandardCells.subclass(*args_, **kwargs_)
else:
return StandardCells(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_format(self): return self.format
def set_format(self, format): self.format = format
def get_matrix(self): return self.matrix
def set_matrix(self, matrix): self.matrix = matrix
def export(self, outfile, level, namespace_='', name_='StandardCells', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardCells')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardCells'):
super(StandardCells, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardCells')
def exportChildren(self, outfile, level, namespace_='', name_='StandardCells', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.format:
self.format.export(outfile, level, namespace_, name_='format', )
if self.matrix:
self.matrix.export(outfile, level, namespace_, name_='matrix', )
def hasContent_(self):
if (
self.meta or
self.format is not None or
self.matrix is not None or
super(StandardCells, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StandardCells'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(StandardCells, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(StandardCells, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.format is not None:
showIndent(outfile, level)
outfile.write('format=model_.StandardFormat(\n')
self.format.exportLiteral(outfile, level, name_='format')
showIndent(outfile, level)
outfile.write('),\n')
if self.matrix is not None:
showIndent(outfile, level)
outfile.write('matrix=model_.StandardObsMatrix(\n')
self.matrix.exportLiteral(outfile, level, name_='matrix')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(StandardCells, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'format':
obj_ = StandardFormat.factory()
obj_.build(child_)
self.set_format(obj_)
elif nodeName_ == 'matrix':
obj_ = StandardObsMatrix.factory()
obj_.build(child_)
self.set_matrix(obj_)
# end class StandardCells
class StandardSeqs(AbstractSeqs):
"""A standard characters block consisting of sequences preceded by
metadata."""
subclass = None
superclass = AbstractSeqs
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, format=None, matrix=None, valueOf_=None):
super(StandardSeqs, self).__init__(about, meta, label, id, otus, format, matrix, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.format = format
self.matrix = matrix
def factory(*args_, **kwargs_):
if StandardSeqs.subclass:
return StandardSeqs.subclass(*args_, **kwargs_)
else:
return StandardSeqs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_format(self): return self.format
def set_format(self, format): self.format = format
def get_matrix(self): return self.matrix
def set_matrix(self, matrix): self.matrix = matrix
def export(self, outfile, level, namespace_='', name_='StandardSeqs', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardSeqs')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardSeqs'):
super(StandardSeqs, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardSeqs')
def exportChildren(self, outfile, level, namespace_='', name_='StandardSeqs', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.format:
self.format.export(outfile, level, namespace_, name_='format', )
if self.matrix:
self.matrix.export(outfile, level, namespace_, name_='matrix', )
def hasContent_(self):
if (
self.meta or
self.format is not None or
self.matrix is not None or
super(StandardSeqs, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StandardSeqs'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(StandardSeqs, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(StandardSeqs, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.format is not None:
showIndent(outfile, level)
outfile.write('format=model_.StandardFormat(\n')
self.format.exportLiteral(outfile, level, name_='format')
showIndent(outfile, level)
outfile.write('),\n')
if self.matrix is not None:
showIndent(outfile, level)
outfile.write('matrix=model_.StandardSeqMatrix(\n')
self.matrix.exportLiteral(outfile, level, name_='matrix')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(StandardSeqs, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'format':
obj_ = StandardFormat.factory()
obj_.build(child_)
self.set_format(obj_)
elif nodeName_ == 'matrix':
obj_ = StandardSeqMatrix.factory()
obj_.build(child_)
self.set_matrix(obj_)
# end class StandardSeqs
class StandardPolymorphicStateSet(AbstractPolymorphicStateSet):
"""The StandardPolymorphicStateSet type is a single polymorphic
ambiguity mapping."""
subclass = None
superclass = AbstractPolymorphicStateSet
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, member=None, uncertain_state_set=None, valueOf_=None):
super(StandardPolymorphicStateSet, self).__init__(about, meta, label, id, symbol, member, uncertain_state_set, )
if member is None:
self.member = []
else:
self.member = member
if uncertain_state_set is None:
self.uncertain_state_set = []
else:
self.uncertain_state_set = uncertain_state_set
def factory(*args_, **kwargs_):
if StandardPolymorphicStateSet.subclass:
return StandardPolymorphicStateSet.subclass(*args_, **kwargs_)
else:
return StandardPolymorphicStateSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
def get_uncertain_state_set(self): return self.uncertain_state_set
def set_uncertain_state_set(self, uncertain_state_set): self.uncertain_state_set = uncertain_state_set
def add_uncertain_state_set(self, value): self.uncertain_state_set.append(value)
def insert_uncertain_state_set(self, index, value): self.uncertain_state_set[index] = value
def export(self, outfile, level, namespace_='', name_='StandardPolymorphicStateSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardPolymorphicStateSet')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardPolymorphicStateSet'):
super(StandardPolymorphicStateSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardPolymorphicStateSet')
def exportChildren(self, outfile, level, namespace_='', name_='StandardPolymorphicStateSet', fromsubclass_=False):
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member')
for uncertain_state_set_ in self.uncertain_state_set:
uncertain_state_set_.export(outfile, level, namespace_, name_='uncertain_state_set')
def hasContent_(self):
if (
self.member or
self.uncertain_state_set or
super(StandardPolymorphicStateSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StandardPolymorphicStateSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(StandardPolymorphicStateSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(StandardPolymorphicStateSet, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('member=[\n')
level += 1
for member_ in self.member:
showIndent(outfile, level)
outfile.write('model_.StandardMapping(\n')
member_.exportLiteral(outfile, level, name_='StandardMapping')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('uncertain_state_set=[\n')
level += 1
for uncertain_state_set_ in self.uncertain_state_set:
showIndent(outfile, level)
outfile.write('model_.StandardUncertainStateSet(\n')
uncertain_state_set_.exportLiteral(outfile, level, name_='StandardUncertainStateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(StandardPolymorphicStateSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'member':
obj_ = StandardMapping.factory()
obj_.build(child_)
self.member.append(obj_)
elif nodeName_ == 'uncertain_state_set':
obj_ = StandardUncertainStateSet.factory()
obj_.build(child_)
self.uncertain_state_set.append(obj_)
# end class StandardPolymorphicStateSet
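# Illustrative usage sketch (hypothetical): a polymorphic state set that
# both lists member states and nests an uncertain state set. The 'state'
# keyword on StandardMapping is assumed from the NeXML <member> element;
# all ids and symbols are placeholders.
def _example_standard_polymorphic_state_set():
    import sys
    pss = StandardPolymorphicStateSet.factory(id='s12', symbol='4')
    pss.add_member(StandardMapping.factory(state='s1'))
    pss.add_member(StandardMapping.factory(state='s2'))
    pss.add_uncertain_state_set(
        StandardUncertainStateSet.factory(id='u1', symbol='?'))
    pss.export(sys.stdout, 0, name_='polymorphic_state_set')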
class RnaCells(AbstractCells):
"""A RNA characters block consisting of granular cells preceded by
metadata."""
subclass = None
superclass = AbstractCells
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, format=None, matrix=None, valueOf_=None):
super(RnaCells, self).__init__(about, meta, label, id, otus, format, matrix, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.format = format
self.matrix = matrix
def factory(*args_, **kwargs_):
if RnaCells.subclass:
return RnaCells.subclass(*args_, **kwargs_)
else:
return RnaCells(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_format(self): return self.format
def set_format(self, format): self.format = format
def get_matrix(self): return self.matrix
def set_matrix(self, matrix): self.matrix = matrix
def export(self, outfile, level, namespace_='', name_='RnaCells', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RnaCells')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RnaCells'):
super(RnaCells, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RnaCells')
def exportChildren(self, outfile, level, namespace_='', name_='RnaCells', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.format:
self.format.export(outfile, level, namespace_, name_='format')
if self.matrix:
self.matrix.export(outfile, level, namespace_, name_='matrix', )
def hasContent_(self):
if (
self.meta or
self.format is not None or
self.matrix is not None or
super(RnaCells, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RnaCells'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RnaCells, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RnaCells, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.format is not None:
showIndent(outfile, level)
outfile.write('format=model_.RNAFormat(\n')
self.format.exportLiteral(outfile, level, name_='format')
showIndent(outfile, level)
outfile.write('),\n')
if self.matrix is not None:
showIndent(outfile, level)
outfile.write('matrix=model_.RNAObsMatrix(\n')
self.matrix.exportLiteral(outfile, level, name_='matrix')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RnaCells, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'format':
obj_ = RNAFormat.factory()
obj_.build(child_)
self.set_format(obj_)
elif nodeName_ == 'matrix':
obj_ = RNAObsMatrix.factory()
obj_.build(child_)
self.set_matrix(obj_)
# end class RnaCells
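# Illustration (hypothetical input, for exposition only) of the xsi:type
# dispatch used in buildChildren above: for an element such as
#     <meta xsi:type="nex:LiteralMeta" ... />
# the handler computes
#     type_name_  = 'nex:LiteralMeta'
#     type_names_ = ['nex', 'LiteralMeta']     # namespace prefix stripped
#     class_      = globals()['LiteralMeta']   # concrete Meta subclass in this module
# so the concrete metadata class is resolved at parse time from this module's
# namespace. The same dispatch pattern repeats in every *Cells/*Seqs class below.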
class RnaSeqs(AbstractSeqs):
"""A RNA characters block consisting of sequences preceded by metadata."""
subclass = None
superclass = AbstractSeqs
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, format=None, matrix=None, valueOf_=None):
super(RnaSeqs, self).__init__(about, meta, label, id, otus, format, matrix, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.format = format
self.matrix = matrix
def factory(*args_, **kwargs_):
if RnaSeqs.subclass:
return RnaSeqs.subclass(*args_, **kwargs_)
else:
return RnaSeqs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_format(self): return self.format
def set_format(self, format): self.format = format
def get_matrix(self): return self.matrix
def set_matrix(self, matrix): self.matrix = matrix
def export(self, outfile, level, namespace_='', name_='RnaSeqs', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RnaSeqs')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RnaSeqs'):
super(RnaSeqs, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RnaSeqs')
def exportChildren(self, outfile, level, namespace_='', name_='RnaSeqs', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.format:
self.format.export(outfile, level, namespace_, name_='format', )
if self.matrix:
self.matrix.export(outfile, level, namespace_, name_='matrix', )
def hasContent_(self):
if (
self.meta or
self.format is not None or
self.matrix is not None or
super(RnaSeqs, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RnaSeqs'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RnaSeqs, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RnaSeqs, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.format is not None:
showIndent(outfile, level)
outfile.write('format=model_.RNAFormat(\n')
self.format.exportLiteral(outfile, level, name_='format')
showIndent(outfile, level)
outfile.write('),\n')
if self.matrix is not None:
showIndent(outfile, level)
outfile.write('matrix=model_.RNASeqMatrix(\n')
self.matrix.exportLiteral(outfile, level, name_='matrix')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RnaSeqs, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'format':
obj_ = RNAFormat.factory()
obj_.build(child_)
self.set_format(obj_)
elif nodeName_ == 'matrix':
obj_ = RNASeqMatrix.factory()
obj_.build(child_)
self.set_matrix(obj_)
# end class RnaSeqs
class RNAPolymorphicStateSet(AbstractPolymorphicStateSet):
"""The RNAPolymorphicStateSet describes a single polymorphic IUPAC
ambiguity mapping."""
subclass = None
superclass = AbstractPolymorphicStateSet
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, member=None, uncertain_state_set=None, valueOf_=None):
super(RNAPolymorphicStateSet, self).__init__(about, meta, label, id, symbol, member, uncertain_state_set, )
self.symbol = _cast(None, symbol)
if member is None:
self.member = []
else:
self.member = member
if uncertain_state_set is None:
self.uncertain_state_set = []
else:
self.uncertain_state_set = uncertain_state_set
def factory(*args_, **kwargs_):
if RNAPolymorphicStateSet.subclass:
return RNAPolymorphicStateSet.subclass(*args_, **kwargs_)
else:
return RNAPolymorphicStateSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
def get_uncertain_state_set(self): return self.uncertain_state_set
def set_uncertain_state_set(self, uncertain_state_set): self.uncertain_state_set = uncertain_state_set
def add_uncertain_state_set(self, value): self.uncertain_state_set.append(value)
def insert_uncertain_state_set(self, index, value): self.uncertain_state_set[index] = value
def get_symbol(self): return self.symbol
def set_symbol(self, symbol): self.symbol = symbol
def validate_RNAToken(self, value):
# Validate type RNAToken, a restriction on AbstractSymbol.
pass
def export(self, outfile, level, namespace_='', name_='RNAPolymorphicStateSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RNAPolymorphicStateSet')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RNAPolymorphicStateSet'):
super(RNAPolymorphicStateSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RNAPolymorphicStateSet')
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
outfile.write(' symbol=%s' % (quote_attrib(self.symbol), ))
def exportChildren(self, outfile, level, namespace_='', name_='RNAPolymorphicStateSet', fromsubclass_=False):
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member')
for uncertain_state_set_ in self.uncertain_state_set:
uncertain_state_set_.export(outfile, level, namespace_, name_='uncertain_state_set')
def hasContent_(self):
if (
self.member or
self.uncertain_state_set or
super(RNAPolymorphicStateSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RNAPolymorphicStateSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
showIndent(outfile, level)
outfile.write('symbol = "%s",\n' % (self.symbol,))
super(RNAPolymorphicStateSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RNAPolymorphicStateSet, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('member=[\n')
level += 1
for member_ in self.member:
showIndent(outfile, level)
outfile.write('model_.RNAMapping(\n')
member_.exportLiteral(outfile, level, name_='RNAMapping')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('uncertain_state_set=[\n')
level += 1
for uncertain_state_set_ in self.uncertain_state_set:
showIndent(outfile, level)
outfile.write('model_.RNAUncertainStateSet(\n')
uncertain_state_set_.exportLiteral(outfile, level, name_='RNAUncertainStateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('symbol', node)
if value is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
self.symbol = value
self.validate_RNAToken(self.symbol) # validate type RNAToken
super(RNAPolymorphicStateSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'member':
obj_ = RNAMapping.factory()
obj_.build(child_)
self.member.append(obj_)
elif nodeName_ == 'uncertain_state_set':
obj_ = RNAUncertainStateSet.factory()
obj_.build(child_)
self.uncertain_state_set.append(obj_)
# end class RNAPolymorphicStateSet
class RestrictionCells(AbstractCells):
"""A standard characters block consisting of granular cells preceded by
metadata."""
subclass = None
superclass = AbstractCells
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, format=None, matrix=None, valueOf_=None):
super(RestrictionCells, self).__init__(about, meta, label, id, otus, format, matrix, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.format = format
self.matrix = matrix
def factory(*args_, **kwargs_):
if RestrictionCells.subclass:
return RestrictionCells.subclass(*args_, **kwargs_)
else:
return RestrictionCells(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_format(self): return self.format
def set_format(self, format): self.format = format
def get_matrix(self): return self.matrix
def set_matrix(self, matrix): self.matrix = matrix
def export(self, outfile, level, namespace_='', name_='RestrictionCells', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionCells')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RestrictionCells'):
super(RestrictionCells, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionCells')
def exportChildren(self, outfile, level, namespace_='', name_='RestrictionCells', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.format:
self.format.export(outfile, level, namespace_, name_='format', )
if self.matrix:
self.matrix.export(outfile, level, namespace_, name_='matrix', )
def hasContent_(self):
if (
self.meta or
self.format is not None or
self.matrix is not None or
super(RestrictionCells, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RestrictionCells'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RestrictionCells, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RestrictionCells, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.format is not None:
showIndent(outfile, level)
outfile.write('format=model_.RestrictionFormat(\n')
self.format.exportLiteral(outfile, level, name_='format')
showIndent(outfile, level)
outfile.write('),\n')
if self.matrix is not None:
showIndent(outfile, level)
outfile.write('matrix=model_.RestrictionObsMatrix(\n')
self.matrix.exportLiteral(outfile, level, name_='matrix')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RestrictionCells, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'format':
obj_ = RestrictionFormat.factory()
obj_.build(child_)
self.set_format(obj_)
elif nodeName_ == 'matrix':
obj_ = RestrictionObsMatrix.factory()
obj_.build(child_)
self.set_matrix(obj_)
# end class RestrictionCells
class RestrictionSeqs(AbstractSeqs):
"""A restriction site characters block consisting of sequences preceded
by metadata."""
subclass = None
superclass = AbstractSeqs
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, format=None, matrix=None, valueOf_=None):
super(RestrictionSeqs, self).__init__(about, meta, label, id, otus, format, matrix, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.format = format
self.matrix = matrix
def factory(*args_, **kwargs_):
if RestrictionSeqs.subclass:
return RestrictionSeqs.subclass(*args_, **kwargs_)
else:
return RestrictionSeqs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_format(self): return self.format
def set_format(self, format): self.format = format
def get_matrix(self): return self.matrix
def set_matrix(self, matrix): self.matrix = matrix
def export(self, outfile, level, namespace_='', name_='RestrictionSeqs', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionSeqs')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RestrictionSeqs'):
super(RestrictionSeqs, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RestrictionSeqs')
def exportChildren(self, outfile, level, namespace_='', name_='RestrictionSeqs', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.format:
self.format.export(outfile, level, namespace_, name_='format', )
if self.matrix:
self.matrix.export(outfile, level, namespace_, name_='matrix', )
def hasContent_(self):
if (
self.meta or
self.format is not None or
self.matrix is not None or
super(RestrictionSeqs, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RestrictionSeqs'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RestrictionSeqs, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RestrictionSeqs, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.format is not None:
showIndent(outfile, level)
outfile.write('format=model_.RestrictionFormat(\n')
self.format.exportLiteral(outfile, level, name_='format')
showIndent(outfile, level)
outfile.write('),\n')
if self.matrix is not None:
showIndent(outfile, level)
outfile.write('matrix=model_.RestrictionSeqMatrix(\n')
self.matrix.exportLiteral(outfile, level, name_='matrix')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RestrictionSeqs, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'format':
obj_ = RestrictionFormat.factory()
obj_.build(child_)
self.set_format(obj_)
elif nodeName_ == 'matrix':
obj_ = RestrictionSeqMatrix.factory()
obj_.build(child_)
self.set_matrix(obj_)
# end class RestrictionSeqs
class ProteinCells(AbstractCells):
"""An amino acid characters block consisting of granular cells preceded
by metadata."""
subclass = None
superclass = AbstractCells
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, format=None, matrix=None, valueOf_=None):
super(ProteinCells, self).__init__(about, meta, label, id, otus, format, matrix, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.format = format
self.matrix = matrix
def factory(*args_, **kwargs_):
if ProteinCells.subclass:
return ProteinCells.subclass(*args_, **kwargs_)
else:
return ProteinCells(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_format(self): return self.format
def set_format(self, format): self.format = format
def get_matrix(self): return self.matrix
def set_matrix(self, matrix): self.matrix = matrix
def export(self, outfile, level, namespace_='', name_='ProteinCells', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ProteinCells')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ProteinCells'):
super(ProteinCells, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ProteinCells')
def exportChildren(self, outfile, level, namespace_='', name_='ProteinCells', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.format:
self.format.export(outfile, level, namespace_, name_='format', )
if self.matrix:
self.matrix.export(outfile, level, namespace_, name_='matrix', )
def hasContent_(self):
if (
self.meta or
self.format is not None or
self.matrix is not None or
super(ProteinCells, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ProteinCells'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ProteinCells, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ProteinCells, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.format is not None:
showIndent(outfile, level)
outfile.write('format=model_.AAFormat(\n')
self.format.exportLiteral(outfile, level, name_='format')
showIndent(outfile, level)
outfile.write('),\n')
if self.matrix is not None:
showIndent(outfile, level)
outfile.write('matrix=model_.AAObsMatrix(\n')
self.matrix.exportLiteral(outfile, level, name_='matrix')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ProteinCells, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'format':
obj_ = AAFormat.factory()
obj_.build(child_)
self.set_format(obj_)
elif nodeName_ == 'matrix':
obj_ = AAObsMatrix.factory()
obj_.build(child_)
self.set_matrix(obj_)
# end class ProteinCells
class ProteinSeqs(AbstractSeqs):
"""An amino acid characters block consisting of sequences preceded by
metadata."""
subclass = None
superclass = AbstractSeqs
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, format=None, matrix=None, valueOf_=None):
super(ProteinSeqs, self).__init__(about, meta, label, id, otus, format, matrix, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.format = format
self.matrix = matrix
def factory(*args_, **kwargs_):
if ProteinSeqs.subclass:
return ProteinSeqs.subclass(*args_, **kwargs_)
else:
return ProteinSeqs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_format(self): return self.format
def set_format(self, format): self.format = format
def get_matrix(self): return self.matrix
def set_matrix(self, matrix): self.matrix = matrix
def export(self, outfile, level, namespace_='', name_='ProteinSeqs', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ProteinSeqs')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ProteinSeqs'):
super(ProteinSeqs, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ProteinSeqs')
def exportChildren(self, outfile, level, namespace_='', name_='ProteinSeqs', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.format:
self.format.export(outfile, level, namespace_, name_='format', )
if self.matrix:
self.matrix.export(outfile, level, namespace_, name_='matrix', )
def hasContent_(self):
if (
self.meta or
self.format is not None or
self.matrix is not None or
super(ProteinSeqs, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ProteinSeqs'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ProteinSeqs, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ProteinSeqs, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.format is not None:
showIndent(outfile, level)
outfile.write('format=model_.AAFormat(\n')
self.format.exportLiteral(outfile, level, name_='format')
showIndent(outfile, level)
outfile.write('),\n')
if self.matrix is not None:
showIndent(outfile, level)
outfile.write('matrix=model_.AASeqMatrix(\n')
self.matrix.exportLiteral(outfile, level, name_='matrix')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ProteinSeqs, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'format':
obj_ = AAFormat.factory()
obj_.build(child_)
self.set_format(obj_)
elif nodeName_ == 'matrix':
obj_ = AASeqMatrix.factory()
obj_.build(child_)
self.set_matrix(obj_)
# end class ProteinSeqs
class AAPolymorphicStateSet(AbstractPolymorphicStateSet):
"""The AAPolymorphicStateSet defines a polymorphic ambiguity mapping."""
subclass = None
superclass = AbstractPolymorphicStateSet
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, member=None, uncertain_state_set=None, valueOf_=None):
super(AAPolymorphicStateSet, self).__init__(about, meta, label, id, symbol, member, uncertain_state_set, )
self.symbol = _cast(None, symbol)
if member is None:
self.member = []
else:
self.member = member
if uncertain_state_set is None:
self.uncertain_state_set = []
else:
self.uncertain_state_set = uncertain_state_set
def factory(*args_, **kwargs_):
if AAPolymorphicStateSet.subclass:
return AAPolymorphicStateSet.subclass(*args_, **kwargs_)
else:
return AAPolymorphicStateSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
def get_uncertain_state_set(self): return self.uncertain_state_set
def set_uncertain_state_set(self, uncertain_state_set): self.uncertain_state_set = uncertain_state_set
def add_uncertain_state_set(self, value): self.uncertain_state_set.append(value)
def insert_uncertain_state_set(self, index, value): self.uncertain_state_set[index] = value
def get_symbol(self): return self.symbol
def set_symbol(self, symbol): self.symbol = symbol
def validate_AAToken(self, value):
# Validate type AAToken, a restriction on AbstractSymbol.
pass
def export(self, outfile, level, namespace_='', name_='AAPolymorphicStateSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AAPolymorphicStateSet')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AAPolymorphicStateSet'):
super(AAPolymorphicStateSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AAPolymorphicStateSet')
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
outfile.write(' symbol=%s' % (quote_attrib(self.symbol), ))
def exportChildren(self, outfile, level, namespace_='', name_='AAPolymorphicStateSet', fromsubclass_=False):
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member')
for uncertain_state_set_ in self.uncertain_state_set:
uncertain_state_set_.export(outfile, level, namespace_, name_='uncertain_state_set')
def hasContent_(self):
if (
self.member or
self.uncertain_state_set or
super(AAPolymorphicStateSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AAPolymorphicStateSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
showIndent(outfile, level)
outfile.write('symbol = "%s",\n' % (self.symbol,))
super(AAPolymorphicStateSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AAPolymorphicStateSet, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('member=[\n')
level += 1
for member_ in self.member:
showIndent(outfile, level)
outfile.write('model_.AAMapping(\n')
member_.exportLiteral(outfile, level, name_='AAMapping')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('uncertain_state_set=[\n')
level += 1
for uncertain_state_set_ in self.uncertain_state_set:
showIndent(outfile, level)
outfile.write('model_.AAUncertainStateSet(\n')
uncertain_state_set_.exportLiteral(outfile, level, name_='AAUncertainStateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('symbol', node)
if value is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
self.symbol = value
self.validate_AAToken(self.symbol) # validate type AAToken
super(AAPolymorphicStateSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'member':
obj_ = AAMapping.factory()
obj_.build(child_)
self.member.append(obj_)
elif nodeName_ == 'uncertain_state_set':
obj_ = AAUncertainStateSet.factory()
obj_.build(child_)
self.uncertain_state_set.append(obj_)
# end class AAPolymorphicStateSet
class DnaCells(AbstractCells):
"""A DNA characters block consisting of granular cells preceded by
metadata."""
subclass = None
superclass = AbstractCells
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, format=None, matrix=None, valueOf_=None):
super(DnaCells, self).__init__(about, meta, label, id, otus, format, matrix, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.format = format
self.matrix = matrix
def factory(*args_, **kwargs_):
if DnaCells.subclass:
return DnaCells.subclass(*args_, **kwargs_)
else:
return DnaCells(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_format(self): return self.format
def set_format(self, format): self.format = format
def get_matrix(self): return self.matrix
def set_matrix(self, matrix): self.matrix = matrix
def export(self, outfile, level, namespace_='', name_='DnaCells', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DnaCells')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DnaCells'):
super(DnaCells, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DnaCells')
def exportChildren(self, outfile, level, namespace_='', name_='DnaCells', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.format:
self.format.export(outfile, level, namespace_, name_='format', )
if self.matrix:
self.matrix.export(outfile, level, namespace_, name_='matrix', )
def hasContent_(self):
if (
self.meta or
self.format is not None or
self.matrix is not None or
super(DnaCells, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DnaCells'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(DnaCells, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DnaCells, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.format is not None:
showIndent(outfile, level)
outfile.write('format=model_.DNAFormat(\n')
self.format.exportLiteral(outfile, level, name_='format')
showIndent(outfile, level)
outfile.write('),\n')
if self.matrix is not None:
showIndent(outfile, level)
outfile.write('matrix=model_.DNAObsMatrix(\n')
self.matrix.exportLiteral(outfile, level, name_='matrix')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(DnaCells, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'format':
obj_ = DNAFormat.factory()
obj_.build(child_)
self.set_format(obj_)
elif nodeName_ == 'matrix':
obj_ = DNAObsMatrix.factory()
obj_.build(child_)
self.set_matrix(obj_)
# end class DnaCells
class DnaSeqs(AbstractSeqs):
"""A DNA characters block consisting of sequences preceded by metadata."""
subclass = None
superclass = AbstractSeqs
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, format=None, matrix=None, valueOf_=None):
super(DnaSeqs, self).__init__(about, meta, label, id, otus, format, matrix, )
if meta is None:
self.meta = []
else:
self.meta = meta
self.format = format
self.matrix = matrix
def factory(*args_, **kwargs_):
if DnaSeqs.subclass:
return DnaSeqs.subclass(*args_, **kwargs_)
else:
return DnaSeqs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_format(self): return self.format
def set_format(self, format): self.format = format
def get_matrix(self): return self.matrix
def set_matrix(self, matrix): self.matrix = matrix
def export(self, outfile, level, namespace_='', name_='DnaSeqs', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DnaSeqs')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DnaSeqs'):
super(DnaSeqs, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DnaSeqs')
def exportChildren(self, outfile, level, namespace_='', name_='DnaSeqs', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
if self.format:
self.format.export(outfile, level, namespace_, name_='format', )
if self.matrix:
self.matrix.export(outfile, level, namespace_, name_='matrix', )
def hasContent_(self):
if (
self.meta or
self.format is not None or
self.matrix is not None or
super(DnaSeqs, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DnaSeqs'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(DnaSeqs, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DnaSeqs, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('meta=[\n')
level += 1
for meta_ in self.meta:
showIndent(outfile, level)
outfile.write('model_.Meta(\n')
meta_.exportLiteral(outfile, level, name_='Meta')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.format is not None:
showIndent(outfile, level)
outfile.write('format=model_.DNAFormat(\n')
self.format.exportLiteral(outfile, level, name_='format')
showIndent(outfile, level)
outfile.write('),\n')
if self.matrix is not None:
showIndent(outfile, level)
outfile.write('matrix=model_.DNASeqMatrix(\n')
self.matrix.exportLiteral(outfile, level, name_='matrix')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(DnaSeqs, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'meta':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <meta> element')
self.meta.append(obj_)
elif nodeName_ == 'format':
obj_ = DNAFormat.factory()
obj_.build(child_)
self.set_format(obj_)
elif nodeName_ == 'matrix':
obj_ = DNASeqMatrix.factory()
obj_.build(child_)
self.set_matrix(obj_)
# end class DnaSeqs
class DNAPolymorphicStateSet(AbstractPolymorphicStateSet):
"""The DNAPolymorphicStateSet type defines an IUPAC ambiguity mapping.
It may enclose zero or more AbstractMapping elements to resolve
ambiguities."""
subclass = None
superclass = AbstractPolymorphicStateSet
def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, member=None, uncertain_state_set=None, valueOf_=None):
super(DNAPolymorphicStateSet, self).__init__(about, meta, label, id, symbol, member, uncertain_state_set, )
self.symbol = _cast(None, symbol)
if member is None:
self.member = []
else:
self.member = member
if uncertain_state_set is None:
self.uncertain_state_set = []
else:
self.uncertain_state_set = uncertain_state_set
def factory(*args_, **kwargs_):
if DNAPolymorphicStateSet.subclass:
return DNAPolymorphicStateSet.subclass(*args_, **kwargs_)
else:
return DNAPolymorphicStateSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
def get_uncertain_state_set(self): return self.uncertain_state_set
def set_uncertain_state_set(self, uncertain_state_set): self.uncertain_state_set = uncertain_state_set
def add_uncertain_state_set(self, value): self.uncertain_state_set.append(value)
def insert_uncertain_state_set(self, index, value): self.uncertain_state_set[index] = value
def get_symbol(self): return self.symbol
def set_symbol(self, symbol): self.symbol = symbol
def validate_DNAToken(self, value):
# Validate type DNAToken, a restriction on AbstractSymbol.
pass
def export(self, outfile, level, namespace_='', name_='DNAPolymorphicStateSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DNAPolymorphicStateSet')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DNAPolymorphicStateSet'):
super(DNAPolymorphicStateSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DNAPolymorphicStateSet')
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
outfile.write(' symbol=%s' % (quote_attrib(self.symbol), ))
def exportChildren(self, outfile, level, namespace_='', name_='DNAPolymorphicStateSet', fromsubclass_=False):
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member')
for uncertain_state_set_ in self.uncertain_state_set:
uncertain_state_set_.export(outfile, level, namespace_, name_='uncertain_state_set')
def hasContent_(self):
if (
self.member or
self.uncertain_state_set or
super(DNAPolymorphicStateSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DNAPolymorphicStateSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.symbol is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
showIndent(outfile, level)
outfile.write('symbol = "%s",\n' % (self.symbol,))
super(DNAPolymorphicStateSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DNAPolymorphicStateSet, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('member=[\n')
level += 1
for member_ in self.member:
showIndent(outfile, level)
outfile.write('model_.DNAMapping(\n')
member_.exportLiteral(outfile, level, name_='DNAMapping')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('uncertain_state_set=[\n')
level += 1
for uncertain_state_set_ in self.uncertain_state_set:
showIndent(outfile, level)
outfile.write('model_.DNAUncertainStateSet(\n')
uncertain_state_set_.exportLiteral(outfile, level, name_='DNAUncertainStateSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('symbol', node)
if value is not None and 'symbol' not in already_processed:
already_processed.append('symbol')
self.symbol = value
self.validate_DNAToken(self.symbol) # validate type DNAToken
super(DNAPolymorphicStateSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'member':
obj_ = DNAMapping.factory()
obj_.build(child_)
self.member.append(obj_)
elif nodeName_ == 'uncertain_state_set':
obj_ = DNAUncertainStateSet.factory()
obj_.build(child_)
self.uncertain_state_set.append(obj_)
# end class DNAPolymorphicStateSet
USAGE_TEXT = """
Usage: python <Parser>.py <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Nexml'
rootClass = Nexml
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('<?xml version="1.0" ?>\n')
## rootObj.export(sys.stdout, 0, name_=rootTag,
## namespacedef_='xmlns:nex="http://www.nexml.org/2009"')
return rootObj
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Nexml'
rootClass = Nexml
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('<?xml version="1.0" ?>\n')
## rootObj.export(sys.stdout, 0, name_="Nexml",
## namespacedef_='xmlns:nex="http://www.nexml.org/2009"')
return rootObj
def parseLiteral(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Nexml'
rootClass = Nexml
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('#from nexml import *\n\n')
## sys.stdout.write('import nexml as model_\n\n')
## sys.stdout.write('rootObj = model_.rootTag(\n')
## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
## sys.stdout.write(')\n')
return rootObj
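# Minimal usage sketch for the three entry points above ('example.xml' is a
# placeholder file name; the export call mirrors the commented-out lines
# inside parse()):
#
#     import sys
#     rootObj = parse('example.xml')
#     rootObj.export(sys.stdout, 0, name_='Nexml',
#                    namespacedef_='xmlns:nex="http://www.nexml.org/2009"')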
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"AAChar",
"AAFormat",
"AAMapping",
"AAMatrixObsRow",
"AAMatrixSeqRow",
"AAObs",
"AAObsMatrix",
"AAPolymorphicStateSet",
"AASeqMatrix",
"AAState",
"AAStates",
"AAUncertainStateSet",
"AbstractBlock",
"AbstractCells",
"AbstractChar",
"AbstractEdge",
"AbstractFormat",
"AbstractMapping",
"AbstractNetwork",
"AbstractNode",
"AbstractObs",
"AbstractObsMatrix",
"AbstractObsRow",
"AbstractPolymorphicStateSet",
"AbstractRootEdge",
"AbstractSeqMatrix",
"AbstractSeqRow",
"AbstractSeqs",
"AbstractSet",
"AbstractState",
"AbstractStates",
"AbstractTree",
"AbstractTrees",
"AbstractUncertainStateSet",
"Annotated",
"Base",
"CellSet",
"CharSet",
"ContinuousCells",
"ContinuousChar",
"ContinuousFormat",
"ContinuousMatrixObsRow",
"ContinuousMatrixSeqRow",
"ContinuousObs",
"ContinuousObsMatrix",
"ContinuousSeqMatrix",
"ContinuousSeqs",
"DNAChar",
"DNAFormat",
"DNAMapping",
"DNAMatrixObsRow",
"DNAMatrixSeqRow",
"DNAObs",
"DNAObsMatrix",
"DNAPolymorphicStateSet",
"DNASeqMatrix",
"DNAState",
"DNAStates",
"DNAUncertainStateSet",
"DnaCells",
"DnaSeqs",
"FloatNetwork",
"FloatTree",
"IDTagged",
"IntNetwork",
"IntTree",
"Labelled",
"LiteralMeta",
"Meta",
"NetworkFloatEdge",
"NetworkIntEdge",
"NetworkNode",
"Nexml",
"NodeAndRootEdgeAndEdgeSet",
"OptionalTaxonLinked",
"ProteinCells",
"ProteinSeqs",
"RNAChar",
"RNAFormat",
"RNAMapping",
"RNAMatrixObsRow",
"RNAMatrixSeqRow",
"RNAObs",
"RNAObsMatrix",
"RNAPolymorphicStateSet",
"RNASeqMatrix",
"RNAState",
"RNAStates",
"RNAUncertainStateSet",
"ResourceMeta",
"RestrictionCells",
"RestrictionChar",
"RestrictionFormat",
"RestrictionMatrixObsRow",
"RestrictionMatrixSeqRow",
"RestrictionObs",
"RestrictionObsMatrix",
"RestrictionSeqMatrix",
"RestrictionSeqs",
"RestrictionState",
"RestrictionStates",
"RnaCells",
"RnaSeqs",
"RowSet",
"StandardCells",
"StandardChar",
"StandardFormat",
"StandardMapping",
"StandardMatrixObsRow",
"StandardMatrixSeqRow",
"StandardObs",
"StandardObsMatrix",
"StandardPolymorphicStateSet",
"StandardSeqMatrix",
"StandardSeqs",
"StandardState",
"StandardStates",
"StandardUncertainStateSet",
"StateSet",
"Taxa",
"TaxaLinked",
"Taxon",
"TaxonLinked",
"TaxonSet",
"TreeAndNetworkSet",
"TreeFloatEdge",
"TreeFloatRootEdge",
"TreeIntEdge",
"TreeIntRootEdge",
"TreeNode",
"Trees",
"attrExtensions"
]
| gpl-3.0 |
phobson/statsmodels | statsmodels/sandbox/tsa/movstat.py | 34 | 14871 | '''using scipy signal and numpy correlate to calculate some time series
statistics
original developer notes
see also scikits.timeseries (movstat is partially inspired by it)
added 2009-08-29
timeseries moving stats are in c, autocorrelation similar to here
(I thought I saw moving stats somewhere in python, maybe not)
TODO
moving statistics
- filters don't handle boundary conditions nicely (correctly ?)
  e.g. the minimum order filter uses 0 for out-of-bounds values
  -> worked around by prepending the first resp. appending the last value (expandarr)
- enhance for nd arrays, with axis = 0
Note: Equivalence for 1D signals
>>> np.all(signal.correlate(x,[1,1,1],'valid')==np.correlate(x,[1,1,1]))
True
>>> np.all(ndimage.filters.correlate(x,[1,1,1], origin = -1)[:-3+1]==np.correlate(x,[1,1,1]))
True
# multidimensional, but it looks like it uses a common filter across time series, no VAR
ndimage.filters.correlate(np.vstack([x,x]),np.array([[1,1,1],[0,0,0]]), origin = 1)
ndimage.filters.correlate(x,[1,1,1],origin = 1)
ndimage.filters.correlate(np.vstack([x,x]),np.array([[0.5,0.5,0.5],[0.5,0.5,0.5]]), \
origin = 1)
>>> np.all(ndimage.filters.correlate(np.vstack([x,x]),np.array([[1,1,1],[0,0,0]]), origin = 1)[0]==\
ndimage.filters.correlate(x,[1,1,1],origin = 1))
True
>>> np.all(ndimage.filters.correlate(np.vstack([x,x]),np.array([[0.5,0.5,0.5],[0.5,0.5,0.5]]), \
origin = 1)[0]==ndimage.filters.correlate(x,[1,1,1],origin = 1))
update
2009-09-06: cosmetic changes, rearrangements
'''
from __future__ import print_function
import numpy as np
from scipy import signal
from numpy.testing import assert_array_equal, assert_array_almost_equal
import statsmodels.api as sm
def expandarr(x,k):
#make it work for 2D or nD with axis
kadd = k
if np.ndim(x) == 2:
kadd = (kadd, np.shape(x)[1])
return np.r_[np.ones(kadd)*x[0],x,np.ones(kadd)*x[-1]]
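# Boundary-extension sketch (values follow directly from the np.r_ line above):
#     expandarr(np.array([1., 2., 3.]), 1)  ->  array([ 1., 1., 2., 3., 3.])
# i.e. the first and last values are repeated k times on each side; this is how
# the moving-window functions below pad a series before filtering.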
def movorder(x, order = 'med', windsize=3, lag='lagged'):
'''moving order statistics
Parameters
----------
x : array
time series data
order : float or 'med', 'min', 'max'
which order statistic to calculate
windsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
filtered array
'''
    # TODO: should an even windsize raise a ValueError?
if lag == 'lagged':
lead = windsize//2
elif lag == 'centered':
lead = 0
elif lag == 'leading':
lead = -windsize//2 +1
    else:
        raise ValueError("lag must be 'lagged', 'centered', or 'leading'")
    if order == 'med':
        ord = (windsize - 1)//2
    elif order == 'min':
        ord = 0
    elif order == 'max':
        ord = windsize - 1
    elif np.isfinite(order):  # a numeric rank was passed directly
        ord = order   # note: ord shadows a builtin function
    else:
        raise ValueError("order must be a finite number or 'med', 'min', 'max'")
#return signal.order_filter(x,np.ones(windsize),ord)[:-lead]
xext = expandarr(x, windsize)
#np.r_[np.ones(windsize)*x[0],x,np.ones(windsize)*x[-1]]
return signal.order_filter(xext,np.ones(windsize),ord)[windsize-lead:-(windsize+lead)]
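# Usage sketch, consistent with the assertions in check_movorder below: a
# lagged moving maximum of a strictly increasing series reproduces the series.
#     movorder(np.arange(1, 10), order='max')  ->  array([ 1., 2., ..., 9.])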
def check_movorder():
'''graphical test for movorder'''
    import matplotlib.pyplot as plt
x = np.arange(1,10)
xo = movorder(x, order='max')
assert_array_equal(xo, x)
x = np.arange(10,1,-1)
xo = movorder(x, order='min')
assert_array_equal(xo, x)
assert_array_equal(movorder(x, order='min', lag='centered')[:-1], x[1:])
tt = np.linspace(0,2*np.pi,15)
x = np.sin(tt) + 1
xo = movorder(x, order='max')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max lagged')
xo = movorder(x, order='max', lag='centered')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max centered')
xo = movorder(x, order='max', lag='leading')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max leading')
# identity filter
##>>> signal.order_filter(x,np.ones(1),0)
##array([ 1., 2., 3., 4., 5., 6., 7., 8., 9.])
# median filter
##signal.medfilt(np.sin(x), kernel_size=3)
##>>> plt.figure()
##<matplotlib.figure.Figure object at 0x069BBB50>
##>>> x=np.linspace(0,3,100);plt.plot(x,np.sin(x),x,signal.medfilt(np.sin(x), kernel_size=3))
# remove old version
##def movmeanvar(x, windowsize=3, valid='same'):
## '''
## this should also work along axis or at least for columns
## '''
## n = x.shape[0]
## x = expandarr(x, windowsize - 1)
## takeslice = slice(windowsize-1, n + windowsize-1)
## avgkern = (np.ones(windowsize)/float(windowsize))
## m = np.correlate(x, avgkern, 'same')#[takeslice]
## print(m.shape)
## print(x.shape)
## xm = x - m
## v = np.correlate(x*x, avgkern, 'same') - m**2
## v1 = np.correlate(xm*xm, avgkern, valid) #not correct for var of window
###>>> np.correlate(xm*xm,np.array([1,1,1])/3.0,'valid')-np.correlate(xm*xm,np.array([1,1,1])/3.0,'valid')**2
## return m[takeslice], v[takeslice], v1
def movmean(x, windowsize=3, lag='lagged'):
'''moving window mean
Parameters
----------
x : array
time series data
    windowsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : array
moving mean, with same shape as x
Notes
-----
for leading and lagging the data array x is extended by the closest value of the array
'''
return movmoment(x, 1, windowsize=windowsize, lag=lag)
def movvar(x, windowsize=3, lag='lagged'):
'''moving window variance
Parameters
----------
x : array
time series data
    windowsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : array
moving variance, with same shape as x
'''
m1 = movmoment(x, 1, windowsize=windowsize, lag=lag)
m2 = movmoment(x, 2, windowsize=windowsize, lag=lag)
return m2 - m1*m1
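# Illustrative example (consistent with the checks in __main__ below): for
# x = np.arange(10) and windowsize=3, the lagged moving variance settles at
# np.var([0, 1, 2]) == 2/3 ~= 0.6667 once the window is completely filled.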
def movmoment(x, k, windowsize=3, lag='lagged'):
'''non-central moment
Parameters
----------
x : array
time series data
    windowsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : array
k-th moving non-central moment, with same shape as x
Notes
-----
If data x is 2d, then moving moment is calculated for each
column.
'''
windsize = windowsize
#if windsize is even should it raise ValueError
if lag == 'lagged':
#lead = -0 + windsize #windsize//2
lead = -0# + (windsize-1) + windsize//2
sl = slice((windsize-1) or None, -2*(windsize-1) or None)
elif lag == 'centered':
lead = -windsize//2 #0#-1 #+ #(windsize-1)
sl = slice((windsize-1)+windsize//2 or None, -(windsize-1)-windsize//2 or None)
elif lag == 'leading':
#lead = -windsize +1#+1 #+ (windsize-1)#//2 +1
lead = -windsize +2 #-windsize//2 +1
sl = slice(2*(windsize-1)+1+lead or None, -(2*(windsize-1)+lead)+1 or None)
else:
raise ValueError
avgkern = (np.ones(windowsize)/float(windowsize))
xext = expandarr(x, windsize-1)
#Note: expandarr increases the array size by 2*(windsize-1)
#sl = slice(2*(windsize-1)+1+lead or None, -(2*(windsize-1)+lead)+1 or None)
print(sl)
if xext.ndim == 1:
return np.correlate(xext**k, avgkern, 'full')[sl]
#return np.correlate(xext**k, avgkern, 'same')[windsize-lead:-(windsize+lead)]
else:
print(xext.shape)
print(avgkern[:,None].shape)
# try first with 2d along columns, possibly ndim with axis
return signal.correlate(xext**k, avgkern[:,None], 'full')[sl,:]
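# Illustrative identities (follow directly from the definitions above):
#   movmoment(x, 1) is the moving mean (see movmean), and
#   movmoment(x, 2) - movmoment(x, 1)**2 is the moving variance (see movvar).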
#x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,[1],'full')
#x=0.5**np.arange(3);np.correlate(x,x,'same')
##>>> x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,xo,'full')
##
##>>> xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> xo
##xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> x=np.ones(10);xo=x-x.mean();a=np.correlate(xo,xo,'full')
##>>> xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> d
##array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 9.,
## 8., 7., 6., 5., 4., 3., 2., 1.])
##def ccovf():
## pass
## #x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,xo,'full')
__all__ = ['movorder', 'movmean', 'movvar', 'movmoment']
if __name__ == '__main__':
    print('\nchecking moving mean and variance')
nobs = 10
x = np.arange(nobs)
ws = 3
ave = np.array([ 0., 1/3., 1., 2., 3., 4., 5., 6., 7., 8.,
26/3., 9])
va = np.array([[ 0. , 0. ],
[ 0.22222222, 0.88888889],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.22222222, 0.88888889],
[ 0. , 0. ]])
ave2d = np.c_[ave, 2*ave]
print(movmean(x, windowsize=ws, lag='lagged'))
print(movvar(x, windowsize=ws, lag='lagged'))
print([np.var(x[i-ws:i]) for i in range(ws, nobs)])
m1 = movmoment(x, 1, windowsize=3, lag='lagged')
m2 = movmoment(x, 2, windowsize=3, lag='lagged')
print(m1)
print(m2)
print(m2 - m1*m1)
# this implicitly also tests moment
assert_array_almost_equal(va[ws-1:,0],
movvar(x, windowsize=3, lag='leading'))
assert_array_almost_equal(va[ws//2:-ws//2+1,0],
movvar(x, windowsize=3, lag='centered'))
assert_array_almost_equal(va[:-ws+1,0],
movvar(x, windowsize=ws, lag='lagged'))
print('\nchecking moving moment for 2d (columns only)')
x2d = np.c_[x, 2*x]
print(movmoment(x2d, 1, windowsize=3, lag='centered'))
print(movmean(x2d, windowsize=ws, lag='lagged'))
print(movvar(x2d, windowsize=ws, lag='lagged'))
assert_array_almost_equal(va[ws-1:,:],
movvar(x2d, windowsize=3, lag='leading'))
assert_array_almost_equal(va[ws//2:-ws//2+1,:],
movvar(x2d, windowsize=3, lag='centered'))
assert_array_almost_equal(va[:-ws+1,:],
movvar(x2d, windowsize=ws, lag='lagged'))
assert_array_almost_equal(ave2d[ws-1:],
movmoment(x2d, 1, windowsize=3, lag='leading'))
assert_array_almost_equal(ave2d[ws//2:-ws//2+1],
movmoment(x2d, 1, windowsize=3, lag='centered'))
assert_array_almost_equal(ave2d[:-ws+1],
movmean(x2d, windowsize=ws, lag='lagged'))
from scipy import ndimage
print(ndimage.filters.correlate1d(x2d, np.array([1,1,1])/3., axis=0))
#regression test check
xg = np.array([ 0. , 0.1, 0.3, 0.6, 1. , 1.5, 2.1, 2.8, 3.6,
4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5,
13.5, 14.5, 15.5, 16.5, 17.5, 18.5, 19.5, 20.5, 21.5,
22.5, 23.5, 24.5, 25.5, 26.5, 27.5, 28.5, 29.5, 30.5,
31.5, 32.5, 33.5, 34.5, 35.5, 36.5, 37.5, 38.5, 39.5,
40.5, 41.5, 42.5, 43.5, 44.5, 45.5, 46.5, 47.5, 48.5,
49.5, 50.5, 51.5, 52.5, 53.5, 54.5, 55.5, 56.5, 57.5,
58.5, 59.5, 60.5, 61.5, 62.5, 63.5, 64.5, 65.5, 66.5,
67.5, 68.5, 69.5, 70.5, 71.5, 72.5, 73.5, 74.5, 75.5,
76.5, 77.5, 78.5, 79.5, 80.5, 81.5, 82.5, 83.5, 84.5,
85.5, 86.5, 87.5, 88.5, 89.5, 90.5, 91.5, 92.5, 93.5,
94.5])
assert_array_almost_equal(xg, movmean(np.arange(100), 10,'lagged'))
xd = np.array([ 0.3, 0.6, 1. , 1.5, 2.1, 2.8, 3.6, 4.5, 5.5,
6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5, 13.5, 14.5,
15.5, 16.5, 17.5, 18.5, 19.5, 20.5, 21.5, 22.5, 23.5,
24.5, 25.5, 26.5, 27.5, 28.5, 29.5, 30.5, 31.5, 32.5,
33.5, 34.5, 35.5, 36.5, 37.5, 38.5, 39.5, 40.5, 41.5,
42.5, 43.5, 44.5, 45.5, 46.5, 47.5, 48.5, 49.5, 50.5,
51.5, 52.5, 53.5, 54.5, 55.5, 56.5, 57.5, 58.5, 59.5,
60.5, 61.5, 62.5, 63.5, 64.5, 65.5, 66.5, 67.5, 68.5,
69.5, 70.5, 71.5, 72.5, 73.5, 74.5, 75.5, 76.5, 77.5,
78.5, 79.5, 80.5, 81.5, 82.5, 83.5, 84.5, 85.5, 86.5,
87.5, 88.5, 89.5, 90.5, 91.5, 92.5, 93.5, 94.5, 95.4,
96.2, 96.9, 97.5, 98. , 98.4, 98.7, 98.9, 99. ])
assert_array_almost_equal(xd, movmean(np.arange(100), 10,'leading'))
xc = np.array([ 1.36363636, 1.90909091, 2.54545455, 3.27272727,
4.09090909, 5. , 6. , 7. ,
8. , 9. , 10. , 11. ,
12. , 13. , 14. , 15. ,
16. , 17. , 18. , 19. ,
20. , 21. , 22. , 23. ,
24. , 25. , 26. , 27. ,
28. , 29. , 30. , 31. ,
32. , 33. , 34. , 35. ,
36. , 37. , 38. , 39. ,
40. , 41. , 42. , 43. ,
44. , 45. , 46. , 47. ,
48. , 49. , 50. , 51. ,
52. , 53. , 54. , 55. ,
56. , 57. , 58. , 59. ,
60. , 61. , 62. , 63. ,
64. , 65. , 66. , 67. ,
68. , 69. , 70. , 71. ,
72. , 73. , 74. , 75. ,
76. , 77. , 78. , 79. ,
80. , 81. , 82. , 83. ,
84. , 85. , 86. , 87. ,
88. , 89. , 90. , 91. ,
92. , 93. , 94. , 94.90909091,
95.72727273, 96.45454545, 97.09090909, 97.63636364])
assert_array_almost_equal(xc, movmean(np.arange(100), 11,'centered'))
| bsd-3-clause |
mattmccarthy11/vidly-development | bk/mediadrop/lib/tests/players_test.py | 10 | 1427 | # -*- coding: utf-8 -*-
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2015 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code in this file is dual licensed under the MIT license or
# the GPLv3 or (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
from mediadrop.lib.filetypes import VIDEO
from mediadrop.lib.players import FileSupportMixin, RTMP
from mediadrop.lib.test.pythonic_testcase import *
from mediadrop.lib.uri import StorageURI
from mediadrop.model import MediaFile
class FileSupportMixinTest(PythonicTestCase):
def test_can_play_ignores_empty_container(self):
class FakePlayer(FileSupportMixin):
supported_containers = set(['mp4'])
supported_schemes = set([RTMP])
fake_player = FakePlayer()
media_file = MediaFile()
media_file.container = ''
media_file.type = VIDEO
uri = StorageURI(media_file, 'rtmp', 'test',
server_uri='rtmp://stream.host.example/play')
assert_equals('', uri.file.container,
message='It is important that the server uri has no container.')
assert_equals((True, ), fake_player.can_play([uri]))
import unittest
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(FileSupportMixinTest))
return suite
| gpl-3.0 |
Wen777/beets | beets/util/functemplate.py | 16 | 19245 | # This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module implements a string formatter based on the standard PEP
292 string.Template class extended with function calls. Variables, as
with string.Template, are indicated with $ and functions are delimited
with %.
This module assumes that everything is Unicode: the template and the
substitution values. Bytestrings are not supported. Also, the templates
always behave like the ``safe_substitute`` method in the standard
library: unknown symbols are left intact.
This is sort of like a tiny, horrible degeneration of a real templating
engine like Jinja2 or Mustache.
"""
from __future__ import print_function
import re
import ast
import dis
import types
SYMBOL_DELIM = u'$'
FUNC_DELIM = u'%'
GROUP_OPEN = u'{'
GROUP_CLOSE = u'}'
ARG_SEP = u','
ESCAPE_CHAR = u'$'
VARIABLE_PREFIX = '__var_'
FUNCTION_PREFIX = '__func_'
class Environment(object):
"""Contains the values and functions to be substituted into a
template.
"""
def __init__(self, values, functions):
self.values = values
self.functions = functions
# Code generation helpers.
def ex_lvalue(name):
"""A variable load expression."""
return ast.Name(name, ast.Store())
def ex_rvalue(name):
"""A variable store expression."""
return ast.Name(name, ast.Load())
def ex_literal(val):
"""An int, float, long, bool, string, or None literal with the given
value.
"""
if val is None:
return ast.Name('None', ast.Load())
    elif isinstance(val, bool):
        # bool must be tested before int: bool is a subclass of int, so the
        # Num branch below would otherwise swallow True/False.
        return ast.Name(str(val), ast.Load())
    elif isinstance(val, (int, float, long)):
        return ast.Num(val)
elif isinstance(val, basestring):
return ast.Str(val)
raise TypeError('no literal for {0}'.format(type(val)))
def ex_varassign(name, expr):
"""Assign an expression into a single variable. The expression may
either be an `ast.expr` object or a value to be used as a literal.
"""
if not isinstance(expr, ast.expr):
expr = ex_literal(expr)
return ast.Assign([ex_lvalue(name)], expr)
def ex_call(func, args):
"""A function-call expression with only positional parameters. The
function may be an expression or the name of a function. Each
argument may be an expression or a value to be used as a literal.
"""
if isinstance(func, basestring):
func = ex_rvalue(func)
args = list(args)
for i in range(len(args)):
if not isinstance(args[i], ast.expr):
args[i] = ex_literal(args[i])
return ast.Call(func, args, [], None, None)
def compile_func(arg_names, statements, name='_the_func', debug=False):
"""Compile a list of statements as the body of a function and return
the resulting Python function. If `debug`, then print out the
bytecode of the compiled function.
"""
func_def = ast.FunctionDef(
name,
ast.arguments(
[ast.Name(n, ast.Param()) for n in arg_names],
None, None,
[ex_literal(None) for _ in arg_names],
),
statements,
[],
)
mod = ast.Module([func_def])
ast.fix_missing_locations(mod)
prog = compile(mod, '<generated>', 'exec')
# Debug: show bytecode.
if debug:
dis.dis(prog)
for const in prog.co_consts:
if isinstance(const, types.CodeType):
dis.dis(const)
the_locals = {}
exec prog in {}, the_locals
return the_locals[name]
# AST nodes for the template language.
class Symbol(object):
"""A variable-substitution symbol in a template."""
def __init__(self, ident, original):
self.ident = ident
self.original = original
def __repr__(self):
return u'Symbol(%s)' % repr(self.ident)
def evaluate(self, env):
"""Evaluate the symbol in the environment, returning a Unicode
string.
"""
if self.ident in env.values:
# Substitute for a value.
return env.values[self.ident]
else:
# Keep original text.
return self.original
def translate(self):
"""Compile the variable lookup."""
expr = ex_rvalue(VARIABLE_PREFIX + self.ident.encode('utf8'))
return [expr], set([self.ident.encode('utf8')]), set()
class Call(object):
"""A function call in a template."""
def __init__(self, ident, args, original):
self.ident = ident
self.args = args
self.original = original
def __repr__(self):
return u'Call(%s, %s, %s)' % (repr(self.ident), repr(self.args),
repr(self.original))
def evaluate(self, env):
"""Evaluate the function call in the environment, returning a
Unicode string.
"""
if self.ident in env.functions:
arg_vals = [expr.evaluate(env) for expr in self.args]
try:
out = env.functions[self.ident](*arg_vals)
except Exception as exc:
# Function raised exception! Maybe inlining the name of
# the exception will help debug.
return u'<%s>' % unicode(exc)
return unicode(out)
else:
return self.original
def translate(self):
"""Compile the function call."""
varnames = set()
funcnames = set([self.ident.encode('utf8')])
arg_exprs = []
for arg in self.args:
subexprs, subvars, subfuncs = arg.translate()
varnames.update(subvars)
funcnames.update(subfuncs)
# Create a subexpression that joins the result components of
# the arguments.
arg_exprs.append(ex_call(
ast.Attribute(ex_literal(u''), 'join', ast.Load()),
[ex_call(
'map',
[
ex_rvalue('unicode'),
ast.List(subexprs, ast.Load()),
]
)],
))
subexpr_call = ex_call(
FUNCTION_PREFIX + self.ident.encode('utf8'),
arg_exprs
)
return [subexpr_call], varnames, funcnames
class Expression(object):
"""Top-level template construct: contains a list of text blobs,
Symbols, and Calls.
"""
def __init__(self, parts):
self.parts = parts
def __repr__(self):
return u'Expression(%s)' % (repr(self.parts))
def evaluate(self, env):
"""Evaluate the entire expression in the environment, returning
a Unicode string.
"""
out = []
for part in self.parts:
if isinstance(part, basestring):
out.append(part)
else:
out.append(part.evaluate(env))
return u''.join(map(unicode, out))
def translate(self):
"""Compile the expression to a list of Python AST expressions, a
set of variable names used, and a set of function names.
"""
expressions = []
varnames = set()
funcnames = set()
for part in self.parts:
if isinstance(part, basestring):
expressions.append(ex_literal(part))
else:
e, v, f = part.translate()
expressions.extend(e)
varnames.update(v)
funcnames.update(f)
return expressions, varnames, funcnames
# Parser.
class ParseError(Exception):
pass
class Parser(object):
"""Parses a template expression string. Instantiate the class with
the template source and call ``parse_expression``. The ``pos`` field
will indicate the character after the expression finished and
``parts`` will contain a list of Unicode strings, Symbols, and Calls
reflecting the concatenated portions of the expression.
This is a terrible, ad-hoc parser implementation based on a
left-to-right scan with no lexing step to speak of; it's probably
both inefficient and incorrect. Maybe this should eventually be
replaced with a real, accepted parsing technique (PEG, parser
generator, etc.).
"""
def __init__(self, string):
self.string = string
self.pos = 0
self.parts = []
# Common parsing resources.
special_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_OPEN, GROUP_CLOSE,
ARG_SEP, ESCAPE_CHAR)
special_char_re = re.compile(ur'[%s]|$' %
u''.join(re.escape(c) for c in special_chars))
def parse_expression(self):
"""Parse a template expression starting at ``pos``. Resulting
components (Unicode strings, Symbols, and Calls) are added to
the ``parts`` field, a list. The ``pos`` field is updated to be
the next character after the expression.
"""
text_parts = []
while self.pos < len(self.string):
char = self.string[self.pos]
if char not in self.special_chars:
# A non-special character. Skip to the next special
# character, treating the interstice as literal text.
next_pos = (
self.special_char_re.search(self.string[self.pos:]).start()
+ self.pos
)
text_parts.append(self.string[self.pos:next_pos])
self.pos = next_pos
continue
if self.pos == len(self.string) - 1:
# The last character can never begin a structure, so we
# just interpret it as a literal character (unless it
# terminates the expression, as with , and }).
if char not in (GROUP_CLOSE, ARG_SEP):
text_parts.append(char)
self.pos += 1
break
next_char = self.string[self.pos + 1]
if char == ESCAPE_CHAR and next_char in \
(SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP):
# An escaped special character ($$, $}, etc.). Note that
# ${ is not an escape sequence: this is ambiguous with
# the start of a symbol and it's not necessary (just
# using { suffices in all cases).
text_parts.append(next_char)
self.pos += 2 # Skip the next character.
continue
# Shift all characters collected so far into a single string.
if text_parts:
self.parts.append(u''.join(text_parts))
text_parts = []
if char == SYMBOL_DELIM:
# Parse a symbol.
self.parse_symbol()
elif char == FUNC_DELIM:
# Parse a function call.
self.parse_call()
elif char in (GROUP_CLOSE, ARG_SEP):
# Template terminated.
break
elif char == GROUP_OPEN:
                # Start of a group has no meaning here; just pass
# through the character.
text_parts.append(char)
self.pos += 1
else:
assert False
# If any parsed characters remain, shift them into a string.
if text_parts:
self.parts.append(u''.join(text_parts))
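    # Illustrative escape behaviour handled above: "$$", "$%", "$}" and "$,"
    # each yield the literal second character, while "${" is deliberately NOT
    # an escape -- it begins a ${symbol} reference.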
def parse_symbol(self):
"""Parse a variable reference (like ``$foo`` or ``${foo}``)
starting at ``pos``. Possibly appends a Symbol object (or,
failing that, text) to the ``parts`` field and updates ``pos``.
The character at ``pos`` must, as a precondition, be ``$``.
"""
assert self.pos < len(self.string)
assert self.string[self.pos] == SYMBOL_DELIM
if self.pos == len(self.string) - 1:
# Last character.
self.parts.append(SYMBOL_DELIM)
self.pos += 1
return
next_char = self.string[self.pos + 1]
start_pos = self.pos
self.pos += 1
if next_char == GROUP_OPEN:
# A symbol like ${this}.
self.pos += 1 # Skip opening.
closer = self.string.find(GROUP_CLOSE, self.pos)
if closer == -1 or closer == self.pos:
# No closing brace found or identifier is empty.
self.parts.append(self.string[start_pos:self.pos])
else:
# Closer found.
ident = self.string[self.pos:closer]
self.pos = closer + 1
self.parts.append(Symbol(ident,
self.string[start_pos:self.pos]))
else:
# A bare-word symbol.
ident = self._parse_ident()
if ident:
# Found a real symbol.
self.parts.append(Symbol(ident,
self.string[start_pos:self.pos]))
else:
# A standalone $.
self.parts.append(SYMBOL_DELIM)
def parse_call(self):
"""Parse a function call (like ``%foo{bar,baz}``) starting at
        ``pos``. Possibly appends a Call object to ``parts`` and updates
``pos``. The character at ``pos`` must be ``%``.
"""
assert self.pos < len(self.string)
assert self.string[self.pos] == FUNC_DELIM
start_pos = self.pos
self.pos += 1
ident = self._parse_ident()
if not ident:
# No function name.
self.parts.append(FUNC_DELIM)
return
if self.pos >= len(self.string):
# Identifier terminates string.
self.parts.append(self.string[start_pos:self.pos])
return
if self.string[self.pos] != GROUP_OPEN:
# Argument list not opened.
self.parts.append(self.string[start_pos:self.pos])
return
# Skip past opening brace and try to parse an argument list.
self.pos += 1
args = self.parse_argument_list()
if self.pos >= len(self.string) or \
self.string[self.pos] != GROUP_CLOSE:
# Arguments unclosed.
self.parts.append(self.string[start_pos:self.pos])
return
self.pos += 1 # Move past closing brace.
self.parts.append(Call(ident, args, self.string[start_pos:self.pos]))
def parse_argument_list(self):
"""Parse a list of arguments starting at ``pos``, returning a
list of Expression objects. Does not modify ``parts``. Should
leave ``pos`` pointing to a } character or the end of the
string.
"""
# Try to parse a subexpression in a subparser.
expressions = []
while self.pos < len(self.string):
subparser = Parser(self.string[self.pos:])
subparser.parse_expression()
# Extract and advance past the parsed expression.
expressions.append(Expression(subparser.parts))
self.pos += subparser.pos
if self.pos >= len(self.string) or \
self.string[self.pos] == GROUP_CLOSE:
# Argument list terminated by EOF or closing brace.
break
# Only other way to terminate an expression is with ,.
# Continue to the next argument.
assert self.string[self.pos] == ARG_SEP
self.pos += 1
return expressions
def _parse_ident(self):
"""Parse an identifier and return it (possibly an empty string).
Updates ``pos``.
"""
remainder = self.string[self.pos:]
ident = re.match(ur'\w*', remainder).group(0)
self.pos += len(ident)
return ident
def _parse(template):
"""Parse a top-level template string Expression. Any extraneous text
is considered literal text.
"""
parser = Parser(template)
parser.parse_expression()
parts = parser.parts
remainder = parser.string[parser.pos:]
if remainder:
parts.append(remainder)
return Expression(parts)
# External interface.
class Template(object):
"""A string template, including text, Symbols, and Calls.
"""
def __init__(self, template):
self.expr = _parse(template)
self.original = template
self.compiled = self.translate()
def __eq__(self, other):
return self.original == other.original
def interpret(self, values={}, functions={}):
"""Like `substitute`, but forces the interpreter (rather than
the compiled version) to be used. The interpreter includes
exception-handling code for missing variables and buggy template
functions but is much slower.
"""
return self.expr.evaluate(Environment(values, functions))
def substitute(self, values={}, functions={}):
"""Evaluate the template given the values and functions.
"""
try:
res = self.compiled(values, functions)
except: # Handle any exceptions thrown by compiled version.
res = self.interpret(values, functions)
return res
def translate(self):
"""Compile the template to a Python function."""
expressions, varnames, funcnames = self.expr.translate()
argnames = []
for varname in varnames:
argnames.append(VARIABLE_PREFIX.encode('utf8') + varname)
for funcname in funcnames:
argnames.append(FUNCTION_PREFIX.encode('utf8') + funcname)
func = compile_func(
argnames,
[ast.Return(ast.List(expressions, ast.Load()))],
)
def wrapper_func(values={}, functions={}):
args = {}
for varname in varnames:
args[VARIABLE_PREFIX + varname] = values[varname]
for funcname in funcnames:
args[FUNCTION_PREFIX + funcname] = functions[funcname]
parts = func(**args)
return u''.join(parts)
return wrapper_func
# Performance tests.
if __name__ == '__main__':
import timeit
_tmpl = Template(u'foo $bar %baz{foozle $bar barzle} $bar')
_vars = {'bar': 'qux'}
_funcs = {'baz': unicode.upper}
interp_time = timeit.timeit('_tmpl.interpret(_vars, _funcs)',
'from __main__ import _tmpl, _vars, _funcs',
number=10000)
print(interp_time)
comp_time = timeit.timeit('_tmpl.substitute(_vars, _funcs)',
'from __main__ import _tmpl, _vars, _funcs',
number=10000)
print(comp_time)
print('Speedup:', interp_time / comp_time)
| mit |
MariaSolovyeva/inasafe | safe/impact_functions/volcanic/volcano_point_building/impact_function.py | 2 | 8807 | # coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Volcano Point on Building
Impact Function.
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from collections import OrderedDict
from safe.impact_functions.bases.classified_vh_classified_ve import \
ClassifiedVHClassifiedVE
from safe.impact_functions.volcanic.volcano_point_building\
.metadata_definitions import VolcanoPointBuildingFunctionMetadata
from safe.storage.vector import Vector
from safe.utilities.i18n import tr
from safe.engine.core import buffer_points
from safe.common.utilities import (
get_thousand_separator,
get_non_conflicting_attribute_name,
get_osm_building_usage)
from safe.engine.interpolation import (
assign_hazard_values_to_exposure_data)
from safe.impact_reports.building_exposure_report_mixin import (
BuildingExposureReportMixin)
from safe.common.exceptions import KeywordNotFoundError
class VolcanoPointBuildingFunction(
ClassifiedVHClassifiedVE,
BuildingExposureReportMixin):
"""Impact Function for Volcano Point on Building."""
_metadata = VolcanoPointBuildingFunctionMetadata()
def __init__(self):
super(VolcanoPointBuildingFunction, self).__init__()
self.volcano_names = tr('Not specified in data')
def notes(self):
"""Return the notes section of the report.
:return: The notes that should be attached to this impact report.
:rtype: list
"""
volcano_names = self.volcano_names
return [
{
'content': tr('Notes'),
'header': True
},
{
'content': tr(
'Map shows buildings affected in each of the '
'volcano buffered zones.')
},
{
'content': tr(
'Only buildings available in OpenStreetMap '
'are considered.')
},
{
'content': tr('Volcanoes considered: %s.') % volcano_names,
'header': True
}
]
def run(self):
"""Counts number of building exposed to each volcano hazard zones.
:returns: Map of building exposed to volcanic hazard zones.
Table with number of buildings affected
:rtype: dict
"""
self.validate()
self.prepare()
# Hazard Zone Attribute
hazard_zone_attribute = 'radius'
# Parameters
radii = self.parameters['distances'].value
# Get parameters from layer's keywords
volcano_name_attribute = self.hazard.keyword('volcano_name_field')
# Try to get the value from keyword, if not exist, it will not fail,
# but use the old get_osm_building_usage
try:
self.exposure_class_attribute = self.exposure.keyword(
'structure_class_field')
except KeywordNotFoundError:
self.exposure_class_attribute = None
# Input checks
if not self.hazard.layer.is_point_data:
message = (
'Input hazard must be a vector point layer. I got %s '
'with layer type %s' % (
self.hazard.name, self.hazard.layer.get_geometry_name()))
raise Exception(message)
# Make hazard layer by buffering the point
centers = self.hazard.layer.get_geometry()
features = self.hazard.layer.get_data()
radii_meter = [x * 1000 for x in radii] # Convert to meters
hazard_layer = buffer_points(
centers,
radii_meter,
hazard_zone_attribute,
data_table=features)
        # Category names for the impact zone. Note: category_names is an
        # alias of radii_meter, so the append below also adds the "not
        # affected" value to radii_meter and hence to the tally categories.
category_names = radii_meter
category_names.append(self._not_affected_value)
# Get names of volcanoes considered
if volcano_name_attribute in hazard_layer.get_attribute_names():
volcano_name_list = set()
for row in hazard_layer.get_data():
# Run through all polygons and get unique names
volcano_name_list.add(row[volcano_name_attribute])
self.volcano_names = ', '.join(volcano_name_list)
# Find the target field name that has no conflict with the attribute
# names in the hazard layer
hazard_attribute_names = hazard_layer.get_attribute_names()
target_field = get_non_conflicting_attribute_name(
self.target_field, hazard_attribute_names)
# Run interpolation function for polygon2polygon
interpolated_layer = assign_hazard_values_to_exposure_data(
hazard_layer, self.exposure.layer)
# Extract relevant interpolated layer data
attribute_names = interpolated_layer.get_attribute_names()
features = interpolated_layer.get_data()
self.buildings = {}
self.affected_buildings = OrderedDict()
for category in radii_meter:
self.affected_buildings[category] = {}
# Iterate the interpolated building layer
for i in range(len(features)):
hazard_value = features[i][hazard_zone_attribute]
if not hazard_value:
hazard_value = self._not_affected_value
features[i][target_field] = hazard_value
# Count affected buildings by usage type if available
if (self.exposure_class_attribute and
self.exposure_class_attribute in attribute_names):
usage = features[i][self.exposure_class_attribute]
else:
usage = get_osm_building_usage(attribute_names, features[i])
            if usage in [None, 'NULL', 'null', 'Null', 0]:
usage = tr('Unknown')
if usage not in self.buildings:
self.buildings[usage] = 0
for category in self.affected_buildings.keys():
self.affected_buildings[category][
usage] = OrderedDict([
(tr('Buildings Affected'), 0)])
self.buildings[usage] += 1
if hazard_value in self.affected_buildings.keys():
self.affected_buildings[hazard_value][usage][
tr('Buildings Affected')] += 1
# Lump small entries and 'unknown' into 'other' category
self._consolidate_to_other()
# Generate simple impact report
impact_summary = impact_table = self.generate_html_report()
# Create style
colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
'#FFCC00', '#FF6600', '#FF0000', '#7A0000']
colours = colours[::-1] # flip
colours = colours[:len(category_names)]
style_classes = []
i = 0
for category_name in category_names:
style_class = dict()
style_class['label'] = tr(category_name)
style_class['transparency'] = 0
style_class['value'] = category_name
style_class['size'] = 1
if i >= len(category_names):
i = len(category_names) - 1
style_class['colour'] = colours[i]
i += 1
style_classes.append(style_class)
# Override style info with new classes and name
style_info = dict(target_field=target_field,
style_classes=style_classes,
style_type='categorizedSymbol')
# For printing map purpose
map_title = tr('Buildings affected by volcanic buffered point')
legend_notes = tr('Thousand separator is represented by %s' %
get_thousand_separator())
legend_units = tr('(building)')
legend_title = tr('Building count')
# Create vector layer and return
impact_layer = Vector(
data=features,
projection=interpolated_layer.get_projection(),
geometry=interpolated_layer.get_geometry(),
name=tr('Buildings affected by volcanic buffered point'),
keywords={'impact_summary': impact_summary,
'impact_table': impact_table,
'target_field': target_field,
'map_title': map_title,
'legend_notes': legend_notes,
'legend_units': legend_units,
'legend_title': legend_title},
style_info=style_info)
self._impact = impact_layer
return impact_layer
| gpl-3.0 |
somehume/wavefu | src/python/api/django_oauth.py | 9 | 4076 | #!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filter and decorator to wave enable django sites.
If you want to require wave authentication for every handler, just add
WaveOAuthMiddleware to your middleware. If you only want to require
authentication for specific handlers, decorate those with @waveoauth.
In any wave authenticated handler the request object should have a
waveservice field that can be used to talk to wave.
You can specify the following in your settings:
WAVE_CONSUMER_KEY: the consumer key passed to the waveservice. defaults to
anonymous if not set.
WAVE_CONSUMER_SECRET: the consumer secret passed to the waveservice. defaults to
anonymous if not set.
WAVE_USE_SANDBOX: whether to use the sandbox for this app. Defaults to false.
"""
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate
import base64
import logging
from functools import wraps
import waveservice
class WaveOAuthMiddleware(object):
"""Wave middleware to authenticate all requests at this site."""
def process_request(self, request):
return _oauth_helper(request)
def waveoauth(func):
"""Decorator used to specify that a handler requires wave authentication."""
@wraps(func)
def inner(request, *args, **kwargs):
result = _oauth_helper(request)
if result is not None:
return result
return func(request, *args, **kwargs)
return inner
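# Illustrative handler using the decorator (view name and body are
# hypothetical):
#
#   @waveoauth
#   def my_view(request):
#     # request.waveservice is an authenticated waveservice.WaveService
#     ...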
def _oauth_helper(request):
"Check if we're authenticated and if not, execute the oauth dance."
consumer_key = getattr(settings, 'WAVE_CONSUMER_KEY', 'anonymous')
consumer_secret = getattr(settings, 'WAVE_CONSUMER_SECRET', 'anonymous')
use_sandbox = getattr(settings, 'WAVE_USE_SANDBOX', False)
service = waveservice.WaveService(
consumer_key=consumer_key, consumer_secret=consumer_secret, use_sandbox=use_sandbox)
access_token = request.COOKIES.get('WAVE_ACCESS_TOKEN')
if access_token:
service.set_access_token(access_token)
request.waveservice = service
return None
# no access token. dance monkey dance.
oauth_token = request.GET.get('oauth_token')
verifier = request.GET.get('oauth_verifier')
request_token = request.COOKIES.get('WAVE_REQUEST_TOKEN')
meta = request.META
# you'd think there would be something better than this madness:
this_url = meta.get('HTTP_HOST')
if not this_url:
this_url = meta.get('SERVER_NAME')
    port = meta.get('SERVER_PORT')
if port:
this_url += ':' + port
this_url += request.path
schema = meta.get('wsgi.url_scheme', 'http')
this_url = schema + '://' + this_url
if not oauth_token or not verifier or not request_token:
# we're here not returning from a callback. Start.
request_token = service.fetch_request_token(callback=this_url)
auth_url = service.generate_authorization_url()
response = HttpResponseRedirect(auth_url)
# set a session cookie
response.set_cookie('WAVE_REQUEST_TOKEN', request_token.to_string())
return response
else:
logging.info('upgrading to access token')
access_token = service.upgrade_to_access_token(request_token=request_token,
verifier=verifier)
# This redirect could be avoided if the caller would set the cookie. This way
# however we keep the cgi arguments clean.
response = HttpResponseRedirect(this_url)
response.set_cookie('WAVE_ACCESS_TOKEN', access_token.to_string(), max_age=24*3600*365)
return response
| apache-2.0 |
GeyerA/android_external_chromium_org | build/extract_from_cab.py | 51 | 2058 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extracts a single file from a CAB archive."""
import os
import shutil
import subprocess
import sys
import tempfile
def run_quiet(*args):
"""Run 'expand' supressing noisy output. Returns returncode from process."""
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
out, _ = popen.communicate()
if popen.returncode:
# expand emits errors to stdout, so if we fail, then print that out.
print out
return popen.returncode
def main():
if len(sys.argv) != 4:
print 'Usage: extract_from_cab.py cab_path archived_file output_dir'
return 1
[cab_path, archived_file, output_dir] = sys.argv[1:]
# Expand.exe does its work in a fixed-named temporary directory created within
# the given output directory. This is a problem for concurrent extractions, so
# create a unique temp dir within the desired output directory to work around
# this limitation.
temp_dir = tempfile.mkdtemp(dir=output_dir)
try:
# Invoke the Windows expand utility to extract the file.
level = run_quiet('expand', cab_path, '-F:' + archived_file, temp_dir)
if level == 0:
# Move the output file into place, preserving expand.exe's behavior of
# paving over any preexisting file.
output_file = os.path.join(output_dir, archived_file)
try:
os.remove(output_file)
except OSError:
pass
os.rename(os.path.join(temp_dir, archived_file), output_file)
finally:
shutil.rmtree(temp_dir, True)
if level != 0:
return level
# The expand utility preserves the modification date and time of the archived
# file. Touch the extracted file. This helps build systems that compare the
# modification times of input and output files to determine whether to do an
# action.
os.utime(os.path.join(output_dir, archived_file), None)
return 0
if __name__ == '__main__':
sys.exit(main())
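# Illustrative invocation (all three arguments are hypothetical):
#   python extract_from_cab.py installer.cab payload.dll C:\out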
| bsd-3-clause |
Johnetordoff/osf.io | api_tests/providers/preprints/views/test_preprint_provider_list.py | 13 | 3037 | import mock
import pytest
from waffle.testutils import override_switch
from osf import features
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
AuthUserFactory,
PreprintProviderFactory,
)
@pytest.fixture(params=['/{}preprint_providers/?version=2.2&', '/{}providers/preprints/?version=2.2&'])
def url(request):
url = (request.param)
return url.format(API_BASE)
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.fixture()
def provider_one():
return PreprintProviderFactory(_id='sock', name='Sockarxiv')
@pytest.fixture()
def provider_two():
provider = PreprintProviderFactory(name='Spotarxiv')
provider.allow_submissions = False
provider.domain = 'https://www.spotarxiv.com'
provider.description = 'spots not dots'
provider.domain_redirect_enabled = True
provider._id = 'spot'
provider.share_publish_type = 'Thesis'
provider.save()
return provider
@pytest.mark.django_db
class TestPreprintProviderList:
def test_preprint_provider_list(
self, app, url, user, provider_one, provider_two):
# Test length and not auth
res = app.get(url)
assert res.status_code == 200
assert len(res.json['data']) == 2
# Test length and auth
res = app.get(url, auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']) == 2
@pytest.mark.parametrize('filter_type,filter_value', [
('allow_submissions', True),
('description', 'spots%20not%20dots'),
('domain', 'https://www.spotarxiv.com'),
('domain_redirect_enabled', True),
('id', 'spot'),
('name', 'Spotarxiv'),
('share_publish_type', 'Thesis'),
])
def test_preprint_provider_list_filtering(
self, filter_type, filter_value, app, url,
provider_one, provider_two):
res = app.get('{}filter[{}]={}'.format(
url, filter_type, filter_value))
assert res.status_code == 200
assert len(res.json['data']) == 1
@pytest.mark.django_db
class TestPreprintProviderListWithMetrics:
# enable the ELASTICSEARCH_METRICS switch for all tests
@pytest.fixture(autouse=True)
def enable_elasticsearch_metrics(self):
with override_switch(features.ELASTICSEARCH_METRICS, active=True):
yield
def test_preprint_provider_list_with_metrics(self, app, url, provider_one, provider_two):
provider_one.downloads = 41
provider_two.downloads = 42
with mock.patch('api.preprints.views.PreprintDownload.get_top_by_count') as mock_get_top_by_count:
mock_get_top_by_count.return_value = [provider_one, provider_two]
res = app.get(url + 'metrics[downloads]=total')
assert res.status_code == 200
provider_2_data = res.json['data'][0]
            assert provider_2_data['meta']['metrics']['downloads'] == 42
provider_1_data = res.json['data'][1]
            assert provider_1_data['meta']['metrics']['downloads'] == 41
| apache-2.0 |
wireservice/agate | tests/test_py3.py | 1 | 7971 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import six
import os
try:
import unittest2 as unittest
except ImportError:
import unittest
from agate import csv_py3
from agate.exceptions import FieldSizeLimitError
@unittest.skipIf(six.PY2, "Not supported in Python 2.")
class TestReader(unittest.TestCase):
def setUp(self):
self.rows = [
['number', 'text', 'boolean', 'date', 'datetime', 'timedelta'],
['1', 'a', 'True', '2015-11-04', '2015-11-04T12:22:00', '0:04:15'],
['2', '👍', 'False', '2015-11-05', '2015-11-04T12:45:00', '0:06:18'],
['', 'b', '', '', '', '']
]
def test_utf8(self):
with open('examples/test.csv', encoding='utf-8') as f:
rows = list(csv_py3.Reader(f))
for a, b in zip(self.rows, rows):
self.assertEqual(a, b)
def test_reader_alias(self):
with open('examples/test.csv', encoding='utf-8') as f:
rows = list(csv_py3.reader(f))
for a, b in zip(self.rows, rows):
self.assertEqual(a, b)
def test_properties(self):
with open('examples/test.csv', encoding='utf-8') as f:
reader = csv_py3.Reader(f)
self.assertEqual(reader.dialect.delimiter, ',')
self.assertEqual(reader.line_num, 0)
next(reader)
self.assertEqual(reader.line_num, 1)
def test_line_numbers(self):
with open('examples/test.csv', encoding='utf-8') as f:
rows = list(csv_py3.Reader(f, line_numbers=True))
sample_rows = [
['line_numbers', 'number', 'text', 'boolean', 'date', 'datetime', 'timedelta'],
['1', '1', 'a', 'True', '2015-11-04', '2015-11-04T12:22:00', '0:04:15'],
['2', '2', u'👍', 'False', '2015-11-05', '2015-11-04T12:45:00', '0:06:18'],
['3', '', 'b', '', '', '', '']
]
for a, b in zip(sample_rows, rows):
self.assertEqual(a, b)
@unittest.skipIf(six.PY2, "Not supported in Python 2.")
class TestFieldSizeLimit(unittest.TestCase):
def setUp(self):
self.lim = csv.field_size_limit()
with open('.test.csv', 'w', encoding='utf-8') as f:
f.write('a' * 10)
def tearDown(self):
# Resetting limit to avoid failure in other tests.
csv.field_size_limit(self.lim)
os.remove('.test.csv')
def test_field_size_limit(self):
# Testing field_size_limit for failure. Creating data using str * int.
with open('.test.csv', 'r', encoding='utf-8') as f:
c = csv_py3.Reader(f, field_size_limit=9)
try:
c.__next__()
except FieldSizeLimitError:
pass
else:
raise AssertionError('Expected FieldSizeLimitError')
# Now testing higher field_size_limit.
with open('.test.csv', 'r', encoding='utf-8') as f:
c = csv_py3.Reader(f, field_size_limit=11)
self.assertEqual(['a' * 10], c.__next__())
@unittest.skipIf(six.PY2, "Not supported in Python 2.")
class TestWriter(unittest.TestCase):
def test_utf8(self):
output = six.StringIO()
writer = csv_py3.Writer(output)
writer.writerow(['a', 'b', 'c'])
writer.writerow(['1', '2', '3'])
writer.writerow(['4', '5', u'ʤ'])
written = six.StringIO(output.getvalue())
reader = csv_py3.Reader(written)
self.assertEqual(next(reader), ['a', 'b', 'c'])
self.assertEqual(next(reader), ['1', '2', '3'])
self.assertEqual(next(reader), ['4', '5', u'ʤ'])
def test_writer_alias(self):
output = six.StringIO()
writer = csv_py3.writer(output)
writer.writerow(['a', 'b', 'c'])
writer.writerow(['1', '2', '3'])
writer.writerow(['4', '5', u'ʤ'])
written = six.StringIO(output.getvalue())
reader = csv_py3.reader(written)
self.assertEqual(next(reader), ['a', 'b', 'c'])
self.assertEqual(next(reader), ['1', '2', '3'])
self.assertEqual(next(reader), ['4', '5', u'ʤ'])
def test_line_numbers(self):
output = six.StringIO()
writer = csv_py3.Writer(output, line_numbers=True)
writer.writerow(['a', 'b', 'c'])
writer.writerow(['1', '2', '3'])
writer.writerow(['4', '5', u'ʤ'])
written = six.StringIO(output.getvalue())
reader = csv_py3.Reader(written)
self.assertEqual(next(reader), ['line_number', 'a', 'b', 'c'])
self.assertEqual(next(reader), ['1', '1', '2', '3'])
self.assertEqual(next(reader), ['2', '4', '5', u'ʤ'])
def test_writerows(self):
output = six.StringIO()
writer = csv_py3.Writer(output)
writer.writerows([
['a', 'b', 'c'],
['1', '2', '3'],
['4', '5', u'ʤ']
])
written = six.StringIO(output.getvalue())
reader = csv_py3.Reader(written)
self.assertEqual(next(reader), ['a', 'b', 'c'])
self.assertEqual(next(reader), ['1', '2', '3'])
self.assertEqual(next(reader), ['4', '5', u'ʤ'])
@unittest.skipIf(six.PY2, "Not supported in Python 2.")
class TestDictReader(unittest.TestCase):
def setUp(self):
self.rows = [
['number', 'text', 'boolean', 'date', 'datetime', 'timedelta'],
['1', 'a', 'True', '2015-11-04', '2015-11-04T12:22:00', '0:04:15'],
['2', '👍', 'False', '2015-11-05', '2015-11-04T12:45:00', '0:06:18'],
['', 'b', '', '', '', '']
]
self.f = open('examples/test.csv', encoding='utf-8')
def tearDown(self):
self.f.close()
def test_reader(self):
reader = csv_py3.DictReader(self.f)
self.assertEqual(next(reader), dict(zip(self.rows[0], self.rows[1])))
def test_reader_alias(self):
reader = csv_py3.DictReader(self.f)
self.assertEqual(next(reader), dict(zip(self.rows[0], self.rows[1])))
@unittest.skipIf(six.PY2, "Not supported in Python 2.")
class TestDictWriter(unittest.TestCase):
def setUp(self):
self.output = six.StringIO()
def tearDown(self):
self.output.close()
def test_writer(self):
writer = csv_py3.DictWriter(self.output, ['a', 'b', 'c'])
writer.writeheader()
writer.writerow({
u'a': u'1',
u'b': u'2',
u'c': u'☃'
})
result = self.output.getvalue()
self.assertEqual(result, 'a,b,c\n1,2,☃\n')
def test_writer_alias(self):
writer = csv_py3.DictWriter(self.output, ['a', 'b', 'c'])
writer.writeheader()
writer.writerow({
u'a': u'1',
u'b': u'2',
u'c': u'☃'
})
result = self.output.getvalue()
self.assertEqual(result, 'a,b,c\n1,2,☃\n')
def test_line_numbers(self):
writer = csv_py3.DictWriter(self.output, ['a', 'b', 'c'], line_numbers=True)
writer.writeheader()
writer.writerow({
u'a': u'1',
u'b': u'2',
u'c': u'☃'
})
result = self.output.getvalue()
self.assertEqual(result, 'line_number,a,b,c\n1,1,2,☃\n')
def test_writerows(self):
writer = csv_py3.DictWriter(self.output, ['a', 'b', 'c'], line_numbers=True)
writer.writeheader()
writer.writerows([{
u'a': u'1',
u'b': u'2',
u'c': u'☃'
}])
result = self.output.getvalue()
self.assertEqual(result, 'line_number,a,b,c\n1,1,2,☃\n')
@unittest.skipIf(six.PY2, "Not supported in Python 2.")
class TestSniffer(unittest.TestCase):
def setUp(self):
pass
def test_sniffer(self):
with open('examples/test.csv', encoding='utf-8') as f:
contents = f.read()
self.assertEqual(csv_py3.Sniffer().sniff(contents).__dict__, csv.Sniffer().sniff(contents).__dict__)
| mit |
otsaloma/pan-bikes | providers/test/test_hsl.py | 1 | 1746 | # -*- coding: utf-8 -*-
# Copyright (C) 2017 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import pan.test
class TestModule(pan.test.TestCase):
def setup_method(self, method):
self.provider = pan.Provider("hsl")
self.month = datetime.datetime.today().month
def test_list_networks(self):
if not 5 <= self.month <= 10: return
networks = self.provider.list_networks()
networks = list(map(pan.AttrDict, networks))
assert networks
for network in networks:
assert network.city
assert network.country
assert network.id
assert network.x
assert network.y
def test_list_stations(self):
if not 5 <= self.month <= 10: return
stations = self.provider.list_stations("hsl")
stations = list(map(pan.AttrDict, stations))
assert stations
for station in stations:
assert station.empty_slots >= 0
assert station.free_bikes >= 0
assert station.id
assert station.name
assert station.x
assert station.y
| gpl-3.0 |
ecosoft-odoo/odoo | openerp/report/render/rml.py | 457 | 3244 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import render
import rml2pdf
import rml2html as htmlizer
import rml2txt as txtizer
import odt2odt as odt
import html2html as html
import makohtml2html as makohtml
class rml(render.render):
def __init__(self, rml, localcontext = None, datas=None, path='.', title=None):
render.render.__init__(self, datas, path)
self.localcontext = localcontext
self.rml = rml
self.output_type = 'pdf'
self.title=title
def _render(self):
return rml2pdf.parseNode(self.rml, self.localcontext, images=self.bin_datas, path=self.path,title=self.title)
class rml2html(render.render):
def __init__(self, rml,localcontext = None, datas=None):
super(rml2html, self).__init__(datas)
self.rml = rml
self.localcontext = localcontext
self.output_type = 'html'
def _render(self):
return htmlizer.parseString(self.rml,self.localcontext)
class rml2txt(render.render):
def __init__(self, rml, localcontext= None, datas=None):
super(rml2txt, self).__init__(datas)
self.rml = rml
self.localcontext = localcontext
self.output_type = 'txt'
def _render(self):
return txtizer.parseString(self.rml, self.localcontext)
class odt2odt(render.render):
def __init__(self, rml, localcontext=None, datas=None):
render.render.__init__(self, datas)
self.rml_dom = rml
self.localcontext = localcontext
self.output_type = 'odt'
def _render(self):
return odt.parseNode(self.rml_dom,self.localcontext)
class html2html(render.render):
def __init__(self, rml, localcontext=None, datas=None):
render.render.__init__(self, datas)
self.rml_dom = rml
self.localcontext = localcontext
self.output_type = 'html'
def _render(self):
return html.parseString(self.rml_dom,self.localcontext)
class makohtml2html(render.render):
def __init__(self, html, localcontext = None):
render.render.__init__(self)
self.html = html
self.localcontext = localcontext
self.output_type = 'html'
def _render(self):
return makohtml.parseNode(self.html,self.localcontext)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sarakha63/persomov | libs/git/repository.py | 109 | 20401 | # Copyright (c) 2009, Rotem Yaari <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import Sequence
import re
import os
import subprocess
import sys
from . import branch
from . import tag
from . import commit
from . import config
from .files import ModifiedFile
from . import ref
from . import ref_container
from . import remotes
from .utils import quote_for_shell
from .utils import CommandString as CMD
#exceptions
from .exceptions import CannotFindRepository
from .exceptions import GitException
from .exceptions import GitCommandFailedException
from .exceptions import MergeConflict
from .exceptions import NonexistentRefException
BRANCH_ALIAS_MARKER = ' -> '
class Repository(ref_container.RefContainer):
_git_command = None
def setCommand(self, command):
self._git_command = command
############################# internal methods #############################
_loggingEnabled = False
def _getWorkingDirectory(self):
return '.'
def _logGitCommand(self, command, cwd):
if self._loggingEnabled:
print >> sys.stderr, ">>", command
def enableLogging(self):
self._loggingEnabled = True
def disableLogging(self):
self._loggingEnabled = False
def _executeGitCommand(self, command, cwd = None):
if cwd is None:
cwd = self._getWorkingDirectory()
command = '%s %s' % (self._git_command, str(command))
self._logGitCommand(command, cwd)
returned = subprocess.Popen(command,
shell = True,
cwd = cwd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
returned.wait()
return returned
def _executeGitCommandAssertSuccess(self, command, **kwargs):
returned = self._executeGitCommand(command, **kwargs)
assert returned.returncode is not None
if returned.returncode != 0:
raise GitCommandFailedException(kwargs.get('cwd', self._getWorkingDirectory()), command, returned)
return returned
def _getOutputAssertSuccess(self, command, **kwargs):
return self._executeGitCommandAssertSuccess(command, **kwargs).stdout.read()
def _getMergeBase(self, a, b):
raise NotImplementedError()
def getMergeBase(self, a, b):
repo = self
if isinstance(b, commit.Commit) and isinstance(b.repo, LocalRepository):
repo = b.repo
elif isinstance(a, commit.Commit) and isinstance(a.repo, LocalRepository):
repo = a.repo
return repo._getMergeBase(a, b)
############################## remote repositories #############################
class RemoteRepository(Repository):
def __init__(self, url, command = 'git'):
self.setCommand(command)
super(RemoteRepository, self).__init__()
self.url = url
def _getRefs(self, prefix = ''):
output = self._executeGitCommandAssertSuccess("ls-remote %s" % (self.url,))
for output_line in output.stdout:
            sha, refname = output_line.split()  # 'sha' avoids shadowing the commit module
            if refname.startswith(prefix):
                yield refname[len(prefix):], sha.strip()
def _getRefsAsClass(self, prefix, cls):
return [cls(self, ref) for ref, _ in self._getRefs(prefix)]
def _getCommitByRefName(self, refname):
sha_by_ref = dict(self._getRefs())
for prefix in 'refs/tags/', 'refs/heads/':
sha = sha_by_ref.get(prefix + refname, None)
if sha is not None:
return commit.Commit(self, sha)
raise NonexistentRefException("Cannot find ref name %r in %s" % (refname, self))
def getBranches(self):
return self._getRefsAsClass('refs/heads/', branch.RemoteBranch)
def getTags(self):
return self._getRefsAsClass('refs/tags/', tag.RemoteTag)
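# Illustrative use of a remote repository (URL is hypothetical):
#   remote = RemoteRepository('git://example.com/project.git')
#   branches = remote.getBranches()   # branch.RemoteBranch objects via ls-remote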
############################## local repositories ##############################
class LocalRepository(Repository):
def __init__(self, path, command = 'git'):
self.setCommand(command)
super(LocalRepository, self).__init__()
self.path = path
self.config = config.GitConfiguration(self)
self._version = None
def __repr__(self):
return "<Git Repository at %s>" % (self.path,)
def _getWorkingDirectory(self):
return self.path
def _getCommitByHash(self, sha):
return commit.Commit(self, sha)
def _getCommitByRefName(self, name):
return commit.Commit(self, self._getOutputAssertSuccess("rev-parse %s" % name).strip())
def _getCommitByPartialHash(self, sha):
return self._getCommitByRefName(sha)
def getGitVersion(self):
if self._version is None:
version_output = self._getOutputAssertSuccess("version")
version_match = re.match(r"git\s+version\s+(\S+)[\s\(]?", version_output, re.I)
if version_match is None:
raise GitException("Cannot extract git version (unfamiliar output format %r?)" % version_output)
self._version = version_match.group(1)
return self._version
########################### Initializing a repository ##########################
def init(self, bare = False):
if not os.path.exists(self.path):
os.mkdir(self.path)
if not os.path.isdir(self.path):
raise GitException("Cannot create repository in %s - "
"not a directory" % self.path)
self._executeGitCommandAssertSuccess("init %s" % ("--bare" if bare else ""))
def _asURL(self, repo):
if isinstance(repo, LocalRepository):
repo = repo.path
elif isinstance(repo, RemoteRepository):
repo = repo.url
elif not isinstance(repo, basestring):
raise TypeError("Cannot clone from %r" % (repo,))
return repo
def clone(self, repo):
self._executeGitCommandAssertSuccess("clone %s %s" % (self._asURL(repo), self.path), cwd = ".")
########################### Querying repository refs ###########################
def getBranches(self):
returned = []
for git_branch_line in self._executeGitCommandAssertSuccess("branch").stdout:
if git_branch_line.startswith("*"):
git_branch_line = git_branch_line[1:]
git_branch_line = git_branch_line.strip()
if BRANCH_ALIAS_MARKER in git_branch_line:
alias_name, aliased = git_branch_line.split(BRANCH_ALIAS_MARKER)
returned.append(branch.LocalBranchAlias(self, alias_name, aliased))
else:
returned.append(branch.LocalBranch(self, git_branch_line))
return returned
def getTags(self):
returned = []
for git_tag_line in self._executeGitCommandAssertSuccess("tag").stdout:
returned.append(tag.LocalTag(self, git_tag_line.strip()))
return returned
def _getCommits(self, specs, includeMerges):
command = "log --pretty=format:%%H %s" % specs
if not includeMerges:
command += " --no-merges"
for c in self._executeGitCommandAssertSuccess(command).stdout:
yield commit.Commit(self, c.strip())
def getCommits(self, start = None, end = "HEAD", includeMerges = True):
spec = self._normalizeRefName(start or "")
spec += ".."
spec += self._normalizeRefName(end)
return list(self._getCommits(spec, includeMerges = includeMerges))
def getCurrentBranch(self):
# TODO: improve this method of obtaining the current branch
for branch_name in self._executeGitCommandAssertSuccess("branch").stdout:
branch_name = branch_name.strip()
if not branch_name.startswith("*"):
continue
branch_name = branch_name[1:].strip()
if branch_name == '(no branch)':
return None
return self.getBranchByName(branch_name)
def getRemotes(self):
config_dict = self.config.getDict()
returned = []
for line in self._getOutputAssertSuccess("remote show -n").splitlines():
line = line.strip()
returned.append(remotes.Remote(self, line, config_dict.get('remote.%s.url' % line)))
return returned
def getRemoteByName(self, name):
return self._getByName(self.getRemotes, name)
def _getMergeBase(self, a, b):
if isinstance(a, ref.Ref):
a = a.getHead()
if isinstance(b, ref.Ref):
b = b.getHead()
returned = self._executeGitCommand("merge-base %s %s" % (a, b))
if returned.returncode == 0:
return commit.Commit(self, returned.stdout.read().strip())
# make sure this is not a misc. error with git
unused = self.getHead()
return None
################################ Querying Status ###############################
def containsCommit(self, commit):
try:
self._executeGitCommandAssertSuccess("log -1 %s" % (commit,))
except GitException:
return False
return True
def getHead(self):
return self._getCommitByRefName("HEAD")
def _getFiles(self, *flags):
flags = ["--exclude-standard"] + list(flags)
return [f.strip()
for f in self._getOutputAssertSuccess("ls-files %s" % (" ".join(flags))).splitlines()]
def _getRawDiff(self, *flags, **options):
match_statuses = options.pop('fileStatuses', None)
if match_statuses is not None and not isinstance(match_statuses, Sequence):
raise ValueError("fileStatuses must be a sequence")
if options:
raise TypeError("Unknown arguments specified: %s" % ", ".join(options))
flags = " ".join(str(f) for f in flags)
modified_files = []
for line in self._getOutputAssertSuccess("diff --raw %s" % flags).splitlines():
parts = line.split()
file_status = parts[-2]
file_name = parts[-1]
if match_statuses is None or file_status in match_statuses:
modified_files.append(ModifiedFile(file_name))
return modified_files
def getStagedFiles(self):
if self.isInitialized():
return self._getRawDiff('--cached')
return self._getFiles()
def getUnchangedFiles(self):
return self._getFiles()
def getChangedFiles(self):
return self._getRawDiff()
def getDeletedFiles(self):
return self._getRawDiff(fileStatuses = ['D'])
def getUntrackedFiles(self):
return self._getFiles("--others")
def isInitialized(self):
try:
self.getHead()
return True
except GitException:
return False
def isValid(self):
return os.path.isdir(os.path.join(self.path, ".git")) or \
(os.path.isfile(os.path.join(self.path, "HEAD")) and os.path.isdir(os.path.join(self.path, "objects")))
def isWorkingDirectoryClean(self):
return not (self.getUntrackedFiles() or self.getChangedFiles() or self.getStagedFiles())
def __contains__(self, thing):
if isinstance(thing, (basestring, commit.Commit)):
return self.containsCommit(thing)
raise NotImplementedError()
################################ Staging content ###############################
def add(self, path):
self._executeGitCommandAssertSuccess("add %s" % quote_for_shell(path))
def delete(self, path, recursive = False, force = False):
flags = ""
if recursive:
flags += "-r "
if force:
flags += "-f "
self._executeGitCommandAssertSuccess("rm %s%s" % (flags, quote_for_shell(path)))
def addAll(self):
return self.add('.')
################################## Committing ##################################
def _normalizeRefName(self, thing):
if isinstance(thing, ref.Ref):
thing = thing.getNormalizedName()
return str(thing)
def _deduceNewCommitFromCommitOutput(self, output):
for pattern in [
# new-style commit pattern
r"^\[\S+\s+(?:\(root-commit\)\s+)?(\S+)\]",
]:
match = re.search(pattern, output)
if match:
return commit.Commit(self, match.group(1))
return None
def commit(self, message, allowEmpty = False, commitAll = False):
args = ''
if commitAll:
args = args + '--all'
command = "commit %s -m %s" % (args, quote_for_shell(message))
if allowEmpty:
command += " --allow-empty"
output = self._getOutputAssertSuccess(command)
return self._deduceNewCommitFromCommitOutput(output)
################################ Changing state ################################
def _createBranchOrTag(self, objname, name, startingPoint, returned_class):
command = "%s %s " % (objname, name)
if startingPoint is not None:
command += self._normalizeRefName(startingPoint)
self._executeGitCommandAssertSuccess(command)
return returned_class(self, name)
def createBranch(self, name, startingPoint = None):
return self._createBranchOrTag('branch', name, startingPoint, branch.LocalBranch)
def createTag(self, name, startingPoint = None):
return self._createBranchOrTag('tag', name, startingPoint, tag.LocalTag)
def checkout(self, thing = None, targetBranch = None, files = ()):
if thing is None:
thing = ""
command = "checkout %s" % (self._normalizeRefName(thing),)
if targetBranch is not None:
command += " -b %s" % (targetBranch,)
if files:
command += " -- %s" % " ".join(files)
self._executeGitCommandAssertSuccess(command)
def mergeMultiple(self, srcs, allowFastForward = True, log = False, message = None):
try:
self._executeGitCommandAssertSuccess(CMD("merge",
" ".join(self._normalizeRefName(src) for src in srcs),
"--no-ff" if not allowFastForward else None,
"--log" if log else None,
("-m \"%s\"" % message) if message is not None else None))
except GitCommandFailedException, e:
# git-merge tends to ignore the stderr rule...
output = e.stdout + e.stderr
if 'conflict' in output.lower():
raise MergeConflict()
raise
def merge(self, src, *args, **kwargs):
return self.mergeMultiple([src], *args, **kwargs)
def _reset(self, flag, thing):
command = "reset %s %s" % (
flag,
self._normalizeRefName(thing))
self._executeGitCommandAssertSuccess(command)
def resetSoft(self, thing = "HEAD"):
return self._reset("--soft", thing)
def resetHard(self, thing = "HEAD"):
return self._reset("--hard", thing)
def resetMixed(self, thing = "HEAD"):
return self._reset("--mixed", thing)
def _clean(self, flags):
self._executeGitCommandAssertSuccess("clean -q " + flags)
def cleanIgnoredFiles(self):
"""Cleans files that match the patterns in .gitignore"""
return self._clean("-f -X")
def cleanUntrackedFiles(self):
return self._clean("-f -d")
################################# collaboration ################################
def addRemote(self, name, url):
self._executeGitCommandAssertSuccess("remote add %s %s" % (name, url))
return remotes.Remote(self, name, url)
def fetch(self, repo = None):
command = "fetch"
if repo is not None:
command += " "
command += self._asURL(repo)
self._executeGitCommandAssertSuccess(command)
def pull(self, repo = None):
command = "pull"
if repo is not None:
command += " "
command += self._asURL(repo)
self._executeGitCommandAssertSuccess(command)
def _getRefspec(self, fromBranch = None, toBranch = None, force = False):
returned = ""
if fromBranch is not None:
returned += self._normalizeRefName(fromBranch)
if returned or toBranch is not None:
returned += ":"
if toBranch is not None:
if isinstance(toBranch, branch.RegisteredRemoteBranch):
toBranch = toBranch.name
returned += self._normalizeRefName(toBranch)
if returned and force:
returned = "+%s" % returned
return returned
def push(self, remote = None, fromBranch = None, toBranch = None, force = False):
command = "push"
#build push arguments
refspec = self._getRefspec(toBranch = toBranch, fromBranch = fromBranch, force = force)
if refspec and not remote:
remote = "origin"
if isinstance(remote, remotes.Remote):
remote = remote.name
elif isinstance(remote, RemoteRepository):
remote = remote.url
elif isinstance(remote, LocalRepository):
remote = remote.path
if remote is not None and not isinstance(remote, basestring):
raise TypeError("Invalid type for 'remote' parameter: %s" % (type(remote),))
command = "push %s %s" % (remote if remote is not None else "", refspec)
self._executeGitCommandAssertSuccess(command)
def rebase(self, src):
self._executeGitCommandAssertSuccess("rebase %s" % self._normalizeRefName(src))
#################################### Stashes ###################################
def saveStash(self, name = None):
command = "stash save"
if name is not None:
command += " %s" % name
self._executeGitCommandAssertSuccess(command)
def popStash(self, arg = None):
command = "stash pop"
if arg is not None:
command += " %s" % arg
self._executeGitCommandAssertSuccess(command)
################################# Configuration ################################
################################### Shortcuts ##################################
def clone(source, location):
returned = LocalRepository(location)
returned.clone(source)
return returned
def find_repository():
orig_path = path = os.path.realpath('.')
drive, path = os.path.splitdrive(path)
while path:
current_path = os.path.join(drive, path)
current_repo = LocalRepository(current_path)
if current_repo.isValid():
return current_repo
path, path_tail = os.path.split(current_path)
if not path_tail:
raise CannotFindRepository("Cannot find repository for %s" % (orig_path,))
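# A minimal usage sketch of the API above (the path, branch name and commit
# message are hypothetical; assumes the classes defined in this module):
#
#   repo = LocalRepository("/tmp/example")
#   repo.init()
#   repo.addAll()
#   repo.commit("initial commit")
#   feature = repo.createBranch("feature", startingPoint="HEAD")
#   repo.checkout(feature)
#   repo.getHead()            # -> Commit object at the new branch tip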
| gpl-3.0 |
QingChenmsft/azure-cli | src/command_modules/azure-cli-component/azure/cli/command_modules/component/custom.py | 4 | 8044 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import site
import logging
from six import StringIO
from azure.cli.core.util import CLIError
from azure.cli.core._config import az_config
import azure.cli.core.azlogging as azlogging
logger = azlogging.get_az_logger(__name__)
CLI_PACKAGE_NAME = 'azure-cli'
COMPONENT_PREFIX = 'azure-cli-'
def _deprecate_warning():
logger.warning("The 'component' commands will be deprecated in the future.")
def _verify_not_dev():
from azure.cli.core import __version__ as core_version
dev_version = core_version.endswith('+dev')
if dev_version:
raise CLIError('This operation is not available in the developer version of the CLI.')
def list_components():
""" List the installed components """
_deprecate_warning()
_verify_not_dev()
import pip
return sorted([{'name': dist.key.replace(COMPONENT_PREFIX, ''), 'version': dist.version}
for dist in pip.get_installed_distributions(local_only=True)
if dist.key.startswith(COMPONENT_PREFIX)], key=lambda x: x['name'])
def _get_first_party_pypi_command_modules():
try:
import xmlrpclib
except ImportError:
import xmlrpc.client as xmlrpclib # pylint: disable=import-error
results = []
client = xmlrpclib.ServerProxy('https://pypi.python.org/pypi')
pypi_hits = client.search({'author': 'Microsoft Corporation', 'author_email': 'azpycli'})
for hit in pypi_hits:
if hit['name'].startswith(COMPONENT_PREFIX):
comp_name = hit['name'].replace(COMPONENT_PREFIX, '')
results.append({
'name': comp_name,
'summary': hit['summary'],
'version': hit['version']
})
return results
def list_available_components():
""" List publicly available components that can be installed """
_deprecate_warning()
_verify_not_dev()
import pip
available_components = []
installed_component_names = [dist.key.replace(COMPONENT_PREFIX, '') for dist in
pip.get_installed_distributions(local_only=True) if
dist.key.startswith(COMPONENT_PREFIX)]
pypi_results = _get_first_party_pypi_command_modules()
logger.debug('The following components are already installed %s', installed_component_names)
logger.debug("Found %d result(s)", len(pypi_results))
for pypi_res in pypi_results:
if pypi_res['name'] not in installed_component_names:
available_components.append(pypi_res)
if not available_components:
logger.warning('All available components are already installed.')
return available_components
def remove(component_name):
""" Remove a component """
_deprecate_warning()
_verify_not_dev()
if component_name in ['nspkg', 'core']:
raise CLIError("This component cannot be removed, it is required for the CLI to function.")
import pip
full_component_name = COMPONENT_PREFIX + component_name
found = any(dist.key == full_component_name for dist in pip.get_installed_distributions(local_only=True))
if found:
options = ['--isolated', '--yes']
pip_args = ['uninstall'] + options + ['--disable-pip-version-check', full_component_name]
_run_pip(pip, pip_args)
else:
raise CLIError("Component not installed.")
def _run_pip(pip, pip_exec_args):
log_stream = StringIO()
log_handler = logging.StreamHandler(log_stream)
log_handler.setFormatter(logging.Formatter('%(name)s : %(message)s'))
pip.logger.handlers = []
pip.logger.addHandler(log_handler)
# Don't propagate to root logger as we catch the pip logs in our own log stream
pip.logger.propagate = False
logger.debug('Running pip: %s %s', pip, pip_exec_args)
status_code = pip.main(pip_exec_args)
log_output = log_stream.getvalue()
logger.debug(log_output)
log_stream.close()
if status_code > 0:
if '[Errno 13] Permission denied' in log_output:
raise CLIError('Permission denied. Run command with --debug for more information.\n'
'If executing az with sudo, you may want sudo\'s -E and -H flags.')
raise CLIError('An error occurred. Run command with --debug for more information.\n'
'If executing az with sudo, you may want sudo\'s -E and -H flags.')
def _installed_in_user():
try:
return __file__.startswith(site.getusersitepackages())
except (TypeError, AttributeError):
return False
def _install_or_update(package_list, link, private, pre):
import pip
options = ['--isolated', '--disable-pip-version-check', '--upgrade']
if pre:
options.append('--pre')
if _installed_in_user():
options.append('--user')
pkg_index_options = ['--find-links', link] if link else []
if private:
package_index_url = az_config.get('component', 'package_index_url', fallback=None)
package_index_trusted_host = az_config.get('component', 'package_index_trusted_host',
fallback=None)
if package_index_url:
pkg_index_options += ['--extra-index-url', package_index_url]
else:
raise CLIError('AZURE_COMPONENT_PACKAGE_INDEX_URL environment variable not set and not '
'specified in config. AZURE_COMPONENT_PACKAGE_INDEX_TRUSTED_HOST may '
'also need to be set.\nIf executing az with sudo, you may want sudo\'s '
'-E and -H flags.')
pkg_index_options += ['--trusted-host',
package_index_trusted_host] if package_index_trusted_host else []
pip_args = ['install'] + options + package_list + pkg_index_options
_run_pip(pip, pip_args)
# Fix to make sure that we have empty __init__.py files for the azure site-packages folder.
nspkg_pip_args = ['install'] + options + ['--force-reinstall', 'azure-nspkg', 'azure-mgmt-nspkg'] + pkg_index_options # pylint: disable=line-too-long
_run_pip(pip, nspkg_pip_args)
def _verify_additional_components(components, private, allow_third_party):
# Don't verify when third-party packages are allowed, or when using a private server (which we can't query)
if allow_third_party or private:
return
third_party = []
first_party_component_names = [r['name'] for r in _get_first_party_pypi_command_modules()]
for c in components:
if c not in first_party_component_names:
third_party.append(c)
if third_party:
raise CLIError("The following component(s) '{}' are third party or not available. "
"Use --allow-third-party to install "
"third party packages.".format(', '.join(third_party)))
def update(private=False,
pre=False,
link=None,
additional_components=None,
allow_third_party=False):
""" Update the CLI and all installed components """
_deprecate_warning()
_verify_not_dev()
import pip
# Update the CLI itself
package_list = [CLI_PACKAGE_NAME]
# Update all the packages we currently have installed
package_list += [dist.key for dist in pip.get_installed_distributions(local_only=True)
if dist.key.startswith(COMPONENT_PREFIX)]
# Install/Update any new components the user requested
if additional_components:
_verify_additional_components(additional_components, private, allow_third_party)
for c in additional_components:
package_list += [COMPONENT_PREFIX + c]
_install_or_update(package_list, link, private, pre)
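# A minimal sketch of driving the update flow above from Python (the
# component name is hypothetical; the real entry point is the 'az component'
# CLI):
#
#   update(private=False,
#          pre=False,
#          link=None,
#          additional_components=['interactive'],
#          allow_third_party=False)
#
# This upgrades 'azure-cli' itself, every installed 'azure-cli-*' package,
# and any extra components requested, after the third-party check.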
| mit |
jemmyw/ansible | lib/ansible/plugins/callback/oneline.py | 144 | 3487 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
class CallbackModule(CallbackBase):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'oneline'
def _command_generic_msg(self, hostname, result, caption):
stdout = result.get('stdout','').replace('\n', '\\n')
if 'stderr' in result and result['stderr']:
stderr = result.get('stderr','').replace('\n', '\\n')
return "%s | %s | rc=%s | (stdout) %s (stderr) %s" % (hostname, caption, result.get('rc',0), stdout, stderr)
else:
return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, result.get('rc',0), stdout)
def v2_runner_on_failed(self, result, ignore_errors=False):
if 'exception' in result._result:
if self._display.verbosity < 3:
# extract just the actual error message from the exception text
error = result._result['exception'].strip().split('\n')[-1]
msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
else:
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'].replace('\n','')
if result._task.action in C.MODULE_NO_JSON:
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color='red')
else:
self._display.display(msg, color='red')
# finally, remove the exception from the result so it's not shown every time
del result._result['exception']
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='red')
def v2_runner_on_ok(self, result):
if result._task.action in C.MODULE_NO_JSON:
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color='green')
else:
self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='green')
def v2_runner_on_unreachable(self, result):
self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow')
def v2_runner_on_skipped(self, result):
self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan')
| gpl-3.0 |
ryfeus/lambda-packs | Keras_tensorflow/source/tensorflow/python/debug/cli/offline_analyzer.py | 16 | 2561 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Offline dump analyzer of TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
# Google-internal import(s).
from tensorflow.python.debug import debug_data
from tensorflow.python.debug.cli import analyzer_cli
from tensorflow.python.platform import app
def main(_):
if FLAGS.log_usage:
pass # No logging for open-source.
if not FLAGS.dump_dir:
print("ERROR: dump_dir flag is empty.", file=sys.stderr)
sys.exit(1)
print("tfdbg offline: FLAGS.dump_dir = %s" % FLAGS.dump_dir)
debug_dump = debug_data.DebugDumpDir(
FLAGS.dump_dir, validate=FLAGS.validate_graph)
cli = analyzer_cli.create_analyzer_ui(
debug_dump,
tensor_filters={"has_inf_or_nan": debug_data.has_inf_or_nan},
ui_type=FLAGS.ui_type)
title = "tfdbg offline @ %s" % FLAGS.dump_dir
cli.run_ui(title=title, title_color="black_on_white", init_command="lt")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--dump_dir", type=str, default="", help="tfdbg dump directory to load")
parser.add_argument(
"--log_usage",
type="bool",
nargs="?",
const=True,
default=True,
help="Whether the usage of this tool is to be logged")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--validate_graph",
nargs="?",
const=True,
type="bool",
default=True,
help="""\
Whether the dumped tensors will be validated against the GraphDefs\
""")
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
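# Example invocation (the dump directory path is hypothetical); the dump
# directory is the one a tfdbg-wrapped Session wrote tensor dumps into:
#
#   python offline_analyzer.py --dump_dir=/tmp/tfdbg_dumps_run1 --ui_type=curses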
| mit |
naturali/tensorflow | tensorflow/python/kernel_tests/stack_ops_test.py | 21 | 5173 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.stack_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_data_flow_ops
class StackOpTest(tf.test.TestCase):
def _testStackPushPop(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
c = gen_data_flow_ops._stack_push(h, [[4.0, 5.0]])
with tf.control_dependencies([c]):
c1 = gen_data_flow_ops._stack_pop(h, tf.float32)
self.assertAllClose([[4.0, 5.0]], c1.eval())
def testStackPushPop(self):
self._testStackPushPop(use_gpu=False)
self._testStackPushPop(use_gpu=True)
def _testStackPushPopSwap(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
a = np.arange(2000)
x = tf.constant(a, dtype=tf.float32)
h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
c = gen_data_flow_ops._stack_push(h, x, swap_memory=True)
with tf.control_dependencies([c]):
c1 = gen_data_flow_ops._stack_pop(h, tf.float32)
self.assertAllClose(a, c1.eval())
def testStackPushPopSwap(self):
self._testStackPushPopSwap(use_gpu=False)
self._testStackPushPopSwap(use_gpu=True)
def _testStackWhileSwap(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = tf.constant(0)
h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
def c(x):
return tf.less(x, 10)
def b(x):
with tf.control_dependencies([x]):
a = tf.constant(np.ones(2000), dtype=tf.float32)
v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
with tf.control_dependencies([v]):
return tf.add(x, 1)
r = tf.while_loop(c, b, [n])
v = tf.constant(np.zeros(2000), dtype=tf.float32)
def c1(x, y):
return tf.greater(x, 0)
def b1(x, y):
nx = tf.sub(x, 1)
ny = y + gen_data_flow_ops._stack_pop(h, tf.float32)
return [nx, ny]
rx, ry = tf.while_loop(c1, b1, [r, v],
[r.get_shape(), tensor_shape.unknown_shape()])
self.assertAllClose(np.ones(2000) * 10.0, ry.eval())
def testStackWhileSwap(self):
self._testStackWhileSwap(use_gpu=False)
self._testStackWhileSwap(use_gpu=True)
def _testMultiStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
h1 = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
c1 = gen_data_flow_ops._stack_push(h1, 4.0)
with tf.control_dependencies([c1]):
c1 = gen_data_flow_ops._stack_pop(h1, tf.float32)
h2 = gen_data_flow_ops._stack(tf.float32, stack_name="bar")
c2 = gen_data_flow_ops._stack_push(h2, 5.0)
with tf.control_dependencies([c2]):
c2 = gen_data_flow_ops._stack_pop(h2, tf.float32)
r = c1 + c2
self.assertAllClose(9.0, r.eval())
def testMultiStack(self):
self._testMultiStack(use_gpu=False)
self._testMultiStack(use_gpu=True)
def _testSameNameStacks(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
h1 = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
c1 = gen_data_flow_ops._stack_push(h1, 4.0)
h2 = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
c2 = gen_data_flow_ops._stack_push(h2, 5.0)
r = c1 + c2
self.assertNotEqual(h1.eval()[1], h2.eval()[1])
def testSameNameStacks(self):
self._testSameNameStacks(use_gpu=False)
self._testSameNameStacks(use_gpu=True)
def _testCloseStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
c1 = gen_data_flow_ops._stack_close(h)
sess.run(c1)
def testCloseStack(self):
self._testCloseStack(use_gpu=False)
self._testCloseStack(use_gpu=True)
def _testPushCloseStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
c = gen_data_flow_ops._stack_push(h, [[4.0, 5.0]])
with tf.control_dependencies([c]):
c1 = gen_data_flow_ops._stack_close(h)
sess.run(c1)
def testPushCloseStack(self):
self._testPushCloseStack(use_gpu=False)
self._testPushCloseStack(use_gpu=True)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
mattclay/ansible | lib/ansible/inventory/group.py | 22 | 9603 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from itertools import chain
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common._collections_compat import Mapping, MutableMapping
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars
display = Display()
def to_safe_group_name(name, replacer="_", force=False, silent=False):
# Converts 'bad' characters in a string to underscores (or provided replacer) so they can be used as Ansible hosts or groups
warn = ''
if name: # when deserializing we might not have name yet
invalid_chars = C.INVALID_VARIABLE_NAMES.findall(name)
if invalid_chars:
msg = 'invalid character(s) "%s" in group name (%s)' % (to_text(set(invalid_chars)), to_text(name))
if C.TRANSFORM_INVALID_GROUP_CHARS not in ('never', 'ignore') or force:
name = C.INVALID_VARIABLE_NAMES.sub(replacer, name)
if not (silent or C.TRANSFORM_INVALID_GROUP_CHARS == 'silently'):
display.vvvv('Replacing ' + msg)
warn = 'Invalid characters were found in group names and automatically replaced, use -vvvv to see details'
else:
if C.TRANSFORM_INVALID_GROUP_CHARS == 'never':
display.vvvv('Not replacing %s' % msg)
warn = 'Invalid characters were found in group names but not replaced, use -vvvv to see details'
if warn:
display.warning(warn)
return name
class Group:
''' a group of ansible hosts '''
# __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
def __init__(self, name=None):
self.depth = 0
self.name = to_safe_group_name(name)
self.hosts = []
self._hosts = None
self.vars = {}
self.child_groups = []
self.parent_groups = []
self._hosts_cache = None
self.priority = 1
def __repr__(self):
return self.get_name()
def __str__(self):
return self.get_name()
def __getstate__(self):
return self.serialize()
def __setstate__(self, data):
return self.deserialize(data)
def serialize(self):
parent_groups = []
for parent in self.parent_groups:
parent_groups.append(parent.serialize())
self._hosts = None
result = dict(
name=self.name,
vars=self.vars.copy(),
parent_groups=parent_groups,
depth=self.depth,
hosts=self.hosts,
)
return result
def deserialize(self, data):
self.__init__()
self.name = data.get('name')
self.vars = data.get('vars', dict())
self.depth = data.get('depth', 0)
self.hosts = data.get('hosts', [])
self._hosts = None
parent_groups = data.get('parent_groups', [])
for parent_data in parent_groups:
g = Group()
g.deserialize(parent_data)
self.parent_groups.append(g)
def _walk_relationship(self, rel, include_self=False, preserve_ordering=False):
'''
Given `rel` that is an iterable property of Group,
constituting a directed acyclic graph among all groups,
returns the set of all groups in the full tree.
A   B    C
|  / |  /
| /  | /
D -> E
|  / vertical connections
| /  are directed upward
F
Called on F, returns set of (A, B, C, D, E)
'''
seen = set([])
unprocessed = set(getattr(self, rel))
if include_self:
unprocessed.add(self)
if preserve_ordering:
ordered = [self] if include_self else []
ordered.extend(getattr(self, rel))
while unprocessed:
seen.update(unprocessed)
new_unprocessed = set([])
for new_item in chain.from_iterable(getattr(g, rel) for g in unprocessed):
new_unprocessed.add(new_item)
if preserve_ordering:
if new_item not in seen:
ordered.append(new_item)
new_unprocessed.difference_update(seen)
unprocessed = new_unprocessed
if preserve_ordering:
return ordered
return seen
def get_ancestors(self):
return self._walk_relationship('parent_groups')
def get_descendants(self, **kwargs):
return self._walk_relationship('child_groups', **kwargs)
@property
def host_names(self):
if self._hosts is None:
self._hosts = set(self.hosts)
return self._hosts
def get_name(self):
return self.name
def add_child_group(self, group):
added = False
if self == group:
raise Exception("can't add group to itself")
# don't add if it's already there
if group not in self.child_groups:
# prepare list of group's new ancestors this edge creates
start_ancestors = group.get_ancestors()
new_ancestors = self.get_ancestors()
if group in new_ancestors:
raise AnsibleError("Adding group '%s' as child to '%s' creates a recursive dependency loop." % (to_native(group.name), to_native(self.name)))
new_ancestors.add(self)
new_ancestors.difference_update(start_ancestors)
added = True
self.child_groups.append(group)
# update the depth of the child
group.depth = max([self.depth + 1, group.depth])
# update the depth of the grandchildren
group._check_children_depth()
# now add self to child's parent_groups list, but only if there
# isn't already a group with the same name
if self.name not in [g.name for g in group.parent_groups]:
group.parent_groups.append(self)
for h in group.get_hosts():
h.populate_ancestors(additions=new_ancestors)
self.clear_hosts_cache()
return added
def _check_children_depth(self):
depth = self.depth
start_depth = self.depth # self.depth could change over loop
seen = set([])
unprocessed = set(self.child_groups)
while unprocessed:
seen.update(unprocessed)
depth += 1
to_process = unprocessed.copy()
unprocessed = set([])
for g in to_process:
if g.depth < depth:
g.depth = depth
unprocessed.update(g.child_groups)
if depth - start_depth > len(seen):
raise AnsibleError("The group named '%s' has a recursive dependency loop." % to_native(self.name))
def add_host(self, host):
added = False
if host.name not in self.host_names:
self.hosts.append(host)
self._hosts.add(host.name)
host.add_group(self)
self.clear_hosts_cache()
added = True
return added
def remove_host(self, host):
removed = False
if host.name in self.host_names:
self.hosts.remove(host)
self._hosts.remove(host.name)
host.remove_group(self)
self.clear_hosts_cache()
removed = True
return removed
def set_variable(self, key, value):
if key == 'ansible_group_priority':
self.set_priority(int(value))
else:
if key in self.vars and isinstance(self.vars[key], MutableMapping) and isinstance(value, Mapping):
self.vars = combine_vars(self.vars, {key: value})
else:
self.vars[key] = value
def clear_hosts_cache(self):
self._hosts_cache = None
for g in self.get_ancestors():
g._hosts_cache = None
def get_hosts(self):
if self._hosts_cache is None:
self._hosts_cache = self._get_hosts()
return self._hosts_cache
def _get_hosts(self):
hosts = []
seen = {}
for kid in self.get_descendants(include_self=True, preserve_ordering=True):
kid_hosts = kid.hosts
for kk in kid_hosts:
if kk not in seen:
seen[kk] = 1
if self.name == 'all' and kk.implicit:
continue
hosts.append(kk)
return hosts
def get_vars(self):
return self.vars.copy()
def set_priority(self, priority):
try:
self.priority = int(priority)
except TypeError:
# FIXME: warn about invalid priority
pass
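# A minimal sketch of the Group API above (group names and the variable are
# hypothetical):
#
#   parent = Group('webservers')
#   child = Group('webservers_east')
#   parent.add_child_group(child)        # child.depth becomes 1
#   parent.get_descendants()             # -> set([child])
#   child.get_ancestors()                # -> set([parent])
#   child.set_variable('http_port', 8080)
#   child.get_vars()                     # -> {'http_port': 8080}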
| gpl-3.0 |
okor/thumbor | tests/error_handlers/test_file.py | 7 | 4551 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
import json
import tempfile
from preggy import expect
from thumbor import __version__
from thumbor.error_handlers.file import ErrorHandler
from thumbor.config import Config
from thumbor.context import ServerParameters
from tests.base import TestCase
class FakeRequest(object):
def __init__(self):
self.headers = {
'header1': 'value1',
'Cookie': 'cookie1=value; cookie2=value2;'
}
self.url = "test/"
self.method = "GET"
self.arguments = []
self.body = "body"
self.query = "a=1&b=2"
self.remote_ip = "127.0.0.1"
def full_url(self):
return "http://test/%s" % self.url
class FakeHandler(object):
def __init__(self):
self.request = FakeRequest()
class InvalidFileErrorHandlerTestCase(TestCase):
def test_when_invalid_empty_configuration(self):
with expect.error_to_happen(RuntimeError):
ErrorHandler(self.config)
def test_WhenInvalidConfigurationOfFileNameWithContext_should_be_error(self):
cfg = Config(ERROR_FILE_NAME_USE_CONTEXT='server..port', ERROR_FILE_LOGGER='toto')
with expect.error_to_happen(RuntimeError):
ErrorHandler(cfg)
class BasicFileErrorHandlerTestCase(TestCase):
def get_config(self):
self.tmp = tempfile.NamedTemporaryFile(prefix='thumborTest.')
return Config(SECURITY_KEY='ACME-SEC', ERROR_FILE_LOGGER=self.tmp.name)
def get_server(self):
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return server
def test_when_error_occurs_should_have_called_client(self):
handler = ErrorHandler(self.config)
http_handler = FakeHandler()
handler.handle_error(self.context, http_handler, RuntimeError("Test"))
content = self.tmp.read()
log = json.loads(content)
del log['extra']['timestamp']
expect(log).to_be_like({
'Http': {
'url': 'http://test/test/',
'method': 'GET',
'data': [],
'body': "body",
'query_string': "a=1&b=2"
},
'interfaces.User': {
'ip': "127.0.0.1",
},
'exception': 'Test',
'extra': {
'thumbor-version': __version__,
'Headers': {
'header1': 'value1',
'Cookie': {
'cookie1': 'value',
'cookie2': 'value2'
}
},
}
})
class FileErrorHandlerTestCase(TestCase):
PORT = 8890
def get_config(self):
self.tmp = tempfile.NamedTemporaryFile(prefix='thumborTest.%i.' % self.PORT)
return Config(
SECURITY_KEY='ACME-SEC',
ERROR_FILE_LOGGER=self.tmp.name.replace('thumborTest.%i.' % self.PORT, 'thumborTest.%i.'),
ERROR_FILE_NAME_USE_CONTEXT='server.port'
)
def get_server(self):
server = ServerParameters(self.PORT, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return server
def test_when_error_occurs_i_use_context_should_have_called_client(self):
handler = ErrorHandler(self.config)
http_handler = FakeHandler()
handler.handle_error(self.context, http_handler, RuntimeError("Test"))
content = self.tmp.read()
# check against json version
log = json.loads(content)
del log['extra']['timestamp']
expect(log).to_be_like({
'Http': {
'url': 'http://test/test/',
'method': 'GET',
'data': [],
'body': "body",
'query_string': "a=1&b=2"
},
'interfaces.User': {
'ip': "127.0.0.1",
},
'exception': 'Test',
'extra': {
'thumbor-version': __version__,
'Headers': {
'header1': 'value1',
'Cookie': {
'cookie1': 'value',
'cookie2': 'value2'
}
},
}
})
| mit |
moreati/django | django/contrib/admin/templatetags/admin_urls.py | 553 | 1812 | from django import template
from django.contrib.admin.utils import quote
from django.core.urlresolvers import Resolver404, get_script_prefix, resolve
from django.utils.http import urlencode
from django.utils.six.moves.urllib.parse import parse_qsl, urlparse, urlunparse
register = template.Library()
@register.filter
def admin_urlname(value, arg):
return 'admin:%s_%s_%s' % (value.app_label, value.model_name, arg)
@register.filter
def admin_urlquote(value):
return quote(value)
@register.simple_tag(takes_context=True)
def add_preserved_filters(context, url, popup=False, to_field=None):
opts = context.get('opts')
preserved_filters = context.get('preserved_filters')
parsed_url = list(urlparse(url))
parsed_qs = dict(parse_qsl(parsed_url[4]))
merged_qs = dict()
if opts and preserved_filters:
preserved_filters = dict(parse_qsl(preserved_filters))
match_url = '/%s' % url.partition(get_script_prefix())[2]
try:
match = resolve(match_url)
except Resolver404:
pass
else:
current_url = '%s:%s' % (match.app_name, match.url_name)
changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
if changelist_url == current_url and '_changelist_filters' in preserved_filters:
preserved_filters = dict(parse_qsl(preserved_filters['_changelist_filters']))
merged_qs.update(preserved_filters)
if popup:
from django.contrib.admin.options import IS_POPUP_VAR
merged_qs[IS_POPUP_VAR] = 1
if to_field:
from django.contrib.admin.options import TO_FIELD_VAR
merged_qs[TO_FIELD_VAR] = to_field
merged_qs.update(parsed_qs)
parsed_url[4] = urlencode(merged_qs)
return urlunparse(parsed_url)
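# Illustrative template usage (the context variable name is hypothetical):
#
#   {% load admin_urls %}
#   <a href="{% add_preserved_filters history_url %}">History</a>
#
# The tag re-attaches any preserved changelist filters (plus the popup and
# to_field markers, when given) to the target URL's querystring.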
| bsd-3-clause |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/multiprocessing/dummy/connection.py | 168 | 2807 | #
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
#
# multiprocessing/dummy/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [ 'Client', 'Listener', 'Pipe' ]
from Queue import Queue
families = [None]
class Listener(object):
def __init__(self, address=None, family=None, backlog=1):
self._backlog_queue = Queue(backlog)
def accept(self):
return Connection(*self._backlog_queue.get())
def close(self):
self._backlog_queue = None
address = property(lambda self: self._backlog_queue)
def Client(address):
_in, _out = Queue(), Queue()
address.put((_out, _in))
return Connection(_in, _out)
def Pipe(duplex=True):
a, b = Queue(), Queue()
return Connection(a, b), Connection(b, a)
class Connection(object):
def __init__(self, _in, _out):
self._out = _out
self._in = _in
self.send = self.send_bytes = _out.put
self.recv = self.recv_bytes = _in.get
def poll(self, timeout=0.0):
if self._in.qsize() > 0:
return True
if timeout <= 0.0:
return False
self._in.not_empty.acquire()
self._in.not_empty.wait(timeout)
self._in.not_empty.release()
return self._in.qsize() > 0
def close(self):
pass
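# A minimal sketch of the queue-backed connections above:
#
#   a, b = Pipe()
#   a.send('ping')
#   b.recv()        # -> 'ping'
#   b.poll(0.1)     # -> False once the queue is drained
#
#   listener = Listener(backlog=1)
#   client = Client(listener.address)
#   server_side = listener.accept()
#   client.send('hello')
#   server_side.recv()   # -> 'hello'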
| gpl-2.0 |
d40223223/2015cdb_g6-team | static/Brython3.1.1-20150328-091302/Lib/_imp.py | 625 | 2115 | """(Extremely) low-level import machinery bits as used by importlib and imp."""
class __loader__(object):pass
def _fix_co_filename(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:_fix_co_filename'))
def acquire_lock(*args,**kw):
"""acquire_lock() -> None Acquires the interpreter's import lock for the current thread.
This lock should be used by import hooks to ensure thread-safety
when importing modules.
On platforms without threads, this function does nothing."""
pass #assume we are a platform without threads
#raise NotImplementedError("%s:not implemented" % ('_imp.py:acquire_lock'))
def extension_suffixes(*args,**kw):
"""extension_suffixes() -> list of strings Returns the list of file suffixes used to identify extension modules."""
return ['.pyd']
def get_frozen_object(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:get_frozen_object'))
def init_builtin(module,*args,**kw):
return __import__(module)
def init_frozen(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:init_frozen'))
def is_builtin(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:is_builtin'))
def is_frozen(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:is_frozen'))
def is_frozen_package(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:is_frozen_package'))
def load_dynamic(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:load_dynamic'))
def lock_held(*args,**kw):
"""lock_held() -> boolean Return True if the import lock is currently held, else False.
On platforms without threads, return False."""
return False
#raise NotImplementedError("%s:not implemented" % ('_imp.py:lock_held'))
def release_lock(*args,**kw):
"""release_lock() -> None Release the interpreter's import lock.
On platforms without threads, this function does nothing."""
pass #assume no threads
#raise NotImplementedError("%s:not implemented" % ('_imp.py:release_lock'))
| gpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Cython-0.22.1/tests/run/for_in_iter.py | 26 | 2837 | # mode: run
# tag: forin
import sys
import cython
try:
from builtins import next
except ImportError:
def next(it):
return it.next()
def for_in_pyiter_pass(it):
"""
>>> it = Iterable(5)
>>> for_in_pyiter_pass(it)
>>> next(it)
Traceback (most recent call last):
StopIteration
"""
for item in it:
pass
def for_in_pyiter(it):
"""
>>> for_in_pyiter(Iterable(5))
[0, 1, 2, 3, 4]
"""
l = []
for item in it:
l.append(item)
return l
def for_in_list():
"""
>>> for_in_pyiter([1,2,3,4,5])
[1, 2, 3, 4, 5]
"""
@cython.test_assert_path_exists('//TupleNode//IntNode')
@cython.test_fail_if_path_exists('//ListNode//IntNode')
def for_in_literal_list():
"""
>>> for_in_literal_list()
[1, 2, 3, 4]
"""
l = []
for i in [1,2,3,4]:
l.append(i)
return l
@cython.test_assert_path_exists('//TupleNode//IntNode')
@cython.test_fail_if_path_exists('//ListNode//IntNode')
def for_in_literal_mult_list():
"""
>>> for_in_literal_mult_list()
[1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]
"""
l = []
for i in [1,2,3,4] * 3:
l.append(i)
return l
class Iterable(object):
"""
>>> for_in_pyiter(Iterable(5))
[0, 1, 2, 3, 4]
"""
def __init__(self, N):
self.N = N
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i < self.N:
i = self.i
self.i += 1
return i
raise StopIteration
next = __next__
if sys.version_info[0] >= 3:
class NextReplacingIterable(object):
def __init__(self):
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i > 5:
raise StopIteration
self.i += 1
self.__next__ = self.next2
return 1
def next2(self):
self.__next__ = self.next3
return 2
def next3(self):
del self.__next__
raise StopIteration
else:
class NextReplacingIterable(object):
def __init__(self):
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i > 5:
raise StopIteration
self.i += 1
self.next = self.next2
return 1
def next2(self):
self.next = self.next3
return 2
def next3(self):
del self.next
raise StopIteration
def for_in_next_replacing_iter():
"""
>>> for_in_pyiter(NextReplacingIterable())
[1, 1, 1, 1, 1, 1]
"""
def for_in_gen(N):
"""
>>> for_in_pyiter(for_in_gen(10))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
for i in range(N):
yield i
| mit |
Ma233/django-xadmin-fonts | setup.py | 19 | 1635 | #!/usr/bin/env python
from setuptools import setup
# version_tuple = __import__('xadmin.version').VERSION
# version = ".".join([str(v) for v in version_tuple])
setup(
name='django-xadmin',
version='0.5.0',
description='Drop-in replacement of Django admin comes with lots of goodies, fully extensible with plugin support, pretty UI based on Twitter Bootstrap.',
long_description=open('README.rst').read(),
author='sshwsfc',
author_email='[email protected]',
license=open('LICENSE').read(),
url='http://www.xadmin.io',
download_url='http://github.com/sshwsfc/django-xadmin/archive/master.zip',
packages=['xadmin', 'xadmin.plugins', 'xadmin.templatetags', 'xadmin.views'],
include_package_data=True,
install_requires=[
'setuptools',
'django>=1.5',
'django-crispy-forms>=1.4.0',
],
extras_require={
'Excel': ['xlwt', 'xlsxwriter'],
'Reversion': ['django-reversion'],
},
zip_safe=False,
keywords=['admin', 'django', 'xadmin', 'bootstrap'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
"Programming Language :: JavaScript",
'Programming Language :: Python',
"Programming Language :: Python :: 2.7",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
]
)
| bsd-3-clause |
justinpotts/mozillians | mozillians/users/migrations/0058_auto__add_field_userprofile_geo_country__add_field_userprofile_geo_reg.py | 3 | 16957 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.geo_country'
db.add_column('profile', 'geo_country',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geo.Country'], null=True, on_delete=models.SET_NULL, blank=True),
keep_default=False)
# Adding field 'UserProfile.geo_region'
db.add_column('profile', 'geo_region',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geo.Region'], null=True, on_delete=models.SET_NULL, blank=True),
keep_default=False)
# Adding field 'UserProfile.geo_city'
db.add_column('profile', 'geo_city',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geo.City'], null=True, on_delete=models.SET_NULL, blank=True),
keep_default=False)
# Adding field 'UserProfile.lat'
db.add_column('profile', 'lat',
self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.lng'
db.add_column('profile', 'lng',
self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.geo_country'
db.delete_column('profile', 'geo_country_id')
# Deleting field 'UserProfile.geo_region'
db.delete_column('profile', 'geo_region_id')
# Deleting field 'UserProfile.geo_city'
db.delete_column('profile', 'geo_city_id')
# Deleting field 'UserProfile.lat'
db.delete_column('profile', 'lat')
# Deleting field 'UserProfile.lng'
db.delete_column('profile', 'lng')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'geo.city': {
'Meta': {'object_name': 'City'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.Country']"}),
'e': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {}),
'lng': ('django.db.models.fields.FloatField', [], {}),
'mapbox_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'n': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.Region']", 'null': 'True', 'blank': 'True'}),
's': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'w': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'geo.country': {
'Meta': {'object_name': 'Country'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapbox_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '120'})
},
'geo.region': {
'Meta': {'object_name': 'Region'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapbox_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '120'})
},
'groups.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'accepting_new_members': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '10'}),
'curator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'groups_curated'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['users.UserProfile']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'functional_area': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_channel': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'max_reminder': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'members_can_leave': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'new_member_criteria': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'wiki': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
'groups.groupmembership': {
'Meta': {'unique_together': "(('userprofile', 'group'),)", 'object_name': 'GroupMembership'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['groups.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'userprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.UserProfile']"})
},
'groups.skill': {
'Meta': {'ordering': "['name']", 'object_name': 'Skill'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'})
},
'users.externalaccount': {
'Meta': {'ordering': "['type']", 'unique_together': "(('identifier', 'type', 'user'),)", 'object_name': 'ExternalAccount'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'privacy': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.UserProfile']"})
},
'users.language': {
'Meta': {'ordering': "['code']", 'unique_together': "(('code', 'userprofile'),)", 'object_name': 'Language'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '63'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'userprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.UserProfile']"})
},
'users.usernameblacklist': {
'Meta': {'ordering': "['value']", 'object_name': 'UsernameBlacklist'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_regex': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'users.userprofile': {
'Meta': {'ordering': "['full_name']", 'object_name': 'UserProfile', 'db_table': "'profile'"},
'allows_community_sites': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allows_mozilla_sites': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'basket_token': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'date_mozillian': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'date_vouched': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'geo_city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.City']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'geo_country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.Country']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'geo_region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.Region']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'through': "orm['groups.GroupMembership']", 'to': "orm['groups.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ircname': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'is_vouched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lng': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'photo': (u'sorl.thumbnail.fields.ImageField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'privacy_bio': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_city': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_country': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_date_mozillian': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_email': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_full_name': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_groups': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_ircname': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_languages': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_photo': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_region': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_skills': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_story_link': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_timezone': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_title': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_tshirt': ('mozillians.users.models.PrivacyField', [], {'default': '1'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'skills': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': "orm['groups.Skill']"}),
'story_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '70', 'blank': 'True'}),
'tshirt': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'vouched_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vouchees'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['users.UserProfile']", 'blank': 'True', 'null': 'True'})
}
}
complete_apps = ['users']
| bsd-3-clause |
Architektor/PySnip | venv/lib/python2.7/site-packages/twisted/spread/ui/tkutil.py | 11 | 12919 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Utilities for building L{PB<twisted.spread.pb>} clients with L{Tkinter}.
"""
from Tkinter import (
ACTIVE, Button, Canvas, E, END, Entry, Frame, Label, LEFT, Listbox,
mainloop, N, S, StringVar, Toplevel, Tk, W)
from tkSimpleDialog import _QueryString
from tkFileDialog import _Dialog
from twisted.spread import pb
from twisted import copyright
import string
#normalFont = Font("-adobe-courier-medium-r-normal-*-*-120-*-*-m-*-iso8859-1")
#boldFont = Font("-adobe-courier-bold-r-normal-*-*-120-*-*-m-*-iso8859-1")
#errorFont = Font("-adobe-courier-medium-o-normal-*-*-120-*-*-m-*-iso8859-1")
class _QueryPassword(_QueryString):
def body(self, master):
w = Label(master, text=self.prompt, justify=LEFT)
w.grid(row=0, padx=5, sticky=W)
self.entry = Entry(master, name="entry",show="*")
self.entry.grid(row=1, padx=5, sticky=W+E)
if self.initialvalue:
self.entry.insert(0, self.initialvalue)
self.entry.select_range(0, END)
return self.entry
def askpassword(title, prompt, **kw):
'''get a password from the user
@param title: the dialog title
@param prompt: the label text
@param **kw: see L{SimpleDialog} class
@returns: a string
'''
d = apply(_QueryPassword, (title, prompt), kw)
return d.result
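# Illustrative use of askpassword (a sketch added for clarity; the window
# title and prompt strings below are hypothetical, not part of the API):
#
#   root = Tk()
#   root.withdraw()
#   secret = askpassword("Login", "PB password:", initialvalue="")
#   # 'secret' holds the typed string, or None if the dialog was cancelled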
def grid_setexpand(widget):
cols,rows=widget.grid_size()
for i in range(cols):
widget.columnconfigure(i,weight=1)
for i in range(rows):
widget.rowconfigure(i,weight=1)
class CList(Frame):
def __init__(self,parent,labels,disablesorting=0,**kw):
Frame.__init__(self,parent)
self.labels=labels
self.lists=[]
self.disablesorting=disablesorting
kw["exportselection"]=0
for i in range(len(labels)):
b=Button(self,text=labels[i],anchor=W,height=1,pady=0)
b.config(command=lambda s=self,i=i:s.setSort(i))
b.grid(column=i,row=0,sticky=N+E+W)
box=apply(Listbox,(self,),kw)
box.grid(column=i,row=1,sticky=N+E+S+W)
self.lists.append(box)
grid_setexpand(self)
self.rowconfigure(0,weight=0)
self._callall("bind",'<Button-1>',self.Button1)
self._callall("bind",'<B1-Motion>',self.Button1)
self.bind('<Up>',self.UpKey)
self.bind('<Down>',self.DownKey)
self.sort=None
def _callall(self,funcname,*args,**kw):
rets=[]
for l in self.lists:
func=getattr(l,funcname)
ret=apply(func,args,kw)
if ret!=None: rets.append(ret)
if rets: return rets
def Button1(self,e):
index=self.nearest(e.y)
self.select_clear(0,END)
self.select_set(index)
self.activate(index)
return "break"
def UpKey(self,e):
index=self.index(ACTIVE)
if index:
self.select_clear(0,END)
self.select_set(index-1)
return "break"
def DownKey(self,e):
index=self.index(ACTIVE)
if index!=self.size()-1:
self.select_clear(0,END)
self.select_set(index+1)
return "break"
def setSort(self,index):
if self.sort==None:
self.sort=[index,1]
elif self.sort[0]==index:
self.sort[1]=-self.sort[1]
else:
self.sort=[index,1]
self._sort()
def _sort(self):
if self.disablesorting:
return
if self.sort==None:
return
ind,direc=self.sort
li=list(self.get(0,END))
li.sort(lambda x,y,i=ind,d=direc:d*cmp(x[i],y[i]))
self.delete(0,END)
for l in li:
self._insert(END,l)
def activate(self,index):
self._callall("activate",index)
# def bbox(self,index):
# return self._callall("bbox",index)
def curselection(self):
return self.lists[0].curselection()
def delete(self,*args):
apply(self._callall,("delete",)+args)
def get(self,*args):
bad=apply(self._callall,("get",)+args)
if len(args)==1:
return bad
ret=[]
for i in range(len(bad[0])):
r=[]
for j in range(len(bad)):
r.append(bad[j][i])
ret.append(r)
return ret
def index(self,index):
return self.lists[0].index(index)
def insert(self,index,items):
self._insert(index,items)
self._sort()
def _insert(self,index,items):
for i in range(len(items)):
self.lists[i].insert(index,items[i])
def nearest(self,y):
return self.lists[0].nearest(y)
def see(self,index):
self._callall("see",index)
def size(self):
return self.lists[0].size()
def selection_anchor(self,index):
self._callall("selection_anchor",index)
select_anchor=selection_anchor
def selection_clear(self,*args):
apply(self._callall,("selection_clear",)+args)
select_clear=selection_clear
def selection_includes(self,index):
return self.lists[0].select_includes(index)
select_includes=selection_includes
def selection_set(self,*args):
apply(self._callall,("selection_set",)+args)
select_set=selection_set
def xview(self,*args):
if not args: return self.lists[0].xview()
apply(self._callall,("xview",)+args)
def yview(self,*args):
if not args: return self.lists[0].yview()
apply(self._callall,("yview",)+args)
class ProgressBar:
def __init__(self, master=None, orientation="horizontal",
min=0, max=100, width=100, height=18,
doLabel=1, appearance="sunken",
fillColor="blue", background="gray",
labelColor="yellow", labelFont="Verdana",
labelText="", labelFormat="%d%%",
value=0, bd=2):
# preserve various values
self.master=master
self.orientation=orientation
self.min=min
self.max=max
self.width=width
self.height=height
self.doLabel=doLabel
self.fillColor=fillColor
self.labelFont= labelFont
self.labelColor=labelColor
self.background=background
self.labelText=labelText
self.labelFormat=labelFormat
self.value=value
self.frame=Frame(master, relief=appearance, bd=bd)
self.canvas=Canvas(self.frame, height=height, width=width, bd=0,
highlightthickness=0, background=background)
self.scale=self.canvas.create_rectangle(0, 0, width, height,
fill=fillColor)
self.label=self.canvas.create_text(self.canvas.winfo_reqwidth() / 2,
height / 2, text=labelText,
anchor="c", fill=labelColor,
font=self.labelFont)
self.update()
self.canvas.pack(side='top', fill='x', expand='no')
def updateProgress(self, newValue, newMax=None):
if newMax:
self.max = newMax
self.value = newValue
self.update()
def update(self):
# Trim the values to be between min and max
value=self.value
if value > self.max:
value = self.max
if value < self.min:
value = self.min
# Adjust the rectangle
if self.orientation == "horizontal":
self.canvas.coords(self.scale, 0, 0,
float(value) / self.max * self.width, self.height)
else:
self.canvas.coords(self.scale, 0,
self.height - (float(value) /
self.max*self.height),
self.width, self.height)
# Now update the colors
self.canvas.itemconfig(self.scale, fill=self.fillColor)
self.canvas.itemconfig(self.label, fill=self.labelColor)
# And update the label
if self.doLabel:
if value:
if value >= 0:
pvalue = int((float(value) / float(self.max)) *
100.0)
else:
pvalue = 0
self.canvas.itemconfig(self.label, text=self.labelFormat
% pvalue)
else:
self.canvas.itemconfig(self.label, text='')
else:
self.canvas.itemconfig(self.label, text=self.labelFormat %
self.labelText)
self.canvas.update_idletasks()
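# Illustrative use of ProgressBar (a sketch added for clarity; the widget
# values below are hypothetical):
#
#   root = Tk()
#   bar = ProgressBar(root, max=200, labelFormat="%d%%")
#   bar.frame.pack()
#   bar.updateProgress(50)         # fill to 25% and refresh the label
#   bar.updateProgress(150, 300)   # raise the maximum to 300 as well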
class DirectoryBrowser(_Dialog):
command = "tk_chooseDirectory"
def askdirectory(**options):
"Ask for a directory to save to."
return apply(DirectoryBrowser, (), options).show()
class GenericLogin(Toplevel):
def __init__(self,callback,buttons):
Toplevel.__init__(self)
self.callback=callback
Label(self,text="Twisted v%s"%copyright.version).grid(column=0,row=0,columnspan=2)
self.entries={}
row=1
for stuff in buttons:
label,value=stuff[:2]
if len(stuff)==3:
dict=stuff[2]
else: dict={}
Label(self,text=label+": ").grid(column=0,row=row)
e=apply(Entry,(self,),dict)
e.grid(column=1,row=row)
e.insert(0,value)
self.entries[label]=e
row=row+1
Button(self,text="Login",command=self.doLogin).grid(column=0,row=row)
Button(self,text="Cancel",command=self.close).grid(column=1,row=row)
self.protocol('WM_DELETE_WINDOW',self.close)
def close(self):
self.tk.quit()
self.destroy()
def doLogin(self):
values={}
for k in self.entries.keys():
values[string.lower(k)]=self.entries[k].get()
self.callback(values)
self.destroy()
class Login(Toplevel):
def __init__(self,
callback,
referenced = None,
initialUser = "guest",
initialPassword = "guest",
initialHostname = "localhost",
initialService = "",
initialPortno = pb.portno):
Toplevel.__init__(self)
version_label = Label(self,text="Twisted v%s" % copyright.version)
self.pbReferenceable = referenced
self.pbCallback = callback
# version_label.show()
self.username = Entry(self)
self.password = Entry(self,show='*')
self.hostname = Entry(self)
self.service = Entry(self)
self.port = Entry(self)
self.username.insert(0,initialUser)
self.password.insert(0,initialPassword)
self.service.insert(0,initialService)
self.hostname.insert(0,initialHostname)
self.port.insert(0,str(initialPortno))
userlbl=Label(self,text="Username:")
passlbl=Label(self,text="Password:")
servicelbl=Label(self,text="Service:")
hostlbl=Label(self,text="Hostname:")
portlbl=Label(self,text="Port #:")
self.logvar=StringVar()
self.logvar.set("Protocol PB-%s"%pb.Broker.version)
self.logstat = Label(self,textvariable=self.logvar)
self.okbutton = Button(self,text="Log In", command=self.login)
version_label.grid(column=0,row=0,columnspan=2)
z=0
for i in [[userlbl,self.username],
[passlbl,self.password],
[hostlbl,self.hostname],
[servicelbl,self.service],
[portlbl,self.port]]:
i[0].grid(column=0,row=z+1)
i[1].grid(column=1,row=z+1)
z = z+1
self.logstat.grid(column=0,row=6,columnspan=2)
self.okbutton.grid(column=0,row=7,columnspan=2)
self.protocol('WM_DELETE_WINDOW',self.tk.quit)
def loginReset(self):
self.logvar.set("Idle.")
def loginReport(self, txt):
self.logvar.set(txt)
self.after(30000, self.loginReset)
def login(self):
host = self.hostname.get()
port = self.port.get()
service = self.service.get()
try:
port = int(port)
        except ValueError:
pass
user = self.username.get()
pswd = self.password.get()
pb.connect(host, port, user, pswd, service,
client=self.pbReferenceable).addCallback(self.pbCallback).addErrback(
self.couldNotConnect)
def couldNotConnect(self,f):
self.loginReport("could not connect:"+f.getErrorMessage())
if __name__=="__main__":
root=Tk()
o=CList(root,["Username","Online","Auto-Logon","Gateway"])
o.pack()
for i in range(0,16,4):
o.insert(END,[i,i+1,i+2,i+3])
mainloop()
| gpl-3.0 |
ianstalk/Flexget | flexget/components/tmdb/api.py | 2 | 5008 | from flask import jsonify
from flask_restx import inputs
from flexget import plugin
from flexget.api import APIResource, api
from flexget.api.app import BadRequest, NotFoundError, etag
tmdb_api = api.namespace('tmdb', description='TMDB lookup endpoint')
class ObjectsContainer:
poster_object = {
'type': 'object',
'properties': {
'id': {'type': ['integer', 'null']},
'movie_id': {'type': ['integer', 'null']},
'urls': {'type': 'object'},
'file_path': {'type': 'string'},
'width': {'type': 'integer'},
'height': {'type': 'integer'},
'aspect_ratio': {'type': 'number'},
'vote_average': {'type': 'number'},
'vote_count': {'type': 'integer'},
'language_code': {'type': ['string', 'null']},
},
'required': [
'id',
'movie_id',
'urls',
'file_path',
'width',
'height',
'aspect_ratio',
'vote_average',
'vote_count',
'language_code',
],
'additionalProperties': False,
}
movie_object = {
'type': 'object',
'properties': {
'id': {'type': 'integer'},
'imdb_id': {'type': 'string'},
'name': {'type': 'string'},
'original_name': {'type': ['string', 'null']},
'alternative_name': {'type': ['string', 'null']},
'year': {'type': 'integer'},
'runtime': {'type': 'integer'},
'language': {'type': 'string'},
'overview': {'type': 'string'},
'tagline': {'type': 'string'},
'rating': {'type': ['number', 'null']},
'votes': {'type': ['integer', 'null']},
'popularity': {'type': ['number', 'null']},
'adult': {'type': 'boolean'},
'budget': {'type': ['integer', 'null']},
'revenue': {'type': ['integer', 'null']},
'homepage': {'type': ['string', 'null'], 'format': 'uri'},
'posters': {'type': 'array', 'items': poster_object},
'backdrops': {'type': 'array', 'items': poster_object},
'genres': {'type': 'array', 'items': {'type': 'string'}},
'updated': {'type': 'string', 'format': 'date-time'},
'lookup_language': {'type': ['string', 'null']},
},
'required': [
'id',
'name',
'year',
'original_name',
'alternative_name',
'runtime',
'language',
'overview',
'tagline',
'rating',
'votes',
'popularity',
'adult',
'budget',
'revenue',
'homepage',
'genres',
'updated',
],
'additionalProperties': False,
}
description = 'Either a title, TMDB ID or IMDB ID is required for a lookup'
return_schema = api.schema_model('tmdb_search_schema', ObjectsContainer.movie_object)
tmdb_parser = api.parser()
tmdb_parser.add_argument('title', help='Movie title')
tmdb_parser.add_argument('tmdb_id', help='TMDB ID')
tmdb_parser.add_argument('imdb_id', help='IMDB ID')
tmdb_parser.add_argument('language', help='ISO 639-1 language code')
tmdb_parser.add_argument('year', type=int, help='Movie year')
tmdb_parser.add_argument('only_cached', type=int, help='Return only cached results')
tmdb_parser.add_argument(
'include_posters', type=inputs.boolean, default=False, help='Include posters in response'
)
tmdb_parser.add_argument(
'include_backdrops', type=inputs.boolean, default=False, help='Include backdrops in response'
)
@tmdb_api.route('/movies/')
@api.doc(description=description)
class TMDBMoviesAPI(APIResource):
@etag(cache_age=3600)
@api.response(200, model=return_schema)
@api.response(NotFoundError)
@api.response(BadRequest)
@api.doc(parser=tmdb_parser)
def get(self, session=None):
""" Get TMDB movie data """
args = tmdb_parser.parse_args()
title = args.get('title')
tmdb_id = args.get('tmdb_id')
imdb_id = args.get('imdb_id')
posters = args.pop('include_posters', False)
backdrops = args.pop('include_backdrops', False)
if not (title or tmdb_id or imdb_id):
raise BadRequest(description)
lookup = plugin.get('api_tmdb', 'tmdb.api').lookup
try:
movie = lookup(session=session, **args)
except LookupError as e:
raise NotFoundError(e.args[0])
return_movie = movie.to_dict()
if posters:
return_movie['posters'] = [p.to_dict() for p in movie.posters]
if backdrops:
return_movie['backdrops'] = [p.to_dict() for p in movie.backdrops]
return jsonify(return_movie)
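# Illustrative request against this endpoint (a sketch added for clarity;
# the host, port, token and title below are hypothetical assumptions):
#
#   import requests
#   resp = requests.get('http://localhost:5050/api/tmdb/movies/',
#                       params={'title': 'Sintel', 'include_posters': True},
#                       headers={'Authorization': 'Token <api-token>'})
#   movie = resp.json()  # shaped like ObjectsContainer.movie_object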
| mit |
dendisuhubdy/tensorflow | tensorflow/contrib/linear_optimizer/__init__.py | 158 | 1308 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for training linear models.
## This package provides optimizers to train linear models.
@@SdcaModel
@@SparseFeatureColumn
@@SDCAOptimizer
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel
from tensorflow.contrib.linear_optimizer.python.ops.sparse_feature_column import SparseFeatureColumn
from tensorflow.contrib.linear_optimizer.python.sdca_optimizer import SDCAOptimizer
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
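# Rough usage sketch (assumes the tf.contrib.learn-era estimator API; the
# feature columns and input_fn below are hypothetical placeholders):
#
#   sdca = SDCAOptimizer(example_id_column='example_id',
#                        symmetric_l2_regularization=1.0)
#   classifier = tf.contrib.learn.LinearClassifier(
#       feature_columns=[age, country], optimizer=sdca)
#   classifier.fit(input_fn=train_input_fn, steps=100)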
| apache-2.0 |
Yannig/ansible | lib/ansible/modules/packaging/language/bundler.py | 12 | 7076 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Tim Hoiberg <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION='''
---
module: bundler
short_description: Manage Ruby Gem dependencies with Bundler
description:
- Manage installation and Gem version dependencies for Ruby using the Bundler gem
version_added: "2.0.0"
options:
executable:
description:
- The path to the bundler executable
required: false
default: null
state:
description:
      - The desired state of the Gem bundle. C(latest) updates gems to the most recent acceptable version
required: false
choices: [present, latest]
default: present
chdir:
description:
      - The directory to execute the bundler commands from. This directory
needs to contain a valid Gemfile or .bundle/ directory
required: false
default: temporary working directory
exclude_groups:
description:
- A list of Gemfile groups to exclude during operations. This only
applies when state is C(present). Bundler considers this
a 'remembered' property for the Gemfile and will automatically exclude
groups in future operations even if C(exclude_groups) is not set
required: false
default: null
clean:
description:
- Only applies if state is C(present). If set removes any gems on the
target host that are not in the gemfile
required: false
choices: [yes, no]
default: "no"
gemfile:
description:
- Only applies if state is C(present). The path to the gemfile to use to install gems.
required: false
default: Gemfile in current directory
local:
description:
- If set only installs gems from the cache on the target host
required: false
choices: [yes, no]
default: "no"
deployment_mode:
description:
- Only applies if state is C(present). If set it will only install gems
that are in the default or production groups. Requires a Gemfile.lock
        file to have been created beforehand
required: false
choices: [yes, no]
default: "no"
user_install:
description:
- Only applies if state is C(present). Installs gems in the local user's cache or for all users
required: false
choices: [yes, no]
default: "yes"
gem_path:
description:
- Only applies if state is C(present). Specifies the directory to
install the gems into. If C(chdir) is set then this path is relative to
C(chdir)
required: false
default: RubyGems gem paths
binstub_directory:
description:
- Only applies if state is C(present). Specifies the directory to
install any gem bins files to. When executed the bin files will run
within the context of the Gemfile and fail if any required gem
dependencies are not installed. If C(chdir) is set then this path is
relative to C(chdir)
required: false
default: null
extra_args:
description:
- A space separated string of additional commands that can be applied to
the Bundler command. Refer to the Bundler documentation for more
information
required: false
default: null
author: "Tim Hoiberg (@thoiberg)"
'''
EXAMPLES='''
# Installs gems from a Gemfile in the current directory
- bundler:
state: present
executable: ~/.rvm/gems/2.1.5/bin/bundle
# Excludes the production group from installing
- bundler:
state: present
exclude_groups: production
# Only install gems from the default and production groups
- bundler:
state: present
deployment_mode: yes
# Installs gems using a Gemfile in another directory
- bundler:
state: present
gemfile: ../rails_project/Gemfile
# Updates Gemfile in another directory
- bundler:
state: latest
chdir: ~/rails_project
'''
from ansible.module_utils.basic import AnsibleModule
def get_bundler_executable(module):
if module.params.get('executable'):
return module.params.get('executable').split(' ')
else:
return [ module.get_bin_path('bundle', True) ]
def main():
module = AnsibleModule(
argument_spec=dict(
executable=dict(default=None, required=False),
state=dict(default='present', required=False, choices=['present', 'latest']),
chdir=dict(default=None, required=False, type='path'),
exclude_groups=dict(default=None, required=False, type='list'),
clean=dict(default=False, required=False, type='bool'),
gemfile=dict(default=None, required=False, type='path'),
local=dict(default=False, required=False, type='bool'),
deployment_mode=dict(default=False, required=False, type='bool'),
user_install=dict(default=True, required=False, type='bool'),
gem_path=dict(default=None, required=False, type='path'),
binstub_directory=dict(default=None, required=False, type='path'),
extra_args=dict(default=None, required=False),
),
supports_check_mode=True
)
state = module.params.get('state')
chdir = module.params.get('chdir')
exclude_groups = module.params.get('exclude_groups')
clean = module.params.get('clean')
gemfile = module.params.get('gemfile')
local = module.params.get('local')
deployment_mode = module.params.get('deployment_mode')
user_install = module.params.get('user_install')
gem_path = module.params.get('gem_path')
binstub_directory = module.params.get('binstub_directory')
extra_args = module.params.get('extra_args')
cmd = get_bundler_executable(module)
if module.check_mode:
cmd.append('check')
rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False)
module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err)
if state == 'present':
cmd.append('install')
if exclude_groups:
cmd.extend(['--without', ':'.join(exclude_groups)])
if clean:
cmd.append('--clean')
if gemfile:
cmd.extend(['--gemfile', gemfile])
if local:
cmd.append('--local')
if deployment_mode:
cmd.append('--deployment')
if not user_install:
cmd.append('--system')
if gem_path:
cmd.extend(['--path', gem_path])
if binstub_directory:
cmd.extend(['--binstubs', binstub_directory])
else:
cmd.append('update')
if local:
cmd.append('--local')
if extra_args:
cmd.extend(extra_args.split(' '))
rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True)
module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err)
if __name__ == '__main__':
main()
| gpl-3.0 |
wizkid057/eloipool | bitcoin/txn.py | 16 | 4465 | # Eloipool - Python Bitcoin pool server
# Copyright (C) 2011-2012 Luke Dashjr <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bitcoin.script
from .varlen import varlenDecode, varlenEncode
from util import dblsha
from struct import pack, unpack
_nullprev = b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'
class Txn:
def __init__(self, data=None):
if data:
self.data = data
self.idhash()
@classmethod
def new(cls):
o = cls()
o.version = 1
o.inputs = []
o.outputs = []
o.locktime = 0
return o
def setCoinbase(self, sigScript, seqno = 0xffffffff, height = None):
if not height is None:
# NOTE: This is required to be the minimum valid length by BIP 34
sigScript = bitcoin.script.encodeUNum(height) + sigScript
self.inputs = ( ((_nullprev, 0xffffffff), sigScript, seqno), )
def addInput(self, prevout, sigScript, seqno = 0xffffffff):
self.inputs.append( (prevout, sigScript, seqno) )
def addOutput(self, amount, pkScript):
self.outputs.append( (amount, pkScript) )
	def disassemble(self, retExtra = False):
		# Decode the serialized transaction bytes in self.data into the
		# version, inputs, outputs and locktime fields; if retExtra is set,
		# any trailing bytes after the transaction are returned to the caller
self.version = unpack('<L', self.data[:4])[0]
rc = [4]
(inputCount, data) = varlenDecode(self.data[4:], rc)
inputs = []
for i in range(inputCount):
prevout = (data[:32], unpack('<L', data[32:36])[0])
rc[0] += 36
(sigScriptLen, data) = varlenDecode(data[36:], rc)
sigScript = data[:sigScriptLen]
seqno = unpack('<L', data[sigScriptLen:sigScriptLen + 4])[0]
data = data[sigScriptLen + 4:]
rc[0] += sigScriptLen + 4
inputs.append( (prevout, sigScript, seqno) )
self.inputs = inputs
(outputCount, data) = varlenDecode(data, rc)
outputs = []
for i in range(outputCount):
amount = unpack('<Q', data[:8])[0]
rc[0] += 8
(pkScriptLen, data) = varlenDecode(data[8:], rc)
pkScript = data[:pkScriptLen]
data = data[pkScriptLen:]
rc[0] += pkScriptLen
outputs.append( (amount, pkScript) )
self.outputs = outputs
self.locktime = unpack('<L', data[:4])[0]
if not retExtra:
assert len(data) == 4
else:
assert data == self.data[rc[0]:]
data = data[4:]
rc[0] += 4
self.data = self.data[:rc[0]]
return data
def isCoinbase(self):
return len(self.inputs) == 1 and self.inputs[0][0] == (_nullprev, 0xffffffff)
def getCoinbase(self):
return self.inputs[0][1]
def assemble(self):
data = pack('<L', self.version)
inputs = self.inputs
data += varlenEncode(len(inputs))
for prevout, sigScript, seqno in inputs:
data += prevout[0] + pack('<L', prevout[1])
data += varlenEncode(len(sigScript)) + sigScript
data += pack('<L', seqno)
outputs = self.outputs
data += varlenEncode(len(outputs))
for amount, pkScript in outputs:
data += pack('<Q', amount)
data += varlenEncode(len(pkScript)) + pkScript
data += pack('<L', self.locktime)
self.data = data
self.idhash()
def idhash(self):
self.txid = dblsha(self.data)
# Txn tests
def _test():
d = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00'
t = Txn(d)
assert t.txid == b"C\xeczW\x9fUa\xa4*~\x967\xadAVg'5\xa6X\xbe'R\x18\x18\x01\xf7#\xba3\x16\xd2"
t.disassemble()
t.assemble()
assert t.data == d
assert not t.isCoinbase()
t = Txn.new()
t.addInput((b' '*32, 0), b'INPUT')
t.addOutput(0x10000, b'OUTPUT')
t.assemble()
assert t.txid == b'>`\x97\xecu\x8e\xb5\xef\x19k\x17d\x96sw\xb1\xf1\x9bO\x1c6\xa0\xbe\xf7N\xed\x13j\xfdHF\x1a'
t.disassemble()
t.assemble()
assert t.txid == b'>`\x97\xecu\x8e\xb5\xef\x19k\x17d\x96sw\xb1\xf1\x9bO\x1c6\xa0\xbe\xf7N\xed\x13j\xfdHF\x1a'
assert not t.isCoinbase()
t = Txn.new()
t.setCoinbase(b'COINBASE')
t.addOutput(0x10000, b'OUTPUT')
assert t.isCoinbase()
assert t.getCoinbase() == b'COINBASE'
t.assemble()
assert t.txid == b'n\xb9\xdc\xef\xe9\xdb(R\x8dC~-\xef~\x88d\x15+X\x13&\xb7\xbc$\xb1h\xf3g=\x9b~V'
_test()
| agpl-3.0 |
bigjosh/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/charade/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbols (punctuation) that do not belong to a word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified based on win1251BulgarianCharToOrderMap, so
# only numbers <64 are known to be valid
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
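# Illustrative lookup (a sketch of how the single-byte prober is believed to
# consume this data; SAMPLE_SIZE and the byte values below are assumptions):
#
#   SAMPLE_SIZE = 64
#   prev_order = Latin5_BulgarianCharToOrderMap[0xD0]
#   cur_order = Latin5_BulgarianCharToOrderMap[0xD5]
#   category = BulgarianLangModel[prev_order * SAMPLE_SIZE + cur_order]
#   # 0-3: how typical this two-character sequence is in Bulgarian text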
Latin5BulgarianModel = {
'charToOrderMap': Latin5_BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
Win1251BulgarianModel = {
'charToOrderMap': win1251BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
# flake8: noqa
| lgpl-2.1 |
IV-GII/SocialCookies | ENV1/lib/python2.7/site-packages/django/contrib/gis/db/models/sql/where.py | 118 | 3987 | from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import FieldDoesNotExist
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import Constraint, WhereNode
from django.contrib.gis.db.models.fields import GeometryField
class GeoConstraint(Constraint):
"""
This subclass overrides `process` to better handle geographic SQL
construction.
"""
def __init__(self, init_constraint):
self.alias = init_constraint.alias
self.col = init_constraint.col
self.field = init_constraint.field
def process(self, lookup_type, value, connection):
if isinstance(value, SQLEvaluator):
# Make sure the F Expression destination field exists, and
# set an `srid` attribute with the same as that of the
# destination.
geo_fld = GeoWhereNode._check_geo_field(value.opts, value.expression.name)
if not geo_fld:
raise ValueError('No geographic field found in expression.')
value.srid = geo_fld.srid
db_type = self.field.db_type(connection=connection)
params = self.field.get_db_prep_lookup(lookup_type, value, connection=connection)
return (self.alias, self.col, db_type), params
class GeoWhereNode(WhereNode):
"""
Used to represent the SQL where-clause for spatial databases --
these are tied to the GeoQuery class that created it.
"""
def _prepare_data(self, data):
if isinstance(data, (list, tuple)):
obj, lookup_type, value = data
if ( isinstance(obj, Constraint) and
isinstance(obj.field, GeometryField) ):
data = (GeoConstraint(obj), lookup_type, value)
return super(GeoWhereNode, self)._prepare_data(data)
def make_atom(self, child, qn, connection):
lvalue, lookup_type, value_annot, params_or_value = child
if isinstance(lvalue, GeoConstraint):
data, params = lvalue.process(lookup_type, params_or_value, connection)
spatial_sql, spatial_params = connection.ops.spatial_lookup_sql(
data, lookup_type, params_or_value, lvalue.field, qn)
return spatial_sql, spatial_params + params
else:
return super(GeoWhereNode, self).make_atom(child, qn, connection)
@classmethod
def _check_geo_field(cls, opts, lookup):
"""
Utility for checking the given lookup with the given model options.
The lookup is a string either specifying the geographic field, e.g.
        'point' or 'the_geom', or a related lookup on a geographic field like
'address__point'.
If a GeometryField exists according to the given lookup on the model
options, it will be returned. Otherwise returns None.
"""
# This takes into account the situation where the lookup is a
# lookup to a related geographic field, e.g., 'address__point'.
field_list = lookup.split(LOOKUP_SEP)
# Reversing so list operates like a queue of related lookups,
# and popping the top lookup.
field_list.reverse()
fld_name = field_list.pop()
try:
geo_fld = opts.get_field(fld_name)
# If the field list is still around, then it means that the
# lookup was for a geometry field across a relationship --
# thus we keep on getting the related model options and the
# model field associated with the next field in the list
# until there's no more left.
while len(field_list):
opts = geo_fld.rel.to._meta
geo_fld = opts.get_field(field_list.pop())
except (FieldDoesNotExist, AttributeError):
return False
# Finally, make sure we got a Geographic field and return.
if isinstance(geo_fld, GeometryField):
return geo_fld
else:
return False
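# Illustrative use (a hypothetical sketch; ``Address`` and ``House`` are
# stand-in models, with ``point`` a PointField on Address):
#
#   GeoWhereNode._check_geo_field(Address._meta, 'point')
#   GeoWhereNode._check_geo_field(House._meta, 'address__point')
#   # each returns the GeometryField instance, or False if none is found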
| gpl-2.0 |
JioCloud/horizon | openstack_dashboard/test/test_data/glance_data.py | 2 | 8133 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient.v1 import images
from openstack_dashboard.test.test_data import utils
def data(TEST):
TEST.images = utils.TestDataContainer()
TEST.snapshots = utils.TestDataContainer()
# Snapshots
snapshot_dict = {'name': u'snapshot',
'container_format': u'ami',
'id': 3,
'status': "active",
'owner': TEST.tenant.id,
'properties': {'image_type': u'snapshot'},
'is_public': False,
'protected': False}
snapshot_dict_no_owner = {'name': u'snapshot 2',
'container_format': u'ami',
'id': 4,
'status': "active",
'owner': None,
'properties': {'image_type': u'snapshot'},
'is_public': False,
'protected': False}
snapshot_dict_queued = {'name': u'snapshot 2',
'container_format': u'ami',
'id': 5,
'status': "queued",
'owner': TEST.tenant.id,
'properties': {'image_type': u'snapshot'},
'is_public': False,
'protected': False}
snapshot = images.Image(images.ImageManager(None), snapshot_dict)
TEST.snapshots.add(snapshot)
snapshot = images.Image(images.ImageManager(None), snapshot_dict_no_owner)
TEST.snapshots.add(snapshot)
snapshot = images.Image(images.ImageManager(None), snapshot_dict_queued)
TEST.snapshots.add(snapshot)
# Images
image_dict = {'id': '007e7d55-fe1e-4c5c-bf08-44b4a4964822',
'name': 'public_image',
'status': "active",
'size': 20 * 1024 ** 3,
'min_disk': 0,
'owner': TEST.tenant.id,
'container_format': 'novaImage',
'properties': {'image_type': u'image'},
'is_public': True,
'protected': False,
'min_ram': 0,
'created_at': '2014-02-14T20:56:53'}
public_image = images.Image(images.ImageManager(None), image_dict)
image_dict = {'id': 'a001c047-22f8-47d0-80a1-8ec94a9524fe',
'name': 'private_image',
'status': "active",
'size': 10 * 1024 ** 2,
'min_disk': 0,
'owner': TEST.tenant.id,
'container_format': 'aki',
'is_public': False,
'protected': False,
'min_ram': 0,
'created_at': '2014-03-14T12:56:53'}
private_image = images.Image(images.ImageManager(None), image_dict)
image_dict = {'id': 'd6936c86-7fec-474a-85c5-5e467b371c3c',
'name': 'protected_images',
'status': "active",
'owner': TEST.tenant.id,
'size': 2 * 1024 ** 3,
'min_disk': 30,
'container_format': 'novaImage',
'properties': {'image_type': u'image'},
'is_public': True,
'protected': True,
'min_ram': 0,
'created_at': '2014-03-16T06:22:14'}
protected_image = images.Image(images.ImageManager(None), image_dict)
image_dict = {'id': '278905a6-4b52-4d1e-98f9-8c57bb25ba32',
'name': None,
'status': "active",
'size': 5 * 1024 ** 3,
'min_disk': 0,
'owner': TEST.tenant.id,
'container_format': 'novaImage',
'properties': {'image_type': u'image'},
'is_public': True,
'protected': False,
'min_ram': 0}
public_image2 = images.Image(images.ImageManager(None), image_dict)
image_dict = {'id': '710a1acf-a3e3-41dd-a32d-5d6b6c86ea10',
'name': 'private_image 2',
'status': "active",
'size': 30 * 1024 ** 3,
'min_disk': 0,
'owner': TEST.tenant.id,
'container_format': 'aki',
'is_public': False,
'protected': False,
'min_ram': 0}
private_image2 = images.Image(images.ImageManager(None), image_dict)
image_dict = {'id': '7cd892fd-5652-40f3-a450-547615680132',
'name': 'private_image 3',
'status': "active",
'size': 2 * 1024 ** 3,
'min_disk': 0,
'owner': TEST.tenant.id,
'container_format': 'aki',
'is_public': False,
'protected': False,
'min_ram': 0}
private_image3 = images.Image(images.ImageManager(None), image_dict)
    # A shared image: not public and not owned by the local tenant.
image_dict = {'id': 'c8756975-7a3b-4e43-b7f7-433576112849',
'name': 'shared_image 1',
'status': "active",
'size': 8 * 1024 ** 3,
'min_disk': 0,
'owner': 'someothertenant',
'container_format': 'aki',
'is_public': False,
'protected': False,
'min_ram': 0}
shared_image1 = images.Image(images.ImageManager(None), image_dict)
# "Official" image. Public and tenant matches an entry
# in IMAGES_LIST_FILTER_TENANTS.
image_dict = {'id': 'f448704f-0ce5-4d34-8441-11b6581c6619',
'name': 'official_image 1',
'status': "active",
'size': 2 * 1024 ** 3,
'min_disk': 0,
'owner': 'officialtenant',
'container_format': 'aki',
'is_public': True,
'protected': False,
'min_ram': 0}
official_image1 = images.Image(images.ImageManager(None), image_dict)
image_dict = {'id': 'a67e7d45-fe1e-4c5c-bf08-44b4a4964822',
'name': 'multi_prop_image',
'status': "active",
'size': 20 * 1024 ** 3,
'min_disk': 0,
'owner': TEST.tenant.id,
'container_format': 'novaImage',
'properties': {'description': u'a multi prop image',
'foo': u'foo val',
'bar': u'bar val'},
'is_public': True,
'protected': False}
multi_prop_image = images.Image(images.ImageManager(None), image_dict)
    # An image whose name attribute is omitted, as returned by the current API
image_dict = {'id': 'c8756975-7a3b-4e43-b7f7-433576112849',
'status': "active",
'size': 8 * 1024 ** 3,
'min_disk': 0,
'owner': 'someothertenant',
'container_format': 'aki',
'is_public': False,
'protected': False}
no_name_image = images.Image(images.ImageManager(None), image_dict)
TEST.images.add(public_image, private_image, protected_image,
public_image2, private_image2, private_image3,
shared_image1, official_image1, multi_prop_image)
TEST.empty_name_image = no_name_image
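    # Illustrative consumption in a test case (a hypothetical sketch; the
    # list()/first() accessors are assumed from utils.TestDataContainer):
    #
    #   images = TEST.images.list()    # every fixture registered above
    #   image = TEST.images.first()    # -> public_image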
| apache-2.0 |
skevy/django | django/contrib/auth/models.py | 2 | 18017 | import datetime
import hashlib
import urllib
from django.contrib import auth
from django.contrib.auth.signals import user_logged_in
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.manager import EmptyManager
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
from django.utils.crypto import constant_time_compare
UNUSABLE_PASSWORD = '!' # This will never be a valid hash
def get_hexdigest(algorithm, salt, raw_password):
"""
Returns a string of the hexdigest of the given plaintext password and salt
using the given algorithm ('md5', 'sha1' or 'crypt').
"""
raw_password, salt = smart_str(raw_password), smart_str(salt)
if algorithm == 'crypt':
try:
import crypt
except ImportError:
raise ValueError('"crypt" password algorithm not supported in this environment')
return crypt.crypt(raw_password, salt)
if algorithm == 'md5':
return hashlib.md5(salt + raw_password).hexdigest()
elif algorithm == 'sha1':
return hashlib.sha1(salt + raw_password).hexdigest()
raise ValueError("Got unknown password algorithm type in password.")
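# Example (added for illustration; the salt and password are made up):
#
#   get_hexdigest('sha1', 'salt', 's3cret')
#   # == hashlib.sha1('salt' + 's3cret').hexdigest()
#   # Stored passwords take the 'algo$salt$hash' form that check_password()
#   # below splits apart and compares in constant time.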
def check_password(raw_password, enc_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
"""
algo, salt, hsh = enc_password.split('$')
return constant_time_compare(hsh, get_hexdigest(algo, salt, raw_password))
def update_last_login(sender, user, **kwargs):
"""
A signal receiver which updates the last_login date for
the user logging in.
"""
user.last_login = datetime.datetime.now()
user.save()
user_logged_in.connect(update_last_login)
class SiteProfileNotAvailable(Exception):
pass
class PermissionManager(models.Manager):
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.get_by_natural_key(app_label, model)
)
class Permission(models.Model):
"""The permissions system provides a way to assign permissions to specific users and groups of users.
The permission system is used by the Django admin site, but may also be useful in your own code. The Django admin site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add" form and add an object.
- The "change" permission limits a user's ability to view the change list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
Permissions are set globally per type of object, not per specific object instance. It is possible to say "Mary may change news stories," but it's not currently possible to say "Mary may change news stories, but only the ones she created herself" or "Mary may only change news stories that have a certain status or publication date."
Three basic permissions -- add, change and delete -- are automatically created for each Django model.
"""
name = models.CharField(_('name'), max_length=50)
content_type = models.ForeignKey(ContentType)
codename = models.CharField(_('codename'), max_length=100)
objects = PermissionManager()
class Meta:
verbose_name = _('permission')
verbose_name_plural = _('permissions')
unique_together = (('content_type', 'codename'),)
ordering = ('content_type__app_label', 'content_type__model', 'codename')
def __unicode__(self):
return u"%s | %s | %s" % (
unicode(self.content_type.app_label),
unicode(self.content_type),
unicode(self.name))
def natural_key(self):
return (self.codename,) + self.content_type.natural_key()
natural_key.dependencies = ['contenttypes.contenttype']
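# Permission checks elsewhere take the "<app_label>.<codename>" string form,
# e.g. (illustrative): user.has_perm('auth.change_user')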
class Group(models.Model):
"""Groups are a generic way of categorizing users to apply permissions, or some other label, to those users. A user can belong to any number of groups.
A user in a group automatically has all the permissions granted to that group. For example, if the group Site editors has the permission can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to apply some label, or extended functionality, to them. For example, you could create a group 'Special users', and you could write code that would do special things to those users -- such as giving them access to a members-only portion of your site, or sending them members-only email messages.
"""
name = models.CharField(_('name'), max_length=80, unique=True)
permissions = models.ManyToManyField(Permission, verbose_name=_('permissions'), blank=True)
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
def __unicode__(self):
return self.name
class UserManager(models.Manager):
def create_user(self, username, email, password=None):
"""
Creates and saves a User with the given username, email and password.
"""
now = datetime.datetime.now()
# Normalize the address by lowercasing the domain part of the email
# address.
try:
email_name, domain_part = email.strip().split('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
user = self.model(username=username, email=email, is_staff=False,
is_active=True, is_superuser=False, last_login=now,
date_joined=now)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password):
u = self.create_user(username, email, password)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
def make_random_password(self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'):
"Generates a random password with the given length and given allowed_chars"
# Note that default value of allowed_chars does not have "I" or letters
# that look like it -- just to avoid confusion.
from random import choice
return ''.join([choice(allowed_chars) for i in range(length)])
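# Illustrative manager usage (names and credentials are placeholders):
#
#   user = User.objects.create_user('alice', '[email protected]', 's3cret')
#   admin = User.objects.create_superuser('root', '[email protected]', 'pw')
#   tmp_pw = User.objects.make_random_password(length=12)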
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
permissions = set()
anon = user.is_anonymous()
for backend in auth.get_backends():
if not anon or backend.supports_anonymous_user:
if hasattr(backend, "get_all_permissions"):
if obj is not None:
if backend.supports_object_permissions:
permissions.update(
backend.get_all_permissions(user, obj)
)
else:
permissions.update(backend.get_all_permissions(user))
return permissions
def _user_has_perm(user, perm, obj):
anon = user.is_anonymous()
active = user.is_active
for backend in auth.get_backends():
if (not active and not anon and backend.supports_inactive_user) or \
(not anon or backend.supports_anonymous_user):
if hasattr(backend, "has_perm"):
if obj is not None:
if (backend.supports_object_permissions and
backend.has_perm(user, perm, obj)):
return True
else:
if backend.has_perm(user, perm):
return True
return False
def _user_has_module_perms(user, app_label):
anon = user.is_anonymous()
active = user.is_active
for backend in auth.get_backends():
if (not active and not anon and backend.supports_inactive_user) or \
(not anon or backend.supports_anonymous_user):
if hasattr(backend, "has_module_perms"):
if backend.has_module_perms(user, app_label):
return True
return False
class User(models.Model):
"""
Users within the Django authentication system are represented by this model.
Username and password are required. Other fields are optional.
"""
username = models.CharField(_('username'), max_length=30, unique=True, help_text=_("Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters"))
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('e-mail address'), blank=True)
password = models.CharField(_('password'), max_length=128, help_text=_("Use '[algo]$[salt]$[hexdigest]' or use the <a href=\"password/\">change password form</a>."))
is_staff = models.BooleanField(_('staff status'), default=False, help_text=_("Designates whether the user can log into this admin site."))
is_active = models.BooleanField(_('active'), default=True, help_text=_("Designates whether this user should be treated as active. Unselect this instead of deleting accounts."))
is_superuser = models.BooleanField(_('superuser status'), default=False, help_text=_("Designates that this user has all permissions without explicitly assigning them."))
last_login = models.DateTimeField(_('last login'), default=datetime.datetime.now)
date_joined = models.DateTimeField(_('date joined'), default=datetime.datetime.now)
groups = models.ManyToManyField(Group, verbose_name=_('groups'), blank=True,
help_text=_("In addition to the permissions manually assigned, this user will also get all permissions granted to each group he/she is in."))
user_permissions = models.ManyToManyField(Permission, verbose_name=_('user permissions'), blank=True)
objects = UserManager()
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def __unicode__(self):
return self.username
def get_absolute_url(self):
return "/users/%s/" % urllib.quote(smart_str(self.username))
def is_anonymous(self):
"""
Always returns False. This is a way of comparing User objects to
anonymous users.
"""
return False
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
def get_full_name(self):
"Returns the first_name plus the last_name, with a space in between."
full_name = u'%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def set_password(self, raw_password):
if raw_password is None:
self.set_unusable_password()
else:
import random
algo = 'sha1'
salt = get_hexdigest(algo, str(random.random()), str(random.random()))[:5]
hsh = get_hexdigest(algo, salt, raw_password)
self.password = '%s$%s$%s' % (algo, salt, hsh)
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
"""
# Backwards-compatibility check. Older passwords won't include the
# algorithm or salt.
if '$' not in self.password:
is_correct = (self.password == get_hexdigest('md5', '', raw_password))
if is_correct:
# Convert the password to the new, more secure format.
self.set_password(raw_password)
self.save()
return is_correct
return check_password(raw_password, self.password)
def set_unusable_password(self):
# Sets a value that will never be a valid hash
self.password = UNUSABLE_PASSWORD
def has_usable_password(self):
if self.password is None \
or self.password == UNUSABLE_PASSWORD:
return False
else:
return True
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through
his/her groups. This method queries all available auth backends.
If an object is passed in, only permissions matching this object
are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
if obj is not None:
if backend.supports_object_permissions:
permissions.update(
backend.get_group_permissions(self, obj)
)
else:
permissions.update(backend.get_group_permissions(self))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an object
is provided, permissions for this specific object are checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_perms(self, perm_list, obj=None):
"""
Returns True if the user has each of the specified permissions.
If object is passed, it checks if the user has all required perms
for this object.
"""
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app
label. Uses pretty much the same logic as has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
def email_user(self, subject, message, from_email=None):
"Sends an email to this User."
from django.core.mail import send_mail
send_mail(subject, message, from_email, [self.email])
def get_profile(self):
"""
Returns site-specific profile for this user. Raises
SiteProfileNotAvailable if this site does not allow profiles.
"""
if not hasattr(self, '_profile_cache'):
from django.conf import settings
if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
raise SiteProfileNotAvailable('You need to set AUTH_PROFILE_MO'
'DULE in your project settings')
try:
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
except ValueError:
raise SiteProfileNotAvailable('app_label and model_name should'
' be separated by a dot in the AUTH_PROFILE_MODULE set'
'ting')
try:
model = models.get_model(app_label, model_name)
if model is None:
raise SiteProfileNotAvailable('Unable to load the profile '
'model, check AUTH_PROFILE_MODULE in your project sett'
'ings')
self._profile_cache = model._default_manager.using(self._state.db).get(user__id__exact=self.id)
self._profile_cache.user = self
except (ImportError, ImproperlyConfigured):
raise SiteProfileNotAvailable
return self._profile_cache
class AnonymousUser(object):
id = None
username = ''
is_staff = False
is_active = False
is_superuser = False
_groups = EmptyManager()
_user_permissions = EmptyManager()
def __init__(self):
pass
def __unicode__(self):
return 'AnonymousUser'
def __str__(self):
return unicode(self).encode('utf-8')
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return 1 # instances always return the same hash value
def save(self):
raise NotImplementedError
def delete(self):
raise NotImplementedError
def set_password(self, raw_password):
raise NotImplementedError
def check_password(self, raw_password):
raise NotImplementedError
def _get_groups(self):
return self._groups
groups = property(_get_groups)
def _get_user_permissions(self):
return self._user_permissions
user_permissions = property(_get_user_permissions)
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj=obj)
def has_perm(self, perm, obj=None):
return _user_has_perm(self, perm, obj=obj)
def has_perms(self, perm_list, obj=None):
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, module):
return _user_has_module_perms(self, module)
def is_anonymous(self):
return True
def is_authenticated(self):
return False
| bsd-3-clause |
mheap/ansible | test/units/module_utils/urls/test_generic_urlparse.py | 144 | 1919 | # -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.urls import generic_urlparse
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
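# For orientation, a sketch of the helper under test (URL is illustrative):
#
#   parts = generic_urlparse(urlparse('https://ansible.com/blog'))
#   parts.hostname               # 'ansible.com'
#   urlunparse(parts.as_list())  # round-trips to the original URL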
def test_generic_urlparse():
url = 'https://ansible.com/blog'
parts = urlparse(url)
generic_parts = generic_urlparse(parts)
assert generic_parts.as_list() == list(parts)
assert urlunparse(generic_parts.as_list()) == url
def test_generic_urlparse_netloc():
url = 'https://ansible.com:443/blog'
parts = urlparse(url)
generic_parts = generic_urlparse(parts)
assert generic_parts.hostname == parts.hostname
assert generic_parts.hostname == 'ansible.com'
assert generic_parts.port == 443
assert urlunparse(generic_parts.as_list()) == url
def test_generic_urlparse_no_netloc():
url = 'https://user:[email protected]:443/blog'
parts = list(urlparse(url))
generic_parts = generic_urlparse(parts)
assert generic_parts.hostname == 'ansible.com'
assert generic_parts.port == 443
assert generic_parts.username == 'user'
assert generic_parts.password == 'passwd'
assert urlunparse(generic_parts.as_list()) == url
def test_generic_urlparse_no_netloc_no_auth():
url = 'https://ansible.com:443/blog'
parts = list(urlparse(url))
generic_parts = generic_urlparse(parts)
assert generic_parts.username is None
assert generic_parts.password is None
def test_generic_urlparse_no_netloc_no_host():
url = '/blog'
parts = list(urlparse(url))
generic_parts = generic_urlparse(parts)
assert generic_parts.username is None
assert generic_parts.password is None
assert generic_parts.port is None
assert generic_parts.hostname == ''
| gpl-3.0 |
v-iam/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/service_type_health_policy.py | 2 | 3884 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ServiceTypeHealthPolicy(Model):
"""Represents the health policy used to evaluate the health of services
belonging to a service type.
:param max_percent_unhealthy_partitions_per_service: The maximum allowed
percentage of unhealthy partitions per service. Allowed values are Byte
values from zero to 100
The percentage represents the maximum tolerated percentage of partitions
that can be unhealthy before the service is considered in error.
If the percentage is respected but there is at least one unhealthy
partition, the health is evaluated as Warning.
The percentage is calculated by dividing the number of unhealthy
partitions over the total number of partitions in the service.
The computation rounds up to tolerate one failure on small numbers of
partitions. Default percentage is zero.
. Default value: 0 .
:type max_percent_unhealthy_partitions_per_service: int
:param max_percent_unhealthy_replicas_per_partition: The maximum allowed
percentage of unhealthy replicas per partition. Allowed values are Byte
values from zero to 100.
The percentage represents the maximum tolerated percentage of replicas
that can be unhealthy before the partition is considered in error.
If the percentage is respected but there is at least one unhealthy
replica, the health is evaluated as Warning.
The percentage is calculated by dividing the number of unhealthy replicas
over the total number of replicas in the partition.
The computation rounds up to tolerate one failure on small numbers of
replicas. Default percentage is zero.
. Default value: 0 .
:type max_percent_unhealthy_replicas_per_partition: int
    :param max_percent_unhealthy_services: The maximum allowed
percentage of unhealthy services. Allowed values are Byte values from
zero to 100.
The percentage represents the maximum tolerated percentage of services
that can be unhealthy before the application is considered in error.
If the percentage is respected but there is at least one unhealthy
service, the health is evaluated as Warning.
This is calculated by dividing the number of unhealthy services of the
specific service type over the total number of services of the specific
service type.
The computation rounds up to tolerate one failure on small numbers of
services. Default percentage is zero.
. Default value: 0 .
:type max_percent_unhealthy_services: int
"""
_attribute_map = {
'max_percent_unhealthy_partitions_per_service': {'key': 'MaxPercentUnhealthyPartitionsPerService', 'type': 'int'},
'max_percent_unhealthy_replicas_per_partition': {'key': 'MaxPercentUnhealthyReplicasPerPartition', 'type': 'int'},
'max_percent_unhealthy_services': {'key': 'MaxPercentUnhealthyServices', 'type': 'int'},
}
def __init__(self, max_percent_unhealthy_partitions_per_service=0, max_percent_unhealthy_replicas_per_partition=0, max_percent_unhealthy_services=0):
self.max_percent_unhealthy_partitions_per_service = max_percent_unhealthy_partitions_per_service
self.max_percent_unhealthy_replicas_per_partition = max_percent_unhealthy_replicas_per_partition
self.max_percent_unhealthy_services = max_percent_unhealthy_services
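# A minimal construction sketch (the percentages are illustrative):
#
#   policy = ServiceTypeHealthPolicy(
#       max_percent_unhealthy_partitions_per_service=10,
#       max_percent_unhealthy_replicas_per_partition=20,
#       max_percent_unhealthy_services=10)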
| mit |
40023256/2015cdag1man | static/Brython3.1.3-20150514-095342/Lib/unittest/test/test_suite.py | 791 | 12066 | import unittest
import sys
from .support import LoggingResult, TestEquality
### Support code for Test_TestSuite
################################################################
class Test(object):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def test_3(self): pass
def runTest(self): pass
def _mk_TestSuite(*names):
return unittest.TestSuite(Test.Foo(n) for n in names)
################################################################
class Test_TestSuite(unittest.TestCase, TestEquality):
### Set up attributes needed by inherited tests
################################################################
# Used by TestEquality.test_eq
eq_pairs = [(unittest.TestSuite(), unittest.TestSuite())
,(unittest.TestSuite(), unittest.TestSuite([]))
,(_mk_TestSuite('test_1'), _mk_TestSuite('test_1'))]
# Used by TestEquality.test_ne
ne_pairs = [(unittest.TestSuite(), _mk_TestSuite('test_1'))
,(unittest.TestSuite([]), _mk_TestSuite('test_1'))
,(_mk_TestSuite('test_1', 'test_2'), _mk_TestSuite('test_1', 'test_3'))
,(_mk_TestSuite('test_1'), _mk_TestSuite('test_2'))]
################################################################
### /Set up attributes needed by inherited tests
### Tests for TestSuite.__init__
################################################################
# "class TestSuite([tests])"
#
# The tests iterable should be optional
def test_init__tests_optional(self):
suite = unittest.TestSuite()
self.assertEqual(suite.countTestCases(), 0)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# TestSuite should deal with empty tests iterables by allowing the
# creation of an empty suite
def test_init__empty_tests(self):
suite = unittest.TestSuite([])
self.assertEqual(suite.countTestCases(), 0)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# TestSuite should allow any iterable to provide tests
def test_init__tests_from_any_iterable(self):
def tests():
yield unittest.FunctionTestCase(lambda: None)
yield unittest.FunctionTestCase(lambda: None)
suite_1 = unittest.TestSuite(tests())
self.assertEqual(suite_1.countTestCases(), 2)
suite_2 = unittest.TestSuite(suite_1)
self.assertEqual(suite_2.countTestCases(), 2)
suite_3 = unittest.TestSuite(set(suite_1))
self.assertEqual(suite_3.countTestCases(), 2)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# Does TestSuite() also allow other TestSuite() instances to be present
# in the tests iterable?
def test_init__TestSuite_instances_in_tests(self):
def tests():
ftc = unittest.FunctionTestCase(lambda: None)
yield unittest.TestSuite([ftc])
yield unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite(tests())
self.assertEqual(suite.countTestCases(), 2)
################################################################
### /Tests for TestSuite.__init__
# Container types should support the iter protocol
def test_iter(self):
test1 = unittest.FunctionTestCase(lambda: None)
test2 = unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite((test1, test2))
self.assertEqual(list(suite), [test1, test2])
    # "Return the number of tests represented by this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Presumably an empty TestSuite returns 0?
def test_countTestCases_zero_simple(self):
suite = unittest.TestSuite()
self.assertEqual(suite.countTestCases(), 0)
    # "Return the number of tests represented by this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Presumably an empty TestSuite (even if it contains other empty
# TestSuite instances) returns 0?
def test_countTestCases_zero_nested(self):
class Test1(unittest.TestCase):
def test(self):
pass
suite = unittest.TestSuite([unittest.TestSuite()])
self.assertEqual(suite.countTestCases(), 0)
    # "Return the number of tests represented by this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
def test_countTestCases_simple(self):
test1 = unittest.FunctionTestCase(lambda: None)
test2 = unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite((test1, test2))
self.assertEqual(suite.countTestCases(), 2)
    # "Return the number of tests represented by this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Make sure this holds for nested TestSuite instances, too
def test_countTestCases_nested(self):
class Test1(unittest.TestCase):
def test1(self): pass
def test2(self): pass
test2 = unittest.FunctionTestCase(lambda: None)
test3 = unittest.FunctionTestCase(lambda: None)
child = unittest.TestSuite((Test1('test2'), test2))
parent = unittest.TestSuite((test3, child, Test1('test1')))
self.assertEqual(parent.countTestCases(), 4)
# "Run the tests associated with this suite, collecting the result into
# the test result object passed as result."
#
# And if there are no tests? What then?
def test_run__empty_suite(self):
events = []
result = LoggingResult(events)
suite = unittest.TestSuite()
suite.run(result)
self.assertEqual(events, [])
# "Note that unlike TestCase.run(), TestSuite.run() requires the
# "result object to be passed in."
def test_run__requires_result(self):
suite = unittest.TestSuite()
try:
suite.run()
except TypeError:
pass
else:
self.fail("Failed to raise TypeError")
# "Run the tests associated with this suite, collecting the result into
# the test result object passed as result."
def test_run(self):
events = []
result = LoggingResult(events)
class LoggingCase(unittest.TestCase):
def run(self, result):
events.append('run %s' % self._testMethodName)
def test1(self): pass
def test2(self): pass
tests = [LoggingCase('test1'), LoggingCase('test2')]
unittest.TestSuite(tests).run(result)
self.assertEqual(events, ['run test1', 'run test2'])
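    # Outside this harness the same pattern is simply (illustrative):
    #
    #   suite = unittest.TestSuite([SomeCase('test1'), SomeCase('test2')])
    #   result = unittest.TestResult()
    #   suite.run(result)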
# "Add a TestCase ... to the suite"
def test_addTest__TestCase(self):
class Foo(unittest.TestCase):
def test(self): pass
test = Foo('test')
suite = unittest.TestSuite()
suite.addTest(test)
self.assertEqual(suite.countTestCases(), 1)
self.assertEqual(list(suite), [test])
# "Add a ... TestSuite to the suite"
def test_addTest__TestSuite(self):
class Foo(unittest.TestCase):
def test(self): pass
suite_2 = unittest.TestSuite([Foo('test')])
suite = unittest.TestSuite()
suite.addTest(suite_2)
self.assertEqual(suite.countTestCases(), 1)
self.assertEqual(list(suite), [suite_2])
# "Add all the tests from an iterable of TestCase and TestSuite
# instances to this test suite."
#
# "This is equivalent to iterating over tests, calling addTest() for
# each element"
def test_addTests(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
test_1 = Foo('test_1')
test_2 = Foo('test_2')
inner_suite = unittest.TestSuite([test_2])
def gen():
yield test_1
yield test_2
yield inner_suite
suite_1 = unittest.TestSuite()
suite_1.addTests(gen())
self.assertEqual(list(suite_1), list(gen()))
# "This is equivalent to iterating over tests, calling addTest() for
# each element"
suite_2 = unittest.TestSuite()
for t in gen():
suite_2.addTest(t)
self.assertEqual(suite_1, suite_2)
# "Add all the tests from an iterable of TestCase and TestSuite
# instances to this test suite."
#
# What happens if it doesn't get an iterable?
def test_addTest__noniterable(self):
suite = unittest.TestSuite()
try:
suite.addTests(5)
except TypeError:
pass
else:
self.fail("Failed to raise TypeError")
def test_addTest__noncallable(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTest, 5)
def test_addTest__casesuiteclass(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTest, Test_TestSuite)
self.assertRaises(TypeError, suite.addTest, unittest.TestSuite)
def test_addTests__string(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTests, "foo")
def test_function_in_suite(self):
def f(_):
pass
suite = unittest.TestSuite()
suite.addTest(f)
# when the bug is fixed this line will not crash
suite.run(unittest.TestResult())
def test_basetestsuite(self):
class Test(unittest.TestCase):
wasSetUp = False
wasTornDown = False
@classmethod
def setUpClass(cls):
cls.wasSetUp = True
@classmethod
def tearDownClass(cls):
cls.wasTornDown = True
def testPass(self):
pass
def testFail(self):
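                # 'fail' is an undefined name, so this deliberately raises NameError.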
fail
class Module(object):
wasSetUp = False
wasTornDown = False
@staticmethod
def setUpModule():
Module.wasSetUp = True
@staticmethod
def tearDownModule():
Module.wasTornDown = True
Test.__module__ = 'Module'
sys.modules['Module'] = Module
self.addCleanup(sys.modules.pop, 'Module')
suite = unittest.BaseTestSuite()
suite.addTests([Test('testPass'), Test('testFail')])
self.assertEqual(suite.countTestCases(), 2)
result = unittest.TestResult()
suite.run(result)
self.assertFalse(Module.wasSetUp)
self.assertFalse(Module.wasTornDown)
self.assertFalse(Test.wasSetUp)
self.assertFalse(Test.wasTornDown)
self.assertEqual(len(result.errors), 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 2)
def test_overriding_call(self):
class MySuite(unittest.TestSuite):
called = False
def __call__(self, *args, **kw):
self.called = True
unittest.TestSuite.__call__(self, *args, **kw)
suite = MySuite()
result = unittest.TestResult()
wrapper = unittest.TestSuite()
wrapper.addTest(suite)
wrapper(result)
self.assertTrue(suite.called)
# reusing results should be permitted even if abominable
self.assertFalse(result._testRunEntered)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
niieani/rethinkdb | external/gtest_1.6.0/test/gtest_xml_outfiles_test.py | 2526 | 5340 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "[email protected] (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, "")
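    # e.g. (illustrative): "--gtest_output=xml:/tmp/xml_outfiles/" makes each
    # test binary write its own XML file into that directory, whereas
    # "--gtest_output=xml:/tmp/out.xml" would write a single file.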
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
# TODO([email protected]): libtool causes the built test binary to be
# named lt-gtest_xml_outfiles_test_ instead of
    # gtest_xml_outfiles_test_. To account for this possibility, we
# allow both names in the following code. We should remove this
# hack when Chandler Carruth's libtool replacement tool is ready.
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
| agpl-3.0 |
camradal/ansible | lib/ansible/modules/notification/sendgrid.py | 9 | 9032 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Matt Makai <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
version_added: "2.0"
module: sendgrid
short_description: Sends an email with the SendGrid API
description:
- "Sends an email with a SendGrid account through their API, not through
the SMTP service."
notes:
- "This module is non-idempotent because it sends an email through the
external API. It is idempotent only in the case that the module fails."
- "Like the other notification modules, this one requires an external
dependency to work. In this case, you'll need an active SendGrid
account."
- "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers
you must pip install sendgrid"
- "since 2.2 username and password are not required if you supply an api_key"
requirements:
- sendgrid python library
options:
username:
description:
- username for logging into the SendGrid account.
- Since 2.2 it is only required if api_key is not supplied.
required: false
default: null
password:
description:
- password that corresponds to the username
- Since 2.2 it is only required if api_key is not supplied.
required: false
default: null
from_address:
description:
- the address in the "from" field for the email
required: true
to_addresses:
description:
- a list with one or more recipient email addresses
required: true
subject:
description:
- the desired subject for the email
required: true
api_key:
description:
- sendgrid API key to use instead of username/password
version_added: 2.2
required: false
default: null
cc:
description:
- a list of email addresses to cc
version_added: 2.2
required: false
default: null
bcc:
description:
- a list of email addresses to bcc
version_added: 2.2
required: false
default: null
attachments:
description:
- a list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs)
version_added: 2.2
required: false
default: null
from_name:
description:
      - the name you want to appear in the from field, e.g. 'John Doe'
version_added: 2.2
required: false
default: null
html_body:
description:
- whether the body is html content that should be rendered
version_added: 2.2
required: false
default: false
headers:
description:
- a dict to pass on as headers
version_added: 2.2
required: false
default: null
author: "Matt Makai (@makaimc)"
'''
EXAMPLES = '''
# send an email to a single recipient that the deployment was successful
- sendgrid:
username: "{{ sendgrid_username }}"
password: "{{ sendgrid_password }}"
from_address: "[email protected]"
to_addresses:
- "[email protected]"
subject: "Deployment success."
body: "The most recent Ansible deployment was successful."
delegate_to: localhost
# send an email to more than one recipient that the build failed
- sendgrid:
username: "{{ sendgrid_username }}"
password: "{{ sendgrid_password }}"
from_address: "[email protected]"
to_addresses:
- "[email protected]"
- "[email protected]"
    subject: "Build failure!"
body: "Unable to pull source repository from Git server."
delegate_to: localhost
'''
# =======================================
# sendgrid module support methods
#
import urllib
try:
import sendgrid
HAS_SENDGRID = True
except ImportError:
HAS_SENDGRID = False
def post_sendgrid_api(module, username, password, from_address, to_addresses,
subject, body, api_key=None, cc=None, bcc=None, attachments=None,
html_body=False, from_name=None, headers=None):
if not HAS_SENDGRID:
SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json"
AGENT = "Ansible"
data = {'api_user': username, 'api_key':password,
'from':from_address, 'subject': subject, 'text': body}
encoded_data = urllib.urlencode(data)
to_addresses_api = ''
for recipient in to_addresses:
if isinstance(recipient, unicode):
recipient = recipient.encode('utf-8')
to_addresses_api += '&to[]=%s' % recipient
encoded_data += to_addresses_api
headers = { 'User-Agent': AGENT,
'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'}
return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST')
else:
if api_key:
sg = sendgrid.SendGridClient(api_key)
else:
sg = sendgrid.SendGridClient(username, password)
message = sendgrid.Mail()
message.set_subject(subject)
for recip in to_addresses:
message.add_to(recip)
if cc:
for recip in cc:
message.add_cc(recip)
if bcc:
for recip in bcc:
message.add_bcc(recip)
if headers:
message.set_headers(headers)
if attachments:
for f in attachments:
name = os.path.basename(f)
message.add_attachment(name, f)
if from_name:
            message.set_from('%s <%s>' % (from_name, from_address))
else:
message.set_from(from_address)
if html_body:
message.set_html(body)
else:
message.set_text(body)
return sg.send(message)
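# A minimal call sketch (credentials and addresses are placeholders):
#
#   response, info = post_sendgrid_api(module, 'sg_user', 'sg_pass',
#       '[email protected]', ['[email protected]'], 'Hello', 'Plain text body')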
# =======================================
# Main
#
def main():
module = AnsibleModule(
argument_spec=dict(
username=dict(required=False),
password=dict(required=False, no_log=True),
api_key=dict(required=False, no_log=True),
bcc=dict(required=False, type='list'),
cc=dict(required=False, type='list'),
headers=dict(required=False, type='dict'),
from_address=dict(required=True),
from_name=dict(required=False),
to_addresses=dict(required=True, type='list'),
subject=dict(required=True),
body=dict(required=True),
html_body=dict(required=False, default=False, type='bool'),
attachments=dict(required=False, type='list')
),
supports_check_mode=True,
mutually_exclusive = [
['api_key', 'password'],
['api_key', 'username']
],
required_together = [['username', 'password']],
)
username = module.params['username']
password = module.params['password']
api_key = module.params['api_key']
bcc = module.params['bcc']
cc = module.params['cc']
headers = module.params['headers']
from_name = module.params['from_name']
from_address = module.params['from_address']
to_addresses = module.params['to_addresses']
subject = module.params['subject']
body = module.params['body']
html_body = module.params['html_body']
attachments = module.params['attachments']
sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, html_body, attachments]
if any(lib_arg is not None for lib_arg in sendgrid_lib_args) and not HAS_SENDGRID:
module.fail_json(msg='You must install the sendgrid python library if you want to use any of the following arguments: api_key, bcc, cc, headers, from_name, html_body, attachments')
response, info = post_sendgrid_api(module, username, password,
from_address, to_addresses, subject, body, attachments=attachments,
bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key)
if not HAS_SENDGRID:
if info['status'] != 200:
module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg'])
else:
if response != 200:
module.fail_json(msg="unable to send email through SendGrid API: %s" % info['message'])
module.exit_json(msg=subject, changed=False)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
mcsosa121/cafa | cafaenv/lib/python2.7/site-packages/django/contrib/admin/widgets.py | 345 | 14769 | """
Form Widget classes specific to the Django admin site.
"""
from __future__ import unicode_literals
import copy
from django import forms
from django.contrib.admin.templatetags.admin_static import static
from django.core.urlresolvers import reverse
from django.db.models.deletion import CASCADE
from django.forms.utils import flatatt
from django.forms.widgets import RadioFieldRenderer
from django.template.loader import render_to_string
from django.utils import six
from django.utils.encoding import force_text
from django.utils.html import (
escape, escapejs, format_html, format_html_join, smart_urlquote,
)
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
from django.utils.translation import ugettext as _
class FilteredSelectMultiple(forms.SelectMultiple):
"""
A SelectMultiple with a JavaScript filter interface.
Note that the resulting JavaScript assumes that the jsi18n
catalog has been loaded in the page
"""
@property
def media(self):
js = ["core.js", "SelectBox.js", "SelectFilter2.js"]
return forms.Media(js=[static("admin/js/%s" % path) for path in js])
def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
self.verbose_name = verbose_name
self.is_stacked = is_stacked
super(FilteredSelectMultiple, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
if attrs is None:
attrs = {}
attrs['class'] = 'selectfilter'
if self.is_stacked:
attrs['class'] += 'stacked'
output = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)]
output.append('<script type="text/javascript">addEvent(window, "load", function(e) {')
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append('SelectFilter.init("id_%s", "%s", %s); });</script>\n'
% (name, escapejs(self.verbose_name), int(self.is_stacked)))
return mark_safe(''.join(output))
class AdminDateWidget(forms.DateInput):
@property
def media(self):
js = ["calendar.js", "admin/DateTimeShortcuts.js"]
return forms.Media(js=[static("admin/js/%s" % path) for path in js])
def __init__(self, attrs=None, format=None):
final_attrs = {'class': 'vDateField', 'size': '10'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminDateWidget, self).__init__(attrs=final_attrs, format=format)
class AdminTimeWidget(forms.TimeInput):
@property
def media(self):
js = ["calendar.js", "admin/DateTimeShortcuts.js"]
return forms.Media(js=[static("admin/js/%s" % path) for path in js])
def __init__(self, attrs=None, format=None):
final_attrs = {'class': 'vTimeField', 'size': '8'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTimeWidget, self).__init__(attrs=final_attrs, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
def __init__(self, attrs=None):
widgets = [AdminDateWidget, AdminTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
forms.MultiWidget.__init__(self, widgets, attrs)
def format_output(self, rendered_widgets):
return format_html('<p class="datetime">{} {}<br />{} {}</p>',
_('Date:'), rendered_widgets[0],
_('Time:'), rendered_widgets[1])
class AdminRadioFieldRenderer(RadioFieldRenderer):
def render(self):
"""Outputs a <ul> for this set of radio fields."""
return format_html('<ul{}>\n{}\n</ul>',
flatatt(self.attrs),
format_html_join('\n', '<li>{}</li>',
((force_text(w),) for w in self)))
class AdminRadioSelect(forms.RadioSelect):
renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.ClearableFileInput):
template_with_initial = ('<p class="file-upload">%s</p>'
% forms.ClearableFileInput.template_with_initial)
template_with_clear = ('<span class="clearable-file-input">%s</span>'
% forms.ClearableFileInput.template_with_clear)
def url_params_from_lookup_dict(lookups):
"""
Converts the type of lookups specified in a ForeignKey limit_choices_to
attribute to a dictionary of query parameters
"""
params = {}
if lookups and hasattr(lookups, 'items'):
items = []
for k, v in lookups.items():
if callable(v):
v = v()
if isinstance(v, (tuple, list)):
v = ','.join(str(x) for x in v)
elif isinstance(v, bool):
# See django.db.fields.BooleanField.get_prep_lookup
v = ('0', '1')[v]
else:
v = six.text_type(v)
items.append((k, v))
params.update(dict(items))
return params
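# e.g. (illustrative): {'is_active': True, 'groups__in': [1, 2]} becomes
# {'is_active': '1', 'groups__in': '1,2'}, ready to be encoded into the URL.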
class ForeignKeyRawIdWidget(forms.TextInput):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface rather than
in a <select> box.
"""
def __init__(self, rel, admin_site, attrs=None, using=None):
self.rel = rel
self.admin_site = admin_site
self.db = using
super(ForeignKeyRawIdWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
rel_to = self.rel.model
if attrs is None:
attrs = {}
extra = []
if rel_to in self.admin_site._registry:
# The related object is registered with the same AdminSite
related_url = reverse(
'admin:%s_%s_changelist' % (
rel_to._meta.app_label,
rel_to._meta.model_name,
),
current_app=self.admin_site.name,
)
params = self.url_parameters()
if params:
url = '?' + '&'.join('%s=%s' % (k, v) for k, v in params.items())
else:
url = ''
if "class" not in attrs:
attrs['class'] = 'vForeignKeyRawIdAdminField' # The JavaScript code looks for this hook.
# TODO: "lookup_id_" is hard-coded here. This should instead use
# the correct API to determine the ID dynamically.
extra.append('<a href="%s%s" class="related-lookup" id="lookup_id_%s" title="%s"></a>' %
(related_url, url, name, _('Lookup')))
output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)] + extra
if value:
output.append(self.label_for_value(value))
return mark_safe(''.join(output))
def base_url_parameters(self):
limit_choices_to = self.rel.limit_choices_to
if callable(limit_choices_to):
limit_choices_to = limit_choices_to()
return url_params_from_lookup_dict(limit_choices_to)
def url_parameters(self):
from django.contrib.admin.views.main import TO_FIELD_VAR
params = self.base_url_parameters()
params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
return params
def label_for_value(self, value):
key = self.rel.get_related_field().name
try:
obj = self.rel.model._default_manager.using(self.db).get(**{key: value})
return ' <strong>%s</strong>' % escape(Truncator(obj).words(14, truncate='...'))
except (ValueError, self.rel.model.DoesNotExist):
return ''
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
in a <select multiple> box.
"""
def render(self, name, value, attrs=None):
if attrs is None:
attrs = {}
if self.rel.model in self.admin_site._registry:
# The related object is registered with the same AdminSite
attrs['class'] = 'vManyToManyRawIdAdminField'
if value:
value = ','.join(force_text(v) for v in value)
else:
value = ''
return super(ManyToManyRawIdWidget, self).render(name, value, attrs)
def url_parameters(self):
return self.base_url_parameters()
def label_for_value(self, value):
return ''
def value_from_datadict(self, data, files, name):
value = data.get(name)
if value:
return value.split(',')
class RelatedFieldWidgetWrapper(forms.Widget):
"""
This class is a wrapper to a given widget to add the add icon for the
admin interface.
"""
template = 'admin/related_widget_wrapper.html'
def __init__(self, widget, rel, admin_site, can_add_related=None,
can_change_related=False, can_delete_related=False):
self.needs_multipart_form = widget.needs_multipart_form
self.attrs = widget.attrs
self.choices = widget.choices
self.widget = widget
self.rel = rel
# Backwards compatible check for whether a user can add related
# objects.
if can_add_related is None:
can_add_related = rel.model in admin_site._registry
self.can_add_related = can_add_related
# XXX: The UX does not support multiple selected values.
multiple = getattr(widget, 'allow_multiple_selected', False)
self.can_change_related = not multiple and can_change_related
# XXX: The deletion UX can be confusing when dealing with cascading deletion.
cascade = getattr(rel, 'on_delete', None) is CASCADE
self.can_delete_related = not multiple and not cascade and can_delete_related
# so we can check if the related object is registered with this AdminSite
self.admin_site = admin_site
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.widget = copy.deepcopy(self.widget, memo)
obj.attrs = self.widget.attrs
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.widget.is_hidden
@property
def media(self):
return self.widget.media
def get_related_url(self, info, action, *args):
return reverse("admin:%s_%s_%s" % (info + (action,)),
current_app=self.admin_site.name, args=args)
def render(self, name, value, *args, **kwargs):
from django.contrib.admin.views.main import IS_POPUP_VAR, TO_FIELD_VAR
rel_opts = self.rel.model._meta
info = (rel_opts.app_label, rel_opts.model_name)
self.widget.choices = self.choices
url_params = '&'.join("%s=%s" % param for param in [
(TO_FIELD_VAR, self.rel.get_related_field().name),
(IS_POPUP_VAR, 1),
])
context = {
'widget': self.widget.render(name, value, *args, **kwargs),
'name': name,
'url_params': url_params,
'model': rel_opts.verbose_name,
}
if self.can_change_related:
change_related_template_url = self.get_related_url(info, 'change', '__fk__')
context.update(
can_change_related=True,
change_related_template_url=change_related_template_url,
)
if self.can_add_related:
add_related_url = self.get_related_url(info, 'add')
context.update(
can_add_related=True,
add_related_url=add_related_url,
)
if self.can_delete_related:
delete_related_template_url = self.get_related_url(info, 'delete', '__fk__')
context.update(
can_delete_related=True,
delete_related_template_url=delete_related_template_url,
)
return mark_safe(render_to_string(self.template, context))
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
self.attrs = self.widget.build_attrs(extra_attrs=None, **kwargs)
return self.attrs
def value_from_datadict(self, data, files, name):
return self.widget.value_from_datadict(data, files, name)
def id_for_label(self, id_):
return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
def __init__(self, attrs=None):
final_attrs = {'class': 'vLargeTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextareaWidget, self).__init__(attrs=final_attrs)
class AdminTextInputWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextInputWidget, self).__init__(attrs=final_attrs)
class AdminEmailInputWidget(forms.EmailInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminEmailInputWidget, self).__init__(attrs=final_attrs)
class AdminURLFieldWidget(forms.URLInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vURLField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminURLFieldWidget, self).__init__(attrs=final_attrs)
def render(self, name, value, attrs=None):
html = super(AdminURLFieldWidget, self).render(name, value, attrs)
if value:
value = force_text(self._format_value(value))
final_attrs = {'href': smart_urlquote(value)}
html = format_html(
'<p class="url">{} <a{}>{}</a><br />{} {}</p>',
_('Currently:'), flatatt(final_attrs), value,
_('Change:'), html
)
return html
class AdminIntegerFieldWidget(forms.TextInput):
class_name = 'vIntegerField'
def __init__(self, attrs=None):
final_attrs = {'class': self.class_name}
if attrs is not None:
final_attrs.update(attrs)
super(AdminIntegerFieldWidget, self).__init__(attrs=final_attrs)
class AdminBigIntegerFieldWidget(AdminIntegerFieldWidget):
class_name = 'vBigIntegerField'
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vCommaSeparatedIntegerField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=final_attrs)
| mit |
AnsgarSchmidt/sensomatic | server/Chromecast.py | 1 | 6445 | import os
import sys
import pafy
import time
import Queue
import logging
import threading
import ConfigParser
import pychromecast
import paho.mqtt.client as mqtt
class Chromecast(threading.Thread):
def _readConfig(self):
update = False
if not os.path.isdir(self._homeDir):
print "Creating homeDir"
os.makedirs(self._homeDir)
if os.path.isfile(self._configFileName):
self._config.read(self._configFileName)
else:
print "Config file not found"
update = True
if not self._config.has_section('MQTT'):
print "Adding MQTT part"
update = True
self._config.add_section("MQTT")
if not self._config.has_option("MQTT", "ServerAddress"):
print "No Server Address"
update = True
self._config.set("MQTT", "ServerAddress", "<ServerAddress>")
if not self._config.has_option("MQTT", "ServerPort"):
print "No Server Port"
update = True
self._config.set("MQTT", "ServerPort", "1883")
if update:
with open(self._configFileName, 'w') as f:
self._config.write(f)
sys.exit(1)
def __init__(self):
threading.Thread.__init__(self)
self.setDaemon(True)
self._logger = logging.getLogger(__name__)
hdlr = logging.FileHandler('/tmp/sensomatic.log')
formatter = logging.Formatter('%(asctime)s %(name)s %(lineno)d %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
self._logger.addHandler(hdlr)
self._logger.setLevel(logging.INFO)
self._homeDir = os.path.expanduser("~/.sensomatic")
self._configFileName = self._homeDir + '/config.ini'
self._config = ConfigParser.ConfigParser()
self._readConfig()
self._workingQueue = Queue.Queue()
self._mqclient = mqtt.Client("Chromecast", clean_session=True)
self._mqclient.on_connect = self._on_connect
self._mqclient.on_message = self._on_message
self._mqclient.on_disconnect = self._on_disconnect
        self._mqclient.connect(self._config.get("MQTT", "ServerAddress"), self._config.getint("MQTT", "ServerPort"), 60)
self._mqclient.loop_start()
self._chromecasts = {}
self._chromecasts_OK = False
    def _on_connect(self, client, userdata, flags, rc):
self._logger.info("Connected Chromecast with result code %s" % rc)
self._mqclient.subscribe("chromecast/#")
def _on_message(self, client, userdata, msg):
self._workingQueue.put((msg.topic, msg.payload))
def _on_disconnect(self, client, userdata, msg):
self._logger.error("Disconnect Chromecast")
def _disconnectAll(self):
try:
for i in self._chromecasts:
i.disconnect()
self._chromecasts = {}
except Exception as e:
self._logger.error(e)
self._chromecasts = {}
def _discoverAll(self):
self._disconnectAll()
try:
casts = pychromecast.get_chromecasts(timeout=10)
for i in casts:
self._logger.info(i)
self._chromecasts[i.device.friendly_name] = i
self._chromecasts_OK = True
except Exception as e:
self._chromecasts_OK = False
            self._logger.error(e)
def run(self):
while True:
if not self._chromecasts_OK:
self._discoverAll()
try:
k, v = self._workingQueue.get()
keys = k.split("/")
self._logger.debug(k)
self._logger.debug(v)
if keys[1] in self._chromecasts.keys():
self._logger.debug("Found device")
cast = self._chromecasts[keys[1]]
if keys[2] == "playYoutube":
self._logger.info("playYoutube")
audiostream = pafy.new("https://www.youtube.com/watch?v=" + v).getbestaudio()
cast.wait()
mc = cast.media_controller
mc.play_media(audiostream.url, 'audio/' + audiostream.extension)
mc.block_until_active()
mc.play()
if keys[2] == "playMusicURL":
self._logger.info("playMusicURL")
cast.wait()
                        # We start playback here because this may be just a change of media source
mc = cast.media_controller
mc.play_media(v, 'audio/mpeg')
mc.block_until_active()
mc.play()
time.sleep(5)
counter = 0
while counter < 5 and cast.status.app_id is None:
self._logger.info("retry playMusicURL")
mc.play_media(v, 'audio/mpeg')
mc.block_until_active()
mc.play()
time.sleep(5)
counter += 1
if keys[2] == "volume":
self._logger.info("volume")
cast.wait()
cast.set_volume(float(v))
if keys[2] == "stop":
self._logger.info("stop")
cast.wait()
cast.quit_app()
except Exception as e:
self._logger.error("Error in processing")
self._logger.error(e)
self._chromecasts_OK = False
            # Deliberately no re-publish of (k, v) here: the client is
            # subscribed to chromecast/#, so echoing the message back to the
            # broker would re-queue the same message and loop forever.
if __name__ == '__main__':
c = Chromecast()
c.start()
time.sleep(60)
#c.playMusicURL('Chromeansi', 'http://inforadio.de/livemp3')
#time.sleep(1)
#c.volume('Chromeansi', 0.6)
#time.sleep(1)
#c.volume('Chromeansi', 0.1)
#time.sleep(1)
#c.stop('Chromeansi')
#print c.getVolume('Chromeansi')
#print c.getDisplayName('Chromeansi')
#c.stop('Chromeansi')
#c.test('Chromeansi')
#c.playYoutube('Chromeansi', '0fYL_qiDYf0')
#c.volume('Chromeansi', 0.6)
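    # The same actions can also be driven over MQTT from another host
    # (a sketch; the broker address and the device name 'Chromeansi' are assumptions):
    #   mosquitto_pub -h <ServerAddress> -t 'chromecast/Chromeansi/playMusicURL' -m 'http://inforadio.de/livemp3'
    #   mosquitto_pub -h <ServerAddress> -t 'chromecast/Chromeansi/volume' -m '0.5'
    #   mosquitto_pub -h <ServerAddress> -t 'chromecast/Chromeansi/stop' -m '1'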
| apache-2.0 |
lattwood/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/read_checksum_from_png.py | 207 | 1877 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def read_checksum(filehandle):
# We expect the comment to be at the beginning of the file.
data = filehandle.read(2048)
comment_key = 'tEXtchecksum\x00'
comment_pos = data.find(comment_key)
if comment_pos == -1:
return
checksum_pos = comment_pos + len(comment_key)
return data[checksum_pos:checksum_pos + 32]
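# Example usage (a minimal sketch; 'expected.png' is a hypothetical path):
#
#   with open('expected.png', 'rb') as png_file:
#       print(read_checksum(png_file))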
| bsd-3-clause |
dklann/rivendell | apis/pypad/scripts/pypad_live365.py | 2 | 2842 | #!%PYTHON_BANGPATH%
# pypad_live365.py
#
# Write PAD updates to Live365 stations
#
# (C) Copyright 2018-2019 Fred Gleason <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import sys
import syslog
import configparser
import pycurl
import pypad
from io import BytesIO
def eprint(*args,**kwargs):
print(*args,file=sys.stderr,**kwargs)
def ProcessPad(update):
n=1
section='Station'+str(n)
while(update.config().has_section(section)):
if update.shouldBeProcessed(section) and update.hasPadType(pypad.TYPE_NOW):
member=update.escape(update.config().get(section,'MemberName'),pypad.ESCAPE_URL)
password=update.escape(update.config().get(section,'Password'),pypad.ESCAPE_URL)
title=update.resolvePadFields(update.config().get(section,'TitleString'),pypad.ESCAPE_URL)
artist=update.resolvePadFields(update.config().get(section,'ArtistString'),pypad.ESCAPE_URL)
album=update.resolvePadFields(update.config().get(section,'AlbumString'),pypad.ESCAPE_URL)
seconds=str(update.padField(pypad.TYPE_NOW,pypad.FIELD_LENGTH)//1000)
buf=BytesIO()
curl=pycurl.Curl()
url='http://www.live365.com/cgi-bin/add_song.cgi?member_name='+member+'&password='+password+'&version=2&filename=Rivendell&seconds='+seconds+'&title='+title+'&artist='+artist+'&album='+album
curl.setopt(curl.URL,url)
curl.setopt(curl.WRITEDATA,buf)
curl.setopt(curl.FOLLOWLOCATION,True)
try:
curl.perform()
code=curl.getinfo(pycurl.RESPONSE_CODE)
if (code<200) or (code>=300):
update.syslog(syslog.LOG_WARNING,'['+section+'] returned response code '+str(code))
except pycurl.error:
update.syslog(syslog.LOG_WARNING,'['+section+'] failed: '+curl.errstr())
curl.close()
n=n+1
section='Station'+str(n)
#
# 'Main' function
#
rcvr=pypad.Receiver()
try:
rcvr.setConfigFile(sys.argv[3])
except IndexError:
eprint('pypad_live365.py: USAGE: cmd <hostname> <port> <config>')
sys.exit(1)
rcvr.setPadCallback(ProcessPad)
rcvr.start(sys.argv[1],int(sys.argv[2]))
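# Example invocation (a sketch; host, port and config path are assumptions):
#   ./pypad_live365.py localhost 34289 /etc/rd_live365.conf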
| gpl-2.0 |
aungmyo/iso8583py | ISO8583/ISO8583.py | 6 | 50397 | """
(C) Copyright 2009 Igor V. Custodio
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'Igor Vitorio Custodio <[email protected]>'
__version__= '1.3.1'
__licence__ = 'GPL V3'
from ISOErrors import *
import struct
class ISO8583:
"""Main Class to work with ISO8583 packages.
    Used to create, change, send, receive, parse or work with ISO8583 Package version 1993.
It's 100% Python :)
Enjoy it!
Thanks to: Vulcanno IT Solutions <http://www.vulcanno.com.br>
Licence: GPL Version 3
More information: http://code.google.com/p/iso8583py/
Example:
from ISO8583.ISO8583 import ISO8583
from ISO8583.ISOErrors import *
iso = ISO8583()
try:
iso.setMTI('0800')
iso.setBit(2,2)
iso.setBit(4,4)
iso.setBit(12,12)
iso.setBit(21,21)
iso.setBit(17,17)
iso.setBit(49,986)
iso.setBit(99,99)
except ValueToLarge, e:
print ('Value too large :( %s' % e)
except InvalidMTI, i:
print ('This MTI is wrong :( %s' % i)
print ('The Message Type Indication is = %s' %iso.getMTI())
print ('The Bitmap is = %s' %iso.getBitmap())
iso.showIsoBits();
print ('This is the ISO8583 complete package %s' % iso.getRawIso())
print ('This is the ISO8583 complete package to sent over the TCPIP network %s' % iso.getNetworkISO())
"""
#Attributes
    # Bits to be set 00000000 -> _BIT_POSITION_1 ... _BIT_POSITION_8
_BIT_POSITION_1 = 128 # 10 00 00 00
_BIT_POSITION_2 = 64 # 01 00 00 00
_BIT_POSITION_3 = 32 # 00 10 00 00
_BIT_POSITION_4 = 16 # 00 01 00 00
_BIT_POSITION_5 = 8 # 00 00 10 00
_BIT_POSITION_6 = 4 # 00 00 01 00
_BIT_POSITION_7 = 2 # 00 00 00 10
_BIT_POSITION_8 = 1 # 00 00 00 01
#Array to translate bit to position
_TMP = [0,_BIT_POSITION_8,_BIT_POSITION_1,_BIT_POSITION_2,_BIT_POSITION_3,_BIT_POSITION_4,_BIT_POSITION_5,_BIT_POSITION_6,_BIT_POSITION_7]
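    # setBit() indexes this as _TMP[(bit % 8) + 1]: e.g. for bit 3,
    # (3 % 8) + 1 = 4 and _TMP[4] = _BIT_POSITION_3 = 32 (00100000),
    # i.e. the third bit from the left of its bitmap byte.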
_BIT_DEFAULT_VALUE = 0
#ISO8583 contants
_BITS_VALUE_TYPE = {}
    # Every _BITS_VALUE_TYPE has:
    # _BITS_VALUE_TYPE[N] = [X, Y, Z, W, K]
    # N = bit number
    # X = small str representation of the bit meaning
    # Y = large str representation
    # Z = type of the bit (B, N, A, AN, ANS, LL, LLL)
    # W = size of the information that bit N needs to have
    # K = type of values: a, an, n, ansb, b
_BITS_VALUE_TYPE[1] = ['BME','Bit Map Extended','B',16,'b']
_BITS_VALUE_TYPE[2] = ['2','Primary account number (PAN)','LL',19,'n']
    _BITS_VALUE_TYPE[3] = ['3','Processing code','N',6,'n']
_BITS_VALUE_TYPE[4] = ['4','Amount transaction','N',12,'n']
_BITS_VALUE_TYPE[5] = ['5','Amount reconciliation','N',12,'n']
_BITS_VALUE_TYPE[6] = ['6','Amount cardholder billing','N',12,'n']
_BITS_VALUE_TYPE[7] = ['7','Date and time transmission','N',10,'n']
_BITS_VALUE_TYPE[8] = ['8','Amount cardholder billing fee','N',8,'n']
_BITS_VALUE_TYPE[9] = ['9','Conversion rate reconciliation','N',8,'n']
_BITS_VALUE_TYPE[10] = ['10','Conversion rate cardholder billing','N',8,'n']
_BITS_VALUE_TYPE[11] = ['11','Systems trace audit number','N',6,'n']
_BITS_VALUE_TYPE[12] = ['12','Date and time local transaction','N',6,'n']
_BITS_VALUE_TYPE[13] = ['13','Date effective','N',4,'n']
_BITS_VALUE_TYPE[14] = ['14','Date expiration','N',4,'n']
_BITS_VALUE_TYPE[15] = ['15','Date settlement','N',4,'n']
_BITS_VALUE_TYPE[16] = ['16','Date conversion','N',4,'n']
_BITS_VALUE_TYPE[17] = ['17','Date capture','N',4,'n']
_BITS_VALUE_TYPE[18] = ['18','Message error indicator','N',4,'n']
_BITS_VALUE_TYPE[19] = ['19','Country code acquiring institution','N',3,'n']
_BITS_VALUE_TYPE[20] = ['20','Country code primary account number (PAN)','N',3,'n']
_BITS_VALUE_TYPE[21] = ['21','Transaction life cycle identification data','ANS',3,'n']
_BITS_VALUE_TYPE[22] = ['22','Point of service data code','N',3,'n']
_BITS_VALUE_TYPE[23] = ['23','Card sequence number','N',3,'n']
_BITS_VALUE_TYPE[24] = ['24','Function code','N',3,'n']
_BITS_VALUE_TYPE[25] = ['25','Message reason code','N',2,'n']
_BITS_VALUE_TYPE[26] = ['26','Merchant category code','N',2,'n']
_BITS_VALUE_TYPE[27] = ['27','Point of service capability','N',1,'n']
_BITS_VALUE_TYPE[28] = ['28','Date reconciliation','N',8,'n']
_BITS_VALUE_TYPE[29] = ['29','Reconciliation indicator','N',8,'n']
_BITS_VALUE_TYPE[30] = ['30','Amounts original','N',8,'n']
_BITS_VALUE_TYPE[31] = ['31','Acquirer reference number','N',8,'n']
_BITS_VALUE_TYPE[32] = ['32','Acquiring institution identification code','LL',11,'n']
_BITS_VALUE_TYPE[33] = ['33','Forwarding institution identification code','LL',11,'n']
_BITS_VALUE_TYPE[34] = ['34','Electronic commerce data','LL',28,'n']
_BITS_VALUE_TYPE[35] = ['35','Track 2 data','LL',37,'n']
_BITS_VALUE_TYPE[36] = ['36','Track 3 data','LLL',104,'n']
_BITS_VALUE_TYPE[37] = ['37','Retrieval reference number','N',12,'an']
_BITS_VALUE_TYPE[38] = ['38','Approval code','N',6,'an']
_BITS_VALUE_TYPE[39] = ['39','Action code','A',2,'an']
_BITS_VALUE_TYPE[40] = ['40','Service code','N',3,'an']
_BITS_VALUE_TYPE[41] = ['41','Card acceptor terminal identification','N',8,'ans']
_BITS_VALUE_TYPE[42] = ['42','Card acceptor identification code','A',15,'ans']
    _BITS_VALUE_TYPE[43] = ['43','Card acceptor name/location','A',40,'ans']
_BITS_VALUE_TYPE[44] = ['44','Additional response data','LL',25,'an']
_BITS_VALUE_TYPE[45] = ['45','Track 1 data','LL',76,'an']
_BITS_VALUE_TYPE[46] = ['46','Amounts fees','LLL',999,'an']
_BITS_VALUE_TYPE[47] = ['47','Additional data national','LLL',999,'an']
_BITS_VALUE_TYPE[48] = ['48','Additional data private','LLL',999,'an']
_BITS_VALUE_TYPE[49] = ['49','Verification data','A',3,'a']
_BITS_VALUE_TYPE[50] = ['50','Currency code, settlement','AN',3,'an']
_BITS_VALUE_TYPE[51] = ['51','Currency code, cardholder billing','A',3,'a']
_BITS_VALUE_TYPE[52] = ['52','Personal identification number (PIN) data','B',16,'b']
_BITS_VALUE_TYPE[53] = ['53','Security related control information','LL',18,'n']
_BITS_VALUE_TYPE[54] = ['54','Amounts additional','LLL',120,'an']
_BITS_VALUE_TYPE[55] = ['55','Integrated circuit card (ICC) system related data','LLL',999,'ans']
_BITS_VALUE_TYPE[56] = ['56','Original data elements','LLL',999,'ans']
_BITS_VALUE_TYPE[57] = ['57','Authorisation life cycle code','LLL',999,'ans']
_BITS_VALUE_TYPE[58] = ['58','Authorising agent institution identification code','LLL',999,'ans']
_BITS_VALUE_TYPE[59] = ['59','Transport data','LLL',999,'ans']
_BITS_VALUE_TYPE[60] = ['60','Reserved for national use','LL',7,'ans']
_BITS_VALUE_TYPE[61] = ['61','Reserved for national use','LLL',999,'ans']
_BITS_VALUE_TYPE[62] = ['62','Reserved for private use','LLL',999,'ans']
_BITS_VALUE_TYPE[63] = ['63','Reserved for private use','LLL',999,'ans']
_BITS_VALUE_TYPE[64] = ['64','Message authentication code (MAC) field','B',16,'b']
_BITS_VALUE_TYPE[65] = ['65','Bitmap tertiary','B',16,'b']
_BITS_VALUE_TYPE[66] = ['66','Settlement code','N',1,'n']
_BITS_VALUE_TYPE[67] = ['67','Extended payment data','N',2,'n']
_BITS_VALUE_TYPE[68] = ['68','Receiving institution country code','N',3,'n']
_BITS_VALUE_TYPE[69] = ['69','Settlement institution county code','N',3,'n']
_BITS_VALUE_TYPE[70] = ['70','Network management Information code','N',3,'n']
_BITS_VALUE_TYPE[71] = ['71','Message number','N',4,'n']
_BITS_VALUE_TYPE[72] = ['72','Data record','LLL',999,'ans']
_BITS_VALUE_TYPE[73] = ['73','Date action','N',6,'n']
_BITS_VALUE_TYPE[74] = ['74','Credits, number','N',10,'n']
_BITS_VALUE_TYPE[75] = ['75','Credits, reversal number','N',10,'n']
_BITS_VALUE_TYPE[76] = ['76','Debits, number','N',10,'n']
_BITS_VALUE_TYPE[77] = ['77','Debits, reversal number','N',10,'n']
_BITS_VALUE_TYPE[78] = ['78','Transfer number','N',10,'n']
_BITS_VALUE_TYPE[79] = ['79','Transfer, reversal number','N',10,'n']
_BITS_VALUE_TYPE[80] = ['80','Inquiries number','N',10,'n']
_BITS_VALUE_TYPE[81] = ['81','Authorizations, number','N',10,'n']
_BITS_VALUE_TYPE[82] = ['82','Credits, processing fee amount','N',12,'n']
_BITS_VALUE_TYPE[83] = ['83','Credits, transaction fee amount','N',12,'n']
_BITS_VALUE_TYPE[84] = ['84','Debits, processing fee amount','N',12,'n']
_BITS_VALUE_TYPE[85] = ['85','Debits, transaction fee amount','N',12,'n']
_BITS_VALUE_TYPE[86] = ['86','Credits, amount','N',15,'n']
_BITS_VALUE_TYPE[87] = ['87','Credits, reversal amount','N',15,'n']
_BITS_VALUE_TYPE[88] = ['88','Debits, amount','N',15,'n']
_BITS_VALUE_TYPE[89] = ['89','Debits, reversal amount','N',15,'n']
_BITS_VALUE_TYPE[90] = ['90','Original data elements','N',42,'n']
_BITS_VALUE_TYPE[91] = ['91','File update code','AN',1,'an']
_BITS_VALUE_TYPE[92] = ['92','File security code','N',2,'n']
_BITS_VALUE_TYPE[93] = ['93','Response indicator','N',5,'n']
_BITS_VALUE_TYPE[94] = ['94','Service indicator','AN',7,'an']
_BITS_VALUE_TYPE[95] = ['95','Replacement amounts','AN',42,'an']
_BITS_VALUE_TYPE[96] = ['96','Message security code','AN',8,'an']
_BITS_VALUE_TYPE[97] = ['97','Amount, net settlement','N',16,'n']
_BITS_VALUE_TYPE[98] = ['98','Payee','ANS',25,'ans']
_BITS_VALUE_TYPE[99] = ['99','Settlement institution identification code','LL',11,'n']
_BITS_VALUE_TYPE[100] = ['100','Receiving institution identification code','LL',11,'n']
_BITS_VALUE_TYPE[101] = ['101','File name','ANS',17,'ans']
_BITS_VALUE_TYPE[102] = ['102','Account identification 1','LL',28,'ans']
_BITS_VALUE_TYPE[103] = ['103','Account identification 2','LL',28,'ans']
_BITS_VALUE_TYPE[104] = ['104','Transaction description','LLL',100,'ans']
_BITS_VALUE_TYPE[105] = ['105','Reserved for ISO use','LLL',999,'ans']
_BITS_VALUE_TYPE[106] = ['106','Reserved for ISO use','LLL',999,'ans']
_BITS_VALUE_TYPE[107] = ['107','Reserved for ISO use','LLL',999,'ans']
_BITS_VALUE_TYPE[108] = ['108','Reserved for ISO use','LLL',999,'ans']
_BITS_VALUE_TYPE[109] = ['109','Reserved for ISO use','LLL',999,'ans']
_BITS_VALUE_TYPE[110] = ['110','Reserved for ISO use','LLL',999,'ans']
_BITS_VALUE_TYPE[111] = ['111','Reserved for private use','LLL',999,'ans']
_BITS_VALUE_TYPE[112] = ['112','Reserved for private use','LLL',999,'ans']
_BITS_VALUE_TYPE[113] = ['113','Reserved for private use','LL',11,'n']
_BITS_VALUE_TYPE[114] = ['114','Reserved for national use','LLL',999,'ans']
_BITS_VALUE_TYPE[115] = ['115','Reserved for national use','LLL',999,'ans']
_BITS_VALUE_TYPE[116] = ['116','Reserved for national use','LLL',999,'ans']
_BITS_VALUE_TYPE[117] = ['117','Reserved for national use','LLL',999,'ans']
_BITS_VALUE_TYPE[118] = ['118','Reserved for national use','LLL',999,'ans']
_BITS_VALUE_TYPE[119] = ['119','Reserved for national use','LLL',999,'ans']
_BITS_VALUE_TYPE[120] = ['120','Reserved for private use','LLL',999,'ans']
_BITS_VALUE_TYPE[121] = ['121','Reserved for private use','LLL',999,'ans']
_BITS_VALUE_TYPE[122] = ['122','Reserved for national use','LLL',999,'ans']
_BITS_VALUE_TYPE[123] = ['123','Reserved for private use','LLL',999,'ans']
_BITS_VALUE_TYPE[124] = ['124','Info Text','LLL',255,'ans']
_BITS_VALUE_TYPE[125] = ['125','Network management information','LL',50,'ans']
_BITS_VALUE_TYPE[126] = ['126','Issuer trace id','LL',6,'ans']
_BITS_VALUE_TYPE[127] = ['127','Reserved for private use','LLL',999,'ans']
_BITS_VALUE_TYPE[128] = ['128','Message authentication code (MAC) field','B',16,'b']
################################################################################################
#Default constructor of the ISO8583 Object
def __init__(self,iso="", debug=False):
"""Default Constructor of ISO8583 Package.
        It initializes a "brand new" ISO8583 package
Example: To Enable debug you can use:
pack = ISO8583(debug=True)
@param: iso a String that represents the ASCII of the package. The same that you need to pass to setIsoContent() method.
@param: debug (True or False) default False -> Used to print some debug infos. Only use if want that messages!
"""
#Bitmap internal representation
self.BITMAP = []
#Values
self.BITMAP_VALUES = []
#Bitmap ASCII representantion
self.BITMAP_HEX = ''
# MTI
self.MESSAGE_TYPE_INDICATION = '';
#Debug ?
self.DEBUG = debug
self.__inicializeBitmap()
self.__inicializeBitmapValues()
if iso != "":
self.setIsoContent(iso)
################################################################################################
################################################################################################
#Return bit type
def getBitType(self,bit):
"""Method that return the bit Type
@param: bit -> Bit that will be searched and whose type will be returned
@return: str that represents the type of the bit
"""
return self._BITS_VALUE_TYPE[bit][2]
################################################################################################
################################################################################################
#Return bit limit
def getBitLimit(self,bit):
"""Method that return the bit limit (Max size)
@param: bit -> Bit that will be searched and whose limit will be returned
@return: int that indicate the limit of the bit
"""
return self._BITS_VALUE_TYPE[bit][3]
################################################################################################
################################################################################################
#Return bit value type
def getBitValueType(self,bit):
"""Method that return the bit value type
@param: bit -> Bit that will be searched and whose value type will be returned
@return: str that indicate the valuye type of the bit
"""
return self._BITS_VALUE_TYPE[bit][4]
################################################################################################
################################################################################################
#Return large bit name
def getLargeBitName(self,bit):
"""Method that return the large bit name
@param: bit -> Bit that will be searched and whose name will be returned
@return: str that represents the name of the bit
"""
return self._BITS_VALUE_TYPE[bit][1]
################################################################################################
################################################################################################
# Set the MTI
def setTransationType(self,type):
"""Method that set Transation Type (MTI)
@param: type -> MTI to be setted
@raise: ValueToLarge Exception
"""
type = "%s" %type
        if len(type) > 4:
            type = type[0:4]
            raise ValueToLarge('Error: value up to size! MTI limit size = 4')
typeT = "";
if len(type) < 4:
for cont in range(len(type),4):
typeT += "0"
self.MESSAGE_TYPE_INDICATION = "%s%s" % (typeT,type)
################################################################################################
################################################################################################
# setMTI too
def setMTI(self,type):
"""Method that set Transation Type (MTI)
In fact, is an alias to "setTransationType" method
@param: type -> MTI to be setted
"""
self.setTransationType(type)
################################################################################################
################################################################################################
#Method that put "zeros" inside bitmap
def __inicializeBitmap(self):
"""Method that inicialize/reset a internal bitmap representation
It's a internal method, so don't call!
"""
if self.DEBUG == True:
print ('Init bitmap')
if len(self.BITMAP) == 16:
for cont in range(0,16):
self.BITMAP[cont] = self._BIT_DEFAULT_VALUE
else:
for cont in range(0,16):
self.BITMAP.append(self._BIT_DEFAULT_VALUE)
################################################################################################
################################################################################################
#init with "0" the array of values
def __inicializeBitmapValues(self):
"""Method that inicialize/reset a internal array used to save bits and values
It's a internal method, so don't call!
"""
if self.DEBUG == True:
print ('Init bitmap_values')
        if len(self.BITMAP_VALUES) == 129:
for cont in range(0,129):
self.BITMAP_VALUES[cont] = self._BIT_DEFAULT_VALUE
else:
for cont in range(0,129):
self.BITMAP_VALUES.append(self._BIT_DEFAULT_VALUE)
################################################################################################
################################################################################################
# Set a value to a bit
def setBit(self, bit, value):
"""Method used to set a bit with a value.
        It's one of the most important methods to use when using this library
        @param: bit -> number of the bit to be set
@param: value -> the value of the bit
@return: True/False default True -> To be used in the future!
@raise: BitInexistent Exception, ValueToLarge Exception
"""
if self.DEBUG == True:
            print ('Setting bit inside bitmap bit[%s] = %s' % (bit, value))
if bit < 1 or bit > 128:
raise BitInexistent("Bit number %s dosen't exist!" % bit)
        # calculate the position inside the bitmap
pos =1
if self.getBitType(bit) == 'LL':
self.__setBitTypeLL(bit, value)
if self.getBitType(bit) == 'LLL':
self.__setBitTypeLLL(bit, value)
if self.getBitType(bit) == 'N' :
self.__setBitTypeN(bit, value)
if self.getBitType(bit) == 'A':
self.__setBitTypeA(bit, value)
        if self.getBitType(bit) == 'ANS' or self.getBitType(bit) == 'AN':
            self.__setBitTypeANS(bit, value)
if self.getBitType(bit) == 'B':
self.__setBitTypeB(bit, value)
#Continuation bit?
if bit > 64:
self.BITMAP[0] = self.BITMAP[0] | self._TMP[2] # need to set bit 1 of first "bit" in bitmap
if (bit % 8) == 0:
pos = (bit / 8) - 1
else:
pos = (bit /8)
#need to check if the value can be there .. AN , N ... etc ... and the size
self.BITMAP[pos] = self.BITMAP[pos] | self._TMP[ (bit%8) +1]
return True
################################################################################################
################################################################################################
#print bitmap
def showBitmap(self):
"""Method that print the bitmap in ASCII form
Hint: Try to use getBitmap method and format your own print :)
"""
self.__buildBitmap()
# printing
print (self.BITMAP_HEX)
################################################################################################
################################################################################################
#Build a bitmap
def __buildBitmap(self):
"""Method that build the bitmap ASCII
It's a internal method, so don't call!
"""
self.BITMAP_HEX = ''
for c in range(0,16):
if (self.BITMAP[0] & self._BIT_POSITION_1) != self._BIT_POSITION_1:
# Only has the first bitmap
if self.DEBUG == True:
print ('%d Bitmap = %d(Decimal) = %s (hexa) ' %(c, self.BITMAP[c], hex(self.BITMAP[c])))
tm = hex(self.BITMAP[c])[2:]
if len(tm) != 2:
tm = '0' + tm
self.BITMAP_HEX += tm
if c == 7:
break
else: # second bitmap
if self.DEBUG == True:
print ('%d Bitmap = %d(Decimal) = %s (hexa) ' %(c, self.BITMAP[c], hex(self.BITMAP[c])))
tm = hex(self.BITMAP[c])[2:]
if len(tm) != 2:
tm = '0' + tm
self.BITMAP_HEX += tm
################################################################################################
################################################################################################
#Get a bitmap from str
def __getBitmapFromStr(self, bitmap):
"""Method that receive a bitmap str and transfor it to ISO8583 object readable.
@param: bitmap -> bitmap str to be readable
It's a internal method, so don't call!
"""
#Need to check if the size is correct etc...
cont = 0
if self.BITMAP_HEX != '':
self.BITMAP_HEX = ''
for x in range(0,32,2):
if (int(bitmap[0:2],16) & self._BIT_POSITION_1) != self._BIT_POSITION_1: # Only 1 bitmap
if self.DEBUG == True:
print ('Token[%d] %s converted to int is = %s' %(x, bitmap[x:x+2], int(bitmap[x:x+2],16)))
self.BITMAP_HEX += bitmap[x:x+2]
self.BITMAP[cont] = int(bitmap[x:x+2],16)
if x == 14:
break
else: # Second bitmap
if self.DEBUG == True:
print ('Token[%d] %s converted to int is = %s' %(x, bitmap[x:x+2], int(bitmap[x:x+2],16)))
self.BITMAP_HEX += bitmap[x:x+2]
self.BITMAP[cont] = int(bitmap[x:x+2],16)
cont += 1
################################################################################################
################################################################################################
# print bit array that is present in the bitmap
def showBitsFromBitmapStr(self, bitmap):
"""Method that receive a bitmap str, process it, and print a array with bits this bitmap string represents.
Usualy is used to debug things.
@param: bitmap -> bitmap str to be analized and translated to "bits"
"""
bits = self.__inicializeBitsFromBitmapStr(bitmap)
print ('Bits inside %s = %s' % (bitmap,bits))
################################################################################################
################################################################################################
#inicialize a bitmap using ASCII str
def __inicializeBitsFromBitmapStr(self, bitmap):
"""Method that receive a bitmap str, process it, and prepare ISO8583 object to understand and "see" the bits and values inside the ISO ASCII package.
It's a internal method, so don't call!
@param: bitmap -> bitmap str to be analized and translated to "bits"
"""
bits = []
for c in range(0,16):
for d in range(1,9):
if self.DEBUG == True:
print ('Value (%d)-> %s & %s = %s' % (d,self.BITMAP[c] , self._TMP[d], (self.BITMAP[c] & self._TMP[d]) ))
if (self.BITMAP[c] & self._TMP[d]) == self._TMP[d]:
                    if d == 1: # it's the 8th bit
if self.DEBUG == True:
print ('Bit %s is present !!!' % ((c +1)* 8))
bits.append((c +1)* 8)
self.BITMAP_VALUES[(c +1)* 8] = 'X'
else:
if (c == 0) & (d == 2): # Continuation bit
if self.DEBUG == True:
print ('Bit 1 is present !!!')
bits.append(1)
else:
if self.DEBUG == True:
print ('Bit %s is present !!!' % (c * 8 + d - 1))
bits.append(c * 8 + d - 1)
self.BITMAP_VALUES[c * 8 + d - 1] = 'X'
bits.sort()
return bits
################################################################################################
################################################################################################
#return a array of bits, when processing the bitmap
def __getBitsFromBitmap(self):
"""Method that process the bitmap and return a array with the bits presents inside it.
It's a internal method, so don't call!
"""
bits = []
for c in range(0,16):
for d in range(1,9):
if self.DEBUG == True:
print ('Value (%d)-> %s & %s = %s' % (d,self.BITMAP[c] , self._TMP[d], (self.BITMAP[c] & self._TMP[d]) ))
if (self.BITMAP[c] & self._TMP[d]) == self._TMP[d]:
                    if d == 1: # it's the 8th bit
if self.DEBUG == True:
print ('Bit %s is present !!!' % ((c +1)* 8))
bits.append((c +1)* 8)
else:
if (c == 0) & (d == 2): # Continuation bit
if self.DEBUG == True:
print ('Bit 1 is present !!!')
bits.append(1)
else:
if self.DEBUG == True:
print ('Bit %s is present !!!' % (c * 8 + d - 1))
bits.append(c * 8 + d - 1)
bits.sort()
return bits
################################################################################################
################################################################################################
#Set of type LL
def __setBitTypeLL(self, bit, value):
"""Method that set a bit with value in form LL
It put the size in front of the value
Example: pack.setBit(99,'123') -> Bit 99 is a LL type, so this bit, in ASCII form need to be 03123. To understand, 03 is the size of the information and 123 is the information/value
@param: bit -> bit to be setted
@param: value -> value to be setted
@raise: ValueToLarge Exception
It's a internal method, so don't call!
"""
value = "%s" % value
if len(value) > 99:
#value = value[0:99]
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (bit,self.getBitType(bit),self.getBitLimit(bit)) )
if len(value) > self.getBitLimit(bit):
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (bit,self.getBitType(bit),self.getBitLimit(bit)) )
size ="%s"% len(value)
self.BITMAP_VALUES[bit] = "%s%s" %( size.zfill(2), value)
################################################################################################
################################################################################################
#Set of type LLL
def __setBitTypeLLL(self, bit, value):
"""Method that set a bit with value in form LLL
It put the size in front of the value
Example: pack.setBit(104,'12345ABCD67890') -> Bit 104 is a LLL type, so this bit, in ASCII form need to be 01412345ABCD67890.
To understand, 014 is the size of the information and 12345ABCD67890 is the information/value
@param: bit -> bit to be setted
@param: value -> value to be setted
@raise: ValueToLarge Exception
It's a internal method, so don't call!
"""
value = "%s" % value
if len(value) > 999:
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (bit,self.getBitType(bit),self.getBitLimit(bit)) )
if len(value) > self.getBitLimit(bit):
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (bit,self.getBitType(bit),self.getBitLimit(bit)) )
size ="%s"% len(value)
self.BITMAP_VALUES[bit] = "%s%s" %( size.zfill(3), value)
################################################################################################
################################################################################################
# Set of type N,
def __setBitTypeN(self, bit, value):
"""Method that set a bit with value in form N
It complete the size of the bit with a default value
Example: pack.setBit(3,'30000') -> Bit 3 is a N type, so this bit, in ASCII form need to has size = 6 (ISO especification) so the value 30000 size = 5 need to receive more "1" number.
In this case, will be "0" in the left. In the package, the bit will be sent like '030000'
@param: bit -> bit to be setted
@param: value -> value to be setted
@raise: ValueToLarge Exception
It's a internal method, so don't call!
"""
value = "%s" % value
if len(value) > self.getBitLimit(bit):
value = value[0:self.getBitLimit(bit)]
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (bit,self.getBitType(bit),self.getBitLimit(bit)) )
self.BITMAP_VALUES[bit] = value.zfill(self.getBitLimit(bit))
################################################################################################
################################################################################################
# Set of type A
def __setBitTypeA(self, bit, value):
"""Method that set a bit with value in form A
It complete the size of the bit with a default value
Example: pack.setBit(3,'30000') -> Bit 3 is a A type, so this bit, in ASCII form need to has size = 6 (ISO especification) so the value 30000 size = 5 need to receive more "1" number.
In this case, will be "0" in the left. In the package, the bit will be sent like '030000'
@param: bit -> bit to be setted
@param: value -> value to be setted
@raise: ValueToLarge Exception
It's a internal method, so don't call!
"""
value = "%s" % value
if len(value) > self.getBitLimit(bit):
value = value[0:self.getBitLimit(bit)]
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (bit,self.getBitType(bit),self.getBitLimit(bit)) )
self.BITMAP_VALUES[bit] = value.zfill(self.getBitLimit(bit))
################################################################################################
################################################################################################
# Set of type B
def __setBitTypeB(self, bit, value):
"""Method that set a bit with value in form B
It complete the size of the bit with a default value
Example: pack.setBit(3,'30000') -> Bit 3 is a B type, so this bit, in ASCII form need to has size = 6 (ISO especification) so the value 30000 size = 5 need to receive more "1" number.
In this case, will be "0" in the left. In the package, the bit will be sent like '030000'
@param: bit -> bit to be setted
@param: value -> value to be setted
@raise: ValueToLarge Exception
It's a internal method, so don't call!
"""
value = "%s" % value
if len(value) > self.getBitLimit(bit):
value = value[0:self.getBitLimit(bit)]
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (bit,self.getBitType(bit),self.getBitLimit(bit)) )
self.BITMAP_VALUES[bit] = value.zfill(self.getBitLimit(bit))
################################################################################################
################################################################################################
# Set of type ANS
def __setBitTypeANS(self, bit, value):
"""Method that set a bit with value in form ANS
It complete the size of the bit with a default value
Example: pack.setBit(3,'30000') -> Bit 3 is a ANS type, so this bit, in ASCII form need to has size = 6 (ISO especification) so the value 30000 size = 5 need to receive more "1" number.
In this case, will be "0" in the left. In the package, the bit will be sent like '030000'
@param: bit -> bit to be setted
@param: value -> value to be setted
@raise: ValueToLarge Exception
It's a internal method, so don't call!
"""
value = "%s" % value
if len(value) > self.getBitLimit(bit):
value = value[0:self.getBitLimit(bit)]
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (bit,self.getBitType(bit),self.getBitLimit(bit)) )
self.BITMAP_VALUES[bit] = value.zfill(self.getBitLimit(bit))
################################################################################################
################################################################################################
# print os bits insede iso
def showIsoBits(self):
"""Method that show in detail a list of bits , values and types inside the object
Example: output to
(...)
iso.setBit(2,2)
iso.setBit(4,4)
(...)
iso.showIsoBits()
(...)
Bit[2] of type LL has limit 19 = 012
Bit[4] of type N has limit 12 = 000000000004
(...)
"""
for cont in range(0,129):
if self.BITMAP_VALUES[cont] != self._BIT_DEFAULT_VALUE:
print("Bit[%s] of type %s has limit %s = %s"%(cont,self.getBitType(cont),self.getBitLimit(cont), self.BITMAP_VALUES[cont]) )
################################################################################################
################################################################################################
# print Raw iso
def showRawIso(self):
"""Method that print ISO8583 ASCII complete representation
Example:
iso = ISO8583()
iso.setMTI('0800')
iso.setBit(2,2)
iso.setBit(4,4)
iso.setBit(12,12)
iso.setBit(17,17)
iso.setBit(99,99)
iso.showRawIso()
output (print) -> 0800d010800000000000000000002000000001200000000000400001200170299
Hint: Try to use getRawIso method and format your own print :)
"""
resp = self.getRawIso()
print(resp)
################################################################################################
################################################################################################
# Return raw iso
def getRawIso(self):
"""Method that return ISO8583 ASCII complete representation
Example:
iso = ISO8583()
iso.setMTI('0800')
iso.setBit(2,2)
iso.setBit(4,4)
iso.setBit(12,12)
iso.setBit(17,17)
iso.setBit(99,99)
str = iso.getRawIso()
print ('This is the ASCII package %s' % str)
output (print) -> This is the ASCII package 0800d010800000000000000000002000000001200000000000400001200170299
@return: str with complete ASCII ISO8583
@raise: InvalidMTI Exception
"""
self.__buildBitmap()
if self.MESSAGE_TYPE_INDICATION == '':
raise InvalidMTI('Check MTI! Do you set it?')
resp = "";
resp += self.MESSAGE_TYPE_INDICATION
resp += self.BITMAP_HEX
for cont in range(0,129):
if self.BITMAP_VALUES[cont] != self._BIT_DEFAULT_VALUE:
resp = "%s%s"%(resp, self.BITMAP_VALUES[cont])
return resp
################################################################################################
################################################################################################
#Redefine a bit
def redefineBit(self,bit, smallStr, largeStr, bitType, size, valueType ):
"""Method that redefine a bit structure in global scope!
Can be used to personalize ISO8583 structure to another specification (ISO8583 1987 for example!)
        Hint: If you have a lot of "ValueToLarge Exception" maybe the specification that you are using is different from mine. So you will need to use this method :)
        @param: bit -> bit to be redefined
        @param: smallStr -> a small String representation of the bit, used to build "user friendly prints", example "2" for bit 2
        @param: largeStr -> a large String representation of the bit, used to build "user friendly prints" and to inform the "main use of the bit",
            example "Primary account number (PAN)" for bit 2
@param: bitType -> type the bit, used to build the values, example "LL" for bit 2. Need to be one of (B, N, AN, ANS, LL, LLL)
@param: size -> limit size the bit, used to build/complete the values, example "19" for bit 2.
@param: valueType -> value type the bit, used to "validate" the values, example "n" for bit 2. This mean that in bit 2 we need to have only numeric values.
Need to be one of (a, an, n, ansb, b)
@raise: BitInexistent Exception, InvalidValueType Exception
"""
if self.DEBUG == True:
print ('Trying to redefine the bit with (self,%s,%s,%s,%s,%s,%s)' % (bit, smallStr, largeStr, bitType, size, valueType))
#validating bit position
        if bit == 1 or bit == 64 or bit < 0 or bit > 128:
            raise BitInexistent("Error: bit %d cannot be changed because it has an invalid number!" % bit)
#need to validate if the type and size is compatible! example slimit = 100 and type = LL
if bitType == "B" or bitType == "N" or bitType == "AN" or bitType == "ANS" or bitType == "LL" or bitType == "LLL":
if valueType == "a" or valueType == "n" or valueType == "ansb" or valueType == "ans" or valueType == "b" or valueType == "an" :
self._BITS_VALUE_TYPE[bit] = [smallStr, largeStr, bitType, size, valueType]
if self.DEBUG == True:
print ('Bit %d redefined!' % bit)
else:
raise InvalidValueType("Error bit %d cannot be changed because %s is not a valid valueType (a, an, n ansb, b)!" % (bit,valueType))
#return
else:
            raise InvalidBitType("Error bit %d cannot be changed because %s is not a valid bitType (B, N, AN, ANS, LL, LLL)!" % (bit,bitType))
#return
################################################################################################
################################################################################################
#a partir de um trem de string, pega o MTI
def __setMTIFromStr(self,iso):
"""Method that get the first 4 characters to be the MTI.
It's a internal method, so don't call!
"""
self.MESSAGE_TYPE_INDICATION = iso[0:4]
if self.DEBUG == True:
print ('MTI found was %s' % self.MESSAGE_TYPE_INDICATION)
################################################################################################
################################################################################################
#return the MTI
def getMTI(self):
"""Method that return the MTI of the package
@return: str -> with the MTI
"""
#Need to validate if the MTI was setted ...etc ...
return self.MESSAGE_TYPE_INDICATION
################################################################################################
################################################################################################
#Return the bitmap
def getBitmap(self):
"""Method that return the ASCII Bitmap of the package
@return: str -> with the ASCII Bitmap
"""
if self.BITMAP_HEX == '':
self.__buildBitmap()
return self.BITMAP_HEX
################################################################################################
################################################################################################
#return the Varray of values
def getValuesArray(self):
"""Method that return an internal array of the package
@return: array -> with all bits, presents or not in the bitmap
"""
return self.BITMAP_VALUES
################################################################################################
################################################################################################
#Receive a str and interpret it to bits and values
def __getBitFromStr(self,strWithoutMtiBitmap):
"""Method that receive a string (ASCII) without MTI and Bitmaps (first and second), understand it and remove the bits values
@param: str -> with all bits presents whithout MTI and bitmap
It's a internal method, so don't call!
"""
if self.DEBUG == True:
print ('This is the input string <%s>' % strWithoutMtiBitmap)
offset = 0;
        # skip bit 1 because it was already defined in "__inicializeBitsFromBitmapStr"
for cont in range(2,129):
if self.BITMAP_VALUES[cont] != self._BIT_DEFAULT_VALUE:
if self.DEBUG == True:
print ('String = %s offset = %s bit = %s' % (strWithoutMtiBitmap[offset:],offset,cont))
if self.getBitType(cont) == 'LL':
valueSize = int(strWithoutMtiBitmap[offset:offset +2])
if self.DEBUG == True:
print ('Size of the message in LL = %s' %valueSize)
if valueSize > self.getBitLimit(cont):
raise ValueToLarge("This bit is larger than the especification!")
self.BITMAP_VALUES[cont] = strWithoutMtiBitmap[offset:offset+2] + strWithoutMtiBitmap[offset+2:offset+2+valueSize]
if self.DEBUG == True:
print ('\tSetting bit %s value %s' % (cont,self.BITMAP_VALUES[cont]))
offset += valueSize + 2
if self.getBitType(cont) == 'LLL':
valueSize = int(strWithoutMtiBitmap[offset:offset +3])
if self.DEBUG == True:
print ('Size of the message in LLL = %s' %valueSize)
if valueSize > self.getBitLimit(cont):
raise ValueToLarge("This bit is larger than the especification!")
self.BITMAP_VALUES[cont] = strWithoutMtiBitmap[offset:offset+3] + strWithoutMtiBitmap[offset+3:offset+3+valueSize]
if self.DEBUG == True:
print ('\tSetting bit %s value %s' % (cont,self.BITMAP_VALUES[cont]))
offset += valueSize + 3
# if self.getBitType(cont) == 'LLLL':
# valueSize = int(strWithoutMtiBitmap[offset:offset +4])
# if valueSize > self.getBitLimit(cont):
# raise ValueToLarge("This bit is larger than the especification!")
# self.BITMAP_VALUES[cont] = '(' + strWithoutMtiBitmap[offset:offset+4] + ')' + strWithoutMtiBitmap[offset+4:offset+4+valueSize]
# offset += valueSize + 4
if self.getBitType(cont) == 'N' or self.getBitType(cont) == 'A' or self.getBitType(cont) == 'ANS' or self.getBitType(cont) == 'B' or self.getBitType(cont) == 'AN' :
self.BITMAP_VALUES[cont] = strWithoutMtiBitmap[offset:self.getBitLimit(cont)+offset]
if self.DEBUG == True:
print ('\tSetting bit %s value %s' % (cont,self.BITMAP_VALUES[cont]))
offset += self.getBitLimit(cont)
################################################################################################
################################################################################################
#Parse a ASCII iso to object
def setIsoContent(self,iso):
"""Method that receive a complete ISO8583 string (ASCII) understand it and remove the bits values
Example:
iso = '0210B238000102C080040000000000000002100000000000001700010814465469421614465701081100301000000N399915444303500019991544986020 Value not allowed009000095492'
i2 = ISO8583()
# in this case, we need to redefine a bit because default bit 42 is LL and in this especification is "N"
# the rest remain, so we use "get" :)
i2.redefineBit(42, '42', i2.getLargeBitName(42), 'N', i2.getBitLimit(42), i2.getBitValueType(42) )
i2.setIsoContent(iso2)
print ('Bitmap = %s' %i2.getBitmap())
print ('MTI = %s' %i2.getMTI() )
print ('This ISO has bits:')
v3 = i2.getBitsAndValues()
for v in v3:
print ('Bit %s of type %s with value = %s' % (v['bit'],v['type'],v['value']))
@param: str -> complete ISO8583 string
@raise: InvalidIso8583 Exception
"""
if len(iso) < 20:
raise InvalidIso8583('This is not a valid iso!!')
if self.DEBUG == True:
print ('ASCII to process <%s>' % iso)
self.__setMTIFromStr(iso)
isoT = iso[4:]
self.__getBitmapFromStr(isoT)
self.__inicializeBitsFromBitmapStr(self.BITMAP_HEX)
if self.DEBUG == True:
print ('This is the array of bits (before) %s ' % self.BITMAP_VALUES)
self.__getBitFromStr(iso[4+len(self.BITMAP_HEX):])
if self.DEBUG == True:
print ('This is the array of bits (after) %s ' % self.BITMAP_VALUES)
################################################################################################
################################################################################################
#Method that compare 2 isos
def __cmp__(self,obj2):
"""Method that compare two objects in "==", "!=" and other things
Example:
p1 = ISO8583()
p1.setMTI('0800')
p1.setBit(2,2)
p1.setBit(4,4)
p1.setBit(12,12)
p1.setBit(17,17)
p1.setBit(99,99)
#get the rawIso and save in the iso variable
iso = p1.getRawIso()
p2 = ISO8583()
p2.setIsoContent(iso)
print ('Is equivalent?')
        if p1 == p2:
print ('Yes :)')
else:
print ('Noooooooooo :(')
@param: obj2 -> object that will be compared
@return: <0 if is not equal, 0 if is equal
"""
ret = -1 # By default is different
if (self.getMTI() == obj2.getMTI()) and (self.getBitmap() == obj2.getBitmap()) and (self.getValuesArray() == obj2.getValuesArray()):
ret = 0
return ret
################################################################################################
################################################################################################
# Method that return a array with bits and values inside the iso package
def getBitsAndValues(self):
"""Method that return an array of bits, values, types etc.
Each array value is a dictionary with: {'bit':X ,'type': Y, 'value': Z} Where:
bit: is the bit number
type: is the bit type
value: is the bit value inside this object
so the Generic array returned is: [ (...),{'bit':X,'type': Y, 'value': Z}, (...)]
Example:
p1 = ISO8583()
p1.setMTI('0800')
p1.setBit(2,2)
p1.setBit(4,4)
p1.setBit(12,12)
p1.setBit(17,17)
p1.setBit(99,99)
v1 = p1.getBitsAndValues()
for v in v1:
print ('Bit %s of type %s with value = %s' % (v['bit'],v['type'],v['value']))
@return: array of values.
"""
ret = []
for cont in range(2,129):
if self.BITMAP_VALUES[cont] != self._BIT_DEFAULT_VALUE:
_TMP = {}
_TMP['bit'] = "%d" % cont
_TMP['type'] = self.getBitType(cont)
_TMP['value'] = self.BITMAP_VALUES[cont]
ret.append(_TMP)
return ret
################################################################################################
################################################################################################
# Method that return a array with bits and values inside the iso package
def getBit(self,bit):
"""Return the value of the bit
@param: bit -> the number of the bit that you want the value
@raise: BitInexistent Exception, BitNotSet Exception
"""
if bit < 1 or bit > 128:
raise BitInexistent("Bit number %s dosen't exist!" % bit)
#Is that bit set?
isThere = False
arr = self.__getBitsFromBitmap()
if self.DEBUG == True:
print ('This is the array of bits inside the bitmap %s' % arr)
for v in arr:
if v == bit:
value = self.BITMAP_VALUES[bit]
isThere = True
break
if isThere:
return value
else:
raise BitNotSet("Bit number %s was not set!" % bit)
################################################################################################
################################################################################################
#Method that return ISO8583 to TCPIP network form, with the size in the beginning.
def getNetworkISO(self, bigEndian=True):
"""Method that return ISO8583 ASCII package with the size in the beginning
By default, it return the package with size represented with big-endian.
Is the same that:
import struct
(...)
iso = ISO8583()
iso.setBit(3,'300000')
(...)
ascii = iso.getRawIso()
# Example: big-endian
# To little-endian, replace '!h' with '<h'
        netIso = struct.pack('!h',len(ascii))
netIso += ascii
# Example: big-endian
# To little-endian, replace 'iso.getNetworkISO()' with 'iso.getNetworkISO(False)'
print ('This <%s> the same that <%s>' % (iso.getNetworkISO(),netIso))
        @param: bigEndian (True|False) -> whether the size should be represented in big-endian.
@return: size + ASCII ISO8583 package ready to go to the network!
@raise: InvalidMTI Exception
"""
netIso = ""
asciiIso = self.getRawIso()
if bigEndian:
netIso = struct.pack('!h',len(asciiIso))
if self.DEBUG == True:
print ('Pack Big-endian')
else:
netIso = struct.pack('<h',len(asciiIso))
if self.DEBUG == True:
print ('Pack Little-endian')
netIso += asciiIso
return netIso
################################################################################################
################################################################################################
    # Method that receives an ISO8583 ASCII package in the network form and parses it.
def setNetworkISO(self,iso, bigEndian=True):
"""Method that receive sie + ASCII ISO8583 package and transfor it in the ISO8583 object.
By default, it recieve the package with size represented with big-endian.
Is the same that:
import struct
(...)
iso = ISO8583()
iso.setBit(3,'300000')
(...)
# Example: big-endian
# To little-endian, replace 'iso.getNetworkISO()' with 'iso.getNetworkISO(False)'
netIso = iso.getNetworkISO()
newIso = ISO8583()
# Example: big-endian
# To little-endian, replace 'newIso.setNetworkISO()' with 'newIso.setNetworkISO(False)'
newIso.setNetworkISO(netIso)
#Is the same that:
#size = netIso[0:2]
## To little-endian, replace '!h' with '<h'
#size = struct.unpack('!h',size )
        #newIso.setIsoContent(netIso[2:2+size[0]])
arr = newIso.getBitsAndValues()
for v in arr:
print ('Bit %s Type %s Value = %s' % (v['bit'],v['type'],v['value']))
@param: iso -> str that represents size + ASCII ISO8583 package
@param: bigEndian (True|False) -> Codification of the size.
@raise: InvalidIso8583 Exception
"""
if len(iso) < 24:
raise InvalidIso8583('This is not a valid iso!!Invalid Size')
size = iso[0:2]
if bigEndian:
size = struct.unpack('!h',size)
if self.DEBUG == True:
print ('Unpack Big-endian')
else:
size = struct.unpack('<h',size)
if self.DEBUG == True:
print ('Unpack Little-endian')
if len(iso) != (size[0] + 2):
            raise InvalidIso8583('This is not a valid iso!! The ISO8583 ASCII size (%s) does not match the declared size %s!' % (len(iso[2:]),size[0]))
self.setIsoContent(iso[2:])
################################################################################################
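# Round-trip sketch (illustrative values only; see the class docstring for a
# fuller example):
#
#   p1 = ISO8583()
#   p1.setMTI('0800')
#   p1.setBit(3, '300000')
#   p2 = ISO8583()
#   p2.setNetworkISO(p1.getNetworkISO())
#   assert p1 == p2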
| gpl-3.0 |
superchilli/webapp | venv/lib/python2.7/site-packages/setuptools/ssl_support.py | 104 | 8220 | import os
import socket
import atexit
import re
import functools
from setuptools.extern.six.moves import urllib, http_client, map, filter
from pkg_resources import ResolutionError, ExtractionError
try:
import ssl
except ImportError:
ssl = None
__all__ = [
'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
'opener_for'
]
cert_paths = """
/etc/pki/tls/certs/ca-bundle.crt
/etc/ssl/certs/ca-certificates.crt
/usr/share/ssl/certs/ca-bundle.crt
/usr/local/share/certs/ca-root.crt
/etc/ssl/cert.pem
/System/Library/OpenSSL/certs/cert.pem
/usr/local/share/certs/ca-root-nss.crt
/etc/ssl/ca-bundle.pem
""".strip().split()
try:
HTTPSHandler = urllib.request.HTTPSHandler
HTTPSConnection = http_client.HTTPSConnection
except AttributeError:
HTTPSHandler = HTTPSConnection = object
is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection)
try:
from ssl import CertificateError, match_hostname
except ImportError:
try:
from backports.ssl_match_hostname import CertificateError
from backports.ssl_match_hostname import match_hostname
except ImportError:
CertificateError = None
match_hostname = None
if not CertificateError:
class CertificateError(ValueError):
pass
if not match_hostname:
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
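    # For example, under the rules above:
    #   _dnsname_match('*.example.com', 'www.example.com')   -> match
    #   _dnsname_match('*.example.com', 'a.b.example.com')   -> no match
    # (the wildcard never spans more than one label)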
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
class VerifyingHTTPSHandler(HTTPSHandler):
"""Simple verifying handler: no auth, subclasses, timeouts, etc."""
def __init__(self, ca_bundle):
self.ca_bundle = ca_bundle
HTTPSHandler.__init__(self)
def https_open(self, req):
return self.do_open(
lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req
)
class VerifyingHTTPSConn(HTTPSConnection):
"""Simple verifying connection: no auth, subclasses, timeouts, etc."""
def __init__(self, host, ca_bundle, **kw):
HTTPSConnection.__init__(self, host, **kw)
self.ca_bundle = ca_bundle
def connect(self):
sock = socket.create_connection(
(self.host, self.port), getattr(self, 'source_address', None)
)
# Handle the socket if a (proxy) tunnel is present
if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
self.sock = sock
self._tunnel()
# http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7
# change self.host to mean the proxy server host when tunneling is
# being used. Adapt, since we are interested in the destination
# host for the match_hostname() comparison.
actual_host = self._tunnel_host
else:
actual_host = self.host
self.sock = ssl.wrap_socket(
sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
)
try:
match_hostname(self.sock.getpeercert(), actual_host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
def opener_for(ca_bundle=None):
"""Get a urlopen() replacement that uses ca_bundle for verification"""
return urllib.request.build_opener(
VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
).open
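# Hedged usage sketch: the returned callable is a drop-in urlopen() replacement
# that verifies server certificates against the chosen bundle.
#   urlopen = opener_for()                  # falls back to find_ca_bundle()
#   resp = urlopen('https://example.com/')  # raises on cert/hostname mismatch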
# from jaraco.functools
def once(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not hasattr(func, 'always_returns'):
func.always_returns = func(*args, **kwargs)
return func.always_returns
return wrapper
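# Example of the memoising decorator above: the wrapped function runs once and
# its first result is replayed on every later call (arguments are ignored
# after the first call).
#   @once
#   def load():
#       return expensive_probe()   # hypothetical helper
#   load(); load()                 # the probe executes only the first time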
@once
def get_win_certfile():
try:
import wincertstore
except ImportError:
return None
class CertFile(wincertstore.CertFile):
def __init__(self):
super(CertFile, self).__init__()
atexit.register(self.close)
def close(self):
try:
super(CertFile, self).close()
except OSError:
pass
_wincerts = CertFile()
_wincerts.addstore('CA')
_wincerts.addstore('ROOT')
return _wincerts.name
def find_ca_bundle():
"""Return an existing CA bundle path, or None"""
extant_cert_paths = filter(os.path.isfile, cert_paths)
return (
get_win_certfile()
or next(extant_cert_paths, None)
or _certifi_where()
)
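# Lookup order: the Windows certificate store first (when wincertstore is
# importable), then the first extant path from cert_paths, then certifi.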
def _certifi_where():
try:
return __import__('certifi').where()
except (ImportError, ResolutionError, ExtractionError):
pass
| mit |
wonder-sk/QGIS | python/plugins/processing/gui/GeometryPredicateSelectionPanel.py | 5 | 4757 | # -*- coding: utf-8 -*-
"""
***************************************************************************
PredicatePanel.py
---------------------
Date : January 2015
Copyright : (C) 2015 by Arnaud Morvan
Email : arnaud dot morvan at camptocamp dot com
Contributors : Arnaud Morvan
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import range
__author__ = 'Arnaud Morvan'
__date__ = 'January 2015'
__copyright__ = '(C) 2015, Arnaud Morvan'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt import uic
from qgis.PyQt.QtWidgets import QCheckBox
from qgis.core import Qgis, QgsVectorLayer, QgsWkbTypes
from processing.core.parameters import ParameterGeometryPredicate
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'widgetGeometryPredicateSelector.ui'))
class GeometryPredicateSelectionPanel(BASE, WIDGET):
unusablePredicates = {
QgsWkbTypes.PointGeometry: {
QgsWkbTypes.PointGeometry: ('touches', 'crosses'),
QgsWkbTypes.LineGeometry: ('equals', 'contains', 'overlaps'),
QgsWkbTypes.PolygonGeometry: ('equals', 'contains', 'overlaps')
},
QgsWkbTypes.LineGeometry: {
QgsWkbTypes.PointGeometry: ('equals', 'within', 'overlaps'),
            QgsWkbTypes.LineGeometry: (),
QgsWkbTypes.PolygonGeometry: ('equals', 'contains', 'overlaps')
},
QgsWkbTypes.PolygonGeometry: {
QgsWkbTypes.PointGeometry: ('equals', 'within', 'overlaps'),
QgsWkbTypes.LineGeometry: ('equals', 'within', 'overlaps'),
            QgsWkbTypes.PolygonGeometry: ('crosses',)
}
}
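    # Reading the table: unusablePredicates[leftType][rightType] lists the
    # predicates that can never hold for that geometry pairing, e.g. a point
    # can never 'touch' or 'cross' another point.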
    def __init__(self,
                 enabledPredicates=ParameterGeometryPredicate.predicates,
                 rows=4):
super(GeometryPredicateSelectionPanel, self).__init__(None)
self.setupUi(self)
        self.enabledPredicates = enabledPredicates
self.leftLayer = None
self.rightLayer = None
self.setRows(rows)
self.updatePredicates()
def onLeftLayerChange(self):
sender = self.sender()
self.leftLayer = sender.itemData(sender.currentIndex())
self.updatePredicates()
def onRightLayerChange(self):
sender = self.sender()
self.rightLayer = sender.itemData(sender.currentIndex())
self.updatePredicates()
def updatePredicates(self):
if (isinstance(self.leftLayer, QgsVectorLayer)
and isinstance(self.rightLayer, QgsVectorLayer)):
leftType = self.leftLayer.geometryType()
rightType = self.rightLayer.geometryType()
unusablePredicates = self.unusablePredicates[leftType][rightType]
else:
unusablePredicates = []
for predicate in ParameterGeometryPredicate.predicates:
widget = self.getWidget(predicate)
            widget.setEnabled(predicate in self.enabledPredicates
                              and predicate not in unusablePredicates)
def setRows(self, rows):
widgets = []
for predicate in ParameterGeometryPredicate.predicates:
widget = self.getWidget(predicate)
self.gridLayout.removeWidget(widget)
widgets.append(widget)
        for i, widget in enumerate(widgets):
            self.gridLayout.addWidget(widget, i % rows, i // rows)
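        # Layout sketch for rows=4 with N widgets: cells fill column-major,
        # widget i landing at (row=i % rows, column=i // rows), so widgets
        # 0-3 form the first column and 4-7 the second.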
def getWidget(self, predicate):
return self.findChild(QCheckBox, predicate + 'Box')
def value(self):
values = []
for predicate in ParameterGeometryPredicate.predicates:
widget = self.getWidget(predicate)
if widget.isEnabled() and widget.isChecked():
values.append(predicate)
return values
def setValue(self, values):
if values:
for predicate in ParameterGeometryPredicate.predicates:
widget = self.getWidget(predicate)
widget.setChecked(predicate in values)
return True
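# Hedged usage sketch (predicate names assumed from
# ParameterGeometryPredicate.predicates):
#   panel = GeometryPredicateSelectionPanel()
#   panel.setValue(['intersects', 'touches'])
#   panel.value()   # -> the checked predicates that are currently enabled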
| gpl-2.0 |
xin3liang/platform_external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/style/filereader_unittest.py | 51 | 6710 | # Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.system.logtesting import LoggingTestCase
from webkitpy.style.checker import ProcessorBase
from webkitpy.style.filereader import TextFileReader
class TextFileReaderTest(LoggingTestCase):
class MockProcessor(ProcessorBase):
"""A processor for test purposes.
This processor simply records the parameters passed to its process()
method for later checking by the unittest test methods.
"""
def __init__(self):
self.processed = []
"""The parameters passed for all calls to the process() method."""
def should_process(self, file_path):
return not file_path.endswith('should_not_process.txt')
def process(self, lines, file_path, test_kwarg=None):
self.processed.append((lines, file_path, test_kwarg))
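        # For example, after processing a one-line file the mock records the
        # tuple (['contents'], '/path/to/file.txt', None), which the tests
        # below compare via _passed_to_processor().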
def setUp(self):
LoggingTestCase.setUp(self)
# FIXME: This should be a MockFileSystem once TextFileReader is moved entirely on top of FileSystem.
self.filesystem = FileSystem()
self._temp_dir = str(self.filesystem.mkdtemp())
self._processor = TextFileReaderTest.MockProcessor()
self._file_reader = TextFileReader(self.filesystem, self._processor)
def tearDown(self):
LoggingTestCase.tearDown(self)
self.filesystem.rmtree(self._temp_dir)
def _create_file(self, rel_path, text):
"""Create a file with given text and return the path to the file."""
# FIXME: There are better/more secure APIs for creating tmp file paths.
file_path = self.filesystem.join(self._temp_dir, rel_path)
self.filesystem.write_text_file(file_path, text)
return file_path
def _passed_to_processor(self):
"""Return the parameters passed to MockProcessor.process()."""
return self._processor.processed
def _assert_file_reader(self, passed_to_processor, file_count):
"""Assert the state of the file reader."""
self.assertEqual(passed_to_processor, self._passed_to_processor())
self.assertEqual(file_count, self._file_reader.file_count)
def test_process_file__does_not_exist(self):
try:
self._file_reader.process_file('does_not_exist.txt')
        except SystemExit as err:
self.assertEqual(str(err), '1')
else:
self.fail('No Exception raised.')
self._assert_file_reader([], 1)
self.assertLog(["ERROR: File does not exist: 'does_not_exist.txt'\n"])
def test_process_file__is_dir(self):
temp_dir = self.filesystem.join(self._temp_dir, 'test_dir')
self.filesystem.maybe_make_directory(temp_dir)
self._file_reader.process_file(temp_dir)
# Because the log message below contains exception text, it is
# possible that the text varies across platforms. For this reason,
# we check only the portion of the log message that we control,
# namely the text at the beginning.
log_messages = self.logMessages()
# We remove the message we are looking at to prevent the tearDown()
# from raising an exception when it asserts that no log messages
# remain.
message = log_messages.pop()
self.assertTrue(message.startswith("WARNING: Could not read file. Skipping: '%s'\n " % temp_dir))
self._assert_file_reader([], 1)
def test_process_file__should_not_process(self):
file_path = self._create_file('should_not_process.txt', 'contents')
self._file_reader.process_file(file_path)
self._assert_file_reader([], 1)
def test_process_file__multiple_lines(self):
file_path = self._create_file('foo.txt', 'line one\r\nline two\n')
self._file_reader.process_file(file_path)
processed = [(['line one\r', 'line two', ''], file_path, None)]
self._assert_file_reader(processed, 1)
def test_process_file__file_stdin(self):
file_path = self._create_file('-', 'file contents')
self._file_reader.process_file(file_path=file_path, test_kwarg='foo')
processed = [(['file contents'], file_path, 'foo')]
self._assert_file_reader(processed, 1)
def test_process_file__with_kwarg(self):
file_path = self._create_file('foo.txt', 'file contents')
self._file_reader.process_file(file_path=file_path, test_kwarg='foo')
processed = [(['file contents'], file_path, 'foo')]
self._assert_file_reader(processed, 1)
def test_process_paths(self):
# We test a list of paths that contains both a file and a directory.
dir = self.filesystem.join(self._temp_dir, 'foo_dir')
self.filesystem.maybe_make_directory(dir)
file_path1 = self._create_file('file1.txt', 'foo')
rel_path = self.filesystem.join('foo_dir', 'file2.txt')
file_path2 = self._create_file(rel_path, 'bar')
self._file_reader.process_paths([dir, file_path1])
processed = [(['bar'], file_path2, None),
(['foo'], file_path1, None)]
self._assert_file_reader(processed, 2)
def test_count_delete_only_file(self):
self._file_reader.count_delete_only_file()
delete_only_file_count = self._file_reader.delete_only_file_count
self.assertEqual(delete_only_file_count, 1)
| bsd-3-clause |
sebgoa/client-python | kubernetes/client/models/v1alpha1_policy_rule.py | 2 | 8145 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1alpha1PolicyRule(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_groups=None, non_resource_ur_ls=None, resource_names=None, resources=None, verbs=None):
"""
V1alpha1PolicyRule - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_groups': 'list[str]',
'non_resource_ur_ls': 'list[str]',
'resource_names': 'list[str]',
'resources': 'list[str]',
'verbs': 'list[str]'
}
self.attribute_map = {
'api_groups': 'apiGroups',
'non_resource_ur_ls': 'nonResourceURLs',
'resource_names': 'resourceNames',
'resources': 'resources',
'verbs': 'verbs'
}
self._api_groups = api_groups
self._non_resource_ur_ls = non_resource_ur_ls
self._resource_names = resource_names
self._resources = resources
self._verbs = verbs
@property
def api_groups(self):
"""
Gets the api_groups of this V1alpha1PolicyRule.
APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.
:return: The api_groups of this V1alpha1PolicyRule.
:rtype: list[str]
"""
return self._api_groups
@api_groups.setter
def api_groups(self, api_groups):
"""
Sets the api_groups of this V1alpha1PolicyRule.
APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.
:param api_groups: The api_groups of this V1alpha1PolicyRule.
:type: list[str]
"""
self._api_groups = api_groups
@property
def non_resource_ur_ls(self):
"""
Gets the non_resource_ur_ls of this V1alpha1PolicyRule.
NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.
:return: The non_resource_ur_ls of this V1alpha1PolicyRule.
:rtype: list[str]
"""
return self._non_resource_ur_ls
@non_resource_ur_ls.setter
def non_resource_ur_ls(self, non_resource_ur_ls):
"""
Sets the non_resource_ur_ls of this V1alpha1PolicyRule.
NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.
:param non_resource_ur_ls: The non_resource_ur_ls of this V1alpha1PolicyRule.
:type: list[str]
"""
self._non_resource_ur_ls = non_resource_ur_ls
@property
def resource_names(self):
"""
Gets the resource_names of this V1alpha1PolicyRule.
ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
:return: The resource_names of this V1alpha1PolicyRule.
:rtype: list[str]
"""
return self._resource_names
@resource_names.setter
def resource_names(self, resource_names):
"""
Sets the resource_names of this V1alpha1PolicyRule.
ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
:param resource_names: The resource_names of this V1alpha1PolicyRule.
:type: list[str]
"""
self._resource_names = resource_names
@property
def resources(self):
"""
Gets the resources of this V1alpha1PolicyRule.
Resources is a list of resources this rule applies to. ResourceAll represents all resources.
:return: The resources of this V1alpha1PolicyRule.
:rtype: list[str]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""
Sets the resources of this V1alpha1PolicyRule.
Resources is a list of resources this rule applies to. ResourceAll represents all resources.
:param resources: The resources of this V1alpha1PolicyRule.
:type: list[str]
"""
self._resources = resources
@property
def verbs(self):
"""
Gets the verbs of this V1alpha1PolicyRule.
Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
:return: The verbs of this V1alpha1PolicyRule.
:rtype: list[str]
"""
return self._verbs
@verbs.setter
def verbs(self, verbs):
"""
Sets the verbs of this V1alpha1PolicyRule.
Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
:param verbs: The verbs of this V1alpha1PolicyRule.
:type: list[str]
"""
if verbs is None:
raise ValueError("Invalid value for `verbs`, must not be `None`")
self._verbs = verbs
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
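    # to_dict() recurses into nested models and lists via their own to_dict(),
    # so the result contains only plain Python containers, ready for JSON
    # serialisation.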
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1alpha1PolicyRule):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
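# Hedged usage sketch (not part of the generated model):
#   rule = V1alpha1PolicyRule(verbs=['get', 'list'], resources=['pods'])
#   rule.to_dict()     # -> plain dict mirroring the Swagger attribute map
#   rule.verbs = None  # raises ValueError via the property setter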
| apache-2.0 |