"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import ConfSchema, PortsSpecSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow import Node
import cupy
import cudf
class TransactionCostNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.infer_meta = False
self.INPUT_PORT_NAME = 'logreturn_df'
self.OUTPUT_PORT_NAME = 'out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:logreturn_df}"
},
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
def conf_schema(self):
json = {
"title": "Compute the Transaction Cost",
"type": "object",
"properties": {
"cost": {
'type': "number",
"title": "transaction cost",
"default": 2e-4
},
},
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def update(self):
TemplateNodeMixin.update(self)
meta_outports = self.template_meta_setup().outports
meta_inports = self.template_meta_setup().inports
sub_dict = {
'year': 'int16',
'month': 'int16',
'sample_id': 'int64',
}
required = {
}
required.update(sub_dict)
meta_inports[self.INPUT_PORT_NAME] = required
json_drawdown = {}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
assets = len(input_meta[self.INPUT_PORT_NAME]) - 3
for i in range(assets):
json_drawdown[i] = 'float64'
json_drawdown.update(sub_dict)
meta_outports[self.OUTPUT_PORT_NAME] = json_drawdown
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
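# Meta note (explanatory, not part of the original source): everything in the
# input frame except the three bookkeeping columns (year, month, sample_id)
# is treated as a float64 asset column, so the output schema adapts to
# however many assets the upstream log-return frame carries.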
def process(self, inputs):
df = inputs[self.INPUT_PORT_NAME]
input_meta = self.get_input_meta()
assets = len(input_meta[self.INPUT_PORT_NAME]) - 3
all_sample_ids = df['sample_id'].unique()
total_samples = len(all_sample_ids)
cost = self.conf.get('cost', 2e-4)
data = df[list(range(assets))].values
r = data.reshape(total_samples, -1, assets)
tcost = cupy.abs(r[:, 1:, :] - r[:, :-1, :])
tcost = cupy.pad(tcost, ((0, 0), (1, 0), (0, 0)), mode='constant')
tcost = tcost * cost
tcost = tcost.reshape(-1, assets)
cost_df = cudf.DataFrame(tcost)
cost_df.index = df.index
cost_df['year'] = df['year']
cost_df['month'] = df['month']
cost_df['sample_id'] = df['sample_id']
output = {}
output.update({self.OUTPUT_PORT_NAME: cost_df})
return output
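# A minimal standalone sketch (illustrative, not part of the original source)
# of the padding logic used in process() above: with returns r of shape
# (samples, time, assets), the per-period turnover proxy is |r[t] - r[t-1]|,
# and a zero row is padded in front so the result keeps the original time
# length before being scaled by the per-trade cost:
#
#   r = cupy.arange(24, dtype='float64').reshape(2, 4, 3)
#   t = cupy.abs(r[:, 1:, :] - r[:, :-1, :])     # shape (2, 3, 3)
#   t = cupy.pad(t, ((0, 0), (1, 0), (0, 0)))    # back to shape (2, 4, 3)
#   t = t * 2e-4                                 # default transaction cost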
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/transactionCostNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import ConfSchema, PortsSpecSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
class LogReturnNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.infer_meta = False
self.INPUT_PORT_NAME = "in"
self.OUTPUT_PORT_NAME = "out"
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:in}"
},
}
required = {
"date": "datetime64[ns]",
'sample_id': 'int64',
'year': 'int16',
'month': 'int16',
}
meta_inports = {
self.INPUT_PORT_NAME: required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: {}
}
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Compute the log return dataframe",
"type": "object",
"properties": {
},
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
df = inputs[self.INPUT_PORT_NAME]
# df = df.drop('datetime', axis=1)
output = {}
col = list(df.columns)
col.remove('date')
col.remove('sample_id')
col.remove('year')
col.remove('month')
logprice = df[col].log()
log_return = logprice - logprice.shift(1)
log_return['date'] = df['date']
log_return['sample_id'] = df['sample_id']
log_return['year'] = df['year']
log_return['month'] = df['month']
log_return['corrupted'] = df['sample_id'] - \
df['sample_id'].shift(1)
log_return = log_return.dropna()
corrupted = log_return['corrupted'] == 1
# print('corrupted rows', corrupted.sum())
log_return[corrupted] = None
log_return = log_return.dropna()
log_return = log_return.drop('corrupted', axis=1)
output.update({self.OUTPUT_PORT_NAME: log_return})
return output
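# Boundary-masking note (explanatory comment, not part of the original
# source): samples are stacked vertically in one frame, so the first row of
# each sample would otherwise hold a bogus log return computed against the
# previous sample's last price. The 'corrupted' column marks exactly those
# rows (the sample_id difference is nonzero, equal to 1 for consecutively
# numbered samples) so they can be nulled and dropped before the helper
# column is removed again.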
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/logReturnNode.py |
from setuptools import setup, find_packages
setup(
name='greenflow_dask_plugin',
version='0.0.1',
packages=find_packages(include=['greenflow_dask_plugin']),
install_requires=[
"greenflow"
],
entry_points={
'greenflow.plugin': [
'greenflow_dask_plugin = greenflow_dask_plugin',
],
}
)
| fsi-samples-main | gQuant/plugins/dask_plugin/setup.py |
from .daskComputeNode import DaskComputeNode
from .persistNode import PersistNode
from .simpleParallelNode import SimpleParallelNode
__all__ = ["DaskComputeNode", "PersistNode", "SimpleParallelNode"]
| fsi-samples-main | gQuant/plugins/dask_plugin/greenflow_dask_plugin/__init__.py |
from greenflow.dataframe_flow import (ConfSchema, PortsSpecSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow import Node
from dask.dataframe import DataFrame as DaskDataFrame
import dask.distributed
class DaskComputeNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
port_type = PortsSpecSchema.port_type
dy = PortsSpecSchema.dynamic
self.INPUT_PORT_NAME = 'in'
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"dask_cudf.DataFrame", "dask.dataframe.DataFrame",
"builtins.object"
],
dy: {
PortsSpecSchema.DYN_MATCH: ["cudf.DataFrame"]
}
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=None
)
def conf_schema(self):
json = {
"title": "Compute the dask dataframe",
"type": "object",
"properties": {
},
}
ui = {}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
# df = df.drop('datetime', axis=1)
input_connections = self.get_connected_inports()
determined_type = None
for port_name in input_connections.keys():
if port_name != self.INPUT_PORT_NAME:
determined_type = input_connections[port_name]
output = {}
if (determined_type is not None and determined_type[0] is not None and
issubclass(determined_type[0], DaskDataFrame)):
client = dask.distributed.client.default_client()
objs = []
for port_name in input_connections.keys():
if port_name != self.INPUT_PORT_NAME:
df = inputs[port_name]
objs.append(df)
objs = client.compute(objs)
for port_name in input_connections.keys():
if port_name != self.INPUT_PORT_NAME:
output[port_name] = objs.pop(0).result()
else:
for port_name in input_connections.keys():
if port_name != self.INPUT_PORT_NAME:
df = inputs[port_name]
output[port_name] = df
return output
| fsi-samples-main | gQuant/plugins/dask_plugin/greenflow_dask_plugin/daskComputeNode.py |
from greenflow.dataframe_flow import (ConfSchema, PortsSpecSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow import Node
from dask.dataframe import DataFrame as DaskDataFrame
import dask.distributed
class PersistNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
port_type = PortsSpecSchema.port_type
dy = PortsSpecSchema.dynamic
self.INPUT_PORT_NAME = 'in'
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"dask_cudf.DataFrame", "dask.dataframe.DataFrame",
"builtins.object"
],
dy: {
PortsSpecSchema.DYN_MATCH: True
}
},
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=None
)
def conf_schema(self):
json = {
"title": "Persist the dask dataframe",
"type": "object",
"properties": {
},
}
ui = {}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
# df = df.drop('datetime', axis=1)
input_connections = self.get_connected_inports()
determined_type = None
for port_name in input_connections.keys():
if port_name != self.INPUT_PORT_NAME:
determined_type = input_connections[port_name]
output = {}
if (determined_type is not None and determined_type[0] is not None
and issubclass(determined_type[0], DaskDataFrame)):
client = dask.distributed.client.default_client()
input_connections = self.get_connected_inports()
objs = []
for port_name in input_connections.keys():
if port_name != self.INPUT_PORT_NAME:
df = inputs[port_name]
objs.append(df)
objs = client.persist(objs)
for port_name in input_connections.keys():
if port_name != self.INPUT_PORT_NAME:
output[port_name] = objs.pop(0)
else:
for port_name in input_connections.keys():
if port_name != self.INPUT_PORT_NAME:
df = inputs[port_name]
output[port_name] = df
return output
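# Persist vs. compute (explanatory comment, not part of the original source):
# client.persist() starts executing the task graphs on the workers and
# returns immediately with lazy collections backed by in-flight futures, so
# downstream nodes keep working with distributed dataframes; client.compute()
# in DaskComputeNode instead materializes concrete results and pulls them
# back to the caller via .result().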
| fsi-samples-main | gQuant/plugins/dask_plugin/greenflow_dask_plugin/persistNode.py |
from greenflow.plugin_nodes import CompositeNode
from greenflow.plugin_nodes import ContextCompositeNode
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema,
PortsSpecSchema,
NodePorts)
from greenflow.plugin_nodes.util.json_util import parse_config
from dask.dataframe import DataFrame as DaskDataFrame
import dask
from jsonpath_ng import parse
import dask.distributed
__all__ = ["SimpleParallelNode"]
class SimpleParallelNode(CompositeNode):
"""
A SimpleParallelNode that can take a single GPU/CPU workflow, and convert
it to output Dask dataframe. Each partition in the Dask dataframe will be
computed in parallel in different GPU/CPUs.
In the SimpleParallelNode configuration, user just need to set the
taskgraph, the inputs and outputs of the taskgraph,
the context parameters for the taskgraph. Most importantly,
it can map the iteration id (or the Dask Dataframe partition id) to any
number typed configuration item of the taskgraph.
E.g. it can be used to set the randomn seed number for each of the
iteration runs.
"""
def init(self):
super().init()
def ports_setup(self):
ports = super().ports_setup()
port_type = PortsSpecSchema.port_type
inports = ports.inports
outports = ports.outports
for k in outports.keys():
outports[k][port_type] = [DaskDataFrame]
output_port = NodePorts(inports=inports, outports=outports)
return output_port
def meta_setup(self):
out_meta = super().meta_setup()
return out_meta
def conf_schema(self):
task_graph = self.task_graph
replacementObj = self.replacementObj
# cache_key, task_graph, replacementObj = self._compute_hash_key()
# if cache_key in CACHE_SCHEMA:
# return CACHE_SCHEMA[cache_key]
conf = ContextCompositeNode.conf_schema(self)
json = {
"title": "Simple Parallel Node",
"type": "object",
"description": """The SimpleParallelNode is used to parallelize the
embarrassingly parallelizable cudf computation taskgraph.""",
"properties": {
"taskgraph": {
"type": "string",
"description": "the taskgraph filepath"
},
"input": {
"type": "array",
"description": "the input node ids",
"items": {
"type": "string"
}
},
"output": {
"type": "array",
"description": "the output node ids",
"items": {
"type": "string"
},
},
"iterations": {
"type": "integer",
"description": "the number of iterations",
"items": {
"type": "string"
}
},
},
"required": ["taskgraph"]
}
ui = {
"taskgraph": {"ui:widget": "TaskgraphSelector"},
}
types = []
if 'taskgraph' in self.conf:
if 'context' in conf.json['properties']:
json['properties']['context'] = conf.json['properties'][
'context']
json['properties']['map'] = {
"type": "array",
"description": """The iteration number maps to""",
"items": {
"type": "object",
"properties": {
"node_id": {
"type": "string",
"enum": []
}
},
"dependencies": {
"node_id": {
"oneOf": [],
}
}
}
}
all_fields = parse_config(replacementObj)
types = list(all_fields.keys())
if 'number' in types:
ty = 'number'
type_container = all_fields[ty]
ids = list(type_container.keys())
json['properties']['map']['items']['properties']['node_id'][
'enum'] = ids
idlist = json['properties']['map']['items']['dependencies'][
'node_id']['oneOf']
for subid in ids:
id_obj = {
"properties": {
"node_id": {
"type": "string"
},
"xpath": {
"type": "string",
}
}
}
content = type_container[subid]
paths = [i['path'] for i in content]
names = [i['item'] for i in content]
id_obj['properties']['node_id']['enum'] = [subid]
id_obj['properties']['xpath']['enum'] = paths
id_obj['properties']['xpath']['enumNames'] = names
idlist.append(id_obj)
pandas_df_name = 'pandas.core.frame.DataFrame'
cudf_df_name = 'cudf.core.dataframe.DataFrame'
if 'taskgraph' in self.conf:
def inputNode_fun(inputNode, in_ports):
pass
def outNode_fun(outNode, out_ports):
pass
self._make_sub_graph_connection(task_graph,
inputNode_fun, outNode_fun)
ids_in_graph = []
in_ports = []
out_ports = []
for t in task_graph:
node_id = t.get('id')
if node_id != '':
node = task_graph[node_id]
all_ports = node.ports_setup()
for port in all_ports.inports.keys():
in_ports.append(node_id+'.'+port)
for port in all_ports.outports.keys():
types = all_ports.outports[port][
PortsSpecSchema.port_type]
correct_type = False
if isinstance(types, list):
t_names = [
t.__module__ + '.' + t.__name__ for t in types
]
if (pandas_df_name in t_names
or cudf_df_name in t_names):
correct_type = True
else:
t_names = types.__module__ + '.' + types.__name__
if (pandas_df_name == t_names
or cudf_df_name == t_names):
correct_type = True
if correct_type:
out_ports.append(node_id+'.'+port)
ids_in_graph.append(node_id)
json['properties']['input']['items']['enum'] = in_ports
json['properties']['output']['items']['enum'] = out_ports
out_schema = ConfSchema(json=json, ui=ui)
# CACHE_SCHEMA[cache_key] = out_schema
return out_schema
def conf_update(self):
"""
run after init, used to update configuration
"""
pass
def update_replace(self, replaceObj, task_graph, **kwargs):
"""
this method is called in the update
@para replaceObj is a dictionary of the configuration
@para task_graph is the task_graph loaded for this composite node
@iternum integer, the iteration number
It is intented to construct a new python dictionary to pass to the
task_graph.run replace argument. So the composite taskgraph can run
with different configurations.
"""
ContextCompositeNode.update_replace(self, replaceObj, task_graph,
**kwargs)
# replace the numbers from the context
if 'map' in self.conf and 'iternum' in kwargs:
for i in range(len(self.conf['map'])):
val = kwargs['iternum']
map_obj = self.conf['map'][i]
xpath = map_obj['xpath']
expr = parse(xpath)
expr.update(replaceObj, val)
def process(self, inputs, **kwargs):
output = {}
# more_output = self._process(inputs)
# output.update(more_output)
iterations = self.conf['iterations']
out_dfs = [
dask.delayed(CompositeNode.process)(self, inputs, iternum=i)
for i in range(iterations)
]
client = dask.distributed.client.default_client()
out_dfs = client.persist(out_dfs)
meta = self.meta_setup().outports
ports = self.ports_setup()
for name in ports.outports.keys():
if self.outport_connected(name):
meta_data = meta[name]
objs = [i[name] for i in out_dfs]
dask_df = dask.dataframe.from_delayed(objs, meta=meta_data)
output[name] = dask_df
return output
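# A minimal standalone sketch (hypothetical names, not part of the original
# source) of the pattern process() uses above: wrap each iteration in
# dask.delayed, then stitch the delayed partitions into one dask dataframe
# whose schema is supplied via `meta`:
#
#   import dask
#   import dask.dataframe as dd
#   import pandas as pd
#
#   @dask.delayed
#   def one_run(seed):
#       return pd.DataFrame({'x': [float(seed)]})
#
#   parts = [one_run(i) for i in range(4)]
#   ddf = dd.from_delayed(parts, meta={'x': 'float64'})
#   print(ddf.compute())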
| fsi-samples-main | gQuant/plugins/dask_plugin/greenflow_dask_plugin/simpleParallelNode.py |
############################################################################
##
## Copyright (C) 2021 NVIDIA Corporation. All rights reserved.
##
## NVIDIA Sample Code
##
## Please refer to the NVIDIA end user license agreement (EULA) associated
## with this source code for terms and conditions that govern your use of
## this software. Any use, reproduction, disclosure, or distribution of
## this software and related documentation outside the terms of the EULA
## is strictly prohibited.
##
############################################################################
import torch
import torch.nn as nn
class binaryClassification(nn.Module):
def __init__(self, cat_cards, numer_dims=10):
"""
cat_cards (list): list of integers, where each integer is the cardinality of a categorical column, ordered by sorted column name
numer_dims (int): number of numerical dimensions
"""
super(binaryClassification, self).__init__()
self.num_cats = len(cat_cards)
# Uncomment lines below to enable categorical embeddings as well as line in forward method
# self.embeddings = nn.ModuleList([nn.Embedding(cat_card, min(50, cat_card//2 + 1)) for
# cat_card in cat_cards])
# total_embed_dims = sum(i.embedding_dim for i in self.embeddings)
# Number of input features is X_train.shape[1].
# self.layer_1 = nn.Linear(numer_dims + total_embed_dims, 512)
self.layer_1 = nn.Linear(self.num_cats + numer_dims, 512)
self.layer_2 = nn.Linear(512, 512)
self.layer_3 = nn.Linear(512, 512)
self.layer_4 = nn.Linear(512, 512)
self.layer_5 = nn.Linear(512, 512)
self.layer_out = nn.Linear(512, 1)
self.prelu1 = nn.PReLU()
self.prelu2 = nn.PReLU()
self.prelu3 = nn.PReLU()
self.prelu4 = nn.PReLU()
self.prelu5 = nn.PReLU()
self.dropout1 = nn.Dropout(p=0.1)
self.dropout2 = nn.Dropout(p=0.1)
self.dropout3 = nn.Dropout(p=0.1)
self.dropout4 = nn.Dropout(p=0.1)
self.batchnorm1 = nn.BatchNorm1d(512)
self.batchnorm2 = nn.BatchNorm1d(512)
self.batchnorm3 = nn.BatchNorm1d(512)
self.batchnorm4 = nn.BatchNorm1d(512)
self.batchnorm5 = nn.BatchNorm1d(512)
def forward(self, cat_inputs, numer_inputs):
# inputs = torch.cat([self.embeddings[col](cat_inputs[:, col]) for col in range(self.num_cats)]+[numer_inputs], dim=1)
inputs = torch.cat([cat_inputs, numer_inputs], dim=1)
x = self.prelu1(self.layer_1(inputs))
x = self.batchnorm1(x)
x = self.dropout1(x)
x = self.prelu2(self.layer_2(x))
x = self.batchnorm2(x)
x = self.dropout2(x)
x = self.prelu3(self.layer_3(x))
x = self.batchnorm3(x)
x = self.dropout3(x)
x = self.prelu4(self.layer_4(x))
x = self.batchnorm4(x)
x = self.dropout4(x)
x = self.prelu5(self.layer_5(x))
x = self.batchnorm5(x)
x = self.layer_out(x)
return x
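# A minimal smoke test (assumed shapes, not part of the original source).
# With the embedding lines commented out above, the model consumes the raw
# category codes directly, so they must be cast to float before the concat
# with the numeric features:
#
#   model = binaryClassification(cat_cards=[5, 7, 3], numer_dims=10)
#   cats = torch.randint(0, 3, (16, 3)).float()  # (batch, num categoricals)
#   nums = torch.randn(16, 10)                   # (batch, numer_dims)
#   logits = model(cats, nums)                   # (16, 1), pre-sigmoid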
| fsi-samples-main | credit_default_risk/clfmodel.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, ToTensor, Normalize
from nvflare.apis.dxo import from_shareable, DataKind, DXO
from nvflare.apis.executor import Executor
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from lstm_network import TabLSTM
from tabformer_dataset import TabformerDataset
class TabformerValidator(Executor):
def __init__(self, dataset_base_dir, batch_size=1024, validate_task_name=AppConstants.TASK_VALIDATION):
super(TabformerValidator, self).__init__()
self.dataset_base_dir = dataset_base_dir
self._batch_size = batch_size
self._validate_task_name = validate_task_name
def _initialize_validator(self, fl_ctx: FLContext):
# In order to use two different local datasets in POC mode we use the client_id to figure out which dataset to validate on
self.client_id = fl_ctx.get_identity_name() #e.g. "site-1"
# Data
self._val_ds = TabformerDataset(self.dataset_base_dir, self.client_id, trainset=False)
self._val_loader = DataLoader(self._val_ds, batch_size=self._batch_size, shuffle=False, drop_last=True)
self._n_iterations = len(self._val_loader)
self.model = TabLSTM()
# Warning: this is specifically for POC mode with 2 clients training on 2 different GPUs on the same machine
# Modify this section if you want to change GPU training behavior
gpu_id = f'cuda:{int(self.client_id.split("-")[1]) - 1}' #e.g. if client_id = "site-1" --> gpu_id = "cuda:0"
self.device = torch.device(gpu_id if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
def _terminate_validator(self):
# collect threads, close files here
pass
def handle_event(self, event_type: str, fl_ctx: FLContext):
# the start and end of a run - only happen once
if event_type == EventType.START_RUN:
try:
self._initialize_validator(fl_ctx)
except BaseException as e:
error_msg = f"Exception in _initialize_trainer: {e}"
self.log_exception(fl_ctx, error_msg)
self.system_panic(error_msg, fl_ctx)
elif event_type == EventType.END_RUN:
self._terminate_validator()
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if task_name == self._validate_task_name:
model_owner = "?"
try:
try:
dxo = from_shareable(shareable)
except:
self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data_kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Extract weights and ensure they are tensor.
model_owner = shareable.get_header(AppConstants.MODEL_OWNER, "?")
weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}
# Get validation accuracy
val_accuracy = self.do_validation(weights, abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"Accuracy when validating {model_owner}'s model on"
f" {fl_ctx.get_identity_name()}"f's data: {val_accuracy}')
dxo = DXO(data_kind=DataKind.METRICS, data={'val_acc': val_accuracy})
return dxo.to_shareable()
except:
self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
def do_validation(self, weights, abort_signal):
self.model.load_state_dict(weights)
self.model.eval()
correct = 0
total = 0
with torch.no_grad():
for i, batch in enumerate(self._val_loader):
if abort_signal.triggered:
return 0
x_cat, x_cont, target = batch[0].to(self.device), batch[1].to(self.device), batch[2].to(self.device)
logits = self.model(x_cat, x_cont)
pred_prob = torch.sigmoid(logits)
pred_label = (pred_prob > 0.5).float()*1
correct += (pred_label == target).sum().item()
total += target.size()[0]
metric = correct/float(total)
return metric
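# Validation note (explanatory, not part of the original source): the DXO
# carries another client's model weights; do_validation() loads them,
# thresholds sigmoid(logits) at 0.5 for the binary label, and returns plain
# accuracy, which ValidationJsonGenerator later assembles into the
# cross-site validation matrix.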
| fsi-samples-main | federated_learning/train-tabformer/custom/tabformer_lstm_validator.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import pandas as pd
from sklearn.preprocessing import StandardScaler
import torch
from torch.utils.data import Dataset
class TabformerDataset(Dataset):
def __init__(self, fp, client_id, trainset=True):
# read client dataframe
if trainset:
df = pd.read_csv(os.path.join(fp, f"{client_id}.csv"))
else:
df = pd.read_csv(os.path.join(fp, f"val-{client_id}.csv"))
# read metadata dictionary file
with open(os.path.join(fp, 'meta_data.pickle'), 'rb') as handle:
self.meta_data = pickle.load(handle)
amt_scaler = StandardScaler()
df['amount'] = amt_scaler.fit_transform(df[['amount']])
# cast error columns to uint8 instead of int64 - might not make a difference
error_cols = ['errors_Bad CVV', 'errors_Bad Card Number', 'errors_Bad Expiration', 'errors_Bad PIN',
'errors_Bad Zipcode', 'errors_Insufficient Balance', 'errors_Technical Glitch']
df[error_cols] = df[error_cols].astype('uint8')
x_cat = df[self.meta_data['cat_cols']].values
x_cont = df[self.meta_data['num_cols']].values
y = df[self.meta_data['target_col']].values
self.xcat = torch.tensor(x_cat, dtype=torch.long)
self.xcont = torch.tensor(x_cont, dtype=torch.float32)
self.y = torch.tensor(y, dtype=torch.float32)
def __len__(self):
return len(self.y)
def __getitem__(self,idx):
return self.xcat[idx], self.xcont[idx], self.y[idx]
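# Typical consumption (illustrative; the path and client id below are
# assumptions, not from the original source):
#
#   from torch.utils.data import DataLoader
#   ds = TabformerDataset('/data/tabformer', 'site-1', trainset=True)
#   loader = DataLoader(ds, batch_size=1024, shuffle=True, drop_last=True)
#   x_cat, x_cont, y = next(iter(loader))
#   # x_cat: int64 (1024, n_cat), x_cont: float32 (1024, n_num), y: float32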
| fsi-samples-main | federated_learning/train-tabformer/custom/tabformer_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os.path
from nvflare.apis.dxo import from_shareable, DataKind
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
from pt_constants import PTConstants
class ValidationJsonGenerator(FLComponent):
def __init__(self, results_dir=AppConstants.CROSS_VAL_DIR, json_file_name=PTConstants.CrossValResultsJsonFilename):
"""Catches VALIDATION_RESULT_RECEIVED event and generates a results.json containing accuracy of each
validated model.
Args:
results_dir (str, optional): Name of the results directory. Defaults to cross_site_val
json_file_name (str, optional): Name of the json file. Defaults to cross_val_results.json
"""
super(ValidationJsonGenerator, self).__init__()
self._results_dir = results_dir
self._val_results = {}
self._json_file_name = json_file_name
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self._val_results.clear()
elif event_type == AppEventType.VALIDATION_RESULT_RECEIVED:
model_owner = fl_ctx.get_prop(AppConstants.MODEL_OWNER, None)
data_client = fl_ctx.get_prop(AppConstants.DATA_CLIENT, None)
val_results = fl_ctx.get_prop(AppConstants.VALIDATION_RESULT, None)
if not model_owner:
self.log_error(fl_ctx, "model_owner unknown. Validation result will not be saved to json",
fire_event=False)
if not data_client:
self.log_error(fl_ctx, "data_client unknown. Validation result will not be saved to json",
fire_event=False)
if val_results:
try:
dxo = from_shareable(val_results)
dxo.validate()
if dxo.data_kind == DataKind.METRICS:
if data_client not in self._val_results:
self._val_results[data_client] = {}
self._val_results[data_client][model_owner] = dxo.data
else:
self.log_error(fl_ctx, f"Expected dxo of kind METRICS but got {dxo.data_kind} instead.",
fire_event=False)
except:
self.log_exception(fl_ctx, f"Exception in handling validation result.", fire_event=False)
else:
self.log_error(fl_ctx, "Validation result not found.", fire_event=False)
elif event_type == EventType.END_RUN:
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_run_number())
cross_val_res_dir = os.path.join(run_dir, self._results_dir)
if not os.path.exists(cross_val_res_dir):
os.makedirs(cross_val_res_dir)
res_file_path = os.path.join(cross_val_res_dir, self._json_file_name)
with open(res_file_path, 'w') as f:
json.dump(self._val_results, f)
| fsi-samples-main | federated_learning/train-tabformer/custom/validation_json_generator.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Union
import torch.cuda
from nvflare.apis.dxo import DXO
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import model_learnable_to_dxo
from nvflare.app_common.abstract.model_locator import ModelLocator
from nvflare.app_common.pt.pt_fed_utils import PTModelPersistenceFormatManager
from pt_constants import PTConstants
from lstm_network import TabLSTM
class PTModelLocator(ModelLocator):
def __init__(self, exclude_vars=None, model=None):
super(PTModelLocator, self).__init__()
self.model = model if model is not None else TabLSTM()
self.exclude_vars = exclude_vars
def get_model_names(self, fl_ctx: FLContext) -> List[str]:
return [PTConstants.PTServerName]
def locate_model(self, model_name, fl_ctx: FLContext) -> Union[DXO, None]:
if model_name == PTConstants.PTServerName:
try:
server_run_dir = fl_ctx.get_engine().get_workspace().get_app_dir(fl_ctx.get_run_number())
model_path = os.path.join(server_run_dir, PTConstants.PTFileModelName)
if not os.path.exists(model_path):
return None
# Load the torch model
device = "cuda" if torch.cuda.is_available() else "cpu"
data = torch.load(model_path, map_location=device)
# Setup the persistence manager.
if self.model:
default_train_conf = {"train": {"model": type(self.model).__name__}}
else:
default_train_conf = None
# Use persistence manager to get learnable
persistence_manager = PTModelPersistenceFormatManager(data, default_train_conf=default_train_conf)
ml = persistence_manager.to_model_learnable(exclude_vars=None)
# Create dxo and return
return model_learnable_to_dxo(ml)
except:
self.log_error(fl_ctx, "Error in retrieving {model_name}.", fire_event=False)
return None
else:
self.log_error(fl_ctx, f"PTModelLocator doesn't recognize name: {model_name}", fire_event=False)
return None
| fsi-samples-main | federated_learning/train-tabformer/custom/pt_model_locator.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
import torch
from torch import nn
from torch.optim import SGD, Adam
from torch.utils.data.dataloader import DataLoader
from torchvision.transforms import ToTensor, Normalize, Compose
from nvflare.apis.dxo import from_shareable, DXO, DataKind, MetaKey
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode, ReservedKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.apis.event_type import EventType
from nvflare.app_common.abstract.model import make_model_learnable, model_learnable_to_dxo
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.pt.pt_fed_utils import PTModelPersistenceFormatManager
from pt_constants import PTConstants
#from simple_network import SimpleNetwork
from lstm_network import TabLSTM
from tabformer_dataset import TabformerDataset
class TabformerTrainer(Executor):
def __init__(self, dataset_base_dir, lr=0.001, epochs=5, batch_size=1024, train_task_name=AppConstants.TASK_TRAIN,
submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL, exclude_vars=None):
"""Trainer handles train and submit_model tasks. During train_task, it trains a
PyTorch network on TabFormer dataset. For submit_model task, it sends the locally trained model
(if present) to the server.
Args:
dataset_base_dir (str): Path to where local datasets are stored. Expect two files inside: "site-1.csv" and "site-2.csv".
lr (float, optional): Learning rate. Defaults to 0.001
epochs (int, optional): Local epochs for each client. Defaults to 5.
batch_size (int, optional): Training batch size. Defaults to 1024.
train_task_name (str, optional): Task name for train task. Defaults to "train".
submit_model_task_name (str, optional): Task name for submit model. Defaults to "submit_model".
exclude_vars (list): List of variables to exclude during model loading.
"""
super(TabformerTrainer, self).__init__()
self.dataset_base_dir = dataset_base_dir
self._lr = lr
self._epochs = epochs
self._batch_size = batch_size
self._train_task_name = train_task_name
self._submit_model_task_name = submit_model_task_name
self._exclude_vars = exclude_vars
def _initialize_trainer(self, fl_ctx: FLContext):
# when the run starts, this is where the actual settings get initialized for trainer
# In order to use two different local datasets in POC mode we use the client_id to figure out which dataset needs to be trained on
self.client_id = fl_ctx.get_identity_name() #e.g. "site-1"
# Data
self._train_ds = TabformerDataset(self.dataset_base_dir, self.client_id)
self._train_loader = DataLoader(self._train_ds, batch_size=self._batch_size, shuffle=True, drop_last=True)
self._n_iterations = len(self._train_loader)
# Training setup
self.model = TabLSTM()
# Warning: this is specifically for POC mode with 2 clients training on 2 different GPUs
gpu_id = f'cuda:{int(self.client_id.split("-")[1]) - 1}'
self.device = torch.device(gpu_id if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
# Loss and Optimizer
self.criterion = nn.BCEWithLogitsLoss()
self.optimizer = Adam(self.model.parameters(), lr=self._lr)
# Setup the persistence manager to save PT model.
# The default training configuration is used by persistence manager
# in case no initial model is found.
self._default_train_conf = {"train": {"model": type(self.model).__name__}}
self.persistence_manager = PTModelPersistenceFormatManager(
data=self.model.state_dict(), default_train_conf=self._default_train_conf)
def _terminate_trainer(self):
# collect threads, close files here
pass
def handle_event(self, event_type: str, fl_ctx: FLContext):
# the start and end of a run - only happen once
if event_type == EventType.START_RUN:
try:
self._initialize_trainer(fl_ctx)
except BaseException as e:
error_msg = f"Exception in _initialize_trainer: {e}"
self.log_exception(fl_ctx, error_msg)
self.system_panic(error_msg, fl_ctx)
elif event_type == EventType.END_RUN:
self._terminate_trainer()
def local_train(self, fl_ctx, weights, abort_signal):
# Set the model weights
self.model.load_state_dict(state_dict=weights)
# Basic training
self.model.train()
for epoch in range(self._epochs):
running_loss = 0.0
for i, batch in enumerate(self._train_loader):
if abort_signal.triggered:
# If abort_signal is triggered, we simply return.
# The outside function will check it again and decide steps to take.
return
x_cat, x_cont, target = batch[0].to(self.device), batch[1].to(self.device), batch[2].to(self.device)
self.optimizer.zero_grad()
logits = self.model(x_cat, x_cont)
loss = self.criterion(logits, target)
loss.backward()
self.optimizer.step()
running_loss += loss.item()
if i % 2000 == 1999:
self.log_info(fl_ctx, f"Epoch: {epoch+1}/{self._epochs}, Iteration: {i}, "
f"Loss: {running_loss/2000:4f}")
running_loss = 0.0
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
try:
if task_name == self._train_task_name:
# Get model weights
try:
dxo = from_shareable(shareable)
except:
self.log_error(fl_ctx, "Unable to extract dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_error(fl_ctx, f"data_kind expected WEIGHTS but got {dxo.data_kind} instead.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Convert weights to tensor. Run training
torch_weights = {k: torch.as_tensor(v) for k, v in dxo.data.items()}
self.local_train(fl_ctx, torch_weights, abort_signal)
# Check the abort_signal after training.
# local_train returns early if abort_signal is triggered.
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Save the local model after training.
self.save_local_model(fl_ctx)
# Get the new state dict and send as weights
new_weights = self.model.state_dict()
new_weights = {k: v.cpu().numpy() for k, v in new_weights.items()}
outgoing_dxo = DXO(data_kind=DataKind.WEIGHTS, data=new_weights,
meta={MetaKey.NUM_STEPS_CURRENT_ROUND: self._n_iterations})
return outgoing_dxo.to_shareable()
elif task_name == self._submit_model_task_name:
# Load local model
ml = self.load_local_model(fl_ctx)
# Get the model parameters and create dxo from it
dxo = model_learnable_to_dxo(ml)
return dxo.to_shareable()
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
except:
self.log_exception(fl_ctx, f"Exception in simple trainer.")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def save_local_model(self, fl_ctx: FLContext):
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_prop(ReservedKey.RUN_NUM))
models_dir = os.path.join(run_dir, PTConstants.PTModelsDir)
if not os.path.exists(models_dir):
os.makedirs(models_dir)
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
ml = make_model_learnable(self.model.state_dict(), {})
self.persistence_manager.update(ml)
torch.save(self.persistence_manager.to_persistence_dict(), model_path)
def load_local_model(self, fl_ctx: FLContext):
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_prop(ReservedKey.RUN_NUM))
models_dir = os.path.join(run_dir, PTConstants.PTModelsDir)
if not os.path.exists(models_dir):
return None
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
self.persistence_manager = PTModelPersistenceFormatManager(data=torch.load(model_path),
default_train_conf=self._default_train_conf)
ml = self.persistence_manager.to_model_learnable(exclude_vars=self._exclude_vars)
return ml
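# Round-trip note (explanatory, not part of the original source): each
# federated round delivers the global weights as numpy arrays inside a DXO
# of kind WEIGHTS; execute() converts them to torch tensors, trains locally
# for `epochs`, then returns the updated state_dict as numpy arrays so the
# server can aggregate it. NUM_STEPS_CURRENT_ROUND lets weighted aggregators
# scale each client's contribution by its number of local iterations.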
| fsi-samples-main | federated_learning/train-tabformer/custom/tabformer_lstm_trainer.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class TabLSTM(nn.Module):
def __init__(self, hidden_size=128):
super(TabLSTM, self).__init__()
# Explicitly specified for simplicity
cat_cards = [9, 32, 2, 2, 2, 2, 2, 2, 2, 24, 892, 13429, 100343, 224, 60, 13, 10, 3, 30, 27322] # cardinality of the SORTED categorical columns
numer_dims = 1 # only 1 numeric column - amount
self.num_cats = len(cat_cards)
self.hidden_size = hidden_size
self.embeddings = nn.ModuleList([nn.Embedding(cat_card, min(50, cat_card//2 + 1)) for
cat_card in cat_cards])
total_embed_dims = sum(i.embedding_dim for i in self.embeddings)
input_size = total_embed_dims + numer_dims
self.lstm = nn.LSTM(input_size, hidden_size, num_layers=1, batch_first=True)
self.linear = nn.Linear(hidden_size, 1)
def forward(self, cat_inputs, numer_inputs):
inputs = torch.cat([self.embeddings[col](cat_inputs[:, col])
for col in range(self.num_cats)] + [numer_inputs], dim=1)
batch, d = inputs.shape
x, _ = self.lstm(inputs.reshape(batch, 1, d))
x = self.linear(x.reshape(batch, self.hidden_size))
return x
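# Sizing note (explanatory, not part of the original source): the embedding
# width rule min(50, cardinality // 2 + 1) caps wide categories at 50
# dimensions; e.g. the 9-value column gets 9 // 2 + 1 = 5 dimensions while
# the 100343-value column is capped at 50. The LSTM input size is the sum of
# all embedding widths plus the single numeric 'amount' feature, and each
# transaction is fed as a length-1 sequence via the reshape in forward().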
| fsi-samples-main | federated_learning/train-tabformer/custom/lstm_network.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class PTConstants:
PTServerName = "server"
PTFileModelName = "FL_global_model.pt"
PTLocalModelName = "local_model.pt"
PTModelsDir = "models"
CrossValResultsJsonFilename = "cross_val_results.json"
| fsi-samples-main | federated_learning/train-tabformer/custom/pt_constants.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
import cherrypy
import pathlib
import io
from base64 import b64encode
from models_infer import Model
from wait_socket import wait_for_port
wait_for_port(50051, "riva", 120)
m = Model()
WEB_ROOT = str(pathlib.Path(__file__).parent.absolute())+'/client'
print(WEB_ROOT)
def stop_clean():
print('stopped')
def run_server():
cherrypy.config.update({
'server.socket_port': 8888,
# 'environment': 'production',
'engine.autoreload.on': False,
# 'server.thread_pool': 1,
'server.socket_host': '0.0.0.0',
'tools.staticdir.on': True,
'tools.staticdir.dir': WEB_ROOT,
'tools.staticdir.index': 'index.html'
})
cherrypy.server.ssl_certificate = "cert.pem"
cherrypy.server.ssl_private_key = "privkey.pem"
class HelloWorld(object):
@cherrypy.expose
def doc(self):
p = pathlib.Path('text/doc.txt')
if p.exists():
with io.open(str(p), 'r', encoding='utf-8') as f:
content = f.read()
return content
else:
return ""
@cherrypy.expose
@cherrypy.tools.json_out()
def questions(self):
p = pathlib.Path('text/questions.txt')
if p.exists():
with io.open(str(p), 'r', encoding='utf-8') as f:
content = f.readlines()
return content
else:
return []
@cherrypy.expose
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
def infer(self):
input_json = cherrypy.request.json
r = m.qa_infer(input_json['para'], input_json['question'])
return [r]
@cherrypy.expose
def asr(self, audio_data):
inputs = audio_data
r = m.asr_infer(inputs.file)
return r
@cherrypy.expose
@cherrypy.tools.json_in()
def tacotron(self):
input_json = cherrypy.request.json
r = m.tacotron_infer(input_json['text'])
print('input', input_json['text'])
cherrypy.response.headers[
'Content-Type'] = 'application/octet-stream'
return b64encode(r)
cherrypy.engine.subscribe('stop', stop_clean)
cherrypy.quickstart(HelloWorld())
if __name__ == '__main__':
run_server()
| fsi-samples-main | nlp_demo_riva/webserver.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
# TTS proto
import riva_api.riva_tts_pb2 as rtts
import riva_api.riva_tts_pb2_grpc as rtts_srv
import riva_api.riva_audio_pb2 as ra
import grpc
import numpy as np
from wave_utils import add_header
channel = grpc.insecure_channel('riva:50051')
riva_tts = rtts_srv.RivaSpeechSynthesisStub(channel)
def get_wave(text):
req = rtts.SynthesizeSpeechRequest()
req.text = text
# currently required to be "en-US"
req.language_code = "en-US"
# Supports LINEAR_PCM, FLAC, MULAW and ALAW audio encodings
req.encoding = ra.AudioEncoding.LINEAR_PCM
# ignored, audio returned will be 22.05KHz
req.sample_rate_hz = 22050
# ignored
req.voice_name = "ljspeech"
resp = riva_tts.Synthesize(req)
float32_data = np.frombuffer(resp.audio, dtype=np.float32)
print(float32_data.min(), float32_data.max())
float32_data = float32_data / 1.414
float32_data = float32_data * 32767
int16_data = float32_data.astype(np.int16).tobytes()
wav = add_header(int16_data, 16, 1, 22050)
return wav
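# Conversion note (explanatory, not part of the original source): Riva
# returns float32 PCM samples nominally in [-1, 1]; dividing by 1.414
# (~sqrt(2)) adds about 3 dB of headroom before scaling to the int16 range
# [-32767, 32767], after which add_header() prepends a 44-byte RIFF/WAVE
# header describing a 22.05 kHz 16-bit mono stream.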
| fsi-samples-main | nlp_demo_riva/tts_infer.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
# NLP proto
import riva_api.riva_nlp_pb2 as rnlp
import riva_api.riva_nlp_pb2_grpc as rnlp_srv
import grpc
channel = grpc.insecure_channel('riva:50051')
riva_nlp = rnlp_srv.RivaLanguageUnderstandingStub(channel)
def get_answer(paragraph_text, question_text):
total = len(paragraph_text)
stride = 1024
final_answer = ''
final_score = 0
for i in range(0, total, stride):
req = rnlp.NaturalQueryRequest()
req.query = question_text
req.context = paragraph_text[i:]
resp = riva_nlp.NaturalQuery(req)
if resp.results[0].score > final_score and resp.results[0].answer:
final_answer = resp.results[0].answer
final_score = resp.results[0].score
return final_answer
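# Windowing note (explanatory, not part of the original source): the loop
# above re-asks the question against suffixes of the paragraph starting
# every `stride` characters and keeps the highest-scoring non-empty answer,
# so documents longer than the model's context window are still searched
# end to end.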
| fsi-samples-main | nlp_demo_riva/qa_infer.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
import time
import socket
def wait_for_port(port, host='localhost', timeout=5.0):
"""Wait until a port starts accepting TCP connections.
Args:
port (int): Port number.
host (str): Host address on which the port should exist.
timeout (float): In seconds. How long to wait before raising errors.
Raises:
TimeoutError: The port isn't accepting connection after time
specified in `timeout`.
"""
start_time = time.perf_counter()
while True:
try:
with socket.create_connection((host, port), timeout=timeout):
break
except OSError as ex:
time.sleep(0.01)
if time.perf_counter() - start_time >= timeout:
raise TimeoutError(
'Waited too long for the port {} on host {} to start '
'accepting connections.'.format(port, host)) from ex
| fsi-samples-main | nlp_demo_riva/wait_socket.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
def get_data(data):
total = 0
for i, num in enumerate(data):
total += num * 256**i
return total
def set_data(content, data):
length = len(content)
for i in range(length-1, -1, -1):
content[i] = data // (256**i)
data = data % (256**i)
return content
def examine_wav(content):
print('header type:', content[0:4])
print('file size: %d' % get_data(content[4:8]))
print('file type header: %s' % content[8:12])
print('file format chunk marker: %s' % content[12:16])
print('format data length: %d, has to be 16' % get_data(content[16:20]))
print('Type of format %d, 1 for pcm' % get_data(content[20:22]))
print('Number of channels %d' % get_data(content[22:24]))
print('Sample rate: %d' % get_data(content[24:28]))
print('Byte rate: %d' % get_data(content[28:32]))
print('Byte Per Sample * Channels : %d' % get_data(content[32:34]))
print('Bits Per Sample: %d' % get_data(content[34:36]))
print('data chunk header: %s' % content[36:40])
print('data chunk size: %d' % get_data(content[40:44]))
print(len(content), 'match', 44 + get_data(content[40:44]))
def add_header(newdata, bits_per_sample, channel, sr):
n = bytearray(
b'RIFF\xc4P\x05\x00WAVEfmt \x10\x00\x00\x00\x01\x00\x01\x00\x80>\x00\x00\x00}\x00\x00\x02\x00\x10\x00data\xa0P\x05\x00' # noqa
)
n[22:24] = set_data(n[22:24], channel)
n[34:36] = set_data(n[34:36], bits_per_sample)
n[32:34] = set_data(n[32:34], bits_per_sample // 8 * channel)
n[24:28] = set_data(n[24:28], sr)
n[28:32] = set_data(n[28:32], sr * bits_per_sample * channel // 8)
n[40:44] = set_data(n[40:44], len(newdata))
n[4:8] = set_data(n[4:8], 44 + len(newdata) - 8)
return n + newdata
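# Worked example (illustrative, not part of the original source): for 16-bit
# mono audio at 22050 Hz, add_header() fills the template as
#   channels        = 1                             (bytes 22-23)
#   bits per sample = 16                            (bytes 34-35)
#   block align     = 16 // 8 * 1 = 2               (bytes 32-33)
#   sample rate     = 22050                         (bytes 24-27)
#   byte rate       = 22050 * 16 * 1 // 8 = 44100   (bytes 28-31)
#   data chunk size = len(newdata)                  (bytes 40-43)
#   RIFF chunk size = 44 + len(newdata) - 8         (bytes 4-7)
# All fields are little-endian, which is what set_data() encodes.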
| fsi-samples-main | nlp_demo_riva/wave_utils.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
import grpc
import riva_api.riva_audio_pb2 as ra
import riva_api.riva_asr_pb2 as rasr
import riva_api.riva_asr_pb2_grpc as rasr_srv
import io
import wave
channel = grpc.insecure_channel("riva:50051")
client = rasr_srv.RivaSpeechRecognitionStub(channel)
def asr_text(data):
audio_data = data.read()
wf = wave.open(io.BytesIO(audio_data), 'rb')
rate = wf.getframerate()
config = rasr.RecognitionConfig(
encoding=ra.AudioEncoding.LINEAR_PCM,
sample_rate_hertz=rate,
language_code="en-US",
max_alternatives=1,
enable_automatic_punctuation=True,
audio_channel_count=1,
)
request = rasr.RecognizeRequest(config=config, audio=audio_data)
response = client.Recognize(request)
print(response)
if len(response.results[0].alternatives) > 0:
asr_best_transcript = response.results[0].alternatives[0].transcript
else:
asr_best_transcript = ''
return asr_best_transcript
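# Usage sketch (hypothetical path, not part of the original source): the
# function expects a file-like object holding a complete mono WAV file:
#
#   with open('sample.wav', 'rb') as f:
#       transcript = asr_text(f)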
| fsi-samples-main | nlp_demo_riva/asr_infer.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from asr_infer import asr_text
from tts_infer import get_wave
from qa_infer import get_answer
class Model(object):
def __init__(self):
pass
def qa_infer(self, paragraph_text, question):
return get_answer(paragraph_text, question)
def asr_infer(self, wav_file):
return asr_text(wav_file)
def tacotron_infer(self, text):
return get_wave(text)
| fsi-samples-main | nlp_demo_riva/models_infer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import wave
import sys
import grpc
import time
import riva_api.riva_audio_pb2 as ra
import riva_api.riva_asr_pb2 as rasr
import riva_api.riva_asr_pb2_grpc as rasr_srv
def get_args():
parser = argparse.ArgumentParser(description="Streaming transcription via Riva AI Services")
parser.add_argument("--server", default="localhost:50051", type=str, help="URI to GRPC server endpoint")
parser.add_argument("--audio-file", required=True, help="path to local file to stream")
return parser.parse_args()
def listen_print_loop(responses):
num_chars_printed = 0
idx = 0
for response in responses:
idx += 1
if not response.results:
continue
for result in response.results:
if not result.alternatives:
continue
transcript = result.alternatives[0].transcript
if result.is_final:
print(f"Final transcript: {transcript.encode('utf-8')}")
print(f"Confidence: {result.alternatives[0].confidence:9.4f}")
else:
print(f"Partial transcript: {transcript.encode('utf-8')}")
print(f"Stability: {result.stability:9.4f}")
print("----")
CHUNK = 1024
args = get_args()
wf = wave.open(args.audio_file, 'rb')
channel = grpc.insecure_channel(args.server)
client = rasr_srv.RivaSpeechRecognitionStub(channel)
config = rasr.RecognitionConfig(
encoding=ra.AudioEncoding.LINEAR_PCM,
sample_rate_hertz=wf.getframerate(),
language_code="en-US",
max_alternatives=1,
enable_automatic_punctuation=True,
)
streaming_config = rasr.StreamingRecognitionConfig(config=config, interim_results=True)
# read data
def generator(w, s):
yield rasr.StreamingRecognizeRequest(streaming_config=s)
d = w.readframes(CHUNK)
while len(d) > 0:
yield rasr.StreamingRecognizeRequest(audio_content=d)
d = w.readframes(CHUNK)
responses = client.StreamingRecognize(generator(wf, streaming_config))
listen_print_loop(responses)
| fsi-samples-main | nlp_demo_riva/riva/examples/transcribe_file_verbose.py |
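The generator above follows the Riva streaming convention: the first StreamingRecognizeRequest carries only the streaming_config, and every subsequent request carries only audio_content. A stripped-down sketch of that pattern, assuming the same riva_api stubs imported above:

    def request_stream(streaming_config, chunks):
        # First message: configuration only, no audio.
        yield rasr.StreamingRecognizeRequest(streaming_config=streaming_config)
        # Every later message: raw audio bytes only.
        for chunk in chunks:
            yield rasr.StreamingRecognizeRequest(audio_content=chunk)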
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import wave
import sys
import grpc
import riva_api.riva_audio_pb2 as ra
import riva_api.riva_asr_pb2 as rasr
import riva_api.riva_asr_pb2_grpc as rasr_srv
def get_args():
parser = argparse.ArgumentParser(description="Streaming transcription via Riva AI Services")
parser.add_argument("--server", default="localhost:50051", type=str, help="URI to GRPC server endpoint")
parser.add_argument("--audio-file", required=True, help="path to local file to stream")
parser.add_argument(
"--show-intermediate", action="store_true", help="show intermediate transcripts as they are available"
)
return parser.parse_args()
def listen_print_loop(responses, show_intermediate=False):
num_chars_printed = 0
    for response in responses:
if not response.results:
continue
partial_transcript = ""
for result in response.results:
if not result.alternatives:
continue
transcript = result.alternatives[0].transcript
if show_intermediate:
if not result.is_final:
partial_transcript += transcript
else:
overwrite_chars = ' ' * (num_chars_printed - len(transcript))
print("## " + transcript + overwrite_chars + "\n")
num_chars_printed = 0
else:
if result.is_final:
sys.stdout.buffer.write(transcript.encode('utf-8'))
sys.stdout.flush()
print("\n")
if show_intermediate and partial_transcript != "":
overwrite_chars = ' ' * (num_chars_printed - len(partial_transcript))
sys.stdout.write(">> " + partial_transcript + overwrite_chars + '\r')
sys.stdout.flush()
num_chars_printed = len(partial_transcript) + 3
CHUNK = 1024
args = get_args()
wf = wave.open(args.audio_file, 'rb')
channel = grpc.insecure_channel(args.server)
client = rasr_srv.RivaSpeechRecognitionStub(channel)
config = rasr.RecognitionConfig(
encoding=ra.AudioEncoding.LINEAR_PCM,
sample_rate_hertz=wf.getframerate(),
language_code="en-US",
max_alternatives=1,
enable_automatic_punctuation=True,
)
streaming_config = rasr.StreamingRecognitionConfig(config=config, interim_results=True)
# read data
def generator(w, s):
yield rasr.StreamingRecognizeRequest(streaming_config=s)
d = w.readframes(CHUNK)
while len(d) > 0:
yield rasr.StreamingRecognizeRequest(audio_content=d)
d = w.readframes(CHUNK)
responses = client.StreamingRecognize(generator(wf, streaming_config))
listen_print_loop(responses, show_intermediate=args.show_intermediate)
| fsi-samples-main | nlp_demo_riva/riva/examples/transcribe_file.py |
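The show_intermediate branch relies on a terminal trick: partial transcripts end with '\r' so the next write overwrites them in place, and overwrite_chars pads with spaces so a shorter update erases the tail of a longer one. A self-contained sketch of the same idea:

    import sys
    import time

    # Simulated partial transcripts; the padding matters when the text shrinks.
    printed = 0
    for text in ["hello world how", "hello world", "hello"]:
        pad = ' ' * max(0, printed - len(text))   # blank out leftover characters
        sys.stdout.write(">> " + text + pad + '\r')
        sys.stdout.flush()
        printed = len(text) + 3                   # 3 accounts for the ">> " prefix
        time.sleep(0.5)
    print()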
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import grpc
import queue
import argparse
import riva_api.riva_audio_pb2 as ra
import riva_api.riva_asr_pb2 as rasr
import riva_api.riva_asr_pb2_grpc as rasr_srv
import pyaudio
RATE = 16000
CHUNK = int(RATE / 10) # 100ms
def get_args():
parser = argparse.ArgumentParser(description="Streaming transcription via Riva AI Services")
parser.add_argument("--server", default="localhost:50051", type=str, help="URI to GRPC server endpoint")
parser.add_argument("--input-device", type=int, default=None, help="output device to use")
parser.add_argument("--list-devices", action="store_true", help="list output devices indices")
return parser.parse_args()
class MicrophoneStream(object):
"""Opens a recording stream as a generator yielding the audio chunks."""
def __init__(self, rate, chunk, device=None):
self._rate = rate
self._chunk = chunk
self._device = device
# Create a thread-safe buffer of audio data
self._buff = queue.Queue()
self.closed = True
def __enter__(self):
self._audio_interface = pyaudio.PyAudio()
self._audio_stream = self._audio_interface.open(
format=pyaudio.paInt16,
input_device_index=self._device,
channels=1,
rate=self._rate,
input=True,
frames_per_buffer=self._chunk,
stream_callback=self._fill_buffer,
)
self.closed = False
return self
def __exit__(self, type, value, traceback):
self._audio_stream.stop_stream()
self._audio_stream.close()
self.closed = True
# Signal the generator to terminate so that the client's
# streaming_recognize method will not block the process termination.
self._buff.put(None)
self._audio_interface.terminate()
def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
"""Continuously collect data from the audio stream, into the buffer."""
self._buff.put(in_data)
return None, pyaudio.paContinue
def generator(self):
while not self.closed:
chunk = self._buff.get()
if chunk is None:
return
data = [chunk]
while True:
try:
chunk = self._buff.get(block=False)
if chunk is None:
return
data.append(chunk)
except queue.Empty:
break
yield b''.join(data)
def listen_print_loop(responses):
num_chars_printed = 0
for response in responses:
if not response.results:
continue
partial_transcript = ""
for result in response.results:
if not result.alternatives:
continue
transcript = result.alternatives[0].transcript
if not result.is_final:
partial_transcript += transcript
else:
overwrite_chars = ' ' * (num_chars_printed - len(transcript))
print("## " + transcript + overwrite_chars + "\n")
num_chars_printed = 0
if partial_transcript != "":
overwrite_chars = ' ' * (num_chars_printed - len(partial_transcript))
sys.stdout.write(">> " + partial_transcript + overwrite_chars + '\r')
sys.stdout.flush()
num_chars_printed = len(partial_transcript) + 3
def main():
args = get_args()
if args.list_devices:
p = pyaudio.PyAudio()
for i in range(p.get_device_count()):
info = p.get_device_info_by_index(i)
if info['maxInputChannels'] < 1:
continue
print(f"{info['index']}: {info['name']}")
sys.exit(0)
channel = grpc.insecure_channel(args.server)
client = rasr_srv.RivaSpeechRecognitionStub(channel)
config = rasr.RecognitionConfig(
encoding=ra.AudioEncoding.LINEAR_PCM,
sample_rate_hertz=RATE,
language_code="en-US",
max_alternatives=1,
enable_automatic_punctuation=True,
)
streaming_config = rasr.StreamingRecognitionConfig(config=config, interim_results=True)
with MicrophoneStream(RATE, CHUNK, device=args.input_device) as stream:
audio_generator = stream.generator()
requests = (rasr.StreamingRecognizeRequest(audio_content=content) for content in audio_generator)
def build_generator(cfg, gen):
yield rasr.StreamingRecognizeRequest(streaming_config=cfg)
for x in gen:
yield x
responses = client.StreamingRecognize(build_generator(streaming_config, requests))
listen_print_loop(responses)
if __name__ == '__main__':
main()
| fsi-samples-main | nlp_demo_riva/riva/examples/transcribe_mic.py |
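MicrophoneStream decouples the PyAudio callback thread from the gRPC request generator with a queue.Queue: the callback only enqueues chunks, while generator() blocks for one chunk and then drains whatever else has accumulated, so each yielded buffer batches all pending audio. A minimal sketch of that drain pattern in isolation (None is the shutdown sentinel, as in the class above):

    import queue

    def drain_batches(buff):
        while True:
            item = buff.get()            # block until at least one chunk arrives
            if item is None:
                return
            batch = [item]
            while True:                  # drain the rest without blocking
                try:
                    item = buff.get(block=False)
                except queue.Empty:
                    break
                if item is None:
                    return
                batch.append(item)
            yield b''.join(batch)

    q = queue.Queue()
    q.put(b'ab'); q.put(b'cd')
    gen = drain_batches(q)
    print(next(gen))   # b'abcd' -- both pending chunks batched into one yield
    q.put(None)        # sentinel ends the stream
    print(list(gen))   # []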
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import grpc
import riva_api.riva_audio_pb2 as ra
import riva_api.riva_asr_pb2 as rasr
import riva_api.riva_asr_pb2_grpc as rasr_srv
import wave
def get_args():
parser = argparse.ArgumentParser(description="Streaming transcription via Riva AI Services")
parser.add_argument("--server", default="localhost:50051", type=str, help="URI to GRPC server endpoint")
parser.add_argument("--audio-file", required=True, help="path to local file to stream")
return parser.parse_args()
args = get_args()
wf = wave.open(args.audio_file, 'rb')
with open(args.audio_file, 'rb') as fh:
data = fh.read()
channel = grpc.insecure_channel(args.server)
client = rasr_srv.RivaSpeechRecognitionStub(channel)
config = rasr.RecognitionConfig(
encoding=ra.AudioEncoding.LINEAR_PCM,
sample_rate_hertz=wf.getframerate(),
language_code="en-US",
max_alternatives=1,
enable_automatic_punctuation=False,
audio_channel_count=1,
)
request = rasr.RecognizeRequest(config=config, audio=data)
response = client.Recognize(request)
print(response)
| fsi-samples-main | nlp_demo_riva/riva/examples/transcribe_file_offline.py |
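Unlike the streaming clients, this script makes a single unary Recognize call with the whole file; the wave module is opened only to read the sample rate for the config. A hedged sketch that times the same call and reports a real-time factor, reusing the wf, client, config, and data objects built above (the RTF metric is the editor's addition, not the repository's):

    import time

    audio_seconds = wf.getnframes() / float(wf.getframerate())
    start = time.time()
    response = client.Recognize(rasr.RecognizeRequest(config=config, audio=data))
    elapsed = time.time() - start
    # RTF < 1.0 means the server transcribed faster than real time.
    print(f"audio: {audio_seconds:.2f}s, request: {elapsed:.2f}s, "
          f"RTF: {elapsed / audio_seconds:.2f}")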
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pyaudio
import argparse
import wave
import sys
import grpc
import riva_api.riva_audio_pb2 as ra
import riva_api.riva_asr_pb2 as rasr
import riva_api.riva_asr_pb2_grpc as rasr_srv
def get_args():
parser = argparse.ArgumentParser(description="Streaming transcription via Riva AI Services")
parser.add_argument("--server", default="localhost:50051", type=str, help="URI to GRPC server endpoint")
parser.add_argument("--audio-file", required=True, help="path to local file to stream")
parser.add_argument("--output-device", type=int, default=None, help="output device to use")
parser.add_argument("--list-devices", action="store_true", help="list output devices indices")
return parser.parse_args()
def listen_print_loop(responses):
num_chars_printed = 0
for response in responses:
if not response.results:
continue
partial_transcript = ""
for result in response.results:
if not result.alternatives:
continue
transcript = result.alternatives[0].transcript
if not result.is_final:
partial_transcript += transcript
else:
overwrite_chars = ' ' * (num_chars_printed - len(transcript))
print("## " + transcript + overwrite_chars + "\n")
num_chars_printed = 0
if partial_transcript != "":
overwrite_chars = ' ' * (num_chars_printed - len(partial_transcript))
sys.stdout.write(">> " + partial_transcript + overwrite_chars + '\r')
sys.stdout.flush()
num_chars_printed = len(partial_transcript) + 3
CHUNK = 1024
args = get_args()
wf = wave.open(args.audio_file, 'rb')
channel = grpc.insecure_channel(args.server)
client = rasr_srv.RivaSpeechRecognitionStub(channel)
config = rasr.RecognitionConfig(
encoding=ra.AudioEncoding.LINEAR_PCM,
sample_rate_hertz=wf.getframerate(),
language_code="en-US",
max_alternatives=1,
enable_automatic_punctuation=True,
)
streaming_config = rasr.StreamingRecognitionConfig(config=config, interim_results=True)
# instantiate PyAudio (1)
p = pyaudio.PyAudio()
if args.list_devices:
for i in range(p.get_device_count()):
info = p.get_device_info_by_index(i)
if info['maxOutputChannels'] < 1:
continue
print(f"{info['index']}: {info['name']}")
sys.exit(0)
# open stream (2)
stream = p.open(
output_device_index=args.output_device,
format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True,
)
# read data, playing it back locally while streaming it to the server (3)
def generator(w, s):
d = w.readframes(CHUNK)
yield rasr.StreamingRecognizeRequest(streaming_config=s)
while len(d) > 0:
yield rasr.StreamingRecognizeRequest(audio_content=d)
stream.write(d)
d = w.readframes(CHUNK)
return
responses = client.StreamingRecognize(generator(wf, streaming_config))
listen_print_loop(responses)
# stop stream (4)
stream.stop_stream()
stream.close()
# close PyAudio (5)
p.terminate()
| fsi-samples-main | nlp_demo_riva/riva/examples/transcribe_file_rt.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python
import time
import grpc
import numpy as np
import argparse
import riva_api.riva_audio_pb2 as ra
import riva_api.riva_tts_pb2 as rtts
import riva_api.riva_tts_pb2_grpc as rtts_srv
import wave
import pyaudio
def get_args():
parser = argparse.ArgumentParser(description="Streaming transcription via Riva AI Services")
parser.add_argument("--server", default="localhost:50051", type=str, help="URI to GRPC server endpoint")
parser.add_argument("--voice", type=str, help="voice name to use", default="ljspeech")
parser.add_argument("-o", "--output", default=None, type=str, help="Output file to write last utterance")
return parser.parse_args()
def main():
args = get_args()
channel = grpc.insecure_channel(args.server)
tts_client = rtts_srv.RivaSpeechSynthesisStub(channel)
audio_handle = pyaudio.PyAudio()
print("Connecting...")
print("Example query:")
print(
" Hello, My name is Linda"
+ ", and I am demonstrating streaming speech synthesis with Riva {@EY2}.I. services, running on NVIDIA {@JH}{@IY1}_{@P}{@IY}_{@Y}{@UW0}s."
)
req = rtts.SynthesizeSpeechRequest()
req.text = "Hello"
req.language_code = "en-US"
req.encoding = ra.AudioEncoding.LINEAR_PCM
req.sample_rate_hz = 22050
req.voice_name = args.voice
stream = audio_handle.open(format=pyaudio.paFloat32, channels=1, rate=22050, output=True)
while True:
print("Speak: ", end='')
req.text = str(input())
if args.output:
wav = wave.open(args.output, 'wb')
wav.setnchannels(1)
wav.setsampwidth(2)
wav.setframerate(req.sample_rate_hz)
print("Generating audio for request...")
print(f" > '{req.text}': ", end='')
start = time.time()
responses = tts_client.SynthesizeOnline(req)
stop = time.time()
first = True
for resp in responses:
stop = time.time()
if first:
print(f"Time to first audio: {(stop-start):.3f}s")
first = False
stream.write(resp.audio)
if args.output:
f32_output = (np.frombuffer(resp.audio, dtype=np.float32) * 32767).astype(np.int16)
wav.writeframesraw(f32_output)
if args.output:
wav.close()
stream.stop_stream()
stream.close()
if __name__ == '__main__':
main()
| fsi-samples-main | nlp_demo_riva/riva/examples/talk_stream.py |
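When writing the last utterance to disk, the script converts Riva's float32 PCM (the paFloat32 stream format) into 16-bit integers for the WAV container, which is why setsampwidth(2) is paired with the *32767 scale. The conversion in isolation:

    import numpy as np

    # float32 samples in [-1.0, 1.0], as delivered in resp.audio
    f32 = np.array([-1.0, -0.5, 0.0, 0.5, 1.0], dtype=np.float32)
    i16 = (f32 * 32767).astype(np.int16)   # scale to the 2-byte WAV sample range
    print(i16)                             # [-32767 -16383      0  16383  32767]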
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python
import time
import grpc
import numpy as np
import argparse
import riva_api.riva_audio_pb2 as ra
import riva_api.riva_tts_pb2 as rtts
import riva_api.riva_tts_pb2_grpc as rtts_srv
import wave
import pyaudio
def get_args():
parser = argparse.ArgumentParser(description="Streaming transcription via Riva AI Services")
parser.add_argument("--server", default="localhost:50051", type=str, help="URI to GRPC server endpoint")
parser.add_argument("--voice", type=str, help="voice name to use", default="ljspeech")
parser.add_argument("-o", "--output", default=None, type=str, help="Output file to write last utterance")
return parser.parse_args()
def main():
args = get_args()
channel = grpc.insecure_channel(args.server)
tts_client = rtts_srv.RivaSpeechSynthesisStub(channel)
audio_handle = pyaudio.PyAudio()
print("Example query:")
print(
" Hello, My name is Linda"
+ ", and I am demonstrating speech synthesis with Riva {@EY2}.I. services, running on NVIDIA {@JH}{@IY1}_{@P}{@IY}_{@Y}{@UW0}s."
)
req = rtts.SynthesizeSpeechRequest()
req.text = "Hello"
req.language_code = "en-US"
req.encoding = ra.AudioEncoding.LINEAR_PCM
req.sample_rate_hz = 22050
req.voice_name = args.voice
stream = audio_handle.open(format=pyaudio.paFloat32, channels=1, rate=22050, output=True)
while True:
print("Speak: ", end='')
req.text = str(input())
if args.output:
wav = wave.open(args.output, 'wb')
wav.setnchannels(1)
wav.setsampwidth(2)
wav.setframerate(req.sample_rate_hz)
print("Generating audio for request...")
print(f" > '{req.text}': ", end='')
start = time.time()
resp = tts_client.Synthesize(req)
stop = time.time()
print(f"Time to first audio: {(stop-start):.3f}s")
stream.write(resp.audio)
if args.output:
f32_output = (np.frombuffer(resp.audio, dtype=np.float32) * 32767).astype(np.int16)
wav.writeframesraw(f32_output)
wav.close()
stream.stop_stream()
stream.close()
if __name__ == '__main__':
main()
| fsi-samples-main | nlp_demo_riva/riva/examples/talk.py |
import wave
import sys
import grpc
import time
import argparse
import riva_api.riva_audio_pb2 as ra
import riva_api.riva_asr_pb2 as rasr
import riva_api.riva_asr_pb2_grpc as rasr_srv
def get_args():
parser = argparse.ArgumentParser(description="Streaming transcription via Riva AI Services")
parser.add_argument("--num-clients", default=1, type=int, help="Number of client threads")
parser.add_argument("--num-iterations", default=1, type=int, help="Number of iterations over the file")
parser.add_argument(
"--input-file", required=True, type=str, help="Name of the WAV file with LINEAR_PCM encoding to transcribe"
)
parser.add_argument(
"--simulate-realtime", default=False, action='store_true', help="Option to simulate realtime transcription"
)
parser.add_argument(
"--word-time-offsets", default=False, action='store_true', help="Option to output word timestamps"
)
parser.add_argument(
"--max-alternatives",
default=1,
type=int,
help="Maximum number of alternative transcripts to return (up to limit configured on server)",
)
parser.add_argument(
"--automatic-punctuation",
default=False,
action='store_true',
help="Flag that controls if transcript should be automatically punctuated",
)
parser.add_argument("--riva-uri", default="localhost:50051", type=str, help="URI to access Riva server")
parser.add_argument(
"--no-verbatim-transcripts",
default=False,
action='store_true',
help="If specified, text inverse normalization will be applied",
)
return parser.parse_args()
def print_to_file(responses, output_file, max_alternatives, word_time_offsets):
start_time = time.time()
with open(output_file, "w") as f:
for response in responses:
if not response.results:
continue
partial_transcript = ""
for result in response.results:
if result.is_final:
for index, alternative in enumerate(result.alternatives):
f.write(
"Time %.2fs: Transcript %d: %s\n"
% (time.time() - start_time, index, alternative.transcript)
)
if word_time_offsets:
f.write("Timestamps:\n")
f.write("%-40s %-16s %-16s\n" % ("Word", "Start (ms)", "End (ms)"))
for word_info in result.alternatives[0].words:
f.write(
"%-40s %-16.0f %-16.0f\n" % (word_info.word, word_info.start_time, word_info.end_time)
)
else:
transcript = result.alternatives[0].transcript
partial_transcript += transcript
f.write(">>>Time %.2fs: %s\n" % (time.time() - start_time, partial_transcript))
def asr_client(
id,
output_file,
input_file,
num_iterations,
simulate_realtime,
riva_uri,
max_alternatives,
automatic_punctuation,
word_time_offsets,
verbatim_transcripts,
):
CHUNK = 1600
channel = grpc.insecure_channel(riva_uri)
wf = wave.open(input_file, 'rb')
frames = wf.getnframes()
rate = wf.getframerate()
duration = frames / float(rate)
if id == 0:
print("File duration: %.2fs" % duration)
client = rasr_srv.RivaSpeechRecognitionStub(channel)
config = rasr.RecognitionConfig(
encoding=ra.AudioEncoding.LINEAR_PCM,
sample_rate_hertz=wf.getframerate(),
language_code="en-US",
max_alternatives=max_alternatives,
enable_automatic_punctuation=automatic_punctuation,
enable_word_time_offsets=word_time_offsets,
verbatim_transcripts=verbatim_transcripts,
)
    streaming_config = rasr.StreamingRecognitionConfig(config=config, interim_results=True)
def generator(w, s, num_iterations, output_file):
try:
for i in range(num_iterations):
w = wave.open(input_file, 'rb')
start_time = time.time()
yield rasr.StreamingRecognizeRequest(streaming_config=s)
num_requests = 0
                while True:
d = w.readframes(CHUNK)
if len(d) <= 0:
break
num_requests += 1
if simulate_realtime:
time_to_sleep = max(0.0, CHUNK / rate * num_requests - (time.time() - start_time))
time.sleep(time_to_sleep)
yield rasr.StreamingRecognizeRequest(audio_content=d)
w.close()
except Exception as e:
print(e)
responses = client.StreamingRecognize(generator(wf, streaming_config, num_iterations, output_file))
print_to_file(responses, output_file, max_alternatives, word_time_offsets)
from threading import Thread
parser = get_args()
print("Number of clients:", parser.num_clients)
print("Number of iteration:", parser.num_iterations)
print("Input file:", parser.input_file)
threads = []
output_filenames = []
for i in range(parser.num_clients):
output_filenames.append("output_%d.txt" % i)
t = Thread(
target=asr_client,
args=(
i,
output_filenames[-1],
parser.input_file,
parser.num_iterations,
parser.simulate_realtime,
parser.riva_uri,
parser.max_alternatives,
parser.automatic_punctuation,
parser.word_time_offsets,
not parser.no_verbatim_transcripts,
),
)
t.start()
threads.append(t)
for i, t in enumerate(threads):
t.join()
print(str(parser.num_clients), "threads done, output written to output_<thread_id>.txt")
| fsi-samples-main | nlp_demo_riva/riva/examples/riva_streaming_asr_client.py |
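The --simulate-realtime path paces the generator with simple arithmetic: at CHUNK = 1600 frames and a 16 kHz file, each request covers 100 ms, and the sleep is the gap between where playback should be (num_requests * CHUNK / rate) and the wall-clock time already spent. A self-contained sketch of that pacing loop:

    import time

    CHUNK = 1600    # frames per request
    rate = 16000    # Hz -> each chunk spans CHUNK / rate = 0.1 s

    start_time = time.time()
    for num_requests in range(1, 4):
        target = CHUNK / rate * num_requests             # ideal send time
        time_to_sleep = max(0.0, target - (time.time() - start_time))
        time.sleep(time_to_sleep)
        print("request %d sent at ~%.2fs" % (num_requests, time.time() - start_time))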
#!/usr/bin/python
import sys
import html5lib
htmlfilename = sys.argv[1]
htmlfile = open(htmlfilename)
try:
doc = html5lib.parse(htmlfile, treebuilder="dom")
finally:
htmlfile.close()
def elementHasClass(el, classArg):
"""
Return true if and only if classArg is one of the classes of el
"""
    classes = [ c for c in el.getAttribute("class").split(" ") if c != "" ]
return classArg in classes
def elementTextContent(el):
"""
Implementation of DOM Core's .textContent
"""
textContent = ""
for child in el.childNodes:
if child.nodeType == 3: # Node.TEXT_NODE
textContent += child.data
elif child.nodeType == 1: # Node.ELEMENT_NODE
textContent += elementTextContent(child)
else:
# Other nodes are ignored
pass
return textContent
preList = doc.getElementsByTagName("pre")
idlList = [elementTextContent(p) for p in preList if elementHasClass(p, "idl") ]
print "\n\n".join(idlList)
| WebGL-master | specs/latest/1.0/extract-idl.py |
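elementTextContent is a recursive stand-in for DOM's .textContent, which minidom does not provide: it concatenates text nodes and descends into element nodes. A quick check with a tiny in-memory document, assuming the function defined above is in scope:

    from xml.dom.minidom import parseString

    doc = parseString('<pre class="idl">interface <b>Foo</b> {};</pre>')
    pre = doc.getElementsByTagName("pre")[0]
    print(elementTextContent(pre))   # -> interface Foo {};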
#!/usr/bin/python
import sys
import html5lib
htmlfilename = sys.argv[1]
htmlfile = open(htmlfilename)
try:
doc = html5lib.parse(htmlfile, treebuilder="dom")
finally:
htmlfile.close()
def elementHasClass(el, classArg):
"""
Return true if and only if classArg is one of the classes of el
"""
    classes = [ c for c in el.getAttribute("class").split(" ") if c != "" ]
return classArg in classes
def elementTextContent(el):
"""
Implementation of DOM Core's .textContent
"""
textContent = ""
for child in el.childNodes:
if child.nodeType == 3: # Node.TEXT_NODE
textContent += child.data
elif child.nodeType == 1: # Node.ELEMENT_NODE
textContent += elementTextContent(child)
else:
# Other nodes are ignored
pass
return textContent
preList = doc.getElementsByTagName("pre")
idlList = [elementTextContent(p) for p in preList if elementHasClass(p, "idl") ]
print "\n\n".join(idlList)
| WebGL-master | specs/latest/2.0/extract-idl.py |
#!/usr/bin/python
import sys
import html5lib
htmlfilename = sys.argv[1]
htmlfile = open(htmlfilename)
try:
doc = html5lib.parse(htmlfile, treebuilder="dom")
finally:
htmlfile.close()
def elementHasClass(el, classArg):
"""
Return true if and only if classArg is one of the classes of el
"""
    classes = [ c for c in el.getAttribute("class").split(" ") if c != "" ]
return classArg in classes
def elementTextContent(el):
"""
Implementation of DOM Core's .textContent
"""
textContent = ""
for child in el.childNodes:
if child.nodeType == 3: # Node.TEXT_NODE
textContent += child.data
elif child.nodeType == 1: # Node.ELEMENT_NODE
textContent += elementTextContent(child)
else:
# Other nodes are ignored
pass
return textContent
preList = doc.getElementsByTagName("pre")
idlList = [elementTextContent(p) for p in preList if elementHasClass(p, "idl") ]
print "\n\n".join(idlList)
| WebGL-master | specs/1.0.3/extract-idl.py |
#!/usr/bin/python
import sys
import html5lib
htmlfilename = sys.argv[1]
htmlfile = open(htmlfilename)
try:
doc = html5lib.parse(htmlfile, treebuilder="dom")
finally:
htmlfile.close()
def elementHasClass(el, classArg):
"""
Return true if and only if classArg is one of the classes of el
"""
    classes = [ c for c in el.getAttribute("class").split(" ") if c != "" ]
return classArg in classes
def elementTextContent(el):
"""
Implementation of DOM Core's .textContent
"""
textContent = ""
for child in el.childNodes:
if child.nodeType == 3: # Node.TEXT_NODE
textContent += child.data
elif child.nodeType == 1: # Node.ELEMENT_NODE
textContent += elementTextContent(child)
else:
# Other nodes are ignored
pass
return textContent
preList = doc.getElementsByTagName("pre")
idlList = [elementTextContent(p) for p in preList if elementHasClass(p, "idl") ]
print "\n\n".join(idlList)
| WebGL-master | specs/1.0.2/extract-idl.py |
#!/usr/bin/python
"""generates tests from OpenGL ES 2.0 .run/.test files."""
import os
import os.path
import sys
import re
import json
import shutil
from optparse import OptionParser
from xml.dom.minidom import parse
if sys.version < '2.6':
print 'Wrong Python Version !!!: Need >= 2.6'
sys.exit(1)
# each shader test generates up to 3 512x512 images.
# a 512x512 image takes 1 meg of memory, so set this
# number appropriately for the platform with
# the least available memory. At 8 that means
# at least 24 meg is needed to run the test.
MAX_TESTS_PER_SET = 8
VERBOSE = False
FILTERS = [
re.compile("GL/"),
]
LICENSE = """
/*
** Copyright (c) 2012 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a
** copy of this software and/or associated documentation files (the
** "Materials"), to deal in the Materials without restriction, including
** without limitation the rights to use, copy, modify, merge, publish,
** distribute, sublicense, and/or sell copies of the Materials, and to
** permit persons to whom the Materials are furnished to do so, subject to
** the following conditions:
**
** The above copyright notice and this permission notice shall be included
** in all copies or substantial portions of the Materials.
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
*/
"""
COMMENT_RE = re.compile(r"/\*\n\*\*\s+Copyright.*?\*/",
                        re.IGNORECASE | re.DOTALL)
REMOVE_COPYRIGHT_RE = re.compile(r"\/\/\s+Copyright.*?\n",
                                 re.IGNORECASE | re.DOTALL)
MATRIX_RE = re.compile(r"Matrix(\d)")
VALID_UNIFORM_TYPES = [
"uniform1f",
"uniform1fv",
"uniform1fv",
"uniform1i",
"uniform1iv",
"uniform1iv",
"uniform2f",
"uniform2fv",
"uniform2fv",
"uniform2i",
"uniform2iv",
"uniform2iv",
"uniform3f",
"uniform3fv",
"uniform3fv",
"uniform3i",
"uniform3iv",
"uniform3iv",
"uniform4f",
"uniform4fv",
"uniform4fv",
"uniform4i",
"uniform4iv",
"uniform4ivy",
"uniformMatrix2fv",
"uniformMatrix2fv",
"uniformMatrix3fv",
"uniformMatrix3fv",
"uniformMatrix4fv",
"uniformMatrix4fv",
]
SUBSTITUTIONS = [
("uniformmat3fv", "uniformMatrix3fv"),
("uniformmat4fv", "uniformMatrix4fv"),
]
def Log(msg):
global VERBOSE
if VERBOSE:
print msg
def TransposeMatrix(values, dim):
size = dim * dim
    count = len(values) // size
for m in range(0, count):
offset = m * size
for i in range(0, dim):
for j in range(i + 1, dim):
t = values[offset + i * dim + j]
values[offset + i * dim + j] = values[offset + j * dim + i]
values[offset + j * dim + i] = t
def GetValidTypeName(type_name):
global VALID_UNIFORM_TYPES
global SUBSTITUTIONS
for subst in SUBSTITUTIONS:
type_name = type_name.replace(subst[0], subst[1])
if not type_name in VALID_UNIFORM_TYPES:
print "unknown type name: ", type_name
raise SyntaxError
return type_name
def WriteOpen(filename):
dirname = os.path.dirname(filename)
if len(dirname) > 0 and not os.path.exists(dirname):
os.makedirs(dirname)
return open(filename, "wb")
class TxtWriter():
def __init__(self, filename):
self.filename = filename
self.lines = []
def Write(self, line):
self.lines.append(line)
def Close(self):
if len(self.lines) > 0:
Log("Writing: %s" % self.filename)
f = WriteOpen(self.filename)
f.write("# this file is auto-generated. DO NOT EDIT.\n")
f.write("".join(self.lines))
f.close()
def ReadFileAsLines(filename):
f = open(filename, "r")
lines = f.readlines()
f.close()
return [line.strip() for line in lines]
def ReadFile(filename):
f = open(filename, "r")
content = f.read()
f.close()
return content.replace("\r\n", "\n")
def Chunkify(list, chunk_size):
"""divides an array into chunks of chunk_size"""
return [list[i:i + chunk_size] for i in range(0, len(list), chunk_size)]
def GetText(nodelist):
"""Gets the text of from a list of nodes"""
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def GetElementText(node, name):
"""Gets the text of an element"""
elements = node.getElementsByTagName(name)
if len(elements) > 0:
return GetText(elements[0].childNodes)
else:
return None
def GetBoolElement(node, name):
text = GetElementText(node, name)
return text.lower() == "true"
def GetModel(node):
"""Gets the model"""
model = GetElementText(node, "model")
if model and len(model.strip()) == 0:
elements = node.getElementsByTagName("model")
if len(elements) > 0:
model = GetElementText(elements[0], "filename")
return model
def RelativizePaths(base, paths, template):
"""converts paths to relative paths"""
rels = []
for p in paths:
#print "---"
#print "base: ", os.path.abspath(base)
#print "path: ", os.path.abspath(p)
relpath = os.path.relpath(os.path.abspath(p), os.path.dirname(os.path.abspath(base))).replace("\\", "/")
#print "rel : ", relpath
rels.append(template % relpath)
return "\n".join(rels)
def CopyFile(filename, src, dst):
s = os.path.abspath(os.path.join(os.path.dirname(src), filename))
d = os.path.abspath(os.path.join(os.path.dirname(dst), filename))
dst_dir = os.path.dirname(d)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
shutil.copyfile(s, d)
def CopyShader(filename, src, dst):
s = os.path.abspath(os.path.join(os.path.dirname(src), filename))
d = os.path.abspath(os.path.join(os.path.dirname(dst), filename))
text = ReadFile(s)
# By agreement with the Khronos OpenGL working group we are allowed
# to open source only the .vert and .frag files from the OpenGL ES 2.0
# conformance tests. All other files from the OpenGL ES 2.0 conformance
# tests are not included.
marker = "insert-copyright-here"
new_text = COMMENT_RE.sub(marker, text)
if new_text == text:
print "no matching license found:", s
raise RuntimeError
new_text = REMOVE_COPYRIGHT_RE.sub("", new_text)
new_text = new_text.replace(marker, LICENSE)
f = WriteOpen(d)
f.write(new_text)
f.close()
def IsOneOf(string, regexs):
for regex in regexs:
if re.match(regex, string):
return True
return False
def CheckForUnknownTags(valid_tags, node, depth=1):
"""do a hacky check to make sure we're not missing something."""
for child in node.childNodes:
if child.localName and not IsOneOf(child.localName, valid_tags[0]):
print "unsupported tag:", child.localName
print "depth:", depth
raise SyntaxError
else:
if len(valid_tags) > 1:
CheckForUnknownTags(valid_tags[1:], child, depth + 1)
def IsFileWeWant(filename):
for f in FILTERS:
if f.search(filename):
return True
return False
class TestReader():
"""class to read and parse tests"""
def __init__(self, basepath):
self.tests = []
self.modes = {}
self.patterns = {}
        self.basepath = basepath
        # Print() below tests self.verbose, which was never initialized;
        # mirror the module-level flag so it cannot raise AttributeError.
        self.verbose = VERBOSE
def Print(self, msg):
if self.verbose:
print msg
def MakeOutPath(self, filename):
relpath = os.path.relpath(os.path.abspath(filename), os.path.dirname(os.path.abspath(self.basepath)))
return relpath
def ReadTests(self, filename):
"""reads a .run file and parses."""
Log("reading %s" % filename)
outname = self.MakeOutPath(filename + ".txt")
f = TxtWriter(outname)
dirname = os.path.dirname(filename)
lines = ReadFileAsLines(filename)
count = 0
tests_data = []
for line in lines:
if len(line) > 0 and not line.startswith("#"):
fname = os.path.join(dirname, line)
if line.endswith(".run"):
if self.ReadTests(fname):
f.Write(line + ".txt\n")
count += 1
elif line.endswith(".test"):
tests_data.extend(self.ReadTest(fname))
else:
print "Error in %s:%d:%s" % (filename, count, line)
raise SyntaxError()
if len(tests_data):
global MAX_TESTS_PER_SET
sets = Chunkify(tests_data, MAX_TESTS_PER_SET)
id = 1
for set in sets:
suffix = "_%03d_to_%03d" % (id, id + len(set) - 1)
test_outname = self.MakeOutPath(filename + suffix + ".html")
if os.path.basename(test_outname).startswith("input.run"):
dname = os.path.dirname(test_outname)
folder_name = os.path.basename(dname)
test_outname = os.path.join(dname, folder_name + suffix + ".html")
self.WriteTests(filename, test_outname, {"tests":set})
f.Write(os.path.basename(test_outname) + "\n")
id += len(set)
count += 1
f.Close()
return count
def ReadTest(self, filename):
"""reads a .test file and parses."""
Log("reading %s" % filename)
dom = parse(filename)
tests = dom.getElementsByTagName("test")
tests_data = []
outname = self.MakeOutPath(filename + ".html")
for test in tests:
if not IsFileWeWant(filename):
self.CopyShaders(test, filename, outname)
else:
test_data = self.ProcessTest(test, filename, outname, len(tests_data))
if test_data:
tests_data.append(test_data)
return tests_data
def ProcessTest(self, test, filename, outname, id):
"""Process a test"""
mode = test.getAttribute("mode")
pattern = test.getAttribute("pattern")
self.modes[mode] = 1
self.patterns[pattern] = 1
Log ("%d: mode: %s pattern: %s" % (id, mode, pattern))
method = getattr(self, 'Process_' + pattern)
test_data = method(test, filename, outname)
if test_data:
test_data["pattern"] = pattern
return test_data
def WriteTests(self, filename, outname, tests_data):
Log("Writing %s" % outname)
template = """<!DOCTYPE html>
<!-- this file is auto-generated. DO NOT EDIT.
%(license)s
-->
<html>
<head>
<meta charset="utf-8">
<title>WebGL GLSL conformance test: %(title)s</title>
%(css)s
%(scripts)s
</head>
<body>
<canvas id="example" width="500" height="500" style="width: 16px; height: 16px;"></canvas>
<div id="description"></div>
<div id="console"></div>
</body>
<script>
"use strict";
OpenGLESTestRunner.run(%(tests_data)s);
var successfullyParsed = true;
</script>
</html>
"""
css = [
"../../resources/js-test-style.css",
"../resources/ogles-tests.css",
]
scripts = [
"../../resources/js-test-pre.js",
"../resources/webgl-test-utils.js",
"ogles-utils.js",
]
css_html = RelativizePaths(outname, css, '<link rel="stylesheet" href="%s" />')
scripts_html = RelativizePaths(outname, scripts, '<script src="%s"></script>')
f = WriteOpen(outname)
f.write(template % {
"license": LICENSE,
"css": css_html,
"scripts": scripts_html,
"title": os.path.basename(outname),
"tests_data": json.dumps(tests_data, indent=2)
})
f.close()
def CopyShaders(self, test, filename, outname):
"""For tests we don't actually support yet, at least copy the shaders"""
shaders = test.getElementsByTagName("shader")
for shader in shaders:
for name in ["vertshader", "fragshader"]:
s = GetElementText(shader, name)
if s and s != "empty":
CopyShader(s, filename, outname)
#
# pattern handlers.
#
def Process_compare(self, test, filename, outname):
global MATRIX_RE
valid_tags = [
["shader", "model", "glstate"],
["uniform", "vertshader", "fragshader", "filename", "depthrange"],
["name", "count", "transpose", "uniform*", "near", "far"],
]
CheckForUnknownTags(valid_tags, test)
# parse the test
shaders = test.getElementsByTagName("shader")
shaderInfos = []
for shader in shaders:
v = GetElementText(shader, "vertshader")
f = GetElementText(shader, "fragshader")
CopyShader(v, filename, outname)
CopyShader(f, filename, outname)
info = {
"vertexShader": v,
"fragmentShader": f,
}
shaderInfos.append(info)
uniformElems = shader.getElementsByTagName("uniform")
if len(uniformElems) > 0:
uniforms = {}
info["uniforms"] = uniforms
for uniformElem in uniformElems:
uniform = {"count": 1}
for child in uniformElem.childNodes:
if child.localName == None:
pass
elif child.localName == "name":
uniforms[GetText(child.childNodes)] = uniform
elif child.localName == "count":
uniform["count"] = int(GetText(child.childNodes))
elif child.localName == "transpose":
uniform["transpose"] = (GetText(child.childNodes) == "true")
else:
if "type" in uniform:
print "utype was:", uniform["type"], " found ", child.localName
raise SyntaxError
type_name = GetValidTypeName(child.localName)
uniform["type"] = type_name
valueText = GetText(child.childNodes).replace(",", " ")
uniform["value"] = [float(t) for t in valueText.split()]
m = MATRIX_RE.search(type_name)
if m:
# Why are these backward from the API?!?!?
TransposeMatrix(uniform["value"], int(m.group(1)))
data = {
"name": os.path.basename(outname),
"model": GetModel(test),
"referenceProgram": shaderInfos[1],
"testProgram": shaderInfos[0],
}
gl_states = test.getElementsByTagName("glstate")
if len(gl_states) > 0:
state = {}
data["state"] = state
for gl_state in gl_states:
for state_name in gl_state.childNodes:
if state_name.localName:
values = {}
for field in state_name.childNodes:
if field.localName:
values[field.localName] = GetText(field.childNodes)
state[state_name.localName] = values
return data
def Process_shaderload(self, test, filename, outname):
"""no need for shaderload tests"""
self.CopyShaders(test, filename, outname)
def Process_extension(self, test, filename, outname):
"""no need for extension tests"""
self.CopyShaders(test, filename, outname)
def Process_createtests(self, test, filename, outname):
Log("createtests Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_GL2Test(self, test, filename, outname):
Log("GL2Test Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_uniformquery(self, test, filename, outname):
Log("uniformquery Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_egl_image_external(self, test, filename, outname):
"""no need for egl_image_external tests"""
self.CopyShaders(test, filename, outname)
def Process_dismount(self, test, filename, outname):
Log("dismount Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_build(self, test, filename, outname):
"""don't need build tests"""
valid_tags = [
["shader", "compstat", "linkstat"],
["vertshader", "fragshader"],
]
CheckForUnknownTags(valid_tags, test)
shader = test.getElementsByTagName("shader")
if not shader:
return None
vs = GetElementText(shader[0], "vertshader")
fs = GetElementText(shader[0], "fragshader")
if vs and vs != "empty":
CopyShader(vs, filename, outname)
if fs and fs != "empty":
CopyShader(fs, filename, outname)
data = {
"name": os.path.basename(outname),
"compstat": bool(GetBoolElement(test, "compstat")),
"linkstat": bool(GetBoolElement(test, "linkstat")),
"testProgram": {
"vertexShader": vs,
"fragmentShader": fs,
},
}
attach = test.getElementsByTagName("attach")
if len(attach) > 0:
data["attachError"] = GetElementText(attach[0], "attacherror")
return data
def Process_coverage(self, test, filename, outname):
Log("coverage Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_attributes(self, test, filename, outname):
Log("attributes Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_fixed(self, test, filename, outname):
"""no need for fixed function tests"""
self.CopyShaders(test, filename, outname)
def main(argv):
"""This is the main function."""
global VERBOSE
parser = OptionParser()
parser.add_option(
"-v", "--verbose", action="store_true",
help="prints more output.")
(options, args) = parser.parse_args(args=argv)
if len(args) < 1:
        # The old placeholder here was "pass # fix me"; fail loudly instead
        # when no .run file is given.
        parser.error("expected the path to a .run file")
os.chdir(os.path.dirname(__file__) or '.')
VERBOSE = options.verbose
filename = args[0]
test_reader = TestReader(filename)
test_reader.ReadTests(filename)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| WebGL-master | conformance-suites/1.0.3/conformance/ogles/process-ogles2-tests.py |
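TransposeMatrix transposes count square matrices stored flat in one list, swapping entries above the diagonal in place; the .test files store matrices in the opposite order from what the uniformMatrix*fv API expects, hence the "backward from the API" comment. A small worked example (Python 2, like the script itself), assuming the function above is in scope:

    # One 2x2 matrix, row-major: [[1, 2], [3, 4]] flattened to [1, 2, 3, 4]
    values = [1, 2, 3, 4]
    TransposeMatrix(values, 2)   # mutates in place
    print(values)                # [1, 3, 2, 4], i.e. [[1, 3], [2, 4]]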
#!/usr/bin/python
"""generates tests from OpenGL ES 2.0 .run/.test files."""
import os
import os.path
import sys
import re
import json
import shutil
from optparse import OptionParser
from xml.dom.minidom import parse
if sys.version < '2.6':
print 'Wrong Python Version !!!: Need >= 2.6'
sys.exit(1)
# each shader test generates up to 3 512x512 images.
# a 512x512 image takes 1 meg of memory, so set this
# number appropriately for the platform with
# the least available memory. At 8 that means
# at least 24 meg is needed to run the test.
MAX_TESTS_PER_SET = 8
VERBOSE = False
FILTERS = [
re.compile("GL/"),
]
LICENSE = """
/*
** Copyright (c) 2012 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a
** copy of this software and/or associated documentation files (the
** "Materials"), to deal in the Materials without restriction, including
** without limitation the rights to use, copy, modify, merge, publish,
** distribute, sublicense, and/or sell copies of the Materials, and to
** permit persons to whom the Materials are furnished to do so, subject to
** the following conditions:
**
** The above copyright notice and this permission notice shall be included
** in all copies or substantial portions of the Materials.
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
*/
"""
COMMENT_RE = re.compile(r"/\*\n\*\*\s+Copyright.*?\*/",
                        re.IGNORECASE | re.DOTALL)
REMOVE_COPYRIGHT_RE = re.compile(r"\/\/\s+Copyright.*?\n",
                                 re.IGNORECASE | re.DOTALL)
MATRIX_RE = re.compile(r"Matrix(\d)")
VALID_UNIFORM_TYPES = [
"uniform1f",
"uniform1fv",
"uniform1fv",
"uniform1i",
"uniform1iv",
"uniform1iv",
"uniform2f",
"uniform2fv",
"uniform2fv",
"uniform2i",
"uniform2iv",
"uniform2iv",
"uniform3f",
"uniform3fv",
"uniform3fv",
"uniform3i",
"uniform3iv",
"uniform3iv",
"uniform4f",
"uniform4fv",
"uniform4fv",
"uniform4i",
"uniform4iv",
"uniform4ivy",
"uniformMatrix2fv",
"uniformMatrix2fv",
"uniformMatrix3fv",
"uniformMatrix3fv",
"uniformMatrix4fv",
"uniformMatrix4fv",
]
SUBSTITUTIONS = [
("uniformmat3fv", "uniformMatrix3fv"),
("uniformmat4fv", "uniformMatrix4fv"),
]
def Log(msg):
global VERBOSE
if VERBOSE:
print msg
def TransposeMatrix(values, dim):
size = dim * dim
    count = len(values) // size
for m in range(0, count):
offset = m * size
for i in range(0, dim):
for j in range(i + 1, dim):
t = values[offset + i * dim + j]
values[offset + i * dim + j] = values[offset + j * dim + i]
values[offset + j * dim + i] = t
def GetValidTypeName(type_name):
global VALID_UNIFORM_TYPES
global SUBSTITUTIONS
for subst in SUBSTITUTIONS:
type_name = type_name.replace(subst[0], subst[1])
if not type_name in VALID_UNIFORM_TYPES:
print "unknown type name: ", type_name
raise SyntaxError
return type_name
def WriteOpen(filename):
dirname = os.path.dirname(filename)
if len(dirname) > 0 and not os.path.exists(dirname):
os.makedirs(dirname)
return open(filename, "wb")
class TxtWriter():
def __init__(self, filename):
self.filename = filename
self.lines = []
def Write(self, line):
self.lines.append(line)
def Close(self):
if len(self.lines) > 0:
Log("Writing: %s" % self.filename)
f = WriteOpen(self.filename)
f.write("# this file is auto-generated. DO NOT EDIT.\n")
f.write("".join(self.lines))
f.close()
def ReadFileAsLines(filename):
f = open(filename, "r")
lines = f.readlines()
f.close()
return [line.strip() for line in lines]
def ReadFile(filename):
f = open(filename, "r")
content = f.read()
f.close()
return content.replace("\r\n", "\n")
def Chunkify(list, chunk_size):
"""divides an array into chunks of chunk_size"""
return [list[i:i + chunk_size] for i in range(0, len(list), chunk_size)]
def GetText(nodelist):
"""Gets the text of from a list of nodes"""
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def GetElementText(node, name):
"""Gets the text of an element"""
elements = node.getElementsByTagName(name)
if len(elements) > 0:
return GetText(elements[0].childNodes)
else:
return None
def GetBoolElement(node, name):
text = GetElementText(node, name)
return text.lower() == "true"
def GetModel(node):
"""Gets the model"""
model = GetElementText(node, "model")
if model and len(model.strip()) == 0:
elements = node.getElementsByTagName("model")
if len(elements) > 0:
model = GetElementText(elements[0], "filename")
return model
def RelativizePaths(base, paths, template):
"""converts paths to relative paths"""
rels = []
for p in paths:
#print "---"
#print "base: ", os.path.abspath(base)
#print "path: ", os.path.abspath(p)
relpath = os.path.relpath(os.path.abspath(p), os.path.dirname(os.path.abspath(base))).replace("\\", "/")
#print "rel : ", relpath
rels.append(template % relpath)
return "\n".join(rels)
def CopyFile(filename, src, dst):
s = os.path.abspath(os.path.join(os.path.dirname(src), filename))
d = os.path.abspath(os.path.join(os.path.dirname(dst), filename))
dst_dir = os.path.dirname(d)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
shutil.copyfile(s, d)
def CopyShader(filename, src, dst):
s = os.path.abspath(os.path.join(os.path.dirname(src), filename))
d = os.path.abspath(os.path.join(os.path.dirname(dst), filename))
text = ReadFile(s)
# By agreement with the Khronos OpenGL working group we are allowed
# to open source only the .vert and .frag files from the OpenGL ES 2.0
# conformance tests. All other files from the OpenGL ES 2.0 conformance
# tests are not included.
marker = "insert-copyright-here"
new_text = COMMENT_RE.sub(marker, text)
if new_text == text:
print "no matching license found:", s
raise RuntimeError
new_text = REMOVE_COPYRIGHT_RE.sub("", new_text)
new_text = new_text.replace(marker, LICENSE)
f = WriteOpen(d)
f.write(new_text)
f.close()
def IsOneOf(string, regexs):
for regex in regexs:
if re.match(regex, string):
return True
return False
def CheckForUnknownTags(valid_tags, node, depth=1):
"""do a hacky check to make sure we're not missing something."""
for child in node.childNodes:
if child.localName and not IsOneOf(child.localName, valid_tags[0]):
print "unsupported tag:", child.localName
print "depth:", depth
raise SyntaxError
else:
if len(valid_tags) > 1:
CheckForUnknownTags(valid_tags[1:], child, depth + 1)
def IsFileWeWant(filename):
for f in FILTERS:
if f.search(filename):
return True
return False
class TestReader():
"""class to read and parse tests"""
def __init__(self, basepath):
self.tests = []
self.modes = {}
self.patterns = {}
        self.basepath = basepath
        # Print() below tests self.verbose, which was never initialized;
        # mirror the module-level flag so it cannot raise AttributeError.
        self.verbose = VERBOSE
def Print(self, msg):
if self.verbose:
print msg
def MakeOutPath(self, filename):
relpath = os.path.relpath(os.path.abspath(filename), os.path.dirname(os.path.abspath(self.basepath)))
return relpath
def ReadTests(self, filename):
"""reads a .run file and parses."""
Log("reading %s" % filename)
outname = self.MakeOutPath(filename + ".txt")
f = TxtWriter(outname)
dirname = os.path.dirname(filename)
lines = ReadFileAsLines(filename)
count = 0
tests_data = []
for line in lines:
if len(line) > 0 and not line.startswith("#"):
fname = os.path.join(dirname, line)
if line.endswith(".run"):
if self.ReadTests(fname):
f.Write(line + ".txt\n")
count += 1
elif line.endswith(".test"):
tests_data.extend(self.ReadTest(fname))
else:
print "Error in %s:%d:%s" % (filename, count, line)
raise SyntaxError()
if len(tests_data):
global MAX_TESTS_PER_SET
sets = Chunkify(tests_data, MAX_TESTS_PER_SET)
id = 1
for set in sets:
suffix = "_%03d_to_%03d" % (id, id + len(set) - 1)
test_outname = self.MakeOutPath(filename + suffix + ".html")
if os.path.basename(test_outname).startswith("input.run"):
dname = os.path.dirname(test_outname)
folder_name = os.path.basename(dname)
test_outname = os.path.join(dname, folder_name + suffix + ".html")
self.WriteTests(filename, test_outname, {"tests":set})
f.Write(os.path.basename(test_outname) + "\n")
id += len(set)
count += 1
f.Close()
return count
def ReadTest(self, filename):
"""reads a .test file and parses."""
Log("reading %s" % filename)
dom = parse(filename)
tests = dom.getElementsByTagName("test")
tests_data = []
outname = self.MakeOutPath(filename + ".html")
for test in tests:
if not IsFileWeWant(filename):
self.CopyShaders(test, filename, outname)
else:
test_data = self.ProcessTest(test, filename, outname, len(tests_data))
if test_data:
tests_data.append(test_data)
return tests_data
def ProcessTest(self, test, filename, outname, id):
"""Process a test"""
mode = test.getAttribute("mode")
pattern = test.getAttribute("pattern")
self.modes[mode] = 1
self.patterns[pattern] = 1
Log ("%d: mode: %s pattern: %s" % (id, mode, pattern))
method = getattr(self, 'Process_' + pattern)
test_data = method(test, filename, outname)
if test_data:
test_data["pattern"] = pattern
return test_data
def WriteTests(self, filename, outname, tests_data):
Log("Writing %s" % outname)
template = """<!DOCTYPE html>
<!-- this file is auto-generated. DO NOT EDIT.
%(license)s
-->
<html>
<head>
<meta charset="utf-8">
<title>WebGL GLSL conformance test: %(title)s</title>
%(css)s
%(scripts)s
</head>
<body>
<canvas id="example" width="500" height="500" style="width: 16px; height: 16px;"></canvas>
<div id="description"></div>
<div id="console"></div>
</body>
<script>
"use strict";
OpenGLESTestRunner.run(%(tests_data)s);
var successfullyParsed = true;
</script>
</html>
"""
css = [
"../../resources/js-test-style.css",
"../resources/ogles-tests.css",
]
scripts = [
"../../resources/js-test-pre.js",
"../resources/webgl-test.js",
"../resources/webgl-test-utils.js",
"ogles-utils.js",
]
css_html = RelativizePaths(outname, css, '<link rel="stylesheet" href="%s" />')
scripts_html = RelativizePaths(outname, scripts, '<script src="%s"></script>')
f = WriteOpen(outname)
f.write(template % {
"license": LICENSE,
"css": css_html,
"scripts": scripts_html,
"title": os.path.basename(outname),
"tests_data": json.dumps(tests_data, indent=2)
})
f.close()
def CopyShaders(self, test, filename, outname):
"""For tests we don't actually support yet, at least copy the shaders"""
shaders = test.getElementsByTagName("shader")
for shader in shaders:
for name in ["vertshader", "fragshader"]:
s = GetElementText(shader, name)
if s and s != "empty":
CopyShader(s, filename, outname)
#
# pattern handlers.
#
def Process_compare(self, test, filename, outname):
global MATRIX_RE
valid_tags = [
["shader", "model", "glstate"],
["uniform", "vertshader", "fragshader", "filename", "depthrange"],
["name", "count", "transpose", "uniform*", "near", "far"],
]
CheckForUnknownTags(valid_tags, test)
# parse the test
shaders = test.getElementsByTagName("shader")
shaderInfos = []
for shader in shaders:
v = GetElementText(shader, "vertshader")
f = GetElementText(shader, "fragshader")
CopyShader(v, filename, outname)
CopyShader(f, filename, outname)
info = {
"vertexShader": v,
"fragmentShader": f,
}
shaderInfos.append(info)
uniformElems = shader.getElementsByTagName("uniform")
if len(uniformElems) > 0:
uniforms = {}
info["uniforms"] = uniforms
for uniformElem in uniformElems:
uniform = {"count": 1}
for child in uniformElem.childNodes:
if child.localName == None:
pass
elif child.localName == "name":
uniforms[GetText(child.childNodes)] = uniform
elif child.localName == "count":
uniform["count"] = int(GetText(child.childNodes))
elif child.localName == "transpose":
uniform["transpose"] = (GetText(child.childNodes) == "true")
else:
if "type" in uniform:
print "utype was:", uniform["type"], " found ", child.localName
raise SyntaxError
type_name = GetValidTypeName(child.localName)
uniform["type"] = type_name
valueText = GetText(child.childNodes).replace(",", " ")
uniform["value"] = [float(t) for t in valueText.split()]
m = MATRIX_RE.search(type_name)
if m:
# Why are these backward from the API?!?!?
TransposeMatrix(uniform["value"], int(m.group(1)))
data = {
"name": os.path.basename(outname),
"model": GetModel(test),
"referenceProgram": shaderInfos[1],
"testProgram": shaderInfos[0],
}
gl_states = test.getElementsByTagName("glstate")
if len(gl_states) > 0:
state = {}
data["state"] = state
for gl_state in gl_states:
for state_name in gl_state.childNodes:
if state_name.localName:
values = {}
for field in state_name.childNodes:
if field.localName:
values[field.localName] = GetText(field.childNodes)
state[state_name.localName] = values
return data
def Process_shaderload(self, test, filename, outname):
"""no need for shaderload tests"""
self.CopyShaders(test, filename, outname)
def Process_extension(self, test, filename, outname):
"""no need for extension tests"""
self.CopyShaders(test, filename, outname)
def Process_createtests(self, test, filename, outname):
Log("createtests Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_GL2Test(self, test, filename, outname):
Log("GL2Test Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_uniformquery(self, test, filename, outname):
Log("uniformquery Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_egl_image_external(self, test, filename, outname):
"""no need for egl_image_external tests"""
self.CopyShaders(test, filename, outname)
def Process_dismount(self, test, filename, outname):
Log("dismount Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_build(self, test, filename, outname):
"""don't need build tests"""
valid_tags = [
["shader", "compstat", "linkstat"],
["vertshader", "fragshader"],
]
CheckForUnknownTags(valid_tags, test)
shader = test.getElementsByTagName("shader")
if not shader:
return None
vs = GetElementText(shader[0], "vertshader")
fs = GetElementText(shader[0], "fragshader")
if vs and vs != "empty":
CopyShader(vs, filename, outname)
if fs and fs != "empty":
CopyShader(fs, filename, outname)
data = {
"name": os.path.basename(outname),
"compstat": bool(GetBoolElement(test, "compstat")),
"linkstat": bool(GetBoolElement(test, "linkstat")),
"testProgram": {
"vertexShader": vs,
"fragmentShader": fs,
},
}
attach = test.getElementsByTagName("attach")
if len(attach) > 0:
data["attachError"] = GetElementText(attach[0], "attacherror")
return data
def Process_coverage(self, test, filename, outname):
Log("coverage Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_attributes(self, test, filename, outname):
Log("attributes Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_fixed(self, test, filename, outname):
"""no need for fixed function tests"""
self.CopyShaders(test, filename, outname)
def main(argv):
"""This is the main function."""
global VERBOSE
parser = OptionParser()
parser.add_option(
"-v", "--verbose", action="store_true",
help="prints more output.")
(options, args) = parser.parse_args(args=argv)
  if len(args) < 1:
    parser.error("expected the path of a .run file to process")
os.chdir(os.path.dirname(__file__) or '.')
VERBOSE = options.verbose
filename = args[0]
test_reader = TestReader(filename)
test_reader.ReadTests(filename)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| WebGL-master | conformance-suites/1.0.2/conformance/ogles/process-ogles2-tests.py |
import codecs
import re
import types
import sys
from constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from constants import encodings, ReparseException
import utils
#Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([str(item) for item in spaceCharacters])
asciiLettersBytes = frozenset([str(item) for item in asciiLetters])
asciiUppercaseBytes = frozenset([str(item) for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([">", "<"])
invalid_unicode_re = re.compile(u"[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
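# The set above lists the Unicode "noncharacters" outside the BMP: the last
# two code points (U+xFFFE / U+xFFFF) of every supplementary plane. Together
# with the BMP ranges baked into invalid_unicode_re they mark input that must
# be reported as a parse error.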
ascii_punctuation_re = re.compile(ur"[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream:
"""Buffering for streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
self.position = [-1,0] #chunk number, offset
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
def seek(self, pos):
assert pos < self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
            offset -= len(self.buffer[i])  # step past this whole chunk
i += 1
self.position = [i, offset]
def read(self, bytes):
if not self.buffer:
return self._readStream(bytes)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(bytes)
else:
return self._readFromBuffer(bytes)
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
def _readStream(self, bytes):
data = self.stream.read(bytes)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
def _readFromBuffer(self, bytes):
remainingBytes = bytes
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
            rv.append(bufferedData[bufferOffset:
                                   bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return "".join(rv)
class HTMLInputStream:
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
        # "Craziness": detect whether this Python build is wide (UCS-4) or
        # narrow (UCS-2), since surrogate handling differs between the two
if len(u"\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile(u"[\uD800-\uDFFF]")
else:
self.reportCharacterErrors = self.characterErrorsUCS2
self.replaceCharactersRegexp = re.compile(u"([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
# List of where new lines occur
self.newLines = [0]
self.charEncoding = (codecName(encoding), "certain")
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
# Encoding Information
#Number of bytes to use when looking for a meta element with
#encoding information
self.numBytesMeta = 512
#Number of bytes to use when using detecting encoding using chardet
self.numBytesChardet = 100
#Encoding to use if no other information can be found
self.defaultEncoding = "windows-1252"
#Detect encoding iff no explicit "transport level" encoding is supplied
if (self.charEncoding[0] is None):
self.charEncoding = self.detectEncoding(parseMeta, chardet)
self.reset()
def reset(self):
self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
'replace')
self.chunk = u""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
#Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
# Otherwise treat source as a string and convert to a file object
if isinstance(source, unicode):
source = source.encode('utf-8')
self.charEncoding = ("utf-8", "certain")
try:
from io import BytesIO
            except ImportError:
# 2to3 converts this line to: from io import StringIO
from cStringIO import StringIO as BytesIO
stream = BytesIO(source)
if (not(hasattr(stream, "tell") and hasattr(stream, "seek")) or
stream is sys.stdin):
stream = BufferedStream(stream)
return stream
def detectEncoding(self, parseMeta=True, chardet=True):
#First look for a BOM
#This will also read past the BOM if present
encoding = self.detectBOM()
confidence = "certain"
#If there is no BOM need to look for meta elements with encoding
#information
if encoding is None and parseMeta:
encoding = self.detectEncodingMeta()
confidence = "tentative"
        #Guess with chardet, if available
if encoding is None and chardet:
confidence = "tentative"
try:
from chardet.universaldetector import UniversalDetector
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = detector.result['encoding']
self.rawStream.seek(0)
except ImportError:
pass
# If all else fails use the default encoding
if encoding is None:
confidence="tentative"
encoding = self.defaultEncoding
#Substitute for equivalent encodings:
encodingSub = {"iso-8859-1":"windows-1252"}
if encoding.lower() in encodingSub:
encoding = encodingSub[encoding.lower()]
return encoding, confidence
def changeEncoding(self, newEncoding):
newEncoding = codecName(newEncoding)
if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
newEncoding = "utf-8"
if newEncoding is None:
return
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certain")
        else:
            oldEncoding = self.charEncoding[0]
            self.rawStream.seek(0)
            self.reset()
            self.charEncoding = (newEncoding, "certain")
            raise ReparseException, "Encoding changed from %s to %s" % (oldEncoding, newEncoding)
def detectBOM(self):
"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
self.rawStream.seek(encoding and seek or 0)
return encoding
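    # Example: for a raw stream beginning "\xff\xfeh\x00i\x00" the 3-byte
    # (UTF-8) and 4-byte (UTF-32) lookups fail, the 2-byte lookup returns
    # 'utf-16-le', and the stream is left positioned at byte 2.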
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
encoding = "utf-8"
return encoding
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count(u'\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind(u'\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line+1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = u""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
#Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
self.reportCharacterErrors(data)
# Replace invalid characters
# Note U+0000 is dealt with in the tokenizer
data = self.replaceCharactersRegexp.sub(u"\ufffd", data)
data = data.replace(u"\r\n", u"\n")
data = data.replace(u"\r", u"\n")
self.chunk = data
self.chunkSize = len(data)
return True
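    # Note: the trailing CR (or lone high surrogate) of a chunk is held back
    # in _bufferedCharacter so that a "\r\n" pair - or a surrogate pair -
    # split across two chunks is still processed as a unit above.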
def characterErrorsUCS4(self, data):
for i in xrange(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
#Someone picked the wrong compile option
#You lose
        skip = False
        for match in invalid_unicode_re.finditer(data):
            if skip:
                # this match is the low half of a surrogate pair that was
                # already handled; skip exactly one match
                skip = False
                continue
codepoint = ord(match.group())
pos = match.start()
#Pretty sure there should be endianness issues here
if utils.isSurrogatePair(data[pos:pos+2]):
#We have a surrogate pair!
char_val = utils.surrogatePairToCodepoint(data[pos:pos+2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite = False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = u"".join([u"\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = u"^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile(u"[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = u"".join(rv)
return r
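    # Example: the tokenizer drains whole runs of characters in one call
    # rather than looping over char(). A sketch:
    #
    #   stream = HTMLInputStream(u"<p>hello world</p>")
    #   stream.charsUntil(frozenset((u"<",)))   # u"" - "<" is the first char
    #   stream.char()                           # u"<"
    #   stream.charsUntil(frozenset((u">",)))   # u"p"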
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not None:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
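    # Example: exactly one character of pushback is supported, e.g.
    #
    #   c = stream.char()
    #   if c not in wanted:    # `wanted` is a hypothetical character set
    #       stream.unget(c)    # the next stream.char() returns c again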
class EncodingBytes(str):
"""String-like object with an associated position and various extra methods
If the position is ever greater than the string length then an exception is
raised"""
def __new__(self, value):
return str.__new__(self, value.lower())
def __init__(self, value):
self._position=-1
def __iter__(self):
return self
def next(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p]
def previous(self):
p = self._position
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
self._position = p = p - 1
return self[p]
def setPosition(self, position):
if self._position >= len(self):
raise StopIteration
self._position = position
def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None
position = property(getPosition, setPosition)
def getCurrentByte(self):
return self[self.position]
currentByte = property(getCurrentByte)
def skip(self, chars=spaceCharactersBytes):
"""Skip past a list of characters"""
p = self.position # use property for the error-checking
while p < len(self):
c = self[p]
if c not in chars:
self._position = p
return c
p += 1
self._position = p
return None
def skipUntil(self, chars):
p = self.position
while p < len(self):
c = self[p]
if c in chars:
self._position = p
return c
p += 1
self._position = p
return None
def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
p = self.position
data = self[p:p+len(bytes)]
rv = data.startswith(bytes)
if rv:
self.position += len(bytes)
return rv
def jumpTo(self, bytes):
"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
newPosition = self[self.position:].find(bytes)
if newPosition > -1:
# XXX: This is ugly, but I can't see a nicer way to fix this.
if self._position == -1:
self._position = 0
self._position += (newPosition + len(bytes)-1)
return True
else:
raise StopIteration
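# Example: EncodingBytes lower-cases its value and keeps a cursor that starts
# *before* the first byte, so it must be advanced before use:
#
#   eb = EncodingBytes("<META charset=UTF-8>")
#   eb.next()                # "<" - step onto the first byte
#   eb.matchBytes("<meta")   # True; the position moves past the match
#   eb.currentByte           # " "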
class EncodingParser(object):
"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
def getEncoding(self):
methodDispatch = (
("<!--",self.handleComment),
("<meta",self.handleMeta),
("</",self.handlePossibleEndTag),
("<!",self.handleOther),
("<?",self.handleOther),
("<",self.handlePossibleStartTag))
for byte in self.data:
keepParsing = True
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing=False
break
if not keepParsing:
break
return self.encoding
def handleComment(self):
"""Skip over comments"""
return self.data.jumpTo("-->")
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
            #if <meta is not followed by whitespace it is not a valid meta
            #element; keep parsing
return True
#We have a valid meta element we want to search for attributes
while True:
#Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == "charset":
tentativeEncoding = attr[1]
codec = codecName(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == "content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
codec = codecName(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
def handlePossibleEndTag(self):
self.data.next()
return self.handlePossibleTag(True)
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
#If the next byte is not an ascii letter either ignore this
#fragment (possible start tag case) or treat it according to
#handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == "<":
#return to the first step in the overall "two step" algorithm
#reprocessing the < byte
data.previous()
else:
#Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
def handleOther(self):
return self.data.jumpTo(">")
def getAttribute(self):
"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset("/"))
# Step 2
if c in (">", None):
return None
# Step 3
attrName = []
attrValue = []
#Step 4 attribute name
while True:
if c == "=" and attrName:
break
elif c in spaceCharactersBytes:
#Step 6!
c = data.skip()
c = data.next()
break
elif c in ("/", ">"):
return "".join(attrName), ""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
            elif c is None:
return None
else:
attrName.append(c)
#Step 5
c = data.next()
#Step 7
if c != "=":
data.previous()
return "".join(attrName), ""
#Step 8
data.next()
#Step 9
c = data.skip()
#Step 10
if c in ("'", '"'):
#10.1
quoteChar = c
while True:
#10.2
c = data.next()
#10.3
if c == quoteChar:
data.next()
return "".join(attrName), "".join(attrValue)
#10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
#10.5
else:
attrValue.append(c)
elif c == ">":
return "".join(attrName), ""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = data.next()
if c in spacesAngleBrackets:
return "".join(attrName), "".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
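# Example: the parser only ever sees the byte prefix of a document, e.g.
#
#   EncodingParser('<meta charset="UTF-8">').getEncoding()   # 'utf-8'
#
# The match is case-insensitive because EncodingBytes lower-cases the whole
# buffer up front; codecName() then maps the spelling to a canonical codec.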
class ContentAttrParser(object):
def __init__(self, data):
self.data = data
def parse(self):
try:
#Check if the attr name is charset
#otherwise return
self.data.jumpTo("charset")
self.data.position += 1
self.data.skip()
if not self.data.currentByte == "=":
#If there is no = sign keep looking for attrs
return None
self.data.position += 1
self.data.skip()
#Look for an encoding between matching quote marks
if self.data.currentByte in ('"', "'"):
quoteMark = self.data.currentByte
self.data.position += 1
oldPosition = self.data.position
if self.data.jumpTo(quoteMark):
return self.data[oldPosition:self.data.position]
else:
return None
else:
#Unquoted value
oldPosition = self.data.position
try:
self.data.skipUntil(spaceCharactersBytes)
return self.data[oldPosition:self.data.position]
except StopIteration:
#Return the whole remaining value
return self.data[oldPosition:]
except StopIteration:
return None
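# Example: ContentAttrParser handles the http-equiv style of declaration:
#
#   ContentAttrParser(EncodingBytes("text/html; charset=ISO-8859-1")).parse()
#   # -> "iso-8859-1" (EncodingBytes lower-cased the input)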
def codecName(encoding):
"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if (encoding is not None and type(encoding) in types.StringTypes):
canonicalName = ascii_punctuation_re.sub("", encoding).lower()
return encodings.get(canonicalName, None)
else:
return None
| WebGL-master | resources/html5lib/src/html5lib/inputstream.py |
import re
from xml.sax.saxutils import escape, unescape
from tokenizer import HTMLTokenizer
from constants import tokenTypes
class HTMLSanitizerMixin(object):
""" sanitization of XHTML+MathML+SVG and of inline style attributes."""
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none']
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
'optimum', 'pattern', 'ping', 'point-size', 'prompt', 'pqg',
'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang']
    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnlines',
        'columnspacing', 'columnspan', 'depth', 'display', 'displaystyle',
        'equalcolumns', 'equalrows', 'fence', 'fontstyle', 'fontweight',
        'frame', 'height', 'linethickness', 'lspace', 'mathbackground',
        'mathcolor', 'mathvariant', 'maxsize', 'minsize', 'other', 'rowalign',
        'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel',
        'selection', 'separator', 'stretchy', 'width', 'xlink:href',
        'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink']
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
'fill-opacity', 'fill-rule', 'font-family', 'font-size',
'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
'opacity', 'orient', 'origin', 'overline-position',
'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
'transform', 'type', 'u1', 'u2', 'underline-position',
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
'y1', 'y2', 'zoomAndPan']
attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc',
'xlink:href', 'xml:base']
svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
'mask', 'stroke']
svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
'set', 'use']
acceptable_css_properties = ['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width']
acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity']
acceptable_protocols = [ 'ed2k', 'ftp', 'http', 'https', 'irc',
'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
'ssh', 'sftp', 'rtsp', 'afs' ]
# subclasses may define their own versions of these constants
allowed_elements = acceptable_elements + mathml_elements + svg_elements
allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
allowed_css_properties = acceptable_css_properties
allowed_css_keywords = acceptable_css_keywords
allowed_svg_properties = acceptable_svg_properties
allowed_protocols = acceptable_protocols
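    # A subclass can tighten the whitelists; a hypothetical sketch:
    #
    #   class CommentSanitizer(HTMLSanitizer):
    #       allowed_elements = ['a', 'b', 'em', 'i', 'p']
    #       allowed_protocols = ['http', 'https', 'mailto']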
# Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
# stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style
# attributes are parsed, and a restricted set, # specified by
# ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
# attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
# in ALLOWED_PROTOCOLS are allowed.
#
# sanitize_html('<script> do_nasty_stuff() </script>')
    # => &lt;script> do_nasty_stuff() &lt;/script>
# sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
# => <a>Click here for $100</a>
def sanitize_token(self, token):
# accommodate filters which use token_type differently
token_type = token["type"]
if token_type in tokenTypes.keys():
token_type = tokenTypes[token_type]
if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]):
if token["name"] in self.allowed_elements:
if token.has_key("data"):
attrs = dict([(name,val) for name,val in
token["data"][::-1]
if name in self.allowed_attributes])
for attr in self.attr_val_is_uri:
if not attrs.has_key(attr):
continue
val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
unescape(attrs[attr])).lower()
#remove replacement characters from unescaped characters
val_unescaped = val_unescaped.replace(u"\ufffd", "")
if (re.match("^[a-z0-9][-+.a-z0-9]*:",val_unescaped) and
(val_unescaped.split(':')[0] not in
self.allowed_protocols)):
del attrs[attr]
for attr in self.svg_attr_val_allows_ref:
if attr in attrs:
attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
' ',
unescape(attrs[attr]))
if (token["name"] in self.svg_allow_local_href and
'xlink:href' in attrs and re.search('^\s*[^#\s].*',
attrs['xlink:href'])):
del attrs['xlink:href']
if attrs.has_key('style'):
attrs['style'] = self.sanitize_css(attrs['style'])
token["data"] = [[name,val] for name,val in attrs.items()]
return token
else:
if token_type == tokenTypes["EndTag"]:
token["data"] = "</%s>" % token["name"]
elif token["data"]:
attrs = ''.join([' %s="%s"' % (k,escape(v)) for k,v in token["data"]])
token["data"] = "<%s%s>" % (token["name"],attrs)
else:
token["data"] = "<%s>" % token["name"]
if token.get("selfClosing"):
token["data"]=token["data"][:-1] + "/>"
if token["type"] in tokenTypes.keys():
token["type"] = "Characters"
else:
token["type"] = tokenTypes["Characters"]
del token["name"]
return token
elif token_type == tokenTypes["Comment"]:
pass
else:
return token
def sanitize_css(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value: continue
if prop.lower() in self.allowed_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin',
'padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$",keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif prop.lower() in self.allowed_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
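    # Example:
    #
    #   self.sanitize_css('color: red; background: url(evil)')
    #   # -> 'color: red;'  (the url() is blanked out first, and the
    #   #     now-empty background value is then dropped)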
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=False, lowercaseAttrName=False, parser=None):
#Change case matching defaults as we only output lowercase html anyway
#This solution doesn't seem ideal...
HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
lowercaseElementName, lowercaseAttrName, parser=parser)
def __iter__(self):
for token in HTMLTokenizer.__iter__(self):
token = self.sanitize_token(token)
if token:
yield token
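# A minimal usage sketch following the html5lib 0.x pattern of passing the
# sanitizing tokenizer into the parser; illustrative only and never called.
def _example_sanitize(markup):
    import html5lib
    parser = html5lib.HTMLParser(tokenizer=HTMLSanitizer)
    return parser.parseFragment(markup)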
| WebGL-master | resources/html5lib/src/html5lib/sanitizer.py |
import string, gettext
_ = gettext.gettext
try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import Set as set
from sets import ImmutableSet as frozenset
EOF = None
E = {
"null-character":
_(u"Null character in input stream, replaced with U+FFFD."),
"invalid-codepoint":
_(u"Invalid codepoint in stream."),
"incorrectly-placed-solidus":
_(u"Solidus (/) incorrectly placed in tag."),
"incorrect-cr-newline-entity":
_(u"Incorrect CR newline entity, replaced with LF."),
"illegal-windows-1252-entity":
_(u"Entity used with illegal number (windows-1252 reference)."),
"cant-convert-numeric-entity":
_(u"Numeric entity couldn't be converted to character "
u"(codepoint U+%(charAsInt)08x)."),
"illegal-codepoint-for-numeric-entity":
_(u"Numeric entity represents an illegal codepoint: "
u"U+%(charAsInt)08x."),
"numeric-entity-without-semicolon":
_(u"Numeric entity didn't end with ';'."),
"expected-numeric-entity-but-got-eof":
_(u"Numeric entity expected. Got end of file instead."),
"expected-numeric-entity":
_(u"Numeric entity expected but none found."),
"named-entity-without-semicolon":
_(u"Named entity didn't end with ';'."),
"expected-named-entity":
_(u"Named entity expected. Got none."),
"attributes-in-end-tag":
_(u"End tag contains unexpected attributes."),
'self-closing-flag-on-end-tag':
_(u"End tag contains unexpected self-closing flag."),
"expected-tag-name-but-got-right-bracket":
_(u"Expected tag name. Got '>' instead."),
"expected-tag-name-but-got-question-mark":
_(u"Expected tag name. Got '?' instead. (HTML doesn't "
u"support processing instructions.)"),
"expected-tag-name":
_(u"Expected tag name. Got something else instead"),
"expected-closing-tag-but-got-right-bracket":
_(u"Expected closing tag. Got '>' instead. Ignoring '</>'."),
"expected-closing-tag-but-got-eof":
_(u"Expected closing tag. Unexpected end of file."),
"expected-closing-tag-but-got-char":
_(u"Expected closing tag. Unexpected character '%(data)s' found."),
"eof-in-tag-name":
_(u"Unexpected end of file in the tag name."),
"expected-attribute-name-but-got-eof":
_(u"Unexpected end of file. Expected attribute name instead."),
"eof-in-attribute-name":
_(u"Unexpected end of file in attribute name."),
"invalid-character-in-attribute-name":
_(u"Invalid chracter in attribute name"),
"duplicate-attribute":
_(u"Dropped duplicate attribute on tag."),
"expected-end-of-tag-name-but-got-eof":
_(u"Unexpected end of file. Expected = or end of tag."),
"expected-attribute-value-but-got-eof":
_(u"Unexpected end of file. Expected attribute value."),
"expected-attribute-value-but-got-right-bracket":
_(u"Expected attribute value. Got '>' instead."),
'equals-in-unquoted-attribute-value':
_(u"Unexpected = in unquoted attribute"),
'unexpected-character-in-unquoted-attribute-value':
_(u"Unexpected character in unquoted attribute"),
"invalid-character-after-attribute-name":
_(u"Unexpected character after attribute name."),
"unexpected-character-after-attribute-value":
_(u"Unexpected character after attribute value."),
"eof-in-attribute-value-double-quote":
_(u"Unexpected end of file in attribute value (\")."),
"eof-in-attribute-value-single-quote":
_(u"Unexpected end of file in attribute value (')."),
"eof-in-attribute-value-no-quotes":
_(u"Unexpected end of file in attribute value."),
"unexpected-EOF-after-solidus-in-tag":
_(u"Unexpected end of file in tag. Expected >"),
"unexpected-character-after-soldius-in-tag":
_(u"Unexpected character after / in tag. Expected >"),
"expected-dashes-or-doctype":
_(u"Expected '--' or 'DOCTYPE'. Not found."),
"unexpected-bang-after-double-dash-in-comment":
_(u"Unexpected ! after -- in comment"),
"unexpected-space-after-double-dash-in-comment":
_(u"Unexpected space after -- in comment"),
"incorrect-comment":
_(u"Incorrect comment."),
"eof-in-comment":
_(u"Unexpected end of file in comment."),
"eof-in-comment-end-dash":
_(u"Unexpected end of file in comment (-)"),
"unexpected-dash-after-double-dash-in-comment":
_(u"Unexpected '-' after '--' found in comment."),
"eof-in-comment-double-dash":
_(u"Unexpected end of file in comment (--)."),
"eof-in-comment-end-space-state":
_(u"Unexpected end of file in comment."),
"eof-in-comment-end-bang-state":
_(u"Unexpected end of file in comment."),
"unexpected-char-in-comment":
_(u"Unexpected character in comment found."),
"need-space-after-doctype":
_(u"No space after literal string 'DOCTYPE'."),
"expected-doctype-name-but-got-right-bracket":
_(u"Unexpected > character. Expected DOCTYPE name."),
"expected-doctype-name-but-got-eof":
_(u"Unexpected end of file. Expected DOCTYPE name."),
"eof-in-doctype-name":
_(u"Unexpected end of file in DOCTYPE name."),
"eof-in-doctype":
_(u"Unexpected end of file in DOCTYPE."),
"expected-space-or-right-bracket-in-doctype":
_(u"Expected space or '>'. Got '%(data)s'"),
"unexpected-end-of-doctype":
_(u"Unexpected end of DOCTYPE."),
"unexpected-char-in-doctype":
_(u"Unexpected character in DOCTYPE."),
"eof-in-innerhtml":
_(u"XXX innerHTML EOF"),
"unexpected-doctype":
_(u"Unexpected DOCTYPE. Ignored."),
"non-html-root":
_(u"html needs to be the first start tag."),
"expected-doctype-but-got-eof":
_(u"Unexpected End of file. Expected DOCTYPE."),
"unknown-doctype":
_(u"Erroneous DOCTYPE."),
"expected-doctype-but-got-chars":
_(u"Unexpected non-space characters. Expected DOCTYPE."),
"expected-doctype-but-got-start-tag":
_(u"Unexpected start tag (%(name)s). Expected DOCTYPE."),
"expected-doctype-but-got-end-tag":
_(u"Unexpected end tag (%(name)s). Expected DOCTYPE."),
"end-tag-after-implied-root":
_(u"Unexpected end tag (%(name)s) after the (implied) root element."),
"expected-named-closing-tag-but-got-eof":
_(u"Unexpected end of file. Expected end tag (%(name)s)."),
"two-heads-are-not-better-than-one":
_(u"Unexpected start tag head in existing head. Ignored."),
"unexpected-end-tag":
_(u"Unexpected end tag (%(name)s). Ignored."),
"unexpected-start-tag-out-of-my-head":
_(u"Unexpected start tag (%(name)s) that can be in head. Moved."),
"unexpected-start-tag":
_(u"Unexpected start tag (%(name)s)."),
"missing-end-tag":
_(u"Missing end tag (%(name)s)."),
"missing-end-tags":
_(u"Missing end tags (%(name)s)."),
"unexpected-start-tag-implies-end-tag":
_(u"Unexpected start tag (%(startName)s) "
u"implies end tag (%(endName)s)."),
"unexpected-start-tag-treated-as":
_(u"Unexpected start tag (%(originalName)s). Treated as %(newName)s."),
"deprecated-tag":
_(u"Unexpected start tag %(name)s. Don't use it!"),
"unexpected-start-tag-ignored":
_(u"Unexpected start tag %(name)s. Ignored."),
"expected-one-end-tag-but-got-another":
_(u"Unexpected end tag (%(gotName)s). "
u"Missing end tag (%(expectedName)s)."),
"end-tag-too-early":
_(u"End tag (%(name)s) seen too early. Expected other end tag."),
"end-tag-too-early-named":
_(u"Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."),
"end-tag-too-early-ignored":
_(u"End tag (%(name)s) seen too early. Ignored."),
"adoption-agency-1.1":
_(u"End tag (%(name)s) violates step 1, "
u"paragraph 1 of the adoption agency algorithm."),
"adoption-agency-1.2":
_(u"End tag (%(name)s) violates step 1, "
u"paragraph 2 of the adoption agency algorithm."),
"adoption-agency-1.3":
_(u"End tag (%(name)s) violates step 1, "
u"paragraph 3 of the adoption agency algorithm."),
"unexpected-end-tag-treated-as":
_(u"Unexpected end tag (%(originalName)s). Treated as %(newName)s."),
"no-end-tag":
_(u"This element (%(name)s) has no end tag."),
"unexpected-implied-end-tag-in-table":
_(u"Unexpected implied end tag (%(name)s) in the table phase."),
"unexpected-implied-end-tag-in-table-body":
_(u"Unexpected implied end tag (%(name)s) in the table body phase."),
"unexpected-char-implies-table-voodoo":
_(u"Unexpected non-space characters in "
u"table context caused voodoo mode."),
"unexpected-hidden-input-in-table":
_(u"Unexpected input with type hidden in table context."),
"unexpected-form-in-table":
_(u"Unexpected form in table context."),
"unexpected-start-tag-implies-table-voodoo":
_(u"Unexpected start tag (%(name)s) in "
u"table context caused voodoo mode."),
"unexpected-end-tag-implies-table-voodoo":
_(u"Unexpected end tag (%(name)s) in "
u"table context caused voodoo mode."),
"unexpected-cell-in-table-body":
_(u"Unexpected table cell start tag (%(name)s) "
u"in the table body phase."),
"unexpected-cell-end-tag":
_(u"Got table cell end tag (%(name)s) "
u"while required end tags are missing."),
"unexpected-end-tag-in-table-body":
_(u"Unexpected end tag (%(name)s) in the table body phase. Ignored."),
"unexpected-implied-end-tag-in-table-row":
_(u"Unexpected implied end tag (%(name)s) in the table row phase."),
"unexpected-end-tag-in-table-row":
_(u"Unexpected end tag (%(name)s) in the table row phase. Ignored."),
"unexpected-select-in-select":
_(u"Unexpected select start tag in the select phase "
u"treated as select end tag."),
"unexpected-input-in-select":
_(u"Unexpected input start tag in the select phase."),
"unexpected-start-tag-in-select":
_(u"Unexpected start tag token (%(name)s in the select phase. "
u"Ignored."),
"unexpected-end-tag-in-select":
_(u"Unexpected end tag (%(name)s) in the select phase. Ignored."),
"unexpected-table-element-start-tag-in-select-in-table":
_(u"Unexpected table element start tag (%(name)s) in the select in table phase."),
"unexpected-table-element-end-tag-in-select-in-table":
_(u"Unexpected table element end tag (%(name)s) in the select in table phase."),
"unexpected-char-after-body":
_(u"Unexpected non-space characters in the after body phase."),
"unexpected-start-tag-after-body":
_(u"Unexpected start tag token (%(name)s)"
u" in the after body phase."),
"unexpected-end-tag-after-body":
_(u"Unexpected end tag token (%(name)s)"
u" in the after body phase."),
"unexpected-char-in-frameset":
_(u"Unepxected characters in the frameset phase. Characters ignored."),
"unexpected-start-tag-in-frameset":
_(u"Unexpected start tag token (%(name)s)"
u" in the frameset phase. Ignored."),
"unexpected-frameset-in-frameset-innerhtml":
_(u"Unexpected end tag token (frameset) "
u"in the frameset phase (innerHTML)."),
"unexpected-end-tag-in-frameset":
_(u"Unexpected end tag token (%(name)s)"
u" in the frameset phase. Ignored."),
"unexpected-char-after-frameset":
_(u"Unexpected non-space characters in the "
u"after frameset phase. Ignored."),
"unexpected-start-tag-after-frameset":
_(u"Unexpected start tag (%(name)s)"
u" in the after frameset phase. Ignored."),
"unexpected-end-tag-after-frameset":
_(u"Unexpected end tag (%(name)s)"
u" in the after frameset phase. Ignored."),
"unexpected-end-tag-after-body-innerhtml":
_(u"Unexpected end tag after body(innerHtml)"),
"expected-eof-but-got-char":
_(u"Unexpected non-space characters. Expected end of file."),
"expected-eof-but-got-start-tag":
_(u"Unexpected start tag (%(name)s)"
u". Expected end of file."),
"expected-eof-but-got-end-tag":
_(u"Unexpected end tag (%(name)s)"
u". Expected end of file."),
"eof-in-table":
_(u"Unexpected end of file. Expected table content."),
"eof-in-select":
_(u"Unexpected end of file. Expected select content."),
"eof-in-frameset":
_(u"Unexpected end of file. Expected frameset content."),
"eof-in-script-in-script":
_(u"Unexpected end of file. Expected script content."),
"eof-in-foreign-lands":
_(u"Unexpected end of file. Expected foreign content"),
"non-void-element-with-trailing-solidus":
_(u"Trailing solidus not allowed on element %(name)s"),
"unexpected-html-element-in-foreign-content":
_(u"Element %(name)s not allowed in a non-html context"),
"unexpected-end-tag-before-html":
_(u"Unexpected end tag (%(name)s) before html."),
"XXX-undefined-error":
(u"Undefined error (this sucks and should be fixed)"),
}
namespaces = {
"html":"http://www.w3.org/1999/xhtml",
"mathml":"http://www.w3.org/1998/Math/MathML",
"svg":"http://www.w3.org/2000/svg",
"xlink":"http://www.w3.org/1999/xlink",
"xml":"http://www.w3.org/XML/1998/namespace",
"xmlns":"http://www.w3.org/2000/xmlns/"
}
scopingElements = frozenset((
(namespaces["html"], "applet"),
(namespaces["html"], "caption"),
(namespaces["html"], "html"),
(namespaces["html"], "marquee"),
(namespaces["html"], "object"),
(namespaces["html"], "table"),
(namespaces["html"], "td"),
(namespaces["html"], "th"),
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext"),
(namespaces["mathml"], "annotation-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title"),
))
formattingElements = frozenset((
(namespaces["html"], "a"),
(namespaces["html"], "b"),
(namespaces["html"], "big"),
(namespaces["html"], "code"),
(namespaces["html"], "em"),
(namespaces["html"], "font"),
(namespaces["html"], "i"),
(namespaces["html"], "nobr"),
(namespaces["html"], "s"),
(namespaces["html"], "small"),
(namespaces["html"], "strike"),
(namespaces["html"], "strong"),
(namespaces["html"], "tt"),
(namespaces["html"], "u")
))
specialElements = frozenset((
(namespaces["html"], "address"),
(namespaces["html"], "applet"),
(namespaces["html"], "area"),
(namespaces["html"], "article"),
(namespaces["html"], "aside"),
(namespaces["html"], "base"),
(namespaces["html"], "basefont"),
(namespaces["html"], "bgsound"),
(namespaces["html"], "blockquote"),
(namespaces["html"], "body"),
(namespaces["html"], "br"),
(namespaces["html"], "button"),
(namespaces["html"], "caption"),
(namespaces["html"], "center"),
(namespaces["html"], "col"),
(namespaces["html"], "colgroup"),
(namespaces["html"], "command"),
(namespaces["html"], "dd"),
(namespaces["html"], "details"),
(namespaces["html"], "dir"),
(namespaces["html"], "div"),
(namespaces["html"], "dl"),
(namespaces["html"], "dt"),
(namespaces["html"], "embed"),
(namespaces["html"], "fieldset"),
(namespaces["html"], "figure"),
(namespaces["html"], "footer"),
(namespaces["html"], "form"),
(namespaces["html"], "frame"),
(namespaces["html"], "frameset"),
(namespaces["html"], "h1"),
(namespaces["html"], "h2"),
(namespaces["html"], "h3"),
(namespaces["html"], "h4"),
(namespaces["html"], "h5"),
(namespaces["html"], "h6"),
(namespaces["html"], "head"),
(namespaces["html"], "header"),
(namespaces["html"], "hr"),
(namespaces["html"], "html"),
(namespaces["html"], "iframe"),
# Note that image is commented out in the spec as "this isn't an
# element that can end up on the stack, so it doesn't matter,"
(namespaces["html"], "image"),
(namespaces["html"], "img"),
(namespaces["html"], "input"),
(namespaces["html"], "isindex"),
(namespaces["html"], "li"),
(namespaces["html"], "link"),
(namespaces["html"], "listing"),
(namespaces["html"], "marquee"),
(namespaces["html"], "menu"),
(namespaces["html"], "meta"),
(namespaces["html"], "nav"),
(namespaces["html"], "noembed"),
(namespaces["html"], "noframes"),
(namespaces["html"], "noscript"),
(namespaces["html"], "object"),
(namespaces["html"], "ol"),
(namespaces["html"], "p"),
(namespaces["html"], "param"),
(namespaces["html"], "plaintext"),
(namespaces["html"], "pre"),
(namespaces["html"], "script"),
(namespaces["html"], "section"),
(namespaces["html"], "select"),
(namespaces["html"], "style"),
(namespaces["html"], "table"),
(namespaces["html"], "tbody"),
(namespaces["html"], "td"),
(namespaces["html"], "textarea"),
(namespaces["html"], "tfoot"),
(namespaces["html"], "th"),
(namespaces["html"], "thead"),
(namespaces["html"], "title"),
(namespaces["html"], "tr"),
(namespaces["html"], "ul"),
(namespaces["html"], "wbr"),
(namespaces["html"], "xmp"),
(namespaces["svg"], "foreignObject")
))
htmlIntegrationPointElements = frozenset((
(namespaces["mathml"], "annotaion-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title")
))
mathmlTextIntegrationPointElements = frozenset((
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext")
))
spaceCharacters = frozenset((
u"\t",
u"\n",
u"\u000C",
u" ",
u"\r"
))
tableInsertModeElements = frozenset((
"table",
"tbody",
"tfoot",
"thead",
"tr"
))
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)
asciiUpper2Lower = dict([(ord(c),ord(c.lower()))
for c in string.ascii_uppercase])
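# Example: a mapping of code points for use with unicode.translate, giving
# fast locale-independent ASCII lower-casing:
#
#   u"<DIV Class=X>".translate(asciiUpper2Lower)   # u"<div class=x>"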
# Heading elements need to be ordered
headingElements = (
"h1",
"h2",
"h3",
"h4",
"h5",
"h6"
)
voidElements = frozenset((
"base",
"command",
"event-source",
"link",
"meta",
"hr",
"br",
"img",
"embed",
"param",
"area",
"col",
"input",
"source",
"track"
))
cdataElements = frozenset(('title', 'textarea'))
rcdataElements = frozenset((
'style',
'script',
'xmp',
'iframe',
'noembed',
'noframes',
'noscript'
))
booleanAttributes = {
"": frozenset(("irrelevant",)),
"style": frozenset(("scoped",)),
"img": frozenset(("ismap",)),
"audio": frozenset(("autoplay","controls")),
"video": frozenset(("autoplay","controls")),
"script": frozenset(("defer", "async")),
"details": frozenset(("open",)),
"datagrid": frozenset(("multiple", "disabled")),
"command": frozenset(("hidden", "disabled", "checked", "default")),
"hr": frozenset(("noshade")),
"menu": frozenset(("autosubmit",)),
"fieldset": frozenset(("disabled", "readonly")),
"option": frozenset(("disabled", "readonly", "selected")),
"optgroup": frozenset(("disabled", "readonly")),
"button": frozenset(("disabled", "autofocus")),
"input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")),
"select": frozenset(("disabled", "readonly", "autofocus", "multiple")),
"output": frozenset(("disabled", "readonly")),
}
# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
# therefore can't be a frozenset.
entitiesWindows1252 = (
8364, # 0x80 0x20AC EURO SIGN
65533, # 0x81 UNDEFINED
8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK
402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK
8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK
8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS
8224, # 0x86 0x2020 DAGGER
8225, # 0x87 0x2021 DOUBLE DAGGER
710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT
8240, # 0x89 0x2030 PER MILLE SIGN
352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON
8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK
338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE
65533, # 0x8D UNDEFINED
381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON
65533, # 0x8F UNDEFINED
65533, # 0x90 UNDEFINED
8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK
8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK
8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK
8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK
8226, # 0x95 0x2022 BULLET
8211, # 0x96 0x2013 EN DASH
8212, # 0x97 0x2014 EM DASH
732, # 0x98 0x02DC SMALL TILDE
8482, # 0x99 0x2122 TRADE MARK SIGN
353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON
8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE
65533, # 0x9D UNDEFINED
382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON
376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS
)
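# Illustrative usage (added example, not part of upstream html5lib): the
# tuple is indexed by (byte - 0x80) to remap C1-range character references
# to their Windows-1252 meanings, e.g. &#x93; resolves to codepoint 8220
# (U+201C LEFT DOUBLE QUOTATION MARK):
#   >>> entitiesWindows1252[0x93 - 0x80]
#   8220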
xmlEntities = frozenset(('lt;', 'gt;', 'amp;', 'apos;', 'quot;'))
entities = {
"AElig": u"\xc6",
"AElig;": u"\xc6",
"AMP": u"&",
"AMP;": u"&",
"Aacute": u"\xc1",
"Aacute;": u"\xc1",
"Abreve;": u"\u0102",
"Acirc": u"\xc2",
"Acirc;": u"\xc2",
"Acy;": u"\u0410",
"Afr;": u"\U0001d504",
"Agrave": u"\xc0",
"Agrave;": u"\xc0",
"Alpha;": u"\u0391",
"Amacr;": u"\u0100",
"And;": u"\u2a53",
"Aogon;": u"\u0104",
"Aopf;": u"\U0001d538",
"ApplyFunction;": u"\u2061",
"Aring": u"\xc5",
"Aring;": u"\xc5",
"Ascr;": u"\U0001d49c",
"Assign;": u"\u2254",
"Atilde": u"\xc3",
"Atilde;": u"\xc3",
"Auml": u"\xc4",
"Auml;": u"\xc4",
"Backslash;": u"\u2216",
"Barv;": u"\u2ae7",
"Barwed;": u"\u2306",
"Bcy;": u"\u0411",
"Because;": u"\u2235",
"Bernoullis;": u"\u212c",
"Beta;": u"\u0392",
"Bfr;": u"\U0001d505",
"Bopf;": u"\U0001d539",
"Breve;": u"\u02d8",
"Bscr;": u"\u212c",
"Bumpeq;": u"\u224e",
"CHcy;": u"\u0427",
"COPY": u"\xa9",
"COPY;": u"\xa9",
"Cacute;": u"\u0106",
"Cap;": u"\u22d2",
"CapitalDifferentialD;": u"\u2145",
"Cayleys;": u"\u212d",
"Ccaron;": u"\u010c",
"Ccedil": u"\xc7",
"Ccedil;": u"\xc7",
"Ccirc;": u"\u0108",
"Cconint;": u"\u2230",
"Cdot;": u"\u010a",
"Cedilla;": u"\xb8",
"CenterDot;": u"\xb7",
"Cfr;": u"\u212d",
"Chi;": u"\u03a7",
"CircleDot;": u"\u2299",
"CircleMinus;": u"\u2296",
"CirclePlus;": u"\u2295",
"CircleTimes;": u"\u2297",
"ClockwiseContourIntegral;": u"\u2232",
"CloseCurlyDoubleQuote;": u"\u201d",
"CloseCurlyQuote;": u"\u2019",
"Colon;": u"\u2237",
"Colone;": u"\u2a74",
"Congruent;": u"\u2261",
"Conint;": u"\u222f",
"ContourIntegral;": u"\u222e",
"Copf;": u"\u2102",
"Coproduct;": u"\u2210",
"CounterClockwiseContourIntegral;": u"\u2233",
"Cross;": u"\u2a2f",
"Cscr;": u"\U0001d49e",
"Cup;": u"\u22d3",
"CupCap;": u"\u224d",
"DD;": u"\u2145",
"DDotrahd;": u"\u2911",
"DJcy;": u"\u0402",
"DScy;": u"\u0405",
"DZcy;": u"\u040f",
"Dagger;": u"\u2021",
"Darr;": u"\u21a1",
"Dashv;": u"\u2ae4",
"Dcaron;": u"\u010e",
"Dcy;": u"\u0414",
"Del;": u"\u2207",
"Delta;": u"\u0394",
"Dfr;": u"\U0001d507",
"DiacriticalAcute;": u"\xb4",
"DiacriticalDot;": u"\u02d9",
"DiacriticalDoubleAcute;": u"\u02dd",
"DiacriticalGrave;": u"`",
"DiacriticalTilde;": u"\u02dc",
"Diamond;": u"\u22c4",
"DifferentialD;": u"\u2146",
"Dopf;": u"\U0001d53b",
"Dot;": u"\xa8",
"DotDot;": u"\u20dc",
"DotEqual;": u"\u2250",
"DoubleContourIntegral;": u"\u222f",
"DoubleDot;": u"\xa8",
"DoubleDownArrow;": u"\u21d3",
"DoubleLeftArrow;": u"\u21d0",
"DoubleLeftRightArrow;": u"\u21d4",
"DoubleLeftTee;": u"\u2ae4",
"DoubleLongLeftArrow;": u"\u27f8",
"DoubleLongLeftRightArrow;": u"\u27fa",
"DoubleLongRightArrow;": u"\u27f9",
"DoubleRightArrow;": u"\u21d2",
"DoubleRightTee;": u"\u22a8",
"DoubleUpArrow;": u"\u21d1",
"DoubleUpDownArrow;": u"\u21d5",
"DoubleVerticalBar;": u"\u2225",
"DownArrow;": u"\u2193",
"DownArrowBar;": u"\u2913",
"DownArrowUpArrow;": u"\u21f5",
"DownBreve;": u"\u0311",
"DownLeftRightVector;": u"\u2950",
"DownLeftTeeVector;": u"\u295e",
"DownLeftVector;": u"\u21bd",
"DownLeftVectorBar;": u"\u2956",
"DownRightTeeVector;": u"\u295f",
"DownRightVector;": u"\u21c1",
"DownRightVectorBar;": u"\u2957",
"DownTee;": u"\u22a4",
"DownTeeArrow;": u"\u21a7",
"Downarrow;": u"\u21d3",
"Dscr;": u"\U0001d49f",
"Dstrok;": u"\u0110",
"ENG;": u"\u014a",
"ETH": u"\xd0",
"ETH;": u"\xd0",
"Eacute": u"\xc9",
"Eacute;": u"\xc9",
"Ecaron;": u"\u011a",
"Ecirc": u"\xca",
"Ecirc;": u"\xca",
"Ecy;": u"\u042d",
"Edot;": u"\u0116",
"Efr;": u"\U0001d508",
"Egrave": u"\xc8",
"Egrave;": u"\xc8",
"Element;": u"\u2208",
"Emacr;": u"\u0112",
"EmptySmallSquare;": u"\u25fb",
"EmptyVerySmallSquare;": u"\u25ab",
"Eogon;": u"\u0118",
"Eopf;": u"\U0001d53c",
"Epsilon;": u"\u0395",
"Equal;": u"\u2a75",
"EqualTilde;": u"\u2242",
"Equilibrium;": u"\u21cc",
"Escr;": u"\u2130",
"Esim;": u"\u2a73",
"Eta;": u"\u0397",
"Euml": u"\xcb",
"Euml;": u"\xcb",
"Exists;": u"\u2203",
"ExponentialE;": u"\u2147",
"Fcy;": u"\u0424",
"Ffr;": u"\U0001d509",
"FilledSmallSquare;": u"\u25fc",
"FilledVerySmallSquare;": u"\u25aa",
"Fopf;": u"\U0001d53d",
"ForAll;": u"\u2200",
"Fouriertrf;": u"\u2131",
"Fscr;": u"\u2131",
"GJcy;": u"\u0403",
"GT": u">",
"GT;": u">",
"Gamma;": u"\u0393",
"Gammad;": u"\u03dc",
"Gbreve;": u"\u011e",
"Gcedil;": u"\u0122",
"Gcirc;": u"\u011c",
"Gcy;": u"\u0413",
"Gdot;": u"\u0120",
"Gfr;": u"\U0001d50a",
"Gg;": u"\u22d9",
"Gopf;": u"\U0001d53e",
"GreaterEqual;": u"\u2265",
"GreaterEqualLess;": u"\u22db",
"GreaterFullEqual;": u"\u2267",
"GreaterGreater;": u"\u2aa2",
"GreaterLess;": u"\u2277",
"GreaterSlantEqual;": u"\u2a7e",
"GreaterTilde;": u"\u2273",
"Gscr;": u"\U0001d4a2",
"Gt;": u"\u226b",
"HARDcy;": u"\u042a",
"Hacek;": u"\u02c7",
"Hat;": u"^",
"Hcirc;": u"\u0124",
"Hfr;": u"\u210c",
"HilbertSpace;": u"\u210b",
"Hopf;": u"\u210d",
"HorizontalLine;": u"\u2500",
"Hscr;": u"\u210b",
"Hstrok;": u"\u0126",
"HumpDownHump;": u"\u224e",
"HumpEqual;": u"\u224f",
"IEcy;": u"\u0415",
"IJlig;": u"\u0132",
"IOcy;": u"\u0401",
"Iacute": u"\xcd",
"Iacute;": u"\xcd",
"Icirc": u"\xce",
"Icirc;": u"\xce",
"Icy;": u"\u0418",
"Idot;": u"\u0130",
"Ifr;": u"\u2111",
"Igrave": u"\xcc",
"Igrave;": u"\xcc",
"Im;": u"\u2111",
"Imacr;": u"\u012a",
"ImaginaryI;": u"\u2148",
"Implies;": u"\u21d2",
"Int;": u"\u222c",
"Integral;": u"\u222b",
"Intersection;": u"\u22c2",
"InvisibleComma;": u"\u2063",
"InvisibleTimes;": u"\u2062",
"Iogon;": u"\u012e",
"Iopf;": u"\U0001d540",
"Iota;": u"\u0399",
"Iscr;": u"\u2110",
"Itilde;": u"\u0128",
"Iukcy;": u"\u0406",
"Iuml": u"\xcf",
"Iuml;": u"\xcf",
"Jcirc;": u"\u0134",
"Jcy;": u"\u0419",
"Jfr;": u"\U0001d50d",
"Jopf;": u"\U0001d541",
"Jscr;": u"\U0001d4a5",
"Jsercy;": u"\u0408",
"Jukcy;": u"\u0404",
"KHcy;": u"\u0425",
"KJcy;": u"\u040c",
"Kappa;": u"\u039a",
"Kcedil;": u"\u0136",
"Kcy;": u"\u041a",
"Kfr;": u"\U0001d50e",
"Kopf;": u"\U0001d542",
"Kscr;": u"\U0001d4a6",
"LJcy;": u"\u0409",
"LT": u"<",
"LT;": u"<",
"Lacute;": u"\u0139",
"Lambda;": u"\u039b",
"Lang;": u"\u27ea",
"Laplacetrf;": u"\u2112",
"Larr;": u"\u219e",
"Lcaron;": u"\u013d",
"Lcedil;": u"\u013b",
"Lcy;": u"\u041b",
"LeftAngleBracket;": u"\u27e8",
"LeftArrow;": u"\u2190",
"LeftArrowBar;": u"\u21e4",
"LeftArrowRightArrow;": u"\u21c6",
"LeftCeiling;": u"\u2308",
"LeftDoubleBracket;": u"\u27e6",
"LeftDownTeeVector;": u"\u2961",
"LeftDownVector;": u"\u21c3",
"LeftDownVectorBar;": u"\u2959",
"LeftFloor;": u"\u230a",
"LeftRightArrow;": u"\u2194",
"LeftRightVector;": u"\u294e",
"LeftTee;": u"\u22a3",
"LeftTeeArrow;": u"\u21a4",
"LeftTeeVector;": u"\u295a",
"LeftTriangle;": u"\u22b2",
"LeftTriangleBar;": u"\u29cf",
"LeftTriangleEqual;": u"\u22b4",
"LeftUpDownVector;": u"\u2951",
"LeftUpTeeVector;": u"\u2960",
"LeftUpVector;": u"\u21bf",
"LeftUpVectorBar;": u"\u2958",
"LeftVector;": u"\u21bc",
"LeftVectorBar;": u"\u2952",
"Leftarrow;": u"\u21d0",
"Leftrightarrow;": u"\u21d4",
"LessEqualGreater;": u"\u22da",
"LessFullEqual;": u"\u2266",
"LessGreater;": u"\u2276",
"LessLess;": u"\u2aa1",
"LessSlantEqual;": u"\u2a7d",
"LessTilde;": u"\u2272",
"Lfr;": u"\U0001d50f",
"Ll;": u"\u22d8",
"Lleftarrow;": u"\u21da",
"Lmidot;": u"\u013f",
"LongLeftArrow;": u"\u27f5",
"LongLeftRightArrow;": u"\u27f7",
"LongRightArrow;": u"\u27f6",
"Longleftarrow;": u"\u27f8",
"Longleftrightarrow;": u"\u27fa",
"Longrightarrow;": u"\u27f9",
"Lopf;": u"\U0001d543",
"LowerLeftArrow;": u"\u2199",
"LowerRightArrow;": u"\u2198",
"Lscr;": u"\u2112",
"Lsh;": u"\u21b0",
"Lstrok;": u"\u0141",
"Lt;": u"\u226a",
"Map;": u"\u2905",
"Mcy;": u"\u041c",
"MediumSpace;": u"\u205f",
"Mellintrf;": u"\u2133",
"Mfr;": u"\U0001d510",
"MinusPlus;": u"\u2213",
"Mopf;": u"\U0001d544",
"Mscr;": u"\u2133",
"Mu;": u"\u039c",
"NJcy;": u"\u040a",
"Nacute;": u"\u0143",
"Ncaron;": u"\u0147",
"Ncedil;": u"\u0145",
"Ncy;": u"\u041d",
"NegativeMediumSpace;": u"\u200b",
"NegativeThickSpace;": u"\u200b",
"NegativeThinSpace;": u"\u200b",
"NegativeVeryThinSpace;": u"\u200b",
"NestedGreaterGreater;": u"\u226b",
"NestedLessLess;": u"\u226a",
"NewLine;": u"\n",
"Nfr;": u"\U0001d511",
"NoBreak;": u"\u2060",
"NonBreakingSpace;": u"\xa0",
"Nopf;": u"\u2115",
"Not;": u"\u2aec",
"NotCongruent;": u"\u2262",
"NotCupCap;": u"\u226d",
"NotDoubleVerticalBar;": u"\u2226",
"NotElement;": u"\u2209",
"NotEqual;": u"\u2260",
"NotEqualTilde;": u"\u2242\u0338",
"NotExists;": u"\u2204",
"NotGreater;": u"\u226f",
"NotGreaterEqual;": u"\u2271",
"NotGreaterFullEqual;": u"\u2267\u0338",
"NotGreaterGreater;": u"\u226b\u0338",
"NotGreaterLess;": u"\u2279",
"NotGreaterSlantEqual;": u"\u2a7e\u0338",
"NotGreaterTilde;": u"\u2275",
"NotHumpDownHump;": u"\u224e\u0338",
"NotHumpEqual;": u"\u224f\u0338",
"NotLeftTriangle;": u"\u22ea",
"NotLeftTriangleBar;": u"\u29cf\u0338",
"NotLeftTriangleEqual;": u"\u22ec",
"NotLess;": u"\u226e",
"NotLessEqual;": u"\u2270",
"NotLessGreater;": u"\u2278",
"NotLessLess;": u"\u226a\u0338",
"NotLessSlantEqual;": u"\u2a7d\u0338",
"NotLessTilde;": u"\u2274",
"NotNestedGreaterGreater;": u"\u2aa2\u0338",
"NotNestedLessLess;": u"\u2aa1\u0338",
"NotPrecedes;": u"\u2280",
"NotPrecedesEqual;": u"\u2aaf\u0338",
"NotPrecedesSlantEqual;": u"\u22e0",
"NotReverseElement;": u"\u220c",
"NotRightTriangle;": u"\u22eb",
"NotRightTriangleBar;": u"\u29d0\u0338",
"NotRightTriangleEqual;": u"\u22ed",
"NotSquareSubset;": u"\u228f\u0338",
"NotSquareSubsetEqual;": u"\u22e2",
"NotSquareSuperset;": u"\u2290\u0338",
"NotSquareSupersetEqual;": u"\u22e3",
"NotSubset;": u"\u2282\u20d2",
"NotSubsetEqual;": u"\u2288",
"NotSucceeds;": u"\u2281",
"NotSucceedsEqual;": u"\u2ab0\u0338",
"NotSucceedsSlantEqual;": u"\u22e1",
"NotSucceedsTilde;": u"\u227f\u0338",
"NotSuperset;": u"\u2283\u20d2",
"NotSupersetEqual;": u"\u2289",
"NotTilde;": u"\u2241",
"NotTildeEqual;": u"\u2244",
"NotTildeFullEqual;": u"\u2247",
"NotTildeTilde;": u"\u2249",
"NotVerticalBar;": u"\u2224",
"Nscr;": u"\U0001d4a9",
"Ntilde": u"\xd1",
"Ntilde;": u"\xd1",
"Nu;": u"\u039d",
"OElig;": u"\u0152",
"Oacute": u"\xd3",
"Oacute;": u"\xd3",
"Ocirc": u"\xd4",
"Ocirc;": u"\xd4",
"Ocy;": u"\u041e",
"Odblac;": u"\u0150",
"Ofr;": u"\U0001d512",
"Ograve": u"\xd2",
"Ograve;": u"\xd2",
"Omacr;": u"\u014c",
"Omega;": u"\u03a9",
"Omicron;": u"\u039f",
"Oopf;": u"\U0001d546",
"OpenCurlyDoubleQuote;": u"\u201c",
"OpenCurlyQuote;": u"\u2018",
"Or;": u"\u2a54",
"Oscr;": u"\U0001d4aa",
"Oslash": u"\xd8",
"Oslash;": u"\xd8",
"Otilde": u"\xd5",
"Otilde;": u"\xd5",
"Otimes;": u"\u2a37",
"Ouml": u"\xd6",
"Ouml;": u"\xd6",
"OverBar;": u"\u203e",
"OverBrace;": u"\u23de",
"OverBracket;": u"\u23b4",
"OverParenthesis;": u"\u23dc",
"PartialD;": u"\u2202",
"Pcy;": u"\u041f",
"Pfr;": u"\U0001d513",
"Phi;": u"\u03a6",
"Pi;": u"\u03a0",
"PlusMinus;": u"\xb1",
"Poincareplane;": u"\u210c",
"Popf;": u"\u2119",
"Pr;": u"\u2abb",
"Precedes;": u"\u227a",
"PrecedesEqual;": u"\u2aaf",
"PrecedesSlantEqual;": u"\u227c",
"PrecedesTilde;": u"\u227e",
"Prime;": u"\u2033",
"Product;": u"\u220f",
"Proportion;": u"\u2237",
"Proportional;": u"\u221d",
"Pscr;": u"\U0001d4ab",
"Psi;": u"\u03a8",
"QUOT": u"\"",
"QUOT;": u"\"",
"Qfr;": u"\U0001d514",
"Qopf;": u"\u211a",
"Qscr;": u"\U0001d4ac",
"RBarr;": u"\u2910",
"REG": u"\xae",
"REG;": u"\xae",
"Racute;": u"\u0154",
"Rang;": u"\u27eb",
"Rarr;": u"\u21a0",
"Rarrtl;": u"\u2916",
"Rcaron;": u"\u0158",
"Rcedil;": u"\u0156",
"Rcy;": u"\u0420",
"Re;": u"\u211c",
"ReverseElement;": u"\u220b",
"ReverseEquilibrium;": u"\u21cb",
"ReverseUpEquilibrium;": u"\u296f",
"Rfr;": u"\u211c",
"Rho;": u"\u03a1",
"RightAngleBracket;": u"\u27e9",
"RightArrow;": u"\u2192",
"RightArrowBar;": u"\u21e5",
"RightArrowLeftArrow;": u"\u21c4",
"RightCeiling;": u"\u2309",
"RightDoubleBracket;": u"\u27e7",
"RightDownTeeVector;": u"\u295d",
"RightDownVector;": u"\u21c2",
"RightDownVectorBar;": u"\u2955",
"RightFloor;": u"\u230b",
"RightTee;": u"\u22a2",
"RightTeeArrow;": u"\u21a6",
"RightTeeVector;": u"\u295b",
"RightTriangle;": u"\u22b3",
"RightTriangleBar;": u"\u29d0",
"RightTriangleEqual;": u"\u22b5",
"RightUpDownVector;": u"\u294f",
"RightUpTeeVector;": u"\u295c",
"RightUpVector;": u"\u21be",
"RightUpVectorBar;": u"\u2954",
"RightVector;": u"\u21c0",
"RightVectorBar;": u"\u2953",
"Rightarrow;": u"\u21d2",
"Ropf;": u"\u211d",
"RoundImplies;": u"\u2970",
"Rrightarrow;": u"\u21db",
"Rscr;": u"\u211b",
"Rsh;": u"\u21b1",
"RuleDelayed;": u"\u29f4",
"SHCHcy;": u"\u0429",
"SHcy;": u"\u0428",
"SOFTcy;": u"\u042c",
"Sacute;": u"\u015a",
"Sc;": u"\u2abc",
"Scaron;": u"\u0160",
"Scedil;": u"\u015e",
"Scirc;": u"\u015c",
"Scy;": u"\u0421",
"Sfr;": u"\U0001d516",
"ShortDownArrow;": u"\u2193",
"ShortLeftArrow;": u"\u2190",
"ShortRightArrow;": u"\u2192",
"ShortUpArrow;": u"\u2191",
"Sigma;": u"\u03a3",
"SmallCircle;": u"\u2218",
"Sopf;": u"\U0001d54a",
"Sqrt;": u"\u221a",
"Square;": u"\u25a1",
"SquareIntersection;": u"\u2293",
"SquareSubset;": u"\u228f",
"SquareSubsetEqual;": u"\u2291",
"SquareSuperset;": u"\u2290",
"SquareSupersetEqual;": u"\u2292",
"SquareUnion;": u"\u2294",
"Sscr;": u"\U0001d4ae",
"Star;": u"\u22c6",
"Sub;": u"\u22d0",
"Subset;": u"\u22d0",
"SubsetEqual;": u"\u2286",
"Succeeds;": u"\u227b",
"SucceedsEqual;": u"\u2ab0",
"SucceedsSlantEqual;": u"\u227d",
"SucceedsTilde;": u"\u227f",
"SuchThat;": u"\u220b",
"Sum;": u"\u2211",
"Sup;": u"\u22d1",
"Superset;": u"\u2283",
"SupersetEqual;": u"\u2287",
"Supset;": u"\u22d1",
"THORN": u"\xde",
"THORN;": u"\xde",
"TRADE;": u"\u2122",
"TSHcy;": u"\u040b",
"TScy;": u"\u0426",
"Tab;": u"\t",
"Tau;": u"\u03a4",
"Tcaron;": u"\u0164",
"Tcedil;": u"\u0162",
"Tcy;": u"\u0422",
"Tfr;": u"\U0001d517",
"Therefore;": u"\u2234",
"Theta;": u"\u0398",
"ThickSpace;": u"\u205f\u200a",
"ThinSpace;": u"\u2009",
"Tilde;": u"\u223c",
"TildeEqual;": u"\u2243",
"TildeFullEqual;": u"\u2245",
"TildeTilde;": u"\u2248",
"Topf;": u"\U0001d54b",
"TripleDot;": u"\u20db",
"Tscr;": u"\U0001d4af",
"Tstrok;": u"\u0166",
"Uacute": u"\xda",
"Uacute;": u"\xda",
"Uarr;": u"\u219f",
"Uarrocir;": u"\u2949",
"Ubrcy;": u"\u040e",
"Ubreve;": u"\u016c",
"Ucirc": u"\xdb",
"Ucirc;": u"\xdb",
"Ucy;": u"\u0423",
"Udblac;": u"\u0170",
"Ufr;": u"\U0001d518",
"Ugrave": u"\xd9",
"Ugrave;": u"\xd9",
"Umacr;": u"\u016a",
"UnderBar;": u"_",
"UnderBrace;": u"\u23df",
"UnderBracket;": u"\u23b5",
"UnderParenthesis;": u"\u23dd",
"Union;": u"\u22c3",
"UnionPlus;": u"\u228e",
"Uogon;": u"\u0172",
"Uopf;": u"\U0001d54c",
"UpArrow;": u"\u2191",
"UpArrowBar;": u"\u2912",
"UpArrowDownArrow;": u"\u21c5",
"UpDownArrow;": u"\u2195",
"UpEquilibrium;": u"\u296e",
"UpTee;": u"\u22a5",
"UpTeeArrow;": u"\u21a5",
"Uparrow;": u"\u21d1",
"Updownarrow;": u"\u21d5",
"UpperLeftArrow;": u"\u2196",
"UpperRightArrow;": u"\u2197",
"Upsi;": u"\u03d2",
"Upsilon;": u"\u03a5",
"Uring;": u"\u016e",
"Uscr;": u"\U0001d4b0",
"Utilde;": u"\u0168",
"Uuml": u"\xdc",
"Uuml;": u"\xdc",
"VDash;": u"\u22ab",
"Vbar;": u"\u2aeb",
"Vcy;": u"\u0412",
"Vdash;": u"\u22a9",
"Vdashl;": u"\u2ae6",
"Vee;": u"\u22c1",
"Verbar;": u"\u2016",
"Vert;": u"\u2016",
"VerticalBar;": u"\u2223",
"VerticalLine;": u"|",
"VerticalSeparator;": u"\u2758",
"VerticalTilde;": u"\u2240",
"VeryThinSpace;": u"\u200a",
"Vfr;": u"\U0001d519",
"Vopf;": u"\U0001d54d",
"Vscr;": u"\U0001d4b1",
"Vvdash;": u"\u22aa",
"Wcirc;": u"\u0174",
"Wedge;": u"\u22c0",
"Wfr;": u"\U0001d51a",
"Wopf;": u"\U0001d54e",
"Wscr;": u"\U0001d4b2",
"Xfr;": u"\U0001d51b",
"Xi;": u"\u039e",
"Xopf;": u"\U0001d54f",
"Xscr;": u"\U0001d4b3",
"YAcy;": u"\u042f",
"YIcy;": u"\u0407",
"YUcy;": u"\u042e",
"Yacute": u"\xdd",
"Yacute;": u"\xdd",
"Ycirc;": u"\u0176",
"Ycy;": u"\u042b",
"Yfr;": u"\U0001d51c",
"Yopf;": u"\U0001d550",
"Yscr;": u"\U0001d4b4",
"Yuml;": u"\u0178",
"ZHcy;": u"\u0416",
"Zacute;": u"\u0179",
"Zcaron;": u"\u017d",
"Zcy;": u"\u0417",
"Zdot;": u"\u017b",
"ZeroWidthSpace;": u"\u200b",
"Zeta;": u"\u0396",
"Zfr;": u"\u2128",
"Zopf;": u"\u2124",
"Zscr;": u"\U0001d4b5",
"aacute": u"\xe1",
"aacute;": u"\xe1",
"abreve;": u"\u0103",
"ac;": u"\u223e",
"acE;": u"\u223e\u0333",
"acd;": u"\u223f",
"acirc": u"\xe2",
"acirc;": u"\xe2",
"acute": u"\xb4",
"acute;": u"\xb4",
"acy;": u"\u0430",
"aelig": u"\xe6",
"aelig;": u"\xe6",
"af;": u"\u2061",
"afr;": u"\U0001d51e",
"agrave": u"\xe0",
"agrave;": u"\xe0",
"alefsym;": u"\u2135",
"aleph;": u"\u2135",
"alpha;": u"\u03b1",
"amacr;": u"\u0101",
"amalg;": u"\u2a3f",
"amp": u"&",
"amp;": u"&",
"and;": u"\u2227",
"andand;": u"\u2a55",
"andd;": u"\u2a5c",
"andslope;": u"\u2a58",
"andv;": u"\u2a5a",
"ang;": u"\u2220",
"ange;": u"\u29a4",
"angle;": u"\u2220",
"angmsd;": u"\u2221",
"angmsdaa;": u"\u29a8",
"angmsdab;": u"\u29a9",
"angmsdac;": u"\u29aa",
"angmsdad;": u"\u29ab",
"angmsdae;": u"\u29ac",
"angmsdaf;": u"\u29ad",
"angmsdag;": u"\u29ae",
"angmsdah;": u"\u29af",
"angrt;": u"\u221f",
"angrtvb;": u"\u22be",
"angrtvbd;": u"\u299d",
"angsph;": u"\u2222",
"angst;": u"\xc5",
"angzarr;": u"\u237c",
"aogon;": u"\u0105",
"aopf;": u"\U0001d552",
"ap;": u"\u2248",
"apE;": u"\u2a70",
"apacir;": u"\u2a6f",
"ape;": u"\u224a",
"apid;": u"\u224b",
"apos;": u"'",
"approx;": u"\u2248",
"approxeq;": u"\u224a",
"aring": u"\xe5",
"aring;": u"\xe5",
"ascr;": u"\U0001d4b6",
"ast;": u"*",
"asymp;": u"\u2248",
"asympeq;": u"\u224d",
"atilde": u"\xe3",
"atilde;": u"\xe3",
"auml": u"\xe4",
"auml;": u"\xe4",
"awconint;": u"\u2233",
"awint;": u"\u2a11",
"bNot;": u"\u2aed",
"backcong;": u"\u224c",
"backepsilon;": u"\u03f6",
"backprime;": u"\u2035",
"backsim;": u"\u223d",
"backsimeq;": u"\u22cd",
"barvee;": u"\u22bd",
"barwed;": u"\u2305",
"barwedge;": u"\u2305",
"bbrk;": u"\u23b5",
"bbrktbrk;": u"\u23b6",
"bcong;": u"\u224c",
"bcy;": u"\u0431",
"bdquo;": u"\u201e",
"becaus;": u"\u2235",
"because;": u"\u2235",
"bemptyv;": u"\u29b0",
"bepsi;": u"\u03f6",
"bernou;": u"\u212c",
"beta;": u"\u03b2",
"beth;": u"\u2136",
"between;": u"\u226c",
"bfr;": u"\U0001d51f",
"bigcap;": u"\u22c2",
"bigcirc;": u"\u25ef",
"bigcup;": u"\u22c3",
"bigodot;": u"\u2a00",
"bigoplus;": u"\u2a01",
"bigotimes;": u"\u2a02",
"bigsqcup;": u"\u2a06",
"bigstar;": u"\u2605",
"bigtriangledown;": u"\u25bd",
"bigtriangleup;": u"\u25b3",
"biguplus;": u"\u2a04",
"bigvee;": u"\u22c1",
"bigwedge;": u"\u22c0",
"bkarow;": u"\u290d",
"blacklozenge;": u"\u29eb",
"blacksquare;": u"\u25aa",
"blacktriangle;": u"\u25b4",
"blacktriangledown;": u"\u25be",
"blacktriangleleft;": u"\u25c2",
"blacktriangleright;": u"\u25b8",
"blank;": u"\u2423",
"blk12;": u"\u2592",
"blk14;": u"\u2591",
"blk34;": u"\u2593",
"block;": u"\u2588",
"bne;": u"=\u20e5",
"bnequiv;": u"\u2261\u20e5",
"bnot;": u"\u2310",
"bopf;": u"\U0001d553",
"bot;": u"\u22a5",
"bottom;": u"\u22a5",
"bowtie;": u"\u22c8",
"boxDL;": u"\u2557",
"boxDR;": u"\u2554",
"boxDl;": u"\u2556",
"boxDr;": u"\u2553",
"boxH;": u"\u2550",
"boxHD;": u"\u2566",
"boxHU;": u"\u2569",
"boxHd;": u"\u2564",
"boxHu;": u"\u2567",
"boxUL;": u"\u255d",
"boxUR;": u"\u255a",
"boxUl;": u"\u255c",
"boxUr;": u"\u2559",
"boxV;": u"\u2551",
"boxVH;": u"\u256c",
"boxVL;": u"\u2563",
"boxVR;": u"\u2560",
"boxVh;": u"\u256b",
"boxVl;": u"\u2562",
"boxVr;": u"\u255f",
"boxbox;": u"\u29c9",
"boxdL;": u"\u2555",
"boxdR;": u"\u2552",
"boxdl;": u"\u2510",
"boxdr;": u"\u250c",
"boxh;": u"\u2500",
"boxhD;": u"\u2565",
"boxhU;": u"\u2568",
"boxhd;": u"\u252c",
"boxhu;": u"\u2534",
"boxminus;": u"\u229f",
"boxplus;": u"\u229e",
"boxtimes;": u"\u22a0",
"boxuL;": u"\u255b",
"boxuR;": u"\u2558",
"boxul;": u"\u2518",
"boxur;": u"\u2514",
"boxv;": u"\u2502",
"boxvH;": u"\u256a",
"boxvL;": u"\u2561",
"boxvR;": u"\u255e",
"boxvh;": u"\u253c",
"boxvl;": u"\u2524",
"boxvr;": u"\u251c",
"bprime;": u"\u2035",
"breve;": u"\u02d8",
"brvbar": u"\xa6",
"brvbar;": u"\xa6",
"bscr;": u"\U0001d4b7",
"bsemi;": u"\u204f",
"bsim;": u"\u223d",
"bsime;": u"\u22cd",
"bsol;": u"\\",
"bsolb;": u"\u29c5",
"bsolhsub;": u"\u27c8",
"bull;": u"\u2022",
"bullet;": u"\u2022",
"bump;": u"\u224e",
"bumpE;": u"\u2aae",
"bumpe;": u"\u224f",
"bumpeq;": u"\u224f",
"cacute;": u"\u0107",
"cap;": u"\u2229",
"capand;": u"\u2a44",
"capbrcup;": u"\u2a49",
"capcap;": u"\u2a4b",
"capcup;": u"\u2a47",
"capdot;": u"\u2a40",
"caps;": u"\u2229\ufe00",
"caret;": u"\u2041",
"caron;": u"\u02c7",
"ccaps;": u"\u2a4d",
"ccaron;": u"\u010d",
"ccedil": u"\xe7",
"ccedil;": u"\xe7",
"ccirc;": u"\u0109",
"ccups;": u"\u2a4c",
"ccupssm;": u"\u2a50",
"cdot;": u"\u010b",
"cedil": u"\xb8",
"cedil;": u"\xb8",
"cemptyv;": u"\u29b2",
"cent": u"\xa2",
"cent;": u"\xa2",
"centerdot;": u"\xb7",
"cfr;": u"\U0001d520",
"chcy;": u"\u0447",
"check;": u"\u2713",
"checkmark;": u"\u2713",
"chi;": u"\u03c7",
"cir;": u"\u25cb",
"cirE;": u"\u29c3",
"circ;": u"\u02c6",
"circeq;": u"\u2257",
"circlearrowleft;": u"\u21ba",
"circlearrowright;": u"\u21bb",
"circledR;": u"\xae",
"circledS;": u"\u24c8",
"circledast;": u"\u229b",
"circledcirc;": u"\u229a",
"circleddash;": u"\u229d",
"cire;": u"\u2257",
"cirfnint;": u"\u2a10",
"cirmid;": u"\u2aef",
"cirscir;": u"\u29c2",
"clubs;": u"\u2663",
"clubsuit;": u"\u2663",
"colon;": u":",
"colone;": u"\u2254",
"coloneq;": u"\u2254",
"comma;": u",",
"commat;": u"@",
"comp;": u"\u2201",
"compfn;": u"\u2218",
"complement;": u"\u2201",
"complexes;": u"\u2102",
"cong;": u"\u2245",
"congdot;": u"\u2a6d",
"conint;": u"\u222e",
"copf;": u"\U0001d554",
"coprod;": u"\u2210",
"copy": u"\xa9",
"copy;": u"\xa9",
"copysr;": u"\u2117",
"crarr;": u"\u21b5",
"cross;": u"\u2717",
"cscr;": u"\U0001d4b8",
"csub;": u"\u2acf",
"csube;": u"\u2ad1",
"csup;": u"\u2ad0",
"csupe;": u"\u2ad2",
"ctdot;": u"\u22ef",
"cudarrl;": u"\u2938",
"cudarrr;": u"\u2935",
"cuepr;": u"\u22de",
"cuesc;": u"\u22df",
"cularr;": u"\u21b6",
"cularrp;": u"\u293d",
"cup;": u"\u222a",
"cupbrcap;": u"\u2a48",
"cupcap;": u"\u2a46",
"cupcup;": u"\u2a4a",
"cupdot;": u"\u228d",
"cupor;": u"\u2a45",
"cups;": u"\u222a\ufe00",
"curarr;": u"\u21b7",
"curarrm;": u"\u293c",
"curlyeqprec;": u"\u22de",
"curlyeqsucc;": u"\u22df",
"curlyvee;": u"\u22ce",
"curlywedge;": u"\u22cf",
"curren": u"\xa4",
"curren;": u"\xa4",
"curvearrowleft;": u"\u21b6",
"curvearrowright;": u"\u21b7",
"cuvee;": u"\u22ce",
"cuwed;": u"\u22cf",
"cwconint;": u"\u2232",
"cwint;": u"\u2231",
"cylcty;": u"\u232d",
"dArr;": u"\u21d3",
"dHar;": u"\u2965",
"dagger;": u"\u2020",
"daleth;": u"\u2138",
"darr;": u"\u2193",
"dash;": u"\u2010",
"dashv;": u"\u22a3",
"dbkarow;": u"\u290f",
"dblac;": u"\u02dd",
"dcaron;": u"\u010f",
"dcy;": u"\u0434",
"dd;": u"\u2146",
"ddagger;": u"\u2021",
"ddarr;": u"\u21ca",
"ddotseq;": u"\u2a77",
"deg": u"\xb0",
"deg;": u"\xb0",
"delta;": u"\u03b4",
"demptyv;": u"\u29b1",
"dfisht;": u"\u297f",
"dfr;": u"\U0001d521",
"dharl;": u"\u21c3",
"dharr;": u"\u21c2",
"diam;": u"\u22c4",
"diamond;": u"\u22c4",
"diamondsuit;": u"\u2666",
"diams;": u"\u2666",
"die;": u"\xa8",
"digamma;": u"\u03dd",
"disin;": u"\u22f2",
"div;": u"\xf7",
"divide": u"\xf7",
"divide;": u"\xf7",
"divideontimes;": u"\u22c7",
"divonx;": u"\u22c7",
"djcy;": u"\u0452",
"dlcorn;": u"\u231e",
"dlcrop;": u"\u230d",
"dollar;": u"$",
"dopf;": u"\U0001d555",
"dot;": u"\u02d9",
"doteq;": u"\u2250",
"doteqdot;": u"\u2251",
"dotminus;": u"\u2238",
"dotplus;": u"\u2214",
"dotsquare;": u"\u22a1",
"doublebarwedge;": u"\u2306",
"downarrow;": u"\u2193",
"downdownarrows;": u"\u21ca",
"downharpoonleft;": u"\u21c3",
"downharpoonright;": u"\u21c2",
"drbkarow;": u"\u2910",
"drcorn;": u"\u231f",
"drcrop;": u"\u230c",
"dscr;": u"\U0001d4b9",
"dscy;": u"\u0455",
"dsol;": u"\u29f6",
"dstrok;": u"\u0111",
"dtdot;": u"\u22f1",
"dtri;": u"\u25bf",
"dtrif;": u"\u25be",
"duarr;": u"\u21f5",
"duhar;": u"\u296f",
"dwangle;": u"\u29a6",
"dzcy;": u"\u045f",
"dzigrarr;": u"\u27ff",
"eDDot;": u"\u2a77",
"eDot;": u"\u2251",
"eacute": u"\xe9",
"eacute;": u"\xe9",
"easter;": u"\u2a6e",
"ecaron;": u"\u011b",
"ecir;": u"\u2256",
"ecirc": u"\xea",
"ecirc;": u"\xea",
"ecolon;": u"\u2255",
"ecy;": u"\u044d",
"edot;": u"\u0117",
"ee;": u"\u2147",
"efDot;": u"\u2252",
"efr;": u"\U0001d522",
"eg;": u"\u2a9a",
"egrave": u"\xe8",
"egrave;": u"\xe8",
"egs;": u"\u2a96",
"egsdot;": u"\u2a98",
"el;": u"\u2a99",
"elinters;": u"\u23e7",
"ell;": u"\u2113",
"els;": u"\u2a95",
"elsdot;": u"\u2a97",
"emacr;": u"\u0113",
"empty;": u"\u2205",
"emptyset;": u"\u2205",
"emptyv;": u"\u2205",
"emsp13;": u"\u2004",
"emsp14;": u"\u2005",
"emsp;": u"\u2003",
"eng;": u"\u014b",
"ensp;": u"\u2002",
"eogon;": u"\u0119",
"eopf;": u"\U0001d556",
"epar;": u"\u22d5",
"eparsl;": u"\u29e3",
"eplus;": u"\u2a71",
"epsi;": u"\u03b5",
"epsilon;": u"\u03b5",
"epsiv;": u"\u03f5",
"eqcirc;": u"\u2256",
"eqcolon;": u"\u2255",
"eqsim;": u"\u2242",
"eqslantgtr;": u"\u2a96",
"eqslantless;": u"\u2a95",
"equals;": u"=",
"equest;": u"\u225f",
"equiv;": u"\u2261",
"equivDD;": u"\u2a78",
"eqvparsl;": u"\u29e5",
"erDot;": u"\u2253",
"erarr;": u"\u2971",
"escr;": u"\u212f",
"esdot;": u"\u2250",
"esim;": u"\u2242",
"eta;": u"\u03b7",
"eth": u"\xf0",
"eth;": u"\xf0",
"euml": u"\xeb",
"euml;": u"\xeb",
"euro;": u"\u20ac",
"excl;": u"!",
"exist;": u"\u2203",
"expectation;": u"\u2130",
"exponentiale;": u"\u2147",
"fallingdotseq;": u"\u2252",
"fcy;": u"\u0444",
"female;": u"\u2640",
"ffilig;": u"\ufb03",
"fflig;": u"\ufb00",
"ffllig;": u"\ufb04",
"ffr;": u"\U0001d523",
"filig;": u"\ufb01",
"fjlig;": u"fj",
"flat;": u"\u266d",
"fllig;": u"\ufb02",
"fltns;": u"\u25b1",
"fnof;": u"\u0192",
"fopf;": u"\U0001d557",
"forall;": u"\u2200",
"fork;": u"\u22d4",
"forkv;": u"\u2ad9",
"fpartint;": u"\u2a0d",
"frac12": u"\xbd",
"frac12;": u"\xbd",
"frac13;": u"\u2153",
"frac14": u"\xbc",
"frac14;": u"\xbc",
"frac15;": u"\u2155",
"frac16;": u"\u2159",
"frac18;": u"\u215b",
"frac23;": u"\u2154",
"frac25;": u"\u2156",
"frac34": u"\xbe",
"frac34;": u"\xbe",
"frac35;": u"\u2157",
"frac38;": u"\u215c",
"frac45;": u"\u2158",
"frac56;": u"\u215a",
"frac58;": u"\u215d",
"frac78;": u"\u215e",
"frasl;": u"\u2044",
"frown;": u"\u2322",
"fscr;": u"\U0001d4bb",
"gE;": u"\u2267",
"gEl;": u"\u2a8c",
"gacute;": u"\u01f5",
"gamma;": u"\u03b3",
"gammad;": u"\u03dd",
"gap;": u"\u2a86",
"gbreve;": u"\u011f",
"gcirc;": u"\u011d",
"gcy;": u"\u0433",
"gdot;": u"\u0121",
"ge;": u"\u2265",
"gel;": u"\u22db",
"geq;": u"\u2265",
"geqq;": u"\u2267",
"geqslant;": u"\u2a7e",
"ges;": u"\u2a7e",
"gescc;": u"\u2aa9",
"gesdot;": u"\u2a80",
"gesdoto;": u"\u2a82",
"gesdotol;": u"\u2a84",
"gesl;": u"\u22db\ufe00",
"gesles;": u"\u2a94",
"gfr;": u"\U0001d524",
"gg;": u"\u226b",
"ggg;": u"\u22d9",
"gimel;": u"\u2137",
"gjcy;": u"\u0453",
"gl;": u"\u2277",
"glE;": u"\u2a92",
"gla;": u"\u2aa5",
"glj;": u"\u2aa4",
"gnE;": u"\u2269",
"gnap;": u"\u2a8a",
"gnapprox;": u"\u2a8a",
"gne;": u"\u2a88",
"gneq;": u"\u2a88",
"gneqq;": u"\u2269",
"gnsim;": u"\u22e7",
"gopf;": u"\U0001d558",
"grave;": u"`",
"gscr;": u"\u210a",
"gsim;": u"\u2273",
"gsime;": u"\u2a8e",
"gsiml;": u"\u2a90",
"gt": u">",
"gt;": u">",
"gtcc;": u"\u2aa7",
"gtcir;": u"\u2a7a",
"gtdot;": u"\u22d7",
"gtlPar;": u"\u2995",
"gtquest;": u"\u2a7c",
"gtrapprox;": u"\u2a86",
"gtrarr;": u"\u2978",
"gtrdot;": u"\u22d7",
"gtreqless;": u"\u22db",
"gtreqqless;": u"\u2a8c",
"gtrless;": u"\u2277",
"gtrsim;": u"\u2273",
"gvertneqq;": u"\u2269\ufe00",
"gvnE;": u"\u2269\ufe00",
"hArr;": u"\u21d4",
"hairsp;": u"\u200a",
"half;": u"\xbd",
"hamilt;": u"\u210b",
"hardcy;": u"\u044a",
"harr;": u"\u2194",
"harrcir;": u"\u2948",
"harrw;": u"\u21ad",
"hbar;": u"\u210f",
"hcirc;": u"\u0125",
"hearts;": u"\u2665",
"heartsuit;": u"\u2665",
"hellip;": u"\u2026",
"hercon;": u"\u22b9",
"hfr;": u"\U0001d525",
"hksearow;": u"\u2925",
"hkswarow;": u"\u2926",
"hoarr;": u"\u21ff",
"homtht;": u"\u223b",
"hookleftarrow;": u"\u21a9",
"hookrightarrow;": u"\u21aa",
"hopf;": u"\U0001d559",
"horbar;": u"\u2015",
"hscr;": u"\U0001d4bd",
"hslash;": u"\u210f",
"hstrok;": u"\u0127",
"hybull;": u"\u2043",
"hyphen;": u"\u2010",
"iacute": u"\xed",
"iacute;": u"\xed",
"ic;": u"\u2063",
"icirc": u"\xee",
"icirc;": u"\xee",
"icy;": u"\u0438",
"iecy;": u"\u0435",
"iexcl": u"\xa1",
"iexcl;": u"\xa1",
"iff;": u"\u21d4",
"ifr;": u"\U0001d526",
"igrave": u"\xec",
"igrave;": u"\xec",
"ii;": u"\u2148",
"iiiint;": u"\u2a0c",
"iiint;": u"\u222d",
"iinfin;": u"\u29dc",
"iiota;": u"\u2129",
"ijlig;": u"\u0133",
"imacr;": u"\u012b",
"image;": u"\u2111",
"imagline;": u"\u2110",
"imagpart;": u"\u2111",
"imath;": u"\u0131",
"imof;": u"\u22b7",
"imped;": u"\u01b5",
"in;": u"\u2208",
"incare;": u"\u2105",
"infin;": u"\u221e",
"infintie;": u"\u29dd",
"inodot;": u"\u0131",
"int;": u"\u222b",
"intcal;": u"\u22ba",
"integers;": u"\u2124",
"intercal;": u"\u22ba",
"intlarhk;": u"\u2a17",
"intprod;": u"\u2a3c",
"iocy;": u"\u0451",
"iogon;": u"\u012f",
"iopf;": u"\U0001d55a",
"iota;": u"\u03b9",
"iprod;": u"\u2a3c",
"iquest": u"\xbf",
"iquest;": u"\xbf",
"iscr;": u"\U0001d4be",
"isin;": u"\u2208",
"isinE;": u"\u22f9",
"isindot;": u"\u22f5",
"isins;": u"\u22f4",
"isinsv;": u"\u22f3",
"isinv;": u"\u2208",
"it;": u"\u2062",
"itilde;": u"\u0129",
"iukcy;": u"\u0456",
"iuml": u"\xef",
"iuml;": u"\xef",
"jcirc;": u"\u0135",
"jcy;": u"\u0439",
"jfr;": u"\U0001d527",
"jmath;": u"\u0237",
"jopf;": u"\U0001d55b",
"jscr;": u"\U0001d4bf",
"jsercy;": u"\u0458",
"jukcy;": u"\u0454",
"kappa;": u"\u03ba",
"kappav;": u"\u03f0",
"kcedil;": u"\u0137",
"kcy;": u"\u043a",
"kfr;": u"\U0001d528",
"kgreen;": u"\u0138",
"khcy;": u"\u0445",
"kjcy;": u"\u045c",
"kopf;": u"\U0001d55c",
"kscr;": u"\U0001d4c0",
"lAarr;": u"\u21da",
"lArr;": u"\u21d0",
"lAtail;": u"\u291b",
"lBarr;": u"\u290e",
"lE;": u"\u2266",
"lEg;": u"\u2a8b",
"lHar;": u"\u2962",
"lacute;": u"\u013a",
"laemptyv;": u"\u29b4",
"lagran;": u"\u2112",
"lambda;": u"\u03bb",
"lang;": u"\u27e8",
"langd;": u"\u2991",
"langle;": u"\u27e8",
"lap;": u"\u2a85",
"laquo": u"\xab",
"laquo;": u"\xab",
"larr;": u"\u2190",
"larrb;": u"\u21e4",
"larrbfs;": u"\u291f",
"larrfs;": u"\u291d",
"larrhk;": u"\u21a9",
"larrlp;": u"\u21ab",
"larrpl;": u"\u2939",
"larrsim;": u"\u2973",
"larrtl;": u"\u21a2",
"lat;": u"\u2aab",
"latail;": u"\u2919",
"late;": u"\u2aad",
"lates;": u"\u2aad\ufe00",
"lbarr;": u"\u290c",
"lbbrk;": u"\u2772",
"lbrace;": u"{",
"lbrack;": u"[",
"lbrke;": u"\u298b",
"lbrksld;": u"\u298f",
"lbrkslu;": u"\u298d",
"lcaron;": u"\u013e",
"lcedil;": u"\u013c",
"lceil;": u"\u2308",
"lcub;": u"{",
"lcy;": u"\u043b",
"ldca;": u"\u2936",
"ldquo;": u"\u201c",
"ldquor;": u"\u201e",
"ldrdhar;": u"\u2967",
"ldrushar;": u"\u294b",
"ldsh;": u"\u21b2",
"le;": u"\u2264",
"leftarrow;": u"\u2190",
"leftarrowtail;": u"\u21a2",
"leftharpoondown;": u"\u21bd",
"leftharpoonup;": u"\u21bc",
"leftleftarrows;": u"\u21c7",
"leftrightarrow;": u"\u2194",
"leftrightarrows;": u"\u21c6",
"leftrightharpoons;": u"\u21cb",
"leftrightsquigarrow;": u"\u21ad",
"leftthreetimes;": u"\u22cb",
"leg;": u"\u22da",
"leq;": u"\u2264",
"leqq;": u"\u2266",
"leqslant;": u"\u2a7d",
"les;": u"\u2a7d",
"lescc;": u"\u2aa8",
"lesdot;": u"\u2a7f",
"lesdoto;": u"\u2a81",
"lesdotor;": u"\u2a83",
"lesg;": u"\u22da\ufe00",
"lesges;": u"\u2a93",
"lessapprox;": u"\u2a85",
"lessdot;": u"\u22d6",
"lesseqgtr;": u"\u22da",
"lesseqqgtr;": u"\u2a8b",
"lessgtr;": u"\u2276",
"lesssim;": u"\u2272",
"lfisht;": u"\u297c",
"lfloor;": u"\u230a",
"lfr;": u"\U0001d529",
"lg;": u"\u2276",
"lgE;": u"\u2a91",
"lhard;": u"\u21bd",
"lharu;": u"\u21bc",
"lharul;": u"\u296a",
"lhblk;": u"\u2584",
"ljcy;": u"\u0459",
"ll;": u"\u226a",
"llarr;": u"\u21c7",
"llcorner;": u"\u231e",
"llhard;": u"\u296b",
"lltri;": u"\u25fa",
"lmidot;": u"\u0140",
"lmoust;": u"\u23b0",
"lmoustache;": u"\u23b0",
"lnE;": u"\u2268",
"lnap;": u"\u2a89",
"lnapprox;": u"\u2a89",
"lne;": u"\u2a87",
"lneq;": u"\u2a87",
"lneqq;": u"\u2268",
"lnsim;": u"\u22e6",
"loang;": u"\u27ec",
"loarr;": u"\u21fd",
"lobrk;": u"\u27e6",
"longleftarrow;": u"\u27f5",
"longleftrightarrow;": u"\u27f7",
"longmapsto;": u"\u27fc",
"longrightarrow;": u"\u27f6",
"looparrowleft;": u"\u21ab",
"looparrowright;": u"\u21ac",
"lopar;": u"\u2985",
"lopf;": u"\U0001d55d",
"loplus;": u"\u2a2d",
"lotimes;": u"\u2a34",
"lowast;": u"\u2217",
"lowbar;": u"_",
"loz;": u"\u25ca",
"lozenge;": u"\u25ca",
"lozf;": u"\u29eb",
"lpar;": u"(",
"lparlt;": u"\u2993",
"lrarr;": u"\u21c6",
"lrcorner;": u"\u231f",
"lrhar;": u"\u21cb",
"lrhard;": u"\u296d",
"lrm;": u"\u200e",
"lrtri;": u"\u22bf",
"lsaquo;": u"\u2039",
"lscr;": u"\U0001d4c1",
"lsh;": u"\u21b0",
"lsim;": u"\u2272",
"lsime;": u"\u2a8d",
"lsimg;": u"\u2a8f",
"lsqb;": u"[",
"lsquo;": u"\u2018",
"lsquor;": u"\u201a",
"lstrok;": u"\u0142",
"lt": u"<",
"lt;": u"<",
"ltcc;": u"\u2aa6",
"ltcir;": u"\u2a79",
"ltdot;": u"\u22d6",
"lthree;": u"\u22cb",
"ltimes;": u"\u22c9",
"ltlarr;": u"\u2976",
"ltquest;": u"\u2a7b",
"ltrPar;": u"\u2996",
"ltri;": u"\u25c3",
"ltrie;": u"\u22b4",
"ltrif;": u"\u25c2",
"lurdshar;": u"\u294a",
"luruhar;": u"\u2966",
"lvertneqq;": u"\u2268\ufe00",
"lvnE;": u"\u2268\ufe00",
"mDDot;": u"\u223a",
"macr": u"\xaf",
"macr;": u"\xaf",
"male;": u"\u2642",
"malt;": u"\u2720",
"maltese;": u"\u2720",
"map;": u"\u21a6",
"mapsto;": u"\u21a6",
"mapstodown;": u"\u21a7",
"mapstoleft;": u"\u21a4",
"mapstoup;": u"\u21a5",
"marker;": u"\u25ae",
"mcomma;": u"\u2a29",
"mcy;": u"\u043c",
"mdash;": u"\u2014",
"measuredangle;": u"\u2221",
"mfr;": u"\U0001d52a",
"mho;": u"\u2127",
"micro": u"\xb5",
"micro;": u"\xb5",
"mid;": u"\u2223",
"midast;": u"*",
"midcir;": u"\u2af0",
"middot": u"\xb7",
"middot;": u"\xb7",
"minus;": u"\u2212",
"minusb;": u"\u229f",
"minusd;": u"\u2238",
"minusdu;": u"\u2a2a",
"mlcp;": u"\u2adb",
"mldr;": u"\u2026",
"mnplus;": u"\u2213",
"models;": u"\u22a7",
"mopf;": u"\U0001d55e",
"mp;": u"\u2213",
"mscr;": u"\U0001d4c2",
"mstpos;": u"\u223e",
"mu;": u"\u03bc",
"multimap;": u"\u22b8",
"mumap;": u"\u22b8",
"nGg;": u"\u22d9\u0338",
"nGt;": u"\u226b\u20d2",
"nGtv;": u"\u226b\u0338",
"nLeftarrow;": u"\u21cd",
"nLeftrightarrow;": u"\u21ce",
"nLl;": u"\u22d8\u0338",
"nLt;": u"\u226a\u20d2",
"nLtv;": u"\u226a\u0338",
"nRightarrow;": u"\u21cf",
"nVDash;": u"\u22af",
"nVdash;": u"\u22ae",
"nabla;": u"\u2207",
"nacute;": u"\u0144",
"nang;": u"\u2220\u20d2",
"nap;": u"\u2249",
"napE;": u"\u2a70\u0338",
"napid;": u"\u224b\u0338",
"napos;": u"\u0149",
"napprox;": u"\u2249",
"natur;": u"\u266e",
"natural;": u"\u266e",
"naturals;": u"\u2115",
"nbsp": u"\xa0",
"nbsp;": u"\xa0",
"nbump;": u"\u224e\u0338",
"nbumpe;": u"\u224f\u0338",
"ncap;": u"\u2a43",
"ncaron;": u"\u0148",
"ncedil;": u"\u0146",
"ncong;": u"\u2247",
"ncongdot;": u"\u2a6d\u0338",
"ncup;": u"\u2a42",
"ncy;": u"\u043d",
"ndash;": u"\u2013",
"ne;": u"\u2260",
"neArr;": u"\u21d7",
"nearhk;": u"\u2924",
"nearr;": u"\u2197",
"nearrow;": u"\u2197",
"nedot;": u"\u2250\u0338",
"nequiv;": u"\u2262",
"nesear;": u"\u2928",
"nesim;": u"\u2242\u0338",
"nexist;": u"\u2204",
"nexists;": u"\u2204",
"nfr;": u"\U0001d52b",
"ngE;": u"\u2267\u0338",
"nge;": u"\u2271",
"ngeq;": u"\u2271",
"ngeqq;": u"\u2267\u0338",
"ngeqslant;": u"\u2a7e\u0338",
"nges;": u"\u2a7e\u0338",
"ngsim;": u"\u2275",
"ngt;": u"\u226f",
"ngtr;": u"\u226f",
"nhArr;": u"\u21ce",
"nharr;": u"\u21ae",
"nhpar;": u"\u2af2",
"ni;": u"\u220b",
"nis;": u"\u22fc",
"nisd;": u"\u22fa",
"niv;": u"\u220b",
"njcy;": u"\u045a",
"nlArr;": u"\u21cd",
"nlE;": u"\u2266\u0338",
"nlarr;": u"\u219a",
"nldr;": u"\u2025",
"nle;": u"\u2270",
"nleftarrow;": u"\u219a",
"nleftrightarrow;": u"\u21ae",
"nleq;": u"\u2270",
"nleqq;": u"\u2266\u0338",
"nleqslant;": u"\u2a7d\u0338",
"nles;": u"\u2a7d\u0338",
"nless;": u"\u226e",
"nlsim;": u"\u2274",
"nlt;": u"\u226e",
"nltri;": u"\u22ea",
"nltrie;": u"\u22ec",
"nmid;": u"\u2224",
"nopf;": u"\U0001d55f",
"not": u"\xac",
"not;": u"\xac",
"notin;": u"\u2209",
"notinE;": u"\u22f9\u0338",
"notindot;": u"\u22f5\u0338",
"notinva;": u"\u2209",
"notinvb;": u"\u22f7",
"notinvc;": u"\u22f6",
"notni;": u"\u220c",
"notniva;": u"\u220c",
"notnivb;": u"\u22fe",
"notnivc;": u"\u22fd",
"npar;": u"\u2226",
"nparallel;": u"\u2226",
"nparsl;": u"\u2afd\u20e5",
"npart;": u"\u2202\u0338",
"npolint;": u"\u2a14",
"npr;": u"\u2280",
"nprcue;": u"\u22e0",
"npre;": u"\u2aaf\u0338",
"nprec;": u"\u2280",
"npreceq;": u"\u2aaf\u0338",
"nrArr;": u"\u21cf",
"nrarr;": u"\u219b",
"nrarrc;": u"\u2933\u0338",
"nrarrw;": u"\u219d\u0338",
"nrightarrow;": u"\u219b",
"nrtri;": u"\u22eb",
"nrtrie;": u"\u22ed",
"nsc;": u"\u2281",
"nsccue;": u"\u22e1",
"nsce;": u"\u2ab0\u0338",
"nscr;": u"\U0001d4c3",
"nshortmid;": u"\u2224",
"nshortparallel;": u"\u2226",
"nsim;": u"\u2241",
"nsime;": u"\u2244",
"nsimeq;": u"\u2244",
"nsmid;": u"\u2224",
"nspar;": u"\u2226",
"nsqsube;": u"\u22e2",
"nsqsupe;": u"\u22e3",
"nsub;": u"\u2284",
"nsubE;": u"\u2ac5\u0338",
"nsube;": u"\u2288",
"nsubset;": u"\u2282\u20d2",
"nsubseteq;": u"\u2288",
"nsubseteqq;": u"\u2ac5\u0338",
"nsucc;": u"\u2281",
"nsucceq;": u"\u2ab0\u0338",
"nsup;": u"\u2285",
"nsupE;": u"\u2ac6\u0338",
"nsupe;": u"\u2289",
"nsupset;": u"\u2283\u20d2",
"nsupseteq;": u"\u2289",
"nsupseteqq;": u"\u2ac6\u0338",
"ntgl;": u"\u2279",
"ntilde": u"\xf1",
"ntilde;": u"\xf1",
"ntlg;": u"\u2278",
"ntriangleleft;": u"\u22ea",
"ntrianglelefteq;": u"\u22ec",
"ntriangleright;": u"\u22eb",
"ntrianglerighteq;": u"\u22ed",
"nu;": u"\u03bd",
"num;": u"#",
"numero;": u"\u2116",
"numsp;": u"\u2007",
"nvDash;": u"\u22ad",
"nvHarr;": u"\u2904",
"nvap;": u"\u224d\u20d2",
"nvdash;": u"\u22ac",
"nvge;": u"\u2265\u20d2",
"nvgt;": u">\u20d2",
"nvinfin;": u"\u29de",
"nvlArr;": u"\u2902",
"nvle;": u"\u2264\u20d2",
"nvlt;": u"<\u20d2",
"nvltrie;": u"\u22b4\u20d2",
"nvrArr;": u"\u2903",
"nvrtrie;": u"\u22b5\u20d2",
"nvsim;": u"\u223c\u20d2",
"nwArr;": u"\u21d6",
"nwarhk;": u"\u2923",
"nwarr;": u"\u2196",
"nwarrow;": u"\u2196",
"nwnear;": u"\u2927",
"oS;": u"\u24c8",
"oacute": u"\xf3",
"oacute;": u"\xf3",
"oast;": u"\u229b",
"ocir;": u"\u229a",
"ocirc": u"\xf4",
"ocirc;": u"\xf4",
"ocy;": u"\u043e",
"odash;": u"\u229d",
"odblac;": u"\u0151",
"odiv;": u"\u2a38",
"odot;": u"\u2299",
"odsold;": u"\u29bc",
"oelig;": u"\u0153",
"ofcir;": u"\u29bf",
"ofr;": u"\U0001d52c",
"ogon;": u"\u02db",
"ograve": u"\xf2",
"ograve;": u"\xf2",
"ogt;": u"\u29c1",
"ohbar;": u"\u29b5",
"ohm;": u"\u03a9",
"oint;": u"\u222e",
"olarr;": u"\u21ba",
"olcir;": u"\u29be",
"olcross;": u"\u29bb",
"oline;": u"\u203e",
"olt;": u"\u29c0",
"omacr;": u"\u014d",
"omega;": u"\u03c9",
"omicron;": u"\u03bf",
"omid;": u"\u29b6",
"ominus;": u"\u2296",
"oopf;": u"\U0001d560",
"opar;": u"\u29b7",
"operp;": u"\u29b9",
"oplus;": u"\u2295",
"or;": u"\u2228",
"orarr;": u"\u21bb",
"ord;": u"\u2a5d",
"order;": u"\u2134",
"orderof;": u"\u2134",
"ordf": u"\xaa",
"ordf;": u"\xaa",
"ordm": u"\xba",
"ordm;": u"\xba",
"origof;": u"\u22b6",
"oror;": u"\u2a56",
"orslope;": u"\u2a57",
"orv;": u"\u2a5b",
"oscr;": u"\u2134",
"oslash": u"\xf8",
"oslash;": u"\xf8",
"osol;": u"\u2298",
"otilde": u"\xf5",
"otilde;": u"\xf5",
"otimes;": u"\u2297",
"otimesas;": u"\u2a36",
"ouml": u"\xf6",
"ouml;": u"\xf6",
"ovbar;": u"\u233d",
"par;": u"\u2225",
"para": u"\xb6",
"para;": u"\xb6",
"parallel;": u"\u2225",
"parsim;": u"\u2af3",
"parsl;": u"\u2afd",
"part;": u"\u2202",
"pcy;": u"\u043f",
"percnt;": u"%",
"period;": u".",
"permil;": u"\u2030",
"perp;": u"\u22a5",
"pertenk;": u"\u2031",
"pfr;": u"\U0001d52d",
"phi;": u"\u03c6",
"phiv;": u"\u03d5",
"phmmat;": u"\u2133",
"phone;": u"\u260e",
"pi;": u"\u03c0",
"pitchfork;": u"\u22d4",
"piv;": u"\u03d6",
"planck;": u"\u210f",
"planckh;": u"\u210e",
"plankv;": u"\u210f",
"plus;": u"+",
"plusacir;": u"\u2a23",
"plusb;": u"\u229e",
"pluscir;": u"\u2a22",
"plusdo;": u"\u2214",
"plusdu;": u"\u2a25",
"pluse;": u"\u2a72",
"plusmn": u"\xb1",
"plusmn;": u"\xb1",
"plussim;": u"\u2a26",
"plustwo;": u"\u2a27",
"pm;": u"\xb1",
"pointint;": u"\u2a15",
"popf;": u"\U0001d561",
"pound": u"\xa3",
"pound;": u"\xa3",
"pr;": u"\u227a",
"prE;": u"\u2ab3",
"prap;": u"\u2ab7",
"prcue;": u"\u227c",
"pre;": u"\u2aaf",
"prec;": u"\u227a",
"precapprox;": u"\u2ab7",
"preccurlyeq;": u"\u227c",
"preceq;": u"\u2aaf",
"precnapprox;": u"\u2ab9",
"precneqq;": u"\u2ab5",
"precnsim;": u"\u22e8",
"precsim;": u"\u227e",
"prime;": u"\u2032",
"primes;": u"\u2119",
"prnE;": u"\u2ab5",
"prnap;": u"\u2ab9",
"prnsim;": u"\u22e8",
"prod;": u"\u220f",
"profalar;": u"\u232e",
"profline;": u"\u2312",
"profsurf;": u"\u2313",
"prop;": u"\u221d",
"propto;": u"\u221d",
"prsim;": u"\u227e",
"prurel;": u"\u22b0",
"pscr;": u"\U0001d4c5",
"psi;": u"\u03c8",
"puncsp;": u"\u2008",
"qfr;": u"\U0001d52e",
"qint;": u"\u2a0c",
"qopf;": u"\U0001d562",
"qprime;": u"\u2057",
"qscr;": u"\U0001d4c6",
"quaternions;": u"\u210d",
"quatint;": u"\u2a16",
"quest;": u"?",
"questeq;": u"\u225f",
"quot": u"\"",
"quot;": u"\"",
"rAarr;": u"\u21db",
"rArr;": u"\u21d2",
"rAtail;": u"\u291c",
"rBarr;": u"\u290f",
"rHar;": u"\u2964",
"race;": u"\u223d\u0331",
"racute;": u"\u0155",
"radic;": u"\u221a",
"raemptyv;": u"\u29b3",
"rang;": u"\u27e9",
"rangd;": u"\u2992",
"range;": u"\u29a5",
"rangle;": u"\u27e9",
"raquo": u"\xbb",
"raquo;": u"\xbb",
"rarr;": u"\u2192",
"rarrap;": u"\u2975",
"rarrb;": u"\u21e5",
"rarrbfs;": u"\u2920",
"rarrc;": u"\u2933",
"rarrfs;": u"\u291e",
"rarrhk;": u"\u21aa",
"rarrlp;": u"\u21ac",
"rarrpl;": u"\u2945",
"rarrsim;": u"\u2974",
"rarrtl;": u"\u21a3",
"rarrw;": u"\u219d",
"ratail;": u"\u291a",
"ratio;": u"\u2236",
"rationals;": u"\u211a",
"rbarr;": u"\u290d",
"rbbrk;": u"\u2773",
"rbrace;": u"}",
"rbrack;": u"]",
"rbrke;": u"\u298c",
"rbrksld;": u"\u298e",
"rbrkslu;": u"\u2990",
"rcaron;": u"\u0159",
"rcedil;": u"\u0157",
"rceil;": u"\u2309",
"rcub;": u"}",
"rcy;": u"\u0440",
"rdca;": u"\u2937",
"rdldhar;": u"\u2969",
"rdquo;": u"\u201d",
"rdquor;": u"\u201d",
"rdsh;": u"\u21b3",
"real;": u"\u211c",
"realine;": u"\u211b",
"realpart;": u"\u211c",
"reals;": u"\u211d",
"rect;": u"\u25ad",
"reg": u"\xae",
"reg;": u"\xae",
"rfisht;": u"\u297d",
"rfloor;": u"\u230b",
"rfr;": u"\U0001d52f",
"rhard;": u"\u21c1",
"rharu;": u"\u21c0",
"rharul;": u"\u296c",
"rho;": u"\u03c1",
"rhov;": u"\u03f1",
"rightarrow;": u"\u2192",
"rightarrowtail;": u"\u21a3",
"rightharpoondown;": u"\u21c1",
"rightharpoonup;": u"\u21c0",
"rightleftarrows;": u"\u21c4",
"rightleftharpoons;": u"\u21cc",
"rightrightarrows;": u"\u21c9",
"rightsquigarrow;": u"\u219d",
"rightthreetimes;": u"\u22cc",
"ring;": u"\u02da",
"risingdotseq;": u"\u2253",
"rlarr;": u"\u21c4",
"rlhar;": u"\u21cc",
"rlm;": u"\u200f",
"rmoust;": u"\u23b1",
"rmoustache;": u"\u23b1",
"rnmid;": u"\u2aee",
"roang;": u"\u27ed",
"roarr;": u"\u21fe",
"robrk;": u"\u27e7",
"ropar;": u"\u2986",
"ropf;": u"\U0001d563",
"roplus;": u"\u2a2e",
"rotimes;": u"\u2a35",
"rpar;": u")",
"rpargt;": u"\u2994",
"rppolint;": u"\u2a12",
"rrarr;": u"\u21c9",
"rsaquo;": u"\u203a",
"rscr;": u"\U0001d4c7",
"rsh;": u"\u21b1",
"rsqb;": u"]",
"rsquo;": u"\u2019",
"rsquor;": u"\u2019",
"rthree;": u"\u22cc",
"rtimes;": u"\u22ca",
"rtri;": u"\u25b9",
"rtrie;": u"\u22b5",
"rtrif;": u"\u25b8",
"rtriltri;": u"\u29ce",
"ruluhar;": u"\u2968",
"rx;": u"\u211e",
"sacute;": u"\u015b",
"sbquo;": u"\u201a",
"sc;": u"\u227b",
"scE;": u"\u2ab4",
"scap;": u"\u2ab8",
"scaron;": u"\u0161",
"sccue;": u"\u227d",
"sce;": u"\u2ab0",
"scedil;": u"\u015f",
"scirc;": u"\u015d",
"scnE;": u"\u2ab6",
"scnap;": u"\u2aba",
"scnsim;": u"\u22e9",
"scpolint;": u"\u2a13",
"scsim;": u"\u227f",
"scy;": u"\u0441",
"sdot;": u"\u22c5",
"sdotb;": u"\u22a1",
"sdote;": u"\u2a66",
"seArr;": u"\u21d8",
"searhk;": u"\u2925",
"searr;": u"\u2198",
"searrow;": u"\u2198",
"sect": u"\xa7",
"sect;": u"\xa7",
"semi;": u";",
"seswar;": u"\u2929",
"setminus;": u"\u2216",
"setmn;": u"\u2216",
"sext;": u"\u2736",
"sfr;": u"\U0001d530",
"sfrown;": u"\u2322",
"sharp;": u"\u266f",
"shchcy;": u"\u0449",
"shcy;": u"\u0448",
"shortmid;": u"\u2223",
"shortparallel;": u"\u2225",
"shy": u"\xad",
"shy;": u"\xad",
"sigma;": u"\u03c3",
"sigmaf;": u"\u03c2",
"sigmav;": u"\u03c2",
"sim;": u"\u223c",
"simdot;": u"\u2a6a",
"sime;": u"\u2243",
"simeq;": u"\u2243",
"simg;": u"\u2a9e",
"simgE;": u"\u2aa0",
"siml;": u"\u2a9d",
"simlE;": u"\u2a9f",
"simne;": u"\u2246",
"simplus;": u"\u2a24",
"simrarr;": u"\u2972",
"slarr;": u"\u2190",
"smallsetminus;": u"\u2216",
"smashp;": u"\u2a33",
"smeparsl;": u"\u29e4",
"smid;": u"\u2223",
"smile;": u"\u2323",
"smt;": u"\u2aaa",
"smte;": u"\u2aac",
"smtes;": u"\u2aac\ufe00",
"softcy;": u"\u044c",
"sol;": u"/",
"solb;": u"\u29c4",
"solbar;": u"\u233f",
"sopf;": u"\U0001d564",
"spades;": u"\u2660",
"spadesuit;": u"\u2660",
"spar;": u"\u2225",
"sqcap;": u"\u2293",
"sqcaps;": u"\u2293\ufe00",
"sqcup;": u"\u2294",
"sqcups;": u"\u2294\ufe00",
"sqsub;": u"\u228f",
"sqsube;": u"\u2291",
"sqsubset;": u"\u228f",
"sqsubseteq;": u"\u2291",
"sqsup;": u"\u2290",
"sqsupe;": u"\u2292",
"sqsupset;": u"\u2290",
"sqsupseteq;": u"\u2292",
"squ;": u"\u25a1",
"square;": u"\u25a1",
"squarf;": u"\u25aa",
"squf;": u"\u25aa",
"srarr;": u"\u2192",
"sscr;": u"\U0001d4c8",
"ssetmn;": u"\u2216",
"ssmile;": u"\u2323",
"sstarf;": u"\u22c6",
"star;": u"\u2606",
"starf;": u"\u2605",
"straightepsilon;": u"\u03f5",
"straightphi;": u"\u03d5",
"strns;": u"\xaf",
"sub;": u"\u2282",
"subE;": u"\u2ac5",
"subdot;": u"\u2abd",
"sube;": u"\u2286",
"subedot;": u"\u2ac3",
"submult;": u"\u2ac1",
"subnE;": u"\u2acb",
"subne;": u"\u228a",
"subplus;": u"\u2abf",
"subrarr;": u"\u2979",
"subset;": u"\u2282",
"subseteq;": u"\u2286",
"subseteqq;": u"\u2ac5",
"subsetneq;": u"\u228a",
"subsetneqq;": u"\u2acb",
"subsim;": u"\u2ac7",
"subsub;": u"\u2ad5",
"subsup;": u"\u2ad3",
"succ;": u"\u227b",
"succapprox;": u"\u2ab8",
"succcurlyeq;": u"\u227d",
"succeq;": u"\u2ab0",
"succnapprox;": u"\u2aba",
"succneqq;": u"\u2ab6",
"succnsim;": u"\u22e9",
"succsim;": u"\u227f",
"sum;": u"\u2211",
"sung;": u"\u266a",
"sup1": u"\xb9",
"sup1;": u"\xb9",
"sup2": u"\xb2",
"sup2;": u"\xb2",
"sup3": u"\xb3",
"sup3;": u"\xb3",
"sup;": u"\u2283",
"supE;": u"\u2ac6",
"supdot;": u"\u2abe",
"supdsub;": u"\u2ad8",
"supe;": u"\u2287",
"supedot;": u"\u2ac4",
"suphsol;": u"\u27c9",
"suphsub;": u"\u2ad7",
"suplarr;": u"\u297b",
"supmult;": u"\u2ac2",
"supnE;": u"\u2acc",
"supne;": u"\u228b",
"supplus;": u"\u2ac0",
"supset;": u"\u2283",
"supseteq;": u"\u2287",
"supseteqq;": u"\u2ac6",
"supsetneq;": u"\u228b",
"supsetneqq;": u"\u2acc",
"supsim;": u"\u2ac8",
"supsub;": u"\u2ad4",
"supsup;": u"\u2ad6",
"swArr;": u"\u21d9",
"swarhk;": u"\u2926",
"swarr;": u"\u2199",
"swarrow;": u"\u2199",
"swnwar;": u"\u292a",
"szlig": u"\xdf",
"szlig;": u"\xdf",
"target;": u"\u2316",
"tau;": u"\u03c4",
"tbrk;": u"\u23b4",
"tcaron;": u"\u0165",
"tcedil;": u"\u0163",
"tcy;": u"\u0442",
"tdot;": u"\u20db",
"telrec;": u"\u2315",
"tfr;": u"\U0001d531",
"there4;": u"\u2234",
"therefore;": u"\u2234",
"theta;": u"\u03b8",
"thetasym;": u"\u03d1",
"thetav;": u"\u03d1",
"thickapprox;": u"\u2248",
"thicksim;": u"\u223c",
"thinsp;": u"\u2009",
"thkap;": u"\u2248",
"thksim;": u"\u223c",
"thorn": u"\xfe",
"thorn;": u"\xfe",
"tilde;": u"\u02dc",
"times": u"\xd7",
"times;": u"\xd7",
"timesb;": u"\u22a0",
"timesbar;": u"\u2a31",
"timesd;": u"\u2a30",
"tint;": u"\u222d",
"toea;": u"\u2928",
"top;": u"\u22a4",
"topbot;": u"\u2336",
"topcir;": u"\u2af1",
"topf;": u"\U0001d565",
"topfork;": u"\u2ada",
"tosa;": u"\u2929",
"tprime;": u"\u2034",
"trade;": u"\u2122",
"triangle;": u"\u25b5",
"triangledown;": u"\u25bf",
"triangleleft;": u"\u25c3",
"trianglelefteq;": u"\u22b4",
"triangleq;": u"\u225c",
"triangleright;": u"\u25b9",
"trianglerighteq;": u"\u22b5",
"tridot;": u"\u25ec",
"trie;": u"\u225c",
"triminus;": u"\u2a3a",
"triplus;": u"\u2a39",
"trisb;": u"\u29cd",
"tritime;": u"\u2a3b",
"trpezium;": u"\u23e2",
"tscr;": u"\U0001d4c9",
"tscy;": u"\u0446",
"tshcy;": u"\u045b",
"tstrok;": u"\u0167",
"twixt;": u"\u226c",
"twoheadleftarrow;": u"\u219e",
"twoheadrightarrow;": u"\u21a0",
"uArr;": u"\u21d1",
"uHar;": u"\u2963",
"uacute": u"\xfa",
"uacute;": u"\xfa",
"uarr;": u"\u2191",
"ubrcy;": u"\u045e",
"ubreve;": u"\u016d",
"ucirc": u"\xfb",
"ucirc;": u"\xfb",
"ucy;": u"\u0443",
"udarr;": u"\u21c5",
"udblac;": u"\u0171",
"udhar;": u"\u296e",
"ufisht;": u"\u297e",
"ufr;": u"\U0001d532",
"ugrave": u"\xf9",
"ugrave;": u"\xf9",
"uharl;": u"\u21bf",
"uharr;": u"\u21be",
"uhblk;": u"\u2580",
"ulcorn;": u"\u231c",
"ulcorner;": u"\u231c",
"ulcrop;": u"\u230f",
"ultri;": u"\u25f8",
"umacr;": u"\u016b",
"uml": u"\xa8",
"uml;": u"\xa8",
"uogon;": u"\u0173",
"uopf;": u"\U0001d566",
"uparrow;": u"\u2191",
"updownarrow;": u"\u2195",
"upharpoonleft;": u"\u21bf",
"upharpoonright;": u"\u21be",
"uplus;": u"\u228e",
"upsi;": u"\u03c5",
"upsih;": u"\u03d2",
"upsilon;": u"\u03c5",
"upuparrows;": u"\u21c8",
"urcorn;": u"\u231d",
"urcorner;": u"\u231d",
"urcrop;": u"\u230e",
"uring;": u"\u016f",
"urtri;": u"\u25f9",
"uscr;": u"\U0001d4ca",
"utdot;": u"\u22f0",
"utilde;": u"\u0169",
"utri;": u"\u25b5",
"utrif;": u"\u25b4",
"uuarr;": u"\u21c8",
"uuml": u"\xfc",
"uuml;": u"\xfc",
"uwangle;": u"\u29a7",
"vArr;": u"\u21d5",
"vBar;": u"\u2ae8",
"vBarv;": u"\u2ae9",
"vDash;": u"\u22a8",
"vangrt;": u"\u299c",
"varepsilon;": u"\u03f5",
"varkappa;": u"\u03f0",
"varnothing;": u"\u2205",
"varphi;": u"\u03d5",
"varpi;": u"\u03d6",
"varpropto;": u"\u221d",
"varr;": u"\u2195",
"varrho;": u"\u03f1",
"varsigma;": u"\u03c2",
"varsubsetneq;": u"\u228a\ufe00",
"varsubsetneqq;": u"\u2acb\ufe00",
"varsupsetneq;": u"\u228b\ufe00",
"varsupsetneqq;": u"\u2acc\ufe00",
"vartheta;": u"\u03d1",
"vartriangleleft;": u"\u22b2",
"vartriangleright;": u"\u22b3",
"vcy;": u"\u0432",
"vdash;": u"\u22a2",
"vee;": u"\u2228",
"veebar;": u"\u22bb",
"veeeq;": u"\u225a",
"vellip;": u"\u22ee",
"verbar;": u"|",
"vert;": u"|",
"vfr;": u"\U0001d533",
"vltri;": u"\u22b2",
"vnsub;": u"\u2282\u20d2",
"vnsup;": u"\u2283\u20d2",
"vopf;": u"\U0001d567",
"vprop;": u"\u221d",
"vrtri;": u"\u22b3",
"vscr;": u"\U0001d4cb",
"vsubnE;": u"\u2acb\ufe00",
"vsubne;": u"\u228a\ufe00",
"vsupnE;": u"\u2acc\ufe00",
"vsupne;": u"\u228b\ufe00",
"vzigzag;": u"\u299a",
"wcirc;": u"\u0175",
"wedbar;": u"\u2a5f",
"wedge;": u"\u2227",
"wedgeq;": u"\u2259",
"weierp;": u"\u2118",
"wfr;": u"\U0001d534",
"wopf;": u"\U0001d568",
"wp;": u"\u2118",
"wr;": u"\u2240",
"wreath;": u"\u2240",
"wscr;": u"\U0001d4cc",
"xcap;": u"\u22c2",
"xcirc;": u"\u25ef",
"xcup;": u"\u22c3",
"xdtri;": u"\u25bd",
"xfr;": u"\U0001d535",
"xhArr;": u"\u27fa",
"xharr;": u"\u27f7",
"xi;": u"\u03be",
"xlArr;": u"\u27f8",
"xlarr;": u"\u27f5",
"xmap;": u"\u27fc",
"xnis;": u"\u22fb",
"xodot;": u"\u2a00",
"xopf;": u"\U0001d569",
"xoplus;": u"\u2a01",
"xotime;": u"\u2a02",
"xrArr;": u"\u27f9",
"xrarr;": u"\u27f6",
"xscr;": u"\U0001d4cd",
"xsqcup;": u"\u2a06",
"xuplus;": u"\u2a04",
"xutri;": u"\u25b3",
"xvee;": u"\u22c1",
"xwedge;": u"\u22c0",
"yacute": u"\xfd",
"yacute;": u"\xfd",
"yacy;": u"\u044f",
"ycirc;": u"\u0177",
"ycy;": u"\u044b",
"yen": u"\xa5",
"yen;": u"\xa5",
"yfr;": u"\U0001d536",
"yicy;": u"\u0457",
"yopf;": u"\U0001d56a",
"yscr;": u"\U0001d4ce",
"yucy;": u"\u044e",
"yuml": u"\xff",
"yuml;": u"\xff",
"zacute;": u"\u017a",
"zcaron;": u"\u017e",
"zcy;": u"\u0437",
"zdot;": u"\u017c",
"zeetrf;": u"\u2128",
"zeta;": u"\u03b6",
"zfr;": u"\U0001d537",
"zhcy;": u"\u0436",
"zigrarr;": u"\u21dd",
"zopf;": u"\U0001d56b",
"zscr;": u"\U0001d4cf",
"zwj;": u"\u200d",
"zwnj;": u"\u200c",
}
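# Illustrative usage (added example, not part of upstream html5lib): the
# table carries both ";"-terminated names and the legacy semicolonless
# spellings the HTML spec grandfathers in, so the tokenizer can resolve
# "&amp" and "&amp;" alike:
#   >>> entities["amp;"] == entities["amp"] == u"&"
#   True
# Names with no semicolonless key (e.g. "Abreve") must be ";"-terminated.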
replacementCharacters = {
0x0:u"\uFFFD",
0x0d:u"\u000D",
0x80:u"\u20AC",
    0x81:u"\u0081",
0x82:u"\u201A",
0x83:u"\u0192",
0x84:u"\u201E",
0x85:u"\u2026",
0x86:u"\u2020",
0x87:u"\u2021",
0x88:u"\u02C6",
0x89:u"\u2030",
0x8A:u"\u0160",
0x8B:u"\u2039",
0x8C:u"\u0152",
0x8D:u"\u008D",
0x8E:u"\u017D",
0x8F:u"\u008F",
0x90:u"\u0090",
0x91:u"\u2018",
0x92:u"\u2019",
0x93:u"\u201C",
0x94:u"\u201D",
0x95:u"\u2022",
0x96:u"\u2013",
0x97:u"\u2014",
0x98:u"\u02DC",
0x99:u"\u2122",
0x9A:u"\u0161",
0x9B:u"\u203A",
0x9C:u"\u0153",
0x9D:u"\u009D",
0x9E:u"\u017E",
0x9F:u"\u0178",
}
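# Illustrative usage (added example, not part of upstream html5lib): numeric
# character references in the 0x80-0x9F range are reinterpreted as
# Windows-1252, so &#x80; yields the euro sign rather than a C1 control:
#   >>> replacementCharacters[0x80]
#   u'\u20ac'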
encodings = {
'437': 'cp437',
'850': 'cp850',
'852': 'cp852',
'855': 'cp855',
'857': 'cp857',
'860': 'cp860',
'861': 'cp861',
'862': 'cp862',
'863': 'cp863',
'865': 'cp865',
'866': 'cp866',
'869': 'cp869',
'ansix341968': 'ascii',
'ansix341986': 'ascii',
'arabic': 'iso8859-6',
'ascii': 'ascii',
'asmo708': 'iso8859-6',
'big5': 'big5',
'big5hkscs': 'big5hkscs',
'chinese': 'gbk',
'cp037': 'cp037',
'cp1026': 'cp1026',
'cp154': 'ptcp154',
'cp367': 'ascii',
'cp424': 'cp424',
'cp437': 'cp437',
'cp500': 'cp500',
'cp775': 'cp775',
'cp819': 'windows-1252',
'cp850': 'cp850',
'cp852': 'cp852',
'cp855': 'cp855',
'cp857': 'cp857',
'cp860': 'cp860',
'cp861': 'cp861',
'cp862': 'cp862',
'cp863': 'cp863',
'cp864': 'cp864',
'cp865': 'cp865',
'cp866': 'cp866',
'cp869': 'cp869',
'cp936': 'gbk',
'cpgr': 'cp869',
'cpis': 'cp861',
'csascii': 'ascii',
'csbig5': 'big5',
'cseuckr': 'cp949',
'cseucpkdfmtjapanese': 'euc_jp',
'csgb2312': 'gbk',
'cshproman8': 'hp-roman8',
'csibm037': 'cp037',
'csibm1026': 'cp1026',
'csibm424': 'cp424',
'csibm500': 'cp500',
'csibm855': 'cp855',
'csibm857': 'cp857',
'csibm860': 'cp860',
'csibm861': 'cp861',
'csibm863': 'cp863',
'csibm864': 'cp864',
'csibm865': 'cp865',
'csibm866': 'cp866',
'csibm869': 'cp869',
'csiso2022jp': 'iso2022_jp',
'csiso2022jp2': 'iso2022_jp_2',
'csiso2022kr': 'iso2022_kr',
'csiso58gb231280': 'gbk',
'csisolatin1': 'windows-1252',
'csisolatin2': 'iso8859-2',
'csisolatin3': 'iso8859-3',
'csisolatin4': 'iso8859-4',
'csisolatin5': 'windows-1254',
'csisolatin6': 'iso8859-10',
'csisolatinarabic': 'iso8859-6',
'csisolatincyrillic': 'iso8859-5',
'csisolatingreek': 'iso8859-7',
'csisolatinhebrew': 'iso8859-8',
'cskoi8r': 'koi8-r',
'csksc56011987': 'cp949',
'cspc775baltic': 'cp775',
'cspc850multilingual': 'cp850',
'cspc862latinhebrew': 'cp862',
'cspc8codepage437': 'cp437',
'cspcp852': 'cp852',
'csptcp154': 'ptcp154',
'csshiftjis': 'shift_jis',
'csunicode11utf7': 'utf-7',
'cyrillic': 'iso8859-5',
'cyrillicasian': 'ptcp154',
'ebcdiccpbe': 'cp500',
'ebcdiccpca': 'cp037',
'ebcdiccpch': 'cp500',
'ebcdiccphe': 'cp424',
'ebcdiccpnl': 'cp037',
'ebcdiccpus': 'cp037',
'ebcdiccpwt': 'cp037',
'ecma114': 'iso8859-6',
'ecma118': 'iso8859-7',
'elot928': 'iso8859-7',
'eucjp': 'euc_jp',
'euckr': 'cp949',
'extendedunixcodepackedformatforjapanese': 'euc_jp',
'gb18030': 'gb18030',
'gb2312': 'gbk',
'gb231280': 'gbk',
'gbk': 'gbk',
'greek': 'iso8859-7',
'greek8': 'iso8859-7',
'hebrew': 'iso8859-8',
'hproman8': 'hp-roman8',
'hzgb2312': 'hz',
'ibm037': 'cp037',
'ibm1026': 'cp1026',
'ibm367': 'ascii',
'ibm424': 'cp424',
'ibm437': 'cp437',
'ibm500': 'cp500',
'ibm775': 'cp775',
'ibm819': 'windows-1252',
'ibm850': 'cp850',
'ibm852': 'cp852',
'ibm855': 'cp855',
'ibm857': 'cp857',
'ibm860': 'cp860',
'ibm861': 'cp861',
'ibm862': 'cp862',
'ibm863': 'cp863',
'ibm864': 'cp864',
'ibm865': 'cp865',
'ibm866': 'cp866',
'ibm869': 'cp869',
'iso2022jp': 'iso2022_jp',
'iso2022jp2': 'iso2022_jp_2',
'iso2022kr': 'iso2022_kr',
'iso646irv1991': 'ascii',
'iso646us': 'ascii',
'iso88591': 'windows-1252',
'iso885910': 'iso8859-10',
'iso8859101992': 'iso8859-10',
'iso885911987': 'windows-1252',
'iso885913': 'iso8859-13',
'iso885914': 'iso8859-14',
'iso8859141998': 'iso8859-14',
'iso885915': 'iso8859-15',
'iso885916': 'iso8859-16',
'iso8859162001': 'iso8859-16',
'iso88592': 'iso8859-2',
'iso885921987': 'iso8859-2',
'iso88593': 'iso8859-3',
'iso885931988': 'iso8859-3',
'iso88594': 'iso8859-4',
'iso885941988': 'iso8859-4',
'iso88595': 'iso8859-5',
'iso885951988': 'iso8859-5',
'iso88596': 'iso8859-6',
'iso885961987': 'iso8859-6',
'iso88597': 'iso8859-7',
'iso885971987': 'iso8859-7',
'iso88598': 'iso8859-8',
'iso885981988': 'iso8859-8',
'iso88599': 'windows-1254',
'iso885991989': 'windows-1254',
'isoceltic': 'iso8859-14',
'isoir100': 'windows-1252',
'isoir101': 'iso8859-2',
'isoir109': 'iso8859-3',
'isoir110': 'iso8859-4',
'isoir126': 'iso8859-7',
'isoir127': 'iso8859-6',
'isoir138': 'iso8859-8',
'isoir144': 'iso8859-5',
'isoir148': 'windows-1254',
'isoir149': 'cp949',
'isoir157': 'iso8859-10',
'isoir199': 'iso8859-14',
'isoir226': 'iso8859-16',
'isoir58': 'gbk',
'isoir6': 'ascii',
'koi8r': 'koi8-r',
'koi8u': 'koi8-u',
'korean': 'cp949',
'ksc5601': 'cp949',
'ksc56011987': 'cp949',
'ksc56011989': 'cp949',
'l1': 'windows-1252',
'l10': 'iso8859-16',
'l2': 'iso8859-2',
'l3': 'iso8859-3',
'l4': 'iso8859-4',
'l5': 'windows-1254',
'l6': 'iso8859-10',
'l8': 'iso8859-14',
'latin1': 'windows-1252',
'latin10': 'iso8859-16',
'latin2': 'iso8859-2',
'latin3': 'iso8859-3',
'latin4': 'iso8859-4',
'latin5': 'windows-1254',
'latin6': 'iso8859-10',
'latin8': 'iso8859-14',
'latin9': 'iso8859-15',
'ms936': 'gbk',
'mskanji': 'shift_jis',
'pt154': 'ptcp154',
'ptcp154': 'ptcp154',
'r8': 'hp-roman8',
'roman8': 'hp-roman8',
'shiftjis': 'shift_jis',
'tis620': 'cp874',
'unicode11utf7': 'utf-7',
'us': 'ascii',
'usascii': 'ascii',
'utf16': 'utf-16',
'utf16be': 'utf-16-be',
'utf16le': 'utf-16-le',
'utf8': 'utf-8',
'windows1250': 'cp1250',
'windows1251': 'cp1251',
'windows1252': 'cp1252',
'windows1253': 'cp1253',
'windows1254': 'cp1254',
'windows1255': 'cp1255',
'windows1256': 'cp1256',
'windows1257': 'cp1257',
'windows1258': 'cp1258',
'windows936': 'gbk',
'x-x-big5': 'big5'}
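# Illustrative usage (added example, not part of upstream html5lib): keys
# are charset labels with case and punctuation stripped, so a label sniffed
# from a meta tag can be normalised before lookup:
#   >>> encodings.get("iso88591")
#   'windows-1252'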
tokenTypes = {
    "Doctype": 0,
    "Characters": 1,
    "SpaceCharacters": 2,
    "StartTag": 3,
    "EndTag": 4,
    "EmptyTag": 5,
    "Comment": 6,
    "ParseError": 7
}
tagTokenTypes = frozenset((tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]))
prefixes = dict([(v, k) for k, v in namespaces.iteritems()])
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
class DataLossWarning(UserWarning):
pass
class ReparseException(Exception):
pass
| WebGL-master | resources/html5lib/src/html5lib/constants.py |
try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import Set as set
from sets import ImmutableSet as frozenset
try:
any
except NameError:
# Implement 'any' for python 2.4 and previous
def any(iterable):
for element in iterable:
if element:
return True
return False
try:
"abc".startswith(("a", "b"))
def startswithany(str, prefixes):
return str.startswith(prefixes)
except TypeError:
# Python 2.4 doesn't accept a tuple as argument to string startswith
def startswithany(str, prefixes):
for prefix in prefixes:
if str.startswith(prefix):
return True
return False
import sys
import types
import inputstream
import tokenizer
import treebuilders
from treebuilders._base import Marker
from treebuilders import simpletree
import utils
import constants
from constants import spaceCharacters, asciiUpper2Lower
from constants import formattingElements, specialElements
from constants import headingElements, tableInsertModeElements
from constants import cdataElements, rcdataElements, voidElements
from constants import tokenTypes, ReparseException, namespaces
from constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
def parse(doc, treebuilder="simpletree", encoding=None,
namespaceHTMLElements=True):
"""Parse a string or file-like object into a tree"""
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="simpletree", encoding=None,
namespaceHTMLElements=True):
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parseFragment(doc, container=container, encoding=encoding)
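# Metaclass factory: the returned metaclass wraps every plain-function
# attribute of a class with `function`. It is used by getPhases() below to
# apply the `log` decorator to all Phase methods when debug mode is on.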
def method_decorator_metaclass(function):
class Decorated(type):
def __new__(meta, classname, bases, classDict):
for attributeName, attribute in classDict.iteritems():
if type(attribute) == types.FunctionType:
attribute = function(attribute)
classDict[attributeName] = attribute
return type.__new__(meta, classname, bases, classDict)
return Decorated
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
def __init__(self, tree = simpletree.TreeBuilder,
tokenizer = tokenizer.HTMLTokenizer, strict = False,
namespaceHTMLElements = True, debug=False):
"""
strict - raise an exception when a parse error is encountered
tree - a treebuilder class controlling the type of tree that will be
returned. Built in treebuilders can be accessed through
html5lib.treebuilders.getTreeBuilder(treeType)
tokenizer - a class that provides a stream of tokens to the treebuilder.
This may be replaced for e.g. a sanitizer which converts some tags to
text
"""
# Raise an exception on the first error encountered
self.strict = strict
self.tree = tree(namespaceHTMLElements)
self.tokenizer_class = tokenizer
self.errors = []
self.phases = dict([(name, cls(self, self.tree)) for name, cls in
getPhases(debug).iteritems()])
def _parse(self, stream, innerHTML=False, container="div",
encoding=None, parseMeta=True, useChardet=True, **kwargs):
self.innerHTMLMode = innerHTML
self.container = container
self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
parseMeta=parseMeta,
useChardet=useChardet,
parser=self, **kwargs)
self.reset()
while True:
try:
self.mainLoop()
break
            except ReparseException:
self.reset()
def reset(self):
self.tree.reset()
self.firstStartTag = False
self.errors = []
self.log = [] #only used with debug mode
# "quirks" / "limited quirks" / "no quirks"
self.compatMode = "no quirks"
if self.innerHTMLMode:
self.innerHTML = self.container.lower()
if self.innerHTML in cdataElements:
self.tokenizer.state = self.tokenizer.rcdataState
elif self.innerHTML in rcdataElements:
self.tokenizer.state = self.tokenizer.rawtextState
elif self.innerHTML == 'plaintext':
self.tokenizer.state = self.tokenizer.plaintextState
else:
# state already is data state
# self.tokenizer.state = self.tokenizer.dataState
pass
self.phase = self.phases["beforeHtml"]
self.phase.insertHtmlElement()
self.resetInsertionMode()
else:
self.innerHTML = False
self.phase = self.phases["initial"]
self.lastPhase = None
self.beforeRCDataPhase = None
self.framesetOK = True
def isHTMLIntegrationPoint(self, element):
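        # Per the HTML5 tree-construction rules, a MathML <annotation-xml>
        # element is an HTML integration point only when its encoding
        # attribute is "text/html" or "application/xhtml+xml" (compared
        # case-insensitively); other elements are looked up directly.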
if (element.name == "annotation-xml" and
element.namespace == namespaces["mathml"]):
return ("encoding" in element.attributes and
element.attributes["encoding"].translate(
asciiUpper2Lower) in
("text/html", "application/xhtml+xml"))
else:
return (element.namespace, element.name) in htmlIntegrationPointElements
def isMathMLTextIntegrationPoint(self, element):
return (element.namespace, element.name) in mathmlTextIntegrationPointElements
def mainLoop(self):
CharactersToken = tokenTypes["Characters"]
SpaceCharactersToken = tokenTypes["SpaceCharacters"]
StartTagToken = tokenTypes["StartTag"]
EndTagToken = tokenTypes["EndTag"]
CommentToken = tokenTypes["Comment"]
DoctypeToken = tokenTypes["Doctype"]
ParseErrorToken = tokenTypes["ParseError"]
for token in self.normalizedTokens():
new_token = token
while new_token is not None:
currentNode = self.tree.openElements[-1] if self.tree.openElements else None
currentNodeNamespace = currentNode.namespace if currentNode else None
currentNodeName = currentNode.name if currentNode else None
type = new_token["type"]
if type == ParseErrorToken:
self.parseError(new_token["data"], new_token.get("datavars", {}))
new_token = None
else:
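                    # Tree-construction dispatch: use the current insertion
                    # mode unless the current node is a foreign (SVG/MathML)
                    # element that is not an integration point, in which case
                    # the "in foreign content" rules take over.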
if (len(self.tree.openElements) == 0 or
currentNodeNamespace == self.tree.defaultNamespace or
(self.isMathMLTextIntegrationPoint(currentNode) and
((type == StartTagToken and
token["name"] not in frozenset(["mglyph", "malignmark"])) or
type in (CharactersToken, SpaceCharactersToken))) or
(currentNodeNamespace == namespaces["mathml"] and
currentNodeName == "annotation-xml" and
token["name"] == "svg") or
(self.isHTMLIntegrationPoint(currentNode) and
type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
phase = self.phase
else:
phase = self.phases["inForeignContent"]
if type == CharactersToken:
new_token = phase.processCharacters(new_token)
elif type == SpaceCharactersToken:
                        new_token = phase.processSpaceCharacters(new_token)
elif type == StartTagToken:
new_token = phase.processStartTag(new_token)
elif type == EndTagToken:
new_token = phase.processEndTag(new_token)
elif type == CommentToken:
new_token = phase.processComment(new_token)
elif type == DoctypeToken:
new_token = phase.processDoctype(new_token)
if (type == StartTagToken and token["selfClosing"]
and not token["selfClosingAcknowledged"]):
self.parseError("non-void-element-with-trailing-solidus",
{"name":token["name"]})
# When the loop finishes it's EOF
reprocess = True
phases = []
while reprocess:
phases.append(self.phase)
reprocess = self.phase.processEOF()
if reprocess:
assert self.phase not in phases
def normalizedTokens(self):
for token in self.tokenizer:
yield self.normalizeToken(token)
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
"""Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, innerHTML=False, encoding=encoding,
parseMeta=parseMeta, useChardet=useChardet)
return self.tree.getDocument()
def parseFragment(self, stream, container="div", encoding=None,
parseMeta=False, useChardet=True):
"""Parse a HTML fragment into a well-formed tree fragment
container - name of the element we're setting the innerHTML property
if set to None, default to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, True, container=container, encoding=encoding)
return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars={}):
# XXX The idea is to make errorcode mandatory.
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
if self.strict:
raise ParseError
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
token["data"] = dict(token["data"][::-1])
return token
def adjustMathMLAttributes(self, token):
replacements = {"definitionurl":u"definitionURL"}
for k,v in replacements.iteritems():
if k in token["data"]:
token["data"][v] = token["data"][k]
del token["data"][k]
def adjustSVGAttributes(self, token):
replacements = {
"attributename":u"attributeName",
"attributetype":u"attributeType",
"basefrequency":u"baseFrequency",
"baseprofile":u"baseProfile",
"calcmode":u"calcMode",
"clippathunits":u"clipPathUnits",
"contentscripttype":u"contentScriptType",
"contentstyletype":u"contentStyleType",
"diffuseconstant":u"diffuseConstant",
"edgemode":u"edgeMode",
"externalresourcesrequired":u"externalResourcesRequired",
"filterres":u"filterRes",
"filterunits":u"filterUnits",
"glyphref":u"glyphRef",
"gradienttransform":u"gradientTransform",
"gradientunits":u"gradientUnits",
"kernelmatrix":u"kernelMatrix",
"kernelunitlength":u"kernelUnitLength",
"keypoints":u"keyPoints",
"keysplines":u"keySplines",
"keytimes":u"keyTimes",
"lengthadjust":u"lengthAdjust",
"limitingconeangle":u"limitingConeAngle",
"markerheight":u"markerHeight",
"markerunits":u"markerUnits",
"markerwidth":u"markerWidth",
"maskcontentunits":u"maskContentUnits",
"maskunits":u"maskUnits",
"numoctaves":u"numOctaves",
"pathlength":u"pathLength",
"patterncontentunits":u"patternContentUnits",
"patterntransform":u"patternTransform",
"patternunits":u"patternUnits",
"pointsatx":u"pointsAtX",
"pointsaty":u"pointsAtY",
"pointsatz":u"pointsAtZ",
"preservealpha":u"preserveAlpha",
"preserveaspectratio":u"preserveAspectRatio",
"primitiveunits":u"primitiveUnits",
"refx":u"refX",
"refy":u"refY",
"repeatcount":u"repeatCount",
"repeatdur":u"repeatDur",
"requiredextensions":u"requiredExtensions",
"requiredfeatures":u"requiredFeatures",
"specularconstant":u"specularConstant",
"specularexponent":u"specularExponent",
"spreadmethod":u"spreadMethod",
"startoffset":u"startOffset",
"stddeviation":u"stdDeviation",
"stitchtiles":u"stitchTiles",
"surfacescale":u"surfaceScale",
"systemlanguage":u"systemLanguage",
"tablevalues":u"tableValues",
"targetx":u"targetX",
"targety":u"targetY",
"textlength":u"textLength",
"viewbox":u"viewBox",
"viewtarget":u"viewTarget",
"xchannelselector":u"xChannelSelector",
"ychannelselector":u"yChannelSelector",
"zoomandpan":u"zoomAndPan"
}
for originalName in token["data"].keys():
if originalName in replacements:
svgName = replacements[originalName]
token["data"][svgName] = token["data"][originalName]
del token["data"][originalName]
def adjustForeignAttributes(self, token):
replacements = {
"xlink:actuate":("xlink", "actuate", namespaces["xlink"]),
"xlink:arcrole":("xlink", "arcrole", namespaces["xlink"]),
"xlink:href":("xlink", "href", namespaces["xlink"]),
"xlink:role":("xlink", "role", namespaces["xlink"]),
"xlink:show":("xlink", "show", namespaces["xlink"]),
"xlink:title":("xlink", "title", namespaces["xlink"]),
"xlink:type":("xlink", "type", namespaces["xlink"]),
"xml:base":("xml", "base", namespaces["xml"]),
"xml:lang":("xml", "lang", namespaces["xml"]),
"xml:space":("xml", "space", namespaces["xml"]),
"xmlns":(None, "xmlns", namespaces["xmlns"]),
"xmlns:xlink":("xmlns", "xlink", namespaces["xmlns"])
}
for originalName in token["data"].iterkeys():
if originalName in replacements:
foreignName = replacements[originalName]
token["data"][foreignName] = token["data"][originalName]
del token["data"][originalName]
def reparseTokenNormal(self, token):
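        # NOTE: this method appears to be vestigial -- nothing in this module
        # calls it, and self.parser.phase is a Phase instance rather than a
        # callable reparse hook.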
self.parser.phase()
def resetInsertionMode(self):
# The name of this method is mostly historical. (It's also used in the
# specification.)
last = False
newModes = {
"select":"inSelect",
"td":"inCell",
"th":"inCell",
"tr":"inRow",
"tbody":"inTableBody",
"thead":"inTableBody",
"tfoot":"inTableBody",
"caption":"inCaption",
"colgroup":"inColumnGroup",
"table":"inTable",
"head":"inBody",
"body":"inBody",
"frameset":"inFrameset",
"html":"beforeHead"
}
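        # Walk the stack of open elements from the innermost node outwards
        # and pick the first insertion mode implied by a node's name; in the
        # fragment (innerHTML) case the outermost node stands in for the
        # context element.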
for node in self.tree.openElements[::-1]:
nodeName = node.name
new_phase = None
if node == self.tree.openElements[0]:
assert self.innerHTML
last = True
nodeName = self.innerHTML
# Check for conditions that should only happen in the innerHTML
# case
if nodeName in ("select", "colgroup", "head", "html"):
assert self.innerHTML
if not last and node.namespace != self.tree.defaultNamespace:
continue
if nodeName in newModes:
new_phase = self.phases[newModes[nodeName]]
break
elif last:
new_phase = self.phases["inBody"]
break
self.phase = new_phase
def parseRCDataRawtext(self, token, contentType):
"""Generic RCDATA/RAWTEXT Parsing algorithm
contentType - RCDATA or RAWTEXT
"""
assert contentType in ("RAWTEXT", "RCDATA")
element = self.tree.insertElement(token)
if contentType == "RAWTEXT":
self.tokenizer.state = self.tokenizer.rawtextState
else:
self.tokenizer.state = self.tokenizer.rcdataState
self.originalPhase = self.phase
self.phase = self.phases["text"]
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
constants.tokenTypes.iteritems())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
try:
info = {"type":type_names[token['type']]}
except:
raise
if token['type'] in constants.tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
class Phase(object):
"""Base class for helper object that implements each phase of processing
"""
# Order should be (they can be omitted):
# * EOF
# * Comment
# * Doctype
# * SpaceCharacters
# * Characters
# * StartTag
# - startTag* methods
# * EndTag
# - endTag* methods
__metaclass__ = getMetaclass(debug, log)
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
# For most phases the following is correct. Where it's not it will be
# overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
return self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
            if not self.parser.firstStartTag and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].iteritems():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
def processSpaceCharacters(self, token):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId != None or
systemId != None and systemId != "about:legacy-compat"):
self.parser.parseError("unknown-doctype")
if publicId is None:
publicId = ""
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
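                # Quirks-mode sniffing: the publicId prefixes and exact
                # values below are the legacy doctypes that trigger "quirks"
                # (and, in the elif branch, "limited quirks") mode.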
if (not correct or token["name"] != "html"
or startswithany(publicId,
("+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//"))
or publicId in
("-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html")
or startswithany(publicId,
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
                  systemId is None
or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
self.parser.compatMode = "quirks"
elif (startswithany(publicId,
("-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//"))
or startswithany(publicId,
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
                   systemId is not None):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def anythingElse(self):
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.anythingElse()
return token
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.anythingElse()
return True
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
return True
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
return token
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
return token
def processEndTag(self, token):
if token["name"] not in ("head", "body", "html", "br"):
self.parser.parseError("unexpected-end-tag-before-html",
{"name": token["name"]})
else:
self.insertHtmlElement()
return token
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("head", "body", "html", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
return True
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
("script", self.startTagScript),
(("base", "basefont", "bgsound", "command", "link"),
self.startTagBaseLinkCommand),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# the real thing
        def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
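                # A "tentative" encoding came from sniffing and may still be
                # overridden by an explicit charset attribute or a charset
                # declared inside an http-equiv content attribute.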
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif "content" in attributes:
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string, and just use the
# ContentAttrParser on that, but using UTF-8 allows all chars
# to be encoded and as a ASCII-superset works.
data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoScriptNoFramesStyle(self, token):
#Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s"%node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
# XXX If we implement a parser for which scripting is disabled we need to
# implement this phase.
#
# class InHeadNoScriptPhase(Phase):
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
"style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name":token["name"]})
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name":token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
#Keep a ref to this for special handling of whitespace in <pre>
self.processSpaceCharactersNonPre = self.processSpaceCharacters
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"noframes", "script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "details",
"details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext",self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"),self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
(("noembed", "noframes", "noscript"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("body",self.endTagBody),
("html",self.endTagHtml),
(("address", "article", "aside", "blockquote", "center",
"details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p",self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
if node1.name != node2.name or node1.namespace != node2.namespace:
return False
elif len(node1.attributes) != len(node2.attributes):
return False
else:
attributes1 = sorted(node1.attributes.items())
attributes2 = sorted(node2.attributes.items())
for attr1, attr2 in zip(attributes1, attributes2):
if attr1 != attr2:
return False
return True
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
element = self.tree.openElements[-1]
matchingElements = []
for node in self.tree.activeFormattingElements[::-1]:
if node is Marker:
break
elif self.isMatchingFormattingElement(node, element):
matchingElements.append(node)
assert len(matchingElements) <= 3
if len(matchingElements) == 3:
self.tree.activeFormattingElements.remove(matchingElements[-1])
self.tree.activeFormattingElements.append(element)
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
#Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea")
and not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == u"\u0000":
#The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
#This must be bad for performance
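            # Any non-whitespace character in the body makes a subsequent
            # <frameset> start tag invalid, so clear framesetOK.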
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
def processSpaceCharacters(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1
or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
self.parser.framesetOK = False
for attr, value in token["data"].iteritems():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
def startTagPreListing(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError(u"unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
self.parser.framesetOK = False
stopNamesMap = {"li":["li"],
"dt":["dt", "dd"],
"dd":["dt", "dd"]}
stopNames = stopNamesMap[token["name"]]
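            # Implicitly close the nearest open li (or dt/dd), but stop
            # searching once a special element other than address, div or p
            # intervenes on the stack.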
for node in reversed(self.tree.openElements):
if node.name in stopNames:
self.parser.phase.processEndTag(
impliedTagToken(node.name, "EndTag"))
break
if (node.nameTuple in specialElements and
node.name not in ("address", "div", "p")):
break
if self.tree.elementInScope("p", variant="button"):
self.parser.phase.processEndTag(
impliedTagToken("p", "EndTag"))
self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagA(self, token):
afeAElement = self.tree.elementInActiveFormattingElements("a")
if afeAElement:
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "a", "endName": "a"})
self.endTagFormatting(impliedTagToken("a"))
if afeAElement in self.tree.openElements:
self.tree.openElements.remove(afeAElement)
if afeAElement in self.tree.activeFormattingElements:
self.tree.activeFormattingElements.remove(afeAElement)
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagNobr(self, token):
self.tree.reconstructActiveFormattingElements()
if self.tree.elementInScope("nobr"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "nobr", "endName": "nobr"})
self.processEndTag(impliedTagToken("nobr"))
# XXX Need tests that trigger the following
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagButton(self, token):
if self.tree.elementInScope("button"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "button", "endName": "button"})
self.processEndTag(impliedTagToken("button"))
return token
else:
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p", variant="button"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagInput(self, token):
framesetOK = self.parser.framesetOK
self.startTagVoidFormatting(token)
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
#input type=hidden doesn't change framesetOK
self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
# No really...
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
self.parser.parseError("deprecated-tag", {"name": "isindex"})
if self.tree.formPointer:
return
form_attrs = {}
if "action" in token["data"]:
form_attrs["action"] = token["data"]["action"]
self.processStartTag(impliedTagToken("form", "StartTag",
attributes=form_attrs))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processStartTag(impliedTagToken("label", "StartTag"))
# XXX Localization ...
if "prompt" in token["data"]:
prompt = token["data"]["prompt"]
else:
prompt = u"This is a searchable index. Enter search keywords: "
self.processCharacters(
{"type":tokenTypes["Characters"], "data":prompt})
attributes = token["data"].copy()
if "action" in attributes:
del attributes["action"]
if "prompt" in attributes:
del attributes["prompt"]
attributes["name"] = "isindex"
self.processStartTag(impliedTagToken("input", "StartTag",
attributes = attributes,
selfClosing =
token["selfClosing"]))
self.processEndTag(impliedTagToken("label"))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
def startTagIFrame(self, token):
self.parser.framesetOK = False
self.startTagRawtext(token)
def startTagRawtext(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
def startTagSelect(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
if self.parser.phase in (self.parser.phases["inTable"],
self.parser.phases["inCaption"],
self.parser.phases["inColumnGroup"],
self.parser.phases["inTableBody"],
self.parser.phases["inRow"],
self.parser.phases["inCell"]):
self.parser.phase = self.parser.phases["inSelectInTable"]
else:
self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
self.tree.insertElement(token)
def startTagMath(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustMathMLAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["mathml"]
self.tree.insertElement(token)
#Need to get the parse error right for the case where the token
#has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagSvg(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["svg"]
self.tree.insertElement(token)
#Need to get the parse error right for the case where the token
#has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
def endTagP(self, token):
if not self.tree.elementInScope("p", variant="button"):
self.startTagCloseP(impliedTagToken("p", "StartTag"))
self.parser.parseError("unexpected-end-tag", {"name": "p"})
self.endTagP(impliedTagToken("p", "EndTag"))
else:
self.tree.generateImpliedEndTags("p")
if self.tree.openElements[-1].name != "p":
self.parser.parseError("unexpected-end-tag", {"name": "p"})
node = self.tree.openElements.pop()
while node.name != "p":
node = self.tree.openElements.pop()
def endTagBody(self, token):
if not self.tree.elementInScope("body"):
self.parser.parseError()
return
elif self.tree.openElements[-1].name != "body":
for node in self.tree.openElements[2:]:
if node.name not in frozenset(("dd", "dt", "li", "optgroup",
"option", "p", "rp", "rt",
"tbody", "td", "tfoot",
"th", "thead", "tr", "body",
"html")):
#Not sure this is the correct name for the parse error
self.parser.parseError(
"expected-one-end-tag-but-got-another",
{"expectedName": "body", "gotName": node.name})
break
self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
#We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
return token
def endTagBlock(self, token):
#Put us back in the right whitespace handling mode
if token["name"] == "pre":
self.processSpaceCharacters = self.processSpaceCharactersNonPre
inScope = self.tree.elementInScope(token["name"])
if inScope:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if inScope:
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagForm(self, token):
node = self.tree.formPointer
self.tree.formPointer = None
if node is None or not self.tree.elementInScope(node):
self.parser.parseError("unexpected-end-tag",
{"name":"form"})
else:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1] != node:
self.parser.parseError("end-tag-too-early-ignored",
{"name": "form"})
self.tree.openElements.remove(node)
def endTagListItem(self, token):
if token["name"] == "li":
variant = "list"
else:
variant = None
if not self.tree.elementInScope(token["name"], variant=variant):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
else:
            self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError(
"end-tag-too-early",
{"name": token["name"]})
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagHeading(self, token):
for item in headingElements:
if self.tree.elementInScope(item):
self.tree.generateImpliedEndTags()
break
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
for item in headingElements:
if self.tree.elementInScope(item):
item = self.tree.openElements.pop()
while item.name not in headingElements:
item = self.tree.openElements.pop()
break
def endTagFormatting(self, token):
"""The much-feared adoption agency algorithm"""
# http://www.whatwg.org/specs/web-apps/current-work/#adoptionAgency
# XXX Better parseError messages appreciated.
name = token["name"]
outerLoopCounter = 0
while outerLoopCounter < 8:
outerLoopCounter += 1
# Step 1 paragraph 1
formattingElement = self.tree.elementInActiveFormattingElements(
token["name"])
if (not formattingElement or
(formattingElement in self.tree.openElements and
not self.tree.elementInScope(formattingElement.name))):
self.parser.parseError("adoption-agency-1.1", {"name": token["name"]})
return
# Step 1 paragraph 2
elif formattingElement not in self.tree.openElements:
self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
self.tree.activeFormattingElements.remove(formattingElement)
return
# Step 1 paragraph 3
if formattingElement != self.tree.openElements[-1]:
self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
# Step 2
# Start of the adoption agency algorithm proper
afeIndex = self.tree.openElements.index(formattingElement)
furthestBlock = None
for element in self.tree.openElements[afeIndex:]:
if element.nameTuple in specialElements:
furthestBlock = element
break
# Step 3
if furthestBlock is None:
element = self.tree.openElements.pop()
while element != formattingElement:
element = self.tree.openElements.pop()
self.tree.activeFormattingElements.remove(element)
return
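                # Step 4: the common ancestor is the element immediately
                # above the formatting element on the stack of open elements.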
commonAncestor = self.tree.openElements[afeIndex-1]
# Step 5
#if furthestBlock.parent:
# furthestBlock.parent.removeChild(furthestBlock)
# Step 5
# The bookmark is supposed to help us identify where to reinsert
# nodes in step 12. We have to ensure that we reinsert nodes after
# the node before the active formatting element. Note the bookmark
# can move in step 7.4
bookmark = self.tree.activeFormattingElements.index(formattingElement)
# Step 6
lastNode = node = furthestBlock
innerLoopCounter = 0
index = self.tree.openElements.index(node)
while innerLoopCounter < 3:
innerLoopCounter += 1
# Node is element before node in open elements
index -= 1
node = self.tree.openElements[index]
if node not in self.tree.activeFormattingElements:
self.tree.openElements.remove(node)
continue
# Step 6.3
if node == formattingElement:
break
# Step 6.4
if lastNode == furthestBlock:
bookmark = (self.tree.activeFormattingElements.index(node)
+ 1)
# Step 6.5
#cite = node.parent
clone = node.cloneNode()
# Replace node with clone
self.tree.activeFormattingElements[
self.tree.activeFormattingElements.index(node)] = clone
self.tree.openElements[
self.tree.openElements.index(node)] = clone
node = clone
# Step 6.6
# Remove lastNode from its parents, if any
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
node.appendChild(lastNode)
# Step 7.7
lastNode = node
# End of inner loop
# Step 7
# Foster parent lastNode if commonAncestor is a
# table, tbody, tfoot, thead, or tr we need to foster parent the
# lastNode
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
parent, insertBefore = self.tree.getTableMisnestedNodePosition()
parent.insertBefore(lastNode, insertBefore)
else:
commonAncestor.appendChild(lastNode)
# Step 8
clone = formattingElement.cloneNode()
# Step 9
furthestBlock.reparentChildren(clone)
# Step 10
furthestBlock.appendChild(clone)
# Step 11
self.tree.activeFormattingElements.remove(formattingElement)
self.tree.activeFormattingElements.insert(bookmark, clone)
# Step 12
self.tree.openElements.remove(formattingElement)
self.tree.openElements.insert(
self.tree.openElements.index(furthestBlock) + 1, clone)
def endTagAppletMarqueeObject(self, token):
if self.tree.elementInScope(token["name"]):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if self.tree.elementInScope(token["name"]):
element = self.tree.openElements.pop()
while element.name != token["name"]:
element = self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
def endTagOther(self, token):
for node in self.tree.openElements[::-1]:
if node.name == token["name"]:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while self.tree.openElements.pop() != node:
pass
break
else:
if node.nameTuple in specialElements:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
break
class TextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("script", self.endTagScript)])
self.endTagHandler.default = self.endTagOther
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processEOF(self):
self.parser.parseError("expected-named-closing-tag-but-got-eof",
self.tree.openElements[-1].name)
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
return True
def startTagOther(self, token):
assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode"%token['name']
def endTagScript(self, token):
node = self.tree.openElements.pop()
assert node.name == "script"
self.parser.phase = self.parser.originalPhase
#The rest of this method is all stuff that only happens if
#document.write works
def endTagOther(self, token):
node = self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("caption", self.startTagCaption),
("colgroup", self.startTagColgroup),
("col", self.startTagCol),
(("tbody", "tfoot", "thead"), self.startTagRowGroup),
(("td", "th", "tr"), self.startTagImplyTbody),
("table", self.startTagTable),
(("style", "script"), self.startTagStyleScript),
("input", self.startTagInput),
("form", self.startTagForm)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "tbody", "td",
"tfoot", "th", "thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableContext(self):
# "clear the stack back to a table context"
while self.tree.openElements[-1].name not in ("table", "html"):
#self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
# When the current node is <html> it's an innerHTML case
# processing methods
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-table")
else:
assert self.parser.innerHTML
#Stop parsing
def processSpaceCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processSpaceCharacters(token)
def processCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processCharacters(token)
def insertText(self, token):
#If we get here there must be at least one non-whitespace character
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processCharacters(token)
self.tree.insertFromTable = False
def startTagCaption(self, token):
self.clearStackToTableContext()
self.tree.activeFormattingElements.append(Marker)
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCaption"]
def startTagColgroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inColumnGroup"]
def startTagCol(self, token):
self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
return token
def startTagRowGroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inTableBody"]
def startTagImplyTbody(self, token):
self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
return token
def startTagTable(self, token):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "table", "endName": "table"})
self.parser.phase.processEndTag(impliedTagToken("table"))
if not self.parser.innerHTML:
return token
def startTagStyleScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagInput(self, token):
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
self.parser.parseError("unexpected-hidden-input-in-table")
self.tree.insertElement(token)
# XXX associate with form
self.tree.openElements.pop()
else:
self.startTagOther(token)
def startTagForm(self, token):
self.parser.parseError("unexpected-form-in-table")
if self.tree.formPointer is None:
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
self.tree.openElements.pop()
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processStartTag(token)
self.tree.insertFromTable = False
def endTagTable(self, token):
if self.tree.elementInScope("table", variant="table"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "table":
self.parser.parseError("end-tag-too-early-named",
{"gotName": "table",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "table":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processEndTag(token)
self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
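        # Character tokens are buffered while "in table text"; if the run
        # contains any non-whitespace it is foster-parented via the inTable
        # insertText path, otherwise it is inserted where it stands.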
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type":tokenTypes["Characters"], "data":data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == u"\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
#pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
class InCaptionPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableElement)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("caption", self.endTagCaption),
("table", self.endTagTable),
(("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableElement(self, token):
self.parser.parseError()
#XXX Have to duplicate logic here to find out if the tag is ignored
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagCaption(self, token):
if not self.ignoreEndTagCaption():
# AT this code is quite similar to endTagTable in "InTable"
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "caption":
self.parser.parseError("expected-one-end-tag-but-got-another",
{"gotName": "caption",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "caption":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inTable"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
self.parser.parseError()
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-column
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("col", self.startTagCol)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("colgroup", self.endTagColgroup),
("col", self.endTagCol)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagColgroup(self):
return self.tree.openElements[-1].name == "html"
def processEOF(self):
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
return
else:
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return True
def processCharacters(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def startTagCol(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def endTagColgroup(self, token):
if self.ignoreEndTagColgroup():
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
else:
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
def endTagCol(self, token):
self.parser.parseError("no-end-tag", {"name": "col"})
def endTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("tr", self.startTagTr),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
#self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
return token
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead",
"tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("tr", self.endTagTr),
("table", self.endTagTable),
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
(("body", "caption", "col", "colgroup", "html", "td", "th"),
self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
while self.tree.openElements[-1].name not in ("tr", "html"):
self.parser.parseError("unexpected-implied-end-tag-in-table-row",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
def ignoreEndTagTr(self):
return not self.tree.elementInScope("tr", variant="table")
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTableCell(self, token):
self.clearStackToTableRowContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCell"]
self.tree.activeFormattingElements.append(Marker)
def startTagTableOther(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTr(self, token):
if not self.ignoreEndTagTr():
self.clearStackToTableRowContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTableBody"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# Reprocess the current tag if the tr end tag was not ignored
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagTr(impliedTagToken("tr"))
return token
else:
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-row",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-cell
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("td", "th"), self.endTagTableCell),
(("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
(("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
])
self.endTagHandler.default = self.endTagOther
# helper
def closeCell(self):
if self.tree.elementInScope("td", variant="table"):
self.endTagTableCell(impliedTagToken("td"))
elif self.tree.elementInScope("th", variant="table"):
self.endTagTableCell(impliedTagToken("th"))
# the rest
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableOther(self, token):
if (self.tree.elementInScope("td", variant="table") or
self.tree.elementInScope("th", variant="table")):
self.closeCell()
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagTableCell(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.tree.generateImpliedEndTags(token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-cell-end-tag",
{"name": token["name"]})
while True:
node = self.tree.openElements.pop()
if node.name == token["name"]:
break
else:
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inRow"]
else:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagImply(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.closeCell()
return token
else:
# sometimes innerHTML case
self.parser.parseError()
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("option", self.startTagOption),
("optgroup", self.startTagOptgroup),
("select", self.startTagSelect),
(("input", "keygen", "textarea"), self.startTagInput),
("script", self.startTagScript)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("option", self.endTagOption),
("optgroup", self.endTagOptgroup),
("select", self.endTagSelect)
])
self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-select")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
if token["data"] == u"\u0000":
return
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect(impliedTagToken("select"))
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="select"):
self.endTagSelect(impliedTagToken("select"))
return token
else:
assert self.parser.innerHTML
def startTagScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
# </optgroup> implicitly closes <option>
if (self.tree.openElements[-1].name == "option" and
self.tree.openElements[-2].name == "optgroup"):
self.tree.openElements.pop()
            # It also closes the <optgroup> element itself
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
# But nothing else
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "optgroup"})
def endTagSelect(self, token):
if self.tree.elementInScope("select", variant="select"):
node = self.tree.openElements.pop()
while node.name != "select":
node = self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
class InSelectInTablePhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.startTagTable)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.endTagTable)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inSelect"].processCharacters(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
self.endTagOther(impliedTagToken("select"))
return token
def startTagOther(self, token):
return self.parser.phases["inSelect"].processStartTag(token)
def endTagTable(self, token):
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagOther(impliedTagToken("select"))
return token
def endTagOther(self, token):
return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
"center", "code", "dd", "div", "dl", "dt",
"em", "embed", "h1", "h2", "h3",
"h4", "h5", "h6", "head", "hr", "i", "img",
"li", "listing", "menu", "meta", "nobr",
"ol", "p", "pre", "ruby", "s", "small",
"span", "strong", "strike", "sub", "sup",
"table", "tt", "u", "ul", "var"])
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
def adjustSVGTagNames(self, token):
replacements = {u"altglyph":u"altGlyph",
u"altglyphdef":u"altGlyphDef",
u"altglyphitem":u"altGlyphItem",
u"animatecolor":u"animateColor",
u"animatemotion":u"animateMotion",
u"animatetransform":u"animateTransform",
u"clippath":u"clipPath",
u"feblend":u"feBlend",
u"fecolormatrix":u"feColorMatrix",
u"fecomponenttransfer":u"feComponentTransfer",
u"fecomposite":u"feComposite",
u"feconvolvematrix":u"feConvolveMatrix",
u"fediffuselighting":u"feDiffuseLighting",
u"fedisplacementmap":u"feDisplacementMap",
u"fedistantlight":u"feDistantLight",
u"feflood":u"feFlood",
u"fefunca":u"feFuncA",
u"fefuncb":u"feFuncB",
u"fefuncg":u"feFuncG",
u"fefuncr":u"feFuncR",
u"fegaussianblur":u"feGaussianBlur",
u"feimage":u"feImage",
u"femerge":u"feMerge",
u"femergenode":u"feMergeNode",
u"femorphology":u"feMorphology",
u"feoffset":u"feOffset",
u"fepointlight":u"fePointLight",
u"fespecularlighting":u"feSpecularLighting",
u"fespotlight":u"feSpotLight",
u"fetile":u"feTile",
u"feturbulence":u"feTurbulence",
u"foreignobject":u"foreignObject",
u"glyphref":u"glyphRef",
u"lineargradient":u"linearGradient",
u"radialgradient":u"radialGradient",
u"textpath":u"textPath"}
if token["name"] in replacements:
token["name"] = replacements[token["name"]]
def processCharacters(self, token):
if token["data"] == u"\u0000":
token["data"] = u"\uFFFD"
elif (self.parser.framesetOK and
any(char not in spaceCharacters for char in token["data"])):
self.parser.framesetOK = False
Phase.processCharacters(self, token)
def processStartTag(self, token):
currentNode = self.tree.openElements[-1]
if (token["name"] in self.breakoutElements or
(token["name"] == "font" and
set(token["data"].keys()) & set(["color", "face", "size"]))):
self.parser.parseError("unexpected-html-element-in-foreign-content",
token["name"])
while (self.tree.openElements[-1].namespace !=
self.tree.defaultNamespace and
not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
self.tree.openElements.pop()
return token
else:
if currentNode.namespace == namespaces["mathml"]:
self.parser.adjustMathMLAttributes(token)
elif currentNode.namespace == namespaces["svg"]:
self.adjustSVGTagNames(token)
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = currentNode.namespace
self.tree.insertElement(token)
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def processEndTag(self, token):
nodeIndex = len(self.tree.openElements) - 1
node = self.tree.openElements[-1]
if node.name != token["name"]:
self.parser.parseError("unexpected-end-tag", token["name"])
while True:
if node.name.translate(asciiUpper2Lower) == token["name"]:
#XXX this isn't in the spec but it seems necessary
if self.parser.phase == self.parser.phases["inTableText"]:
self.parser.phase.flushCharacters()
self.parser.phase = self.parser.phase.originalPhase
while self.tree.openElements.pop() != node:
assert self.tree.openElements
new_token = None
break
nodeIndex -= 1
node = self.tree.openElements[nodeIndex]
if node.namespace != self.tree.defaultNamespace:
continue
else:
new_token = self.parser.phase.processEndTag(token)
break
return new_token
class AfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
#Stop parsing
pass
def processComment(self, token):
# This is needed because data is to be appended to the <html> element
# here and not to whatever is currently open.
self.tree.insertComment(token, self.tree.openElements[0])
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-body")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
        def endTagHtml(self, token):
if self.parser.innerHTML:
self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
else:
self.parser.phase = self.parser.phases["afterAfterBody"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class InFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("frameset", self.startTagFrameset),
("frame", self.startTagFrame),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("frameset", self.endTagFrameset)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-frameset")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.parser.parseError("unexpected-char-in-frameset")
def startTagFrameset(self, token):
self.tree.insertElement(token)
def startTagFrame(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagNoframes(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-frameset",
{"name": token["name"]})
def endTagFrameset(self, token):
if self.tree.openElements[-1].name == "html":
# innerHTML case
self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
else:
self.tree.openElements.pop()
if (not self.parser.innerHTML and
self.tree.openElements[-1].name != "frameset"):
                # If we're not in innerHTML mode and the current node is not a
# "frameset" element (anymore) then switch.
self.parser.phase = self.parser.phases["afterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
class AfterFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#after3
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("html", self.endTagHtml)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
#Stop parsing
pass
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-frameset")
def startTagNoframes(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-frameset",
{"name": token["name"]})
def endTagHtml(self, token):
self.parser.phase = self.parser.phases["afterAfterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-frameset",
{"name": token["name"]})
class AfterAfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class AfterAfterFramesetPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoFrames)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagNoFrames(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
# XXX "inHeadNoscript": InHeadNoScriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
}
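# --- illustrative note (not part of the upstream source) ---
# A sketch of how this mapping is consumed: the parser builds one
# instance per phase and routes every token through whichever instance
# self.parser.phase currently names, e.g.
#
#   phases = dict((name, cls(parser, tree))
#                 for name, cls in mapping.items())
#   parser.phase = phases["initial"]
#   new_token = parser.phase.processStartTag(token)
#
# Handlers that return the token (as many above do) are asking the main
# loop to reprocess it in the newly selected phase. The name "mapping"
# here is ours, not upstream's.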
def impliedTagToken(name, type="EndTag", attributes=None,
                    selfClosing=False):
if attributes is None:
attributes = {}
return {"type":tokenTypes[type], "name":unicode(name), "data":attributes,
"selfClosing":selfClosing}
class ParseError(Exception):
"""Error in parsed document"""
pass
| WebGL-master | resources/html5lib/src/html5lib/html5parser.py |
"""
HTML parsing library based on the WHATWG "HTML5"
specification. The parser is designed to be compatible with existing
HTML found in the wild and implements well-defined error recovery that
is largely compatible with modern desktop web browsers.
Example usage:
import html5lib
f = open("my_document.html")
tree = html5lib.parse(f)
"""
__version__ = "0.95-dev"
from html5parser import HTMLParser, parse, parseFragment
from treebuilders import getTreeBuilder
from treewalkers import getTreeWalker
from serializer import serialize
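# --- illustrative usage sketch (not part of the upstream source) ---
# The names imported above compose into a parse/serialize round trip;
# "example.html" is a placeholder and the keyword values are assumptions
# that may differ between versions:
#
#   import html5lib
#   f = open("example.html", "rb")
#   tree = html5lib.parse(f, treebuilder="dom")
#   html = html5lib.serialize(tree, tree="dom")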
| WebGL-master | resources/html5lib/src/html5lib/__init__.py |
import re
baseChar = """[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | [#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] | [#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] | [#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 | [#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] | [#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] | [#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] | [#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] | [#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 | [#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] | [#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] | [#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D | [#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] | [#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] | [#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] | [#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] | [#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] | [#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] | [#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 | [#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] | [#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] | [#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] | [#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] | [#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] | [#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] | [#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] | [#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] | [#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] | [#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] | [#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A | #x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 | #x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] | #x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] | [#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] | [#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C | #x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 | [#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] | [#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] | [#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 | [#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] | [#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B | #x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE | [#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] | [#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 | [#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] | [#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""
combiningCharacter = """[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] | [#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 | [#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] | [#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] | #x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] | [#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] | [#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 | #x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] | [#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC | [#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] | #x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] | [#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] | [#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] | [#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] | [#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] | [#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] | #x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 | [#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] | #x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] | [#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] | [#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] | #x3099 | #x309A"""
digit = """[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] | [#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] | [#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] | [#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
extender = """#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 | [#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
letter = " | ".join([baseChar, ideographic])
#Without the ":" that the XML NameChar production would also allow
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
extender])
nameFirst = " | ".join([letter, "_"])
reChar = re.compile(r"#x([\dA-F]{4})")
reCharRange = re.compile(r"\[#x([\dA-F]{4})-#x([\dA-F]{4})\]")
def charStringToList(chars):
charRanges = [item.strip() for item in chars.split(" | ")]
rv = []
for item in charRanges:
foundMatch = False
for regexp in (reChar, reCharRange):
match = regexp.match(item)
if match is not None:
rv.append([hexToInt(item) for item in match.groups()])
if len(rv[-1]) == 1:
rv[-1] = rv[-1]*2
foundMatch = True
break
if not foundMatch:
assert len(item) == 1
rv.append([ord(item)] * 2)
rv = normaliseCharList(rv)
return rv
def normaliseCharList(charList):
charList = sorted(charList)
for item in charList:
assert item[1] >= item[0]
rv = []
i = 0
while i < len(charList):
j = 1
rv.append(charList[i])
while i + j < len(charList) and charList[i+j][0] <= rv[-1][1] + 1:
rv[-1][1] = charList[i+j][1]
j += 1
i += j
return rv
#We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)
def missingRanges(charList):
rv = []
    if charList[0][0] != 0:
rv.append([0, charList[0][0] - 1])
for i, item in enumerate(charList[:-1]):
rv.append([item[1]+1, charList[i+1][0] - 1])
if charList[-1][1] != max_unicode:
rv.append([charList[-1][1] + 1, max_unicode])
return rv
def listToRegexpStr(charList):
rv = []
for item in charList:
if item[0] == item[1]:
rv.append(escapeRegexp(unichr(item[0])))
else:
rv.append(escapeRegexp(unichr(item[0])) + "-" +
escapeRegexp(unichr(item[1])))
return "[%s]"%"".join(rv)
def hexToInt(hex_str):
return int(hex_str, 16)
def escapeRegexp(string):
specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
"[", "]", "|", "(", ")", "-")
    for char in specialCharacters:
        string = string.replace(char, "\\" + char)
    return string
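# --- illustrative note (not part of the upstream source) ---
# A sketch of how the precompiled literals below were presumably
# generated from the helpers above: expand a character-class production
# into sorted [lo, hi] ranges, complement it within the BMP, and render
# the complement as a regexp character class.
#
#   ranges = charStringToList(name)       # merged [lo, hi] pairs
#   gaps = missingRanges(ranges)          # everything NOT in the class
#   nonXmlNameBMPRegexp = re.compile(listToRegexpStr(gaps))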
#output from the above
nonXmlNameBMPRegexp = re.compile(u'[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
nonXmlNameFirstBMPRegexp = re.compile(u'[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
class InfosetFilter(object):
    replacementRegexp = re.compile(r"U[\dA-F]{5}")
def __init__(self, replaceChars = None,
dropXmlnsLocalName = False,
dropXmlnsAttrNs = False,
preventDoubleDashComments = False,
preventDashAtCommentEnd = False,
replaceFormFeedCharacters = True):
self.dropXmlnsLocalName = dropXmlnsLocalName
self.dropXmlnsAttrNs = dropXmlnsAttrNs
self.preventDoubleDashComments = preventDoubleDashComments
self.preventDashAtCommentEnd = preventDashAtCommentEnd
self.replaceFormFeedCharacters = replaceFormFeedCharacters
self.replaceCache = {}
def coerceAttribute(self, name, namespace=None):
if self.dropXmlnsLocalName and name.startswith("xmlns:"):
#Need a datalosswarning here
return None
elif (self.dropXmlnsAttrNs and
namespace == "http://www.w3.org/2000/xmlns/"):
return None
else:
return self.toXmlName(name)
def coerceElement(self, name, namespace=None):
return self.toXmlName(name)
def coerceComment(self, data):
if self.preventDoubleDashComments:
while "--" in data:
data = data.replace("--", "- -")
return data
def coerceCharacters(self, data):
if self.replaceFormFeedCharacters:
data = data.replace("\x0C", " ")
#Other non-xml characters
return data
def toXmlName(self, name):
nameFirst = name[0]
nameRest = name[1:]
m = nonXmlNameFirstBMPRegexp.match(nameFirst)
if m:
nameFirstOutput = self.getReplacementCharacter(nameFirst)
else:
nameFirstOutput = nameFirst
nameRestOutput = nameRest
replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
for char in replaceChars:
replacement = self.getReplacementCharacter(char)
nameRestOutput = nameRestOutput.replace(char, replacement)
return nameFirstOutput + nameRestOutput
def getReplacementCharacter(self, char):
if char in self.replaceCache:
replacement = self.replaceCache[char]
else:
replacement = self.escapeChar(char)
return replacement
def fromXmlName(self, name):
for item in set(self.replacementRegexp.findall(name)):
name = name.replace(item, self.unescapeChar(item))
return name
def escapeChar(self, char):
replacement = "U" + hex(ord(char))[2:].upper().rjust(5, "0")
self.replaceCache[char] = replacement
return replacement
def unescapeChar(self, charcode):
return unichr(int(charcode[1:], 16))
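# --- illustrative usage sketch (not part of the upstream source) ---
# toXmlName/fromXmlName form a reversible escaping scheme: a character
# that is illegal in an XML Name becomes "U" plus five uppercase hex
# digits, and fromXmlName undoes the substitution.
#
#   f = InfosetFilter()
#   f.toXmlName(u"2rule")         # => u"U00032rule" (Names cannot start
#                                 #    with a digit)
#   f.fromXmlName(u"U00032rule")  # => u"2rule"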
| WebGL-master | resources/html5lib/src/html5lib/ihatexml.py |
try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import Set as set
from sets import ImmutableSet as frozenset
try:
from collections import deque
except ImportError:
from utils import deque
from constants import spaceCharacters
from constants import entitiesWindows1252, entities
from constants import asciiLowercase, asciiLetters, asciiUpper2Lower
from constants import digits, hexDigits, EOF
from constants import tokenTypes, tagTokenTypes
from constants import replacementCharacters
from inputstream import HTMLInputStream
# Group entities by their first character, for faster lookups
entitiesByFirstChar = {}
for e in entities:
entitiesByFirstChar.setdefault(e[0], []).append(e)
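# --- illustrative note (not part of the upstream source) ---
# "entities" maps names (with and without the trailing ";") to their
# expansion text, so bucketing by first character keeps the prefix scan
# in consumeEntity from walking the whole table, e.g.
#
#   entitiesByFirstChar["n"]   # includes "not", "not;", "notin;", ...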
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=True, lowercaseAttrName=True, parser=None):
self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet)
self.parser = parser
#Perform case conversions?
self.lowercaseElementName = lowercaseElementName
self.lowercaseAttrName = lowercaseAttrName
# Setup the initial tokenizer state
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
super(HTMLTokenizer, self).__init__()
def __iter__(self):
""" This is where the magic happens.
        We do our usual processing through the states and, when we have a
        token to return, we yield it, which pauses processing until the next
        token is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
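    # --- illustrative usage sketch (not part of the upstream source) ---
    # Because __iter__ drives the state machine lazily, tokenizing is just
    # iteration; each state method both queues tokens and selects the next
    # state:
    #
    #   for token in HTMLTokenizer("<p class=x>hi"):
    #       print token["type"], token.get("name", token.get("data"))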
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = u"\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
#Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
# Try/except needed as UCS-2 Python builds' unichar only works
# within the BMP.
char = unichr(charAsInt)
except ValueError:
char = eval("u'\\U%08x'" % charAsInt)
# Discard the ; if present. Otherwise, put it back on the queue and
# invoke parseError on parser.
if c != u";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
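    # --- illustrative note (not part of the upstream source) ---
    # What the branches above produce for some concrete references:
    #
    #   "&#65;" and "&#x41;" both yield u"A"
    #   "&#0;"   yields u"\uFFFD" plus an illegal-codepoint ParseError
    #   "&#128;" is in the Windows-1252 remap table, so it yields
    #            u"\u20ac" plus the same ParseError
    #   "&#65"   (no ";") also queues numeric-entity-without-semicolon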
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = u"&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, u"<", u"&")
or (allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == u"#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in (u"x", u"X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = u"&" + u"".join(charStack)
else:
# At this point in the process might have named entity. Entities
# are stored in the global variable "entities".
#
# Consume characters and compare to these to a substring of the
# entity names in the list until the substring no longer matches.
filteredEntityList = entitiesByFirstChar.get(charStack[0], [])
def entitiesStartingWith(name):
return [e for e in filteredEntityList if e.startswith(name)]
while (charStack[-1] is not EOF and
entitiesStartingWith("".join(charStack))):
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
entityName = None
# Try to find the longest entity the string will match to take care
            # of &noti for instance.
for entityLength in xrange(len(charStack)-1, 1, -1):
possibleEntityName = "".join(charStack[:entityLength])
if possibleEntityName in entities:
entityName = possibleEntityName
break
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if (entityName[-1] != ";" and fromAttribute and
(charStack[entityLength] in asciiLetters or
charStack[entityLength] in digits or
charStack[entityLength] == "=")):
self.stream.unget(charStack.pop())
output = u"&" + u"".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += u"".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = u"&" + u"".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
if output in spaceCharacters:
tokenType = "SpaceCharacters"
else:
tokenType = "Characters"
self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
if self.lowercaseElementName:
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type":tokenTypes["ParseError"],
"data":"attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type":tokenTypes["ParseError"],
"data":"self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
data = self.stream.char()
if data == "&":
self.state = self.entityDataState
elif data == "<":
self.state = self.tagOpenState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":"invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": u"\u0000"})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil((u"&", u"<", u"\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if data == "&":
self.state = self.characterReferenceInRcdata
elif data == "<":
self.state = self.rcdataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": u"\uFFFD"})
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil((u"&", u"<"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": u"\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil((u"<", u"\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": u"\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil((u"<", u"\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": u"\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil(u"\u0000")})
return True
def tagOpenState(self):
data = self.stream.char()
if data == u"!":
self.state = self.markupDeclarationOpenState
elif data == u"/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == u">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<>"})
self.state = self.dataState
elif data == u"?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing":False}
self.state = self.tagNameState
elif data == u">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == u">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == u"/":
self.state = self.selfClosingStartTagState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += u"\uFFFD"
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing":False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing":False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing":False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": u"</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing":False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing":False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing":False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": u"</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"</"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing":False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing":False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing":False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": u"</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": u"\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil((u"<", u"-", u"\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": u"\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u">"})
self.state = self.scriptDataState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": u"\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEscapedEndTagOpenState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<" + data})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing":False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing":False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing":False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": u"</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
self.state = self.scriptDataDoubleEscapedDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": u"\uFFFD"})
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
self.state = self.scriptDataDoubleEscapedDashDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": u"\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
    def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u">"})
self.state = self.scriptDataState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": u"\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == u">":
self.emitCurrentToken()
elif data == u"/":
self.state = self.selfClosingStartTagState
elif data in (u"'", u'"', u"=", u"<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append([u"\uFFFD", ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == u"=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == u">":
# XXX If we emit here the attributes are converted to a dict
# without being checked and when the code below runs we error
# because data is a dict not a list
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == u"/":
self.state = self.selfClosingStartTagState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][0] += u"\uFFFD"
leavingThisState = False
elif data in (u"'", u'"', u"<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-attribute-name"})
self.state = self.dataState
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
if self.lowercaseAttrName:
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, value in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
# XXX Fix for above XXX
if emitToken:
self.emitCurrentToken()
return True
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == u"=":
self.state = self.beforeAttributeValueState
elif data == u">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == u"/":
self.state = self.selfClosingStartTagState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append([u"\uFFFD", ""])
self.state = self.attributeNameState
elif data in (u"'", u'"', u"<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == u"\"":
self.state = self.attributeValueDoubleQuotedState
elif data == u"&":
self.state = self.attributeValueUnQuotedState
self.stream.unget(data);
elif data == u"'":
self.state = self.attributeValueSingleQuotedState
elif data == u">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-right-bracket"})
self.emitCurrentToken()
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += u"\uFFFD"
self.state = self.attributeValueUnQuotedState
elif data in (u"=", u"<", u"`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"equals-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
return True
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterAttributeValueState
elif data == u"&":
self.processEntityInAttribute(u'"')
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += u"\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-double-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("\"", u"&"))
return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == u"&":
self.processEntityInAttribute(u"'")
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += u"\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", u"&"))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == u"&":
self.processEntityInAttribute(">")
elif data == u">":
self.emitCurrentToken()
elif data in (u'"', u"'", u"=", u"<", u"`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += u"\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-no-quotes"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
frozenset((u"&", u">", u'"', u"'", u"=", u"<", u"`")) | spaceCharacters)
return True
def afterAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == u">":
self.emitCurrentToken()
elif data == u"/":
self.state = self.selfClosingStartTagState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-EOF-after-attribute-value"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-attribute-value"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-soldius-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
data = self.stream.charsUntil(u">")
data = data.replace(u"\u0000", u"\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if charStack[-1] == u"-":
charStack.append(self.stream.char())
if charStack[-1] == u"-":
self.currentToken = {"type": tokenTypes["Comment"], "data": u""}
self.state = self.commentStartState
return True
elif charStack[-1] in (u'd', u'D'):
matched = True
for expected in ((u'o', u'O'), (u'c', u'C'), (u't', u'T'),
(u'y', u'Y'), (u'p', u'P'), (u'e', u'E')):
charStack.append(self.stream.char())
if charStack[-1] not in expected:
matched = False
break
if matched:
self.currentToken = {"type": tokenTypes["Doctype"],
"name": u"",
"publicId": None, "systemId": None,
"correct": True}
self.state = self.doctypeState
return True
elif (charStack[-1] == "[" and
self.parser is not None and
self.parser.tree.openElements and
self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
matched = True
for expected in ["C", "D", "A", "T", "A", "["]:
charStack.append(self.stream.char())
if charStack[-1] != expected:
matched = False
break
if matched:
self.state = self.cdataSectionState
return True
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-dashes-or-doctype"})
while charStack:
self.stream.unget(charStack.pop())
self.state = self.bogusCommentState
return True
def commentStartState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentStartDashState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += u"\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += u"-\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if data == u"-":
self.state = self.commentEndDashState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += u"\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + \
self.stream.charsUntil((u"-", u"\u0000"))
return True
def commentEndDashState(self):
data = self.stream.char()
if data == u"-":
self.state = self.commentEndState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += u"-\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += u"-" + data
self.state = self.commentState
return True
def commentEndState(self):
data = self.stream.char()
if data == u">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += u"--\uFFFD"
self.state = self.commentState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data == u"-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += u"--" + data
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if data == u">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == u"-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += u"--!\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += u"--!" + data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == u">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] = u"\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == u">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += u"\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
def afterDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == u">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.currentToken["correct"] = False
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
if data in (u"p", u"P"):
matched = True
for expected in ((u"u", u"U"), (u"b", u"B"), (u"l", u"L"),
(u"i", u"I"), (u"c", u"C")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypePublicKeywordState
return True
elif data in (u"s", u"S"):
matched = True
for expected in ((u"y", u"Y"), (u"s", u"S"), (u"t", u"T"),
(u"e", u"E"), (u"m", u"M")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypeSystemKeywordState
return True
# All the characters read before the current 'data' will be
# [a-zA-Z], so they're garbage in the bogus doctype and can be
# discarded; only the latest character might be '>' or EOF
            # and needs to be pushed back with unget()
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-space-or-right-bracket-in-doctype", "datavars":
{"data": data}})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypePublicIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = u""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = u""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += u"\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += u"\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = u""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = u""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = u""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = u""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = u""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = u""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += u"\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == u"\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += u"\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == u">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil(u"]"))
charStack = []
for expected in ["]", "]", ">"]:
charStack.append(self.stream.char())
matched = True
if charStack[-1] == EOF:
data.extend(charStack[:-1])
break
elif charStack[-1] != expected:
matched = False
data.extend(charStack)
break
if matched:
break
data = "".join(data)
#Deal with null here rather than in the parser
nullCount = data.count(u"\u0000")
if nullCount > 0:
for i in xrange(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace(u"\u0000", u"\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
| WebGL-master | resources/html5lib/src/html5lib/tokenizer.py |
try:
frozenset
except NameError:
#Import from the sets module for python 2.3
from sets import Set as set
from sets import ImmutableSet as frozenset
class MethodDispatcher(dict):
"""Dict with 2 special properties:
On initiation, keys that are lists, sets or tuples are converted to
multiple keys so accessing any one of the items in the original
list-like object returns the matching value
md = MethodDispatcher({("foo", "bar"):"baz"})
md["foo"] == "baz"
A default value which can be set through the default attribute.
"""
def __init__(self, items=()):
# Using _dictEntries instead of directly assigning to self is about
# twice as fast. Please do careful performance testing before changing
# anything here.
_dictEntries = []
for name,value in items:
if type(name) in (list, tuple, frozenset, set):
for item in name:
_dictEntries.append((item, value))
else:
_dictEntries.append((name, value))
dict.__init__(self, _dictEntries)
self.default = None
def __getitem__(self, key):
return dict.get(self, key, self.default)
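def _demo_method_dispatcher():
    # Editor's illustrative sketch (not part of the original module): shows the
    # two special properties described in the docstring above. The name
    # _demo_method_dispatcher is hypothetical and never called by the library.
    md = MethodDispatcher([(("foo", "bar"), "baz"), ("single", "value")])
    assert md["foo"] == "baz"       # any item of the tuple key matches
    assert md["bar"] == "baz"
    assert md["single"] == "value"  # plain keys behave like a normal dict
    md.default = "fallback"
    assert md["missing"] == "fallback"  # unknown keys return the default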
#Pure python implementation of deque taken from the ASPN Python Cookbook
#Original code by Raymond Hettinger
class deque(object):
def __init__(self, iterable=(), maxsize=-1):
if not hasattr(self, 'data'):
self.left = self.right = 0
self.data = {}
self.maxsize = maxsize
self.extend(iterable)
def append(self, x):
self.data[self.right] = x
self.right += 1
if self.maxsize != -1 and len(self) > self.maxsize:
self.popleft()
def appendleft(self, x):
self.left -= 1
self.data[self.left] = x
if self.maxsize != -1 and len(self) > self.maxsize:
self.pop()
def pop(self):
if self.left == self.right:
raise IndexError('cannot pop from empty deque')
self.right -= 1
elem = self.data[self.right]
del self.data[self.right]
return elem
def popleft(self):
if self.left == self.right:
raise IndexError('cannot pop from empty deque')
elem = self.data[self.left]
del self.data[self.left]
self.left += 1
return elem
def clear(self):
self.data.clear()
self.left = self.right = 0
def extend(self, iterable):
for elem in iterable:
self.append(elem)
def extendleft(self, iterable):
for elem in iterable:
self.appendleft(elem)
def rotate(self, n=1):
if self:
n %= len(self)
for i in xrange(n):
self.appendleft(self.pop())
def __getitem__(self, i):
if i < 0:
i += len(self)
try:
return self.data[i + self.left]
except KeyError:
raise IndexError
def __setitem__(self, i, value):
if i < 0:
i += len(self)
try:
self.data[i + self.left] = value
except KeyError:
raise IndexError
def __delitem__(self, i):
size = len(self)
if not (-size <= i < size):
raise IndexError
data = self.data
if i < 0:
i += size
for j in xrange(self.left+i, self.right-1):
data[j] = data[j+1]
self.pop()
def __len__(self):
return self.right - self.left
def __cmp__(self, other):
if type(self) != type(other):
return cmp(type(self), type(other))
return cmp(list(self), list(other))
def __repr__(self, _track=[]):
if id(self) in _track:
return '...'
_track.append(id(self))
r = 'deque(%r)' % (list(self),)
_track.remove(id(self))
return r
def __getstate__(self):
return (tuple(self),)
def __setstate__(self, s):
self.__init__(s[0])
def __hash__(self):
raise TypeError
def __copy__(self):
return self.__class__(self)
def __deepcopy__(self, memo={}):
from copy import deepcopy
result = self.__class__()
memo[id(self)] = result
result.__init__(deepcopy(tuple(self), memo))
return result
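def _demo_deque():
    # Editor's illustrative sketch (not part of the original module): the
    # pure-python deque above mirrors collections.deque for the common calls.
    d = deque([1, 2, 3])
    d.append(4)            # -> [1, 2, 3, 4]
    d.appendleft(0)        # -> [0, 1, 2, 3, 4]
    assert d.popleft() == 0
    assert d.pop() == 4
    d.rotate(1)            # -> [3, 1, 2]
    assert list(d) == [3, 1, 2]
    assert len(d) == 3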
#Some utility functions to deal with weirdness around UCS2 vs UCS4
#python builds
def encodingType():
    if len(u"\U0010FFFF") == 2:
return "UCS2"
else:
return "UCS4"
def isSurrogatePair(data):
return (len(data) == 2 and
ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and
ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF)
def surrogatePairToCodepoint(data):
char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 +
(ord(data[1]) - 0xDC00))
return char_val
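def _demo_surrogates():
    # Editor's illustrative sketch (not part of the original module): on a
    # narrow (UCS2) build an astral character is stored as two code units;
    # these helpers detect the pair and recover the real codepoint.
    pair = u"\uD800\uDC00"  # surrogate pair encoding U+10000
    assert isSurrogatePair(pair)
    assert surrogatePairToCodepoint(pair) == 0x10000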
| WebGL-master | resources/html5lib/src/html5lib/utils.py |
class Filter(object):
def __init__(self, source):
self.source = source
def __iter__(self):
return iter(self.source)
def __getattr__(self, name):
return getattr(self.source, name)
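def _demo_filter(token_stream):
    # Editor's illustrative sketch (not part of the original module): a
    # subclass rewrites tokens in __iter__ while __getattr__ transparently
    # delegates every other attribute to the wrapped source.
    class StripComments(Filter):
        def __iter__(self):
            for token in Filter.__iter__(self):
                if token["type"] != "Comment":
                    yield token
    return StripComments(token_stream)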
| WebGL-master | resources/html5lib/src/html5lib/filters/_base.py |
import _base
from html5lib.sanitizer import HTMLSanitizerMixin
class Filter(_base.Filter, HTMLSanitizerMixin):
def __iter__(self):
for token in _base.Filter.__iter__(self):
token = self.sanitize_token(token)
if token: yield token
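def _demo_sanitize(dirty_html):
    # Editor's illustrative sketch (not part of the original module): the
    # filter is normally wrapped around a treewalker token stream and fed to
    # the serializer; the surrounding calls are the usual html5lib public API.
    import html5lib
    from html5lib import treewalkers, serializer
    dom = html5lib.parse(dirty_html, treebuilder="dom")
    stream = treewalkers.getTreeWalker("dom")(dom)
    return serializer.HTMLSerializer().render(Filter(stream))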
| WebGL-master | resources/html5lib/src/html5lib/filters/sanitizer.py |
import _base
class Filter(_base.Filter):
def __init__(self, source, encoding):
_base.Filter.__init__(self, source)
self.encoding = encoding
def __iter__(self):
state = "pre_head"
meta_found = (self.encoding is None)
pending = []
for token in _base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag":
if token["name"].lower() == u"head":
state = "in_head"
elif type == "EmptyTag":
if token["name"].lower() == u"meta":
# replace charset with actual encoding
has_http_equiv_content_type = False
for (namespace,name),value in token["data"].iteritems():
if namespace != None:
continue
elif name.lower() == u'charset':
token["data"][(namespace,name)] = self.encoding
meta_found = True
break
elif name == u'http-equiv' and value.lower() == u'content-type':
has_http_equiv_content_type = True
else:
if has_http_equiv_content_type and (None, u"content") in token["data"]:
token["data"][(None, u"content")] = u'text/html; charset=%s' % self.encoding
meta_found = True
elif token["name"].lower() == u"head" and not meta_found:
# insert meta into empty head
yield {"type": "StartTag", "name": u"head",
"data": token["data"]}
yield {"type": "EmptyTag", "name": u"meta",
"data": {(None, u"charset"): self.encoding}}
yield {"type": "EndTag", "name": u"head"}
meta_found = True
continue
elif type == "EndTag":
if token["name"].lower() == u"head" and pending:
# insert meta into head (if necessary) and flush pending queue
yield pending.pop(0)
if not meta_found:
yield {"type": "EmptyTag", "name": u"meta",
"data": {(None, u"charset"): self.encoding}}
while pending:
yield pending.pop(0)
meta_found = True
state = "post_head"
if state == "in_head":
pending.append(token)
else:
yield token
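def _demo_declare_charset(token_stream):
    # Editor's illustrative sketch (not part of the original module): wrapping
    # a treewalker stream makes the serialized document declare the encoding,
    # inserting or rewriting a <meta> inside <head> as the logic above shows.
    return Filter(token_stream, "utf-8")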
| WebGL-master | resources/html5lib/src/html5lib/filters/inject_meta_charset.py |
| WebGL-master | resources/html5lib/src/html5lib/filters/__init__.py |
#
# The goal is to finally have a form filler where you pass data for
# each form, using the algorithm for "Seeding a form with initial values"
# See http://www.whatwg.org/specs/web-forms/current-work/#seeding
#
import _base
from html5lib.constants import spaceCharacters
spaceCharacters = u"".join(spaceCharacters)
class SimpleFilter(_base.Filter):
def __init__(self, source, fieldStorage):
_base.Filter.__init__(self, source)
self.fieldStorage = fieldStorage
def __iter__(self):
field_indices = {}
state = None
field_name = None
for token in _base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
name = token["name"].lower()
if name == "input":
field_name = None
field_type = None
input_value_index = -1
input_checked_index = -1
for i,(n,v) in enumerate(token["data"]):
n = n.lower()
if n == u"name":
field_name = v.strip(spaceCharacters)
elif n == u"type":
field_type = v.strip(spaceCharacters)
elif n == u"checked":
input_checked_index = i
elif n == u"value":
input_value_index = i
value_list = self.fieldStorage.getlist(field_name)
field_index = field_indices.setdefault(field_name, 0)
if field_index < len(value_list):
value = value_list[field_index]
else:
value = ""
if field_type in (u"checkbox", u"radio"):
if value_list:
if token["data"][input_value_index][1] == value:
if input_checked_index < 0:
token["data"].append((u"checked", u""))
field_indices[field_name] = field_index + 1
elif input_checked_index >= 0:
del token["data"][input_checked_index]
elif field_type not in (u"button", u"submit", u"reset"):
if input_value_index >= 0:
token["data"][input_value_index] = (u"value", value)
else:
token["data"].append((u"value", value))
field_indices[field_name] = field_index + 1
field_type = None
field_name = None
elif name == "textarea":
field_type = "textarea"
field_name = dict((token["data"])[::-1])["name"]
elif name == "select":
field_type = "select"
attributes = dict(token["data"][::-1])
field_name = attributes.get("name")
is_select_multiple = "multiple" in attributes
is_selected_option_found = False
elif field_type == "select" and field_name and name == "option":
option_selected_index = -1
option_value = None
for i,(n,v) in enumerate(token["data"]):
n = n.lower()
if n == "selected":
option_selected_index = i
elif n == "value":
option_value = v.strip(spaceCharacters)
if option_value is None:
raise NotImplementedError("<option>s without a value= attribute")
else:
value_list = self.fieldStorage.getlist(field_name)
if value_list:
field_index = field_indices.setdefault(field_name, 0)
if field_index < len(value_list):
value = value_list[field_index]
else:
value = ""
if (is_select_multiple or not is_selected_option_found) and option_value == value:
if option_selected_index < 0:
token["data"].append((u"selected", u""))
field_indices[field_name] = field_index + 1
is_selected_option_found = True
elif option_selected_index >= 0:
del token["data"][option_selected_index]
elif field_type is not None and field_name and type == "EndTag":
name = token["name"].lower()
if name == field_type:
if name == "textarea":
value_list = self.fieldStorage.getlist(field_name)
if value_list:
field_index = field_indices.setdefault(field_name, 0)
if field_index < len(value_list):
value = value_list[field_index]
else:
value = ""
yield {"type": "Characters", "data": value}
field_indices[field_name] = field_index + 1
field_name = None
elif name == "option" and field_type == "select":
pass # TODO: part of "option without value= attribute" processing
elif field_type == "textarea":
continue # ignore token
yield token
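def _demo_fill_form(token_stream):
    # Editor's illustrative sketch (not part of the original module): any
    # object with a getlist(name) method works as fieldStorage; here a
    # hypothetical dict-backed stand-in seeds the form tokens with values.
    class FakeFieldStorage(object):
        def __init__(self, data):
            self.data = data
        def getlist(self, name):
            return self.data.get(name, [])
    return SimpleFilter(token_stream,
                        FakeFieldStorage({"name": [u"alice"],
                                          "color": [u"blue"]}))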
| WebGL-master | resources/html5lib/src/html5lib/filters/formfiller.py |
import _base
class Filter(_base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
yield previous2, previous1, None
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
if tagname in 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody','thead','tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
# or if it is immediately followed by an <code>optgroup</code>
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
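# A minimal usage sketch (an illustrative addition, not part of the original
# module; assumes html5lib is importable and that Filter is the class these
# methods belong to). Wrapping a walker drops the omittable tags identified
# above from the token stream:
if __name__ == "__main__":
    import html5lib
    from html5lib import treewalkers
    doc = html5lib.parse("<table><tbody><tr><td>1</td></tr></tbody></table>")
    walker = treewalkers.getTreeWalker("simpletree")
    for token in Filter(walker(doc)):
        print token  # no StartTag/EndTag tokens for <tbody> appear here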
| WebGL-master | resources/html5lib/src/html5lib/filters/optionaltags.py |
from gettext import gettext
_ = gettext
import _base
from html5lib.constants import cdataElements, rcdataElements, voidElements
from html5lib.constants import spaceCharacters
spaceCharacters = u"".join(spaceCharacters)
class LintError(Exception): pass
class Filter(_base.Filter):
def __iter__(self):
open_elements = []
contentModelFlag = "PCDATA"
for token in _base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("StartTag not in PCDATA content model flag: %s") % name)
if not isinstance(name, unicode):
raise LintError(_(u"Tag name is not a string: %r") % name)
if not name:
raise LintError(_(u"Empty tag name"))
if type == "StartTag" and name in voidElements:
raise LintError(_(u"Void element reported as StartTag token: %s") % name)
elif type == "EmptyTag" and name not in voidElements:
raise LintError(_(u"Non-void element reported as EmptyTag token: %s") % token["name"])
if type == "StartTag":
open_elements.append(name)
for name, value in token["data"]:
if not isinstance(name, unicode):
raise LintError(_("Attribute name is not a string: %r") % name)
if not name:
raise LintError(_(u"Empty attribute name"))
if not isinstance(value, unicode):
raise LintError(_("Attribute value is not a string: %r") % value)
if name in cdataElements:
contentModelFlag = "CDATA"
elif name in rcdataElements:
contentModelFlag = "RCDATA"
elif name == "plaintext":
contentModelFlag = "PLAINTEXT"
elif type == "EndTag":
name = token["name"]
if not isinstance(name, unicode):
raise LintError(_(u"Tag name is not a string: %r") % name)
if not name:
raise LintError(_(u"Empty tag name"))
if name in voidElements:
raise LintError(_(u"Void element reported as EndTag token: %s") % name)
start_name = open_elements.pop()
if start_name != name:
raise LintError(_(u"EndTag (%s) does not match StartTag (%s)") % (name, start_name))
contentModelFlag = "PCDATA"
elif type == "Comment":
if contentModelFlag != "PCDATA":
raise LintError(_("Comment not in PCDATA content model flag"))
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
if not isinstance(data, unicode):
raise LintError(_("Attribute name is not a string: %r") % data)
if not data:
raise LintError(_(u"%s token with empty data") % type)
if type == "SpaceCharacters":
data = data.strip(spaceCharacters)
if data:
raise LintError(_(u"Non-space character(s) found in SpaceCharacters token: ") % data)
elif type == "Doctype":
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("Doctype not in PCDATA content model flag: %s") % name)
if not isinstance(name, unicode):
raise LintError(_(u"Tag name is not a string: %r") % name)
# XXX: what to do with token["data"] ?
elif type in ("ParseError", "SerializeError"):
pass
else:
raise LintError(_(u"Unknown token type: %s") % type)
yield token
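# Usage sketch (an illustrative addition, not part of the original module;
# assumes html5lib is importable). The Filter passes tokens through unchanged
# and raises LintError on a malformed stream:
if __name__ == "__main__":
    import html5lib
    from html5lib import treewalkers
    doc = html5lib.parse(u"<p>some <b>text</b></p>")
    walker = treewalkers.getTreeWalker("simpletree")
    try:
        for token in Filter(walker(doc)):
            pass  # a well-formed stream lints cleanly
    except LintError, e:
        print e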
| WebGL-master | resources/html5lib/src/html5lib/filters/lint.py |
try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import ImmutableSet as frozenset
import re
import _base
from html5lib.constants import rcdataElements, spaceCharacters
spaceCharacters = u"".join(spaceCharacters)
SPACES_REGEX = re.compile(u"[%s]+" % spaceCharacters)
class Filter(_base.Filter):
spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))
def __iter__(self):
preserve = 0
for token in _base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag" \
and (preserve or token["name"] in self.spacePreserveElements):
preserve += 1
elif type == "EndTag" and preserve:
preserve -= 1
elif not preserve and type == "SpaceCharacters" and token["data"]:
# Test on token["data"] above to not introduce spaces where there were not
token["data"] = u" "
elif not preserve and type == "Characters":
token["data"] = collapse_spaces(token["data"])
yield token
def collapse_spaces(text):
return SPACES_REGEX.sub(' ', text)
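# A quick check of the collapsing behaviour (an illustrative addition, not
# part of the original module): runs of whitespace become a single space,
# except inside the space-preserving elements listed above.
if __name__ == "__main__":
    assert collapse_spaces(u"a \t\n  b") == u"a b"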
| WebGL-master | resources/html5lib/src/html5lib/filters/whitespace.py |
import gettext
_ = gettext.gettext
from html5lib.constants import voidElements, spaceCharacters
spaceCharacters = u"".join(spaceCharacters)
class TreeWalker(object):
def __init__(self, tree):
self.tree = tree
def __iter__(self):
raise NotImplementedError
def error(self, msg):
return {"type": "SerializeError", "data": msg}
def normalizeAttrs(self, attrs):
newattrs = {}
if attrs:
#TODO: treewalkers should always have attrs
for (namespace,name),value in attrs.iteritems():
namespace = unicode(namespace) if namespace else None
name = unicode(name)
value = unicode(value)
newattrs[(namespace,name)] = value
return newattrs
def emptyTag(self, namespace, name, attrs, hasChildren=False):
yield {"type": "EmptyTag", "name": unicode(name),
"namespace":unicode(namespace),
"data": self.normalizeAttrs(attrs)}
if hasChildren:
yield self.error(_("Void element has children"))
def startTag(self, namespace, name, attrs):
return {"type": "StartTag",
"name": unicode(name),
"namespace":unicode(namespace),
"data": self.normalizeAttrs(attrs)}
def endTag(self, namespace, name):
return {"type": "EndTag",
"name": unicode(name),
"namespace":unicode(namespace),
"data": {}}
def text(self, data):
data = unicode(data)
middle = data.lstrip(spaceCharacters)
left = data[:len(data)-len(middle)]
if left:
yield {"type": "SpaceCharacters", "data": left}
data = middle
middle = data.rstrip(spaceCharacters)
right = data[len(middle):]
if middle:
yield {"type": "Characters", "data": middle}
if right:
yield {"type": "SpaceCharacters", "data": right}
def comment(self, data):
return {"type": "Comment", "data": unicode(data)}
def doctype(self, name, publicId=None, systemId=None, correct=True):
return {"type": "Doctype",
"name": name is not None and unicode(name) or u"",
"publicId": publicId,
"systemId": systemId,
"correct": correct}
def entity(self, name):
return {"type": "Entity", "name": unicode(name)}
def unknown(self, nodeType):
return self.error(_("Unknown node type: ") + nodeType)
class RecursiveTreeWalker(TreeWalker):
def walkChildren(self, node):
        raise NotImplementedError
def element(self, node, namespace, name, attrs, hasChildren):
if name in voidElements:
for token in self.emptyTag(namespace, name, attrs, hasChildren):
yield token
        else:
            yield self.startTag(namespace, name, attrs)
            if hasChildren:
                for token in self.walkChildren(node):
                    yield token
            yield self.endTag(namespace, name)
from xml.dom import Node
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"
class NonRecursiveTreeWalker(TreeWalker):
def getNodeDetails(self, node):
raise NotImplementedError
def getFirstChild(self, node):
raise NotImplementedError
def getNextSibling(self, node):
raise NotImplementedError
def getParentNode(self, node):
raise NotImplementedError
def __iter__(self):
currentNode = self.tree
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
hasChildren = False
endTag = None
if type == DOCTYPE:
yield self.doctype(*details)
elif type == TEXT:
for token in self.text(*details):
yield token
elif type == ELEMENT:
namespace, name, attributes, hasChildren = details
if name in voidElements:
for token in self.emptyTag(namespace, name, attributes,
hasChildren):
yield token
hasChildren = False
else:
endTag = name
yield self.startTag(namespace, name, attributes)
elif type == COMMENT:
yield self.comment(details[0])
elif type == ENTITY:
yield self.entity(details[0])
elif type == DOCUMENT:
hasChildren = True
else:
yield self.unknown(details[0])
if hasChildren:
firstChild = self.getFirstChild(currentNode)
else:
firstChild = None
if firstChild is not None:
currentNode = firstChild
else:
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
if type == ELEMENT:
namespace, name, attributes, hasChildren = details
if name not in voidElements:
yield self.endTag(namespace, name)
if self.tree is currentNode:
currentNode = None
break
nextSibling = self.getNextSibling(currentNode)
if nextSibling is not None:
currentNode = nextSibling
break
else:
currentNode = self.getParentNode(currentNode)
| WebGL-master | resources/html5lib/src/html5lib/treewalkers/_base.py |
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
import _base
from html5lib.constants import voidElements
class TreeWalker(_base.TreeWalker):
def __iter__(self):
ignore_until = None
previous = None
for event in self.tree:
if previous is not None and \
(ignore_until is None or previous[1] is ignore_until):
if previous[1] is ignore_until:
ignore_until = None
for token in self.tokens(previous, event):
yield token
if token["type"] == "EmptyTag":
ignore_until = previous[1]
previous = event
if ignore_until is None or previous[1] is ignore_until:
for token in self.tokens(previous, None):
yield token
elif ignore_until is not None:
raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
def tokens(self, event, next):
type, node = event
if type == START_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
attrs = {}
for attr in node.attributes.keys():
attr = node.getAttributeNode(attr)
attrs[(attr.namespaceURI,attr.localName)] = attr.value
if name in voidElements:
for token in self.emptyTag(namespace,
name,
attrs,
not next or next[1] is not node):
yield token
else:
yield self.startTag(namespace, name, attrs)
elif type == END_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
if name not in voidElements:
yield self.endTag(namespace, name)
elif type == COMMENT:
yield self.comment(node.nodeValue)
elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
for token in self.text(node.nodeValue):
yield token
else:
yield self.unknown(type)
| WebGL-master | resources/html5lib/src/html5lib/treewalkers/pulldom.py |
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from genshi.output import NamespaceFlattener
import _base
from html5lib.constants import voidElements
class TreeWalker(_base.TreeWalker):
def __iter__(self):
depth = 0
ignore_until = None
previous = None
for event in self.tree:
if previous is not None:
if previous[0] == START:
depth += 1
if ignore_until <= depth:
ignore_until = None
if ignore_until is None:
for token in self.tokens(previous, event):
yield token
if token["type"] == "EmptyTag":
ignore_until = depth
if previous[0] == END:
depth -= 1
previous = event
if previous is not None:
if ignore_until is None or ignore_until <= depth:
for token in self.tokens(previous, None):
yield token
elif ignore_until is not None:
raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
def tokens(self, event, next):
kind, data, pos = event
if kind == START:
tag, attrib = data
name = tag.localname
namespace = tag.namespace
            if name in voidElements:  # match on the local name, as the END branch below does
for token in self.emptyTag(namespace, name, list(attrib),
not next or next[0] != END
or next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, list(attrib))
elif kind == END:
name = data.localname
namespace = data.namespace
if name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS, \
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
| WebGL-master | resources/html5lib/src/html5lib/treewalkers/genshistream.py |
"""A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to do
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
"""Get a TreeWalker class for various types of tree with built-in support
    treeType - the name of the tree type required (case-insensitive). Supported
               values are "simpletree", "dom", "pulldom", "etree", "lxml",
               "beautifulsoup" and "genshi"
"simpletree" - a built-in DOM-ish tree type with support for some
more pythonic idioms.
"dom" - The xml.dom.minidom DOM implementation
"pulldom" - The xml.dom.pulldom event stream
"etree" - A generic walker for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
"lxml" - Optimized walker for lxml.etree
"beautifulsoup" - Beautiful soup (if installed)
"genshi" - a Genshi stream
implementation - (Currently applies to the "etree" tree type only). A module
implementing the tree type e.g. xml.etree.ElementTree or
cElementTree."""
treeType = treeType.lower()
if treeType not in treeWalkerCache:
if treeType in ("dom", "pulldom", "simpletree"):
mod = __import__(treeType, globals())
treeWalkerCache[treeType] = mod.TreeWalker
elif treeType == "genshi":
import genshistream
treeWalkerCache[treeType] = genshistream.TreeWalker
elif treeType == "beautifulsoup":
import soup
treeWalkerCache[treeType] = soup.TreeWalker
elif treeType == "lxml":
import lxmletree
treeWalkerCache[treeType] = lxmletree.TreeWalker
elif treeType == "etree":
import etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeWalker
return treeWalkerCache.get(treeType)
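# Example usage (an illustrative addition, not part of the original module;
# assumes html5lib is importable and a minidom tree is being walked):
if __name__ == "__main__":
    import html5lib
    doc = html5lib.parse("<p>Hello</p>", treebuilder="dom")
    walker_class = getTreeWalker("dom")
    for token in walker_class(doc):
        print token["type"], token.get("name")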
| WebGL-master | resources/html5lib/src/html5lib/treewalkers/__init__.py |
import re
import gettext
_ = gettext.gettext
from BeautifulSoup import BeautifulSoup, Declaration, Comment, Tag
from html5lib.constants import namespaces
import _base
class TreeWalker(_base.NonRecursiveTreeWalker):
doctype_regexp = re.compile(
r'DOCTYPE\s+(?P<name>[^\s]*)(\s*PUBLIC\s*"(?P<publicId>.*)"\s*"(?P<systemId1>.*)"|\s*SYSTEM\s*"(?P<systemId2>.*)")?')
def getNodeDetails(self, node):
if isinstance(node, BeautifulSoup): # Document or DocumentFragment
return (_base.DOCUMENT,)
elif isinstance(node, Declaration): # DocumentType
string = unicode(node.string)
#Slice needed to remove markup added during unicode conversion,
#but only in some versions of BeautifulSoup/Python
if string.startswith('<!') and string.endswith('>'):
string = string[2:-1]
m = self.doctype_regexp.match(string)
#This regexp approach seems wrong and fragile
            #but Beautiful Soup stores the doctype as a single string and we want the separate bits
#It should work as long as the tree is created by html5lib itself but may be wrong if it's
#been modified at all
#We could just feed to it a html5lib tokenizer, I guess...
assert m is not None, "DOCTYPE did not match expected format"
name = m.group('name')
publicId = m.group('publicId')
if publicId is not None:
systemId = m.group('systemId1')
else:
systemId = m.group('systemId2')
return _base.DOCTYPE, name, publicId or "", systemId or ""
elif isinstance(node, Comment):
string = unicode(node.string)
if string.startswith('<!--') and string.endswith('-->'):
string = string[4:-3]
return _base.COMMENT, string
elif isinstance(node, unicode): # TextNode
return _base.TEXT, node
elif isinstance(node, Tag): # Element
return (_base.ELEMENT, namespaces["html"], node.name,
dict(node.attrs).items(), node.contents)
else:
return _base.UNKNOWN, node.__class__.__name__
def getFirstChild(self, node):
return node.contents[0]
def getNextSibling(self, node):
return node.nextSibling
def getParentNode(self, node):
return node.parent
| WebGL-master | resources/html5lib/src/html5lib/treewalkers/soup.py |
from lxml import etree
from html5lib.treebuilders.etree import tag_regexp
from gettext import gettext
_ = gettext
import _base
from html5lib.constants import voidElements
from html5lib import ihatexml
class Root(object):
def __init__(self, et):
self.elementtree = et
self.children = []
if et.docinfo.internalDTD:
self.children.append(Doctype(self, et.docinfo.root_name,
et.docinfo.public_id,
et.docinfo.system_url))
root = et.getroot()
node = root
while node.getprevious() is not None:
node = node.getprevious()
while node is not None:
self.children.append(node)
node = node.getnext()
self.text = None
self.tail = None
def __getitem__(self, key):
return self.children[key]
def getnext(self):
return None
def __len__(self):
return 1
class Doctype(object):
def __init__(self, root_node, name, public_id, system_id):
self.root_node = root_node
self.name = name
self.public_id = public_id
self.system_id = system_id
self.text = None
self.tail = None
def getnext(self):
return self.root_node.children[1]
class FragmentRoot(Root):
def __init__(self, children):
self.children = [FragmentWrapper(self, child) for child in children]
self.text = self.tail = None
def getnext(self):
return None
class FragmentWrapper(object):
def __init__(self, fragment_root, obj):
self.root_node = fragment_root
self.obj = obj
if hasattr(self.obj, 'text'):
self.text = self.obj.text
else:
self.text = None
if hasattr(self.obj, 'tail'):
self.tail = self.obj.tail
else:
self.tail = None
self.isstring = isinstance(obj, basestring)
def __getattr__(self, name):
return getattr(self.obj, name)
def getnext(self):
siblings = self.root_node.children
idx = siblings.index(self)
if idx < len(siblings) - 1:
return siblings[idx + 1]
else:
return None
def __getitem__(self, key):
return self.obj[key]
def __nonzero__(self):
return bool(self.obj)
def getparent(self):
return None
def __str__(self):
return str(self.obj)
def __unicode__(self):
return unicode(self.obj)
def __len__(self):
return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
def __init__(self, tree):
if hasattr(tree, "getroot"):
tree = Root(tree)
elif isinstance(tree, list):
tree = FragmentRoot(tree)
_base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = ihatexml.InfosetFilter()
def getNodeDetails(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
return _base.TEXT, getattr(node, key)
elif isinstance(node, Root):
return (_base.DOCUMENT,)
elif isinstance(node, Doctype):
return _base.DOCTYPE, node.name, node.public_id, node.system_id
elif isinstance(node, FragmentWrapper) and node.isstring:
return _base.TEXT, node
elif node.tag == etree.Comment:
return _base.COMMENT, node.text
elif node.tag == etree.Entity:
return _base.ENTITY, node.text[1:-1] # strip &;
else:
#This is assumed to be an ordinary element
match = tag_regexp.match(node.tag)
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = node.tag
attrs = {}
for name, value in node.attrib.items():
match = tag_regexp.match(name)
if match:
attrs[(match.group(1),match.group(2))] = value
else:
attrs[(None,name)] = value
return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
attrs, len(node) > 0 or node.text)
def getFirstChild(self, node):
assert not isinstance(node, tuple), _("Text nodes have no children")
assert len(node) or node.text, "Node has no children"
if node.text:
return (node, "text")
else:
return node[0]
def getNextSibling(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
if key == "text":
# XXX: we cannot use a "bool(node) and node[0] or None" construct here
# because node[0] might evaluate to False if it has no child element
if len(node):
return node[0]
else:
return None
else: # tail
return node.getnext()
return node.tail and (node, "tail") or node.getnext()
def getParentNode(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
if key == "text":
return node
# else: fallback to "normal" processing
return node.getparent()
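# Usage sketch (an illustrative addition, not part of the original module;
# assumes lxml.html and html5lib are importable): walking a bare lxml element
# yields start/end tag and character tokens.
if __name__ == "__main__":
    import lxml.html
    root = lxml.html.fromstring(u"<p>Hello</p>")
    for token in TreeWalker(root):
        print token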
| WebGL-master | resources/html5lib/src/html5lib/treewalkers/lxmletree.py |
import gettext
_ = gettext.gettext
try:
from types import ModuleType
except:
from new import module as ModuleType
import copy
import re
import _base
from html5lib.constants import voidElements
tag_regexp = re.compile("{([^}]*)}(.*)")
moduleCache = {}
def getETreeModule(ElementTreeImplementation):
name = "_" + ElementTreeImplementation.__name__+"builder"
if name in moduleCache:
return moduleCache[name]
else:
mod = ModuleType("_" + ElementTreeImplementation.__name__+"builder")
objs = getETreeBuilder(ElementTreeImplementation)
mod.__dict__.update(objs)
moduleCache[name] = mod
return mod
def getETreeBuilder(ElementTreeImplementation):
ElementTree = ElementTreeImplementation
class TreeWalker(_base.NonRecursiveTreeWalker):
"""Given the particular ElementTree representation, this implementation,
to avoid using recursion, returns "nodes" as tuples with the following
content:
1. The current element
2. The index of the element relative to its parent
3. A stack of ancestor elements
4. A flag "text", "tail" or None to indicate if the current node is a
text node; either the text or tail of the current element (1)
"""
def getNodeDetails(self, node):
if isinstance(node, tuple): # It might be the root Element
elt, key, parents, flag = node
if flag in ("text", "tail"):
return _base.TEXT, getattr(elt, flag)
else:
node = elt
if not(hasattr(node, "tag")):
node = node.getroot()
if node.tag in ("<DOCUMENT_ROOT>", "<DOCUMENT_FRAGMENT>"):
return (_base.DOCUMENT,)
elif node.tag == "<!DOCTYPE>":
return (_base.DOCTYPE, node.text,
node.get("publicId"), node.get("systemId"))
elif node.tag == ElementTree.Comment:
return _base.COMMENT, node.text
else:
assert type(node.tag) in (str, unicode), type(node.tag)
#This is assumed to be an ordinary element
match = tag_regexp.match(node.tag)
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = node.tag
attrs = {}
for name, value in node.attrib.items():
match = tag_regexp.match(name)
if match:
attrs[(match.group(1),match.group(2))] = value
else:
attrs[(None,name)] = value
return (_base.ELEMENT, namespace, tag,
attrs, len(node) or node.text)
def getFirstChild(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
element, key, parents, flag = node, None, [], None
if flag in ("text", "tail"):
return None
else:
if element.text:
return element, key, parents, "text"
elif len(element):
parents.append(element)
return element[0], 0, parents, None
else:
return None
def getNextSibling(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
return None
if flag == "text":
if len(element):
parents.append(element)
return element[0], 0, parents, None
else:
return None
else:
if element.tail and flag != "tail":
return element, key, parents, "tail"
elif key < len(parents[-1]) - 1:
return parents[-1][key+1], key+1, parents, None
else:
return None
def getParentNode(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
return None
if flag == "text":
if not parents:
return element
else:
return element, key, parents, None
else:
parent = parents.pop()
if not parents:
return parent
else:
return parent, list(parents[-1]).index(parent), parents, None
return locals()
| WebGL-master | resources/html5lib/src/html5lib/treewalkers/etree.py |
import gettext
_ = gettext.gettext
import _base
class TreeWalker(_base.NonRecursiveTreeWalker):
"""Given that simpletree has no performant way of getting a node's
next sibling, this implementation returns "nodes" as tuples with the
following content:
1. The parent Node (Element, Document or DocumentFragment)
2. The child index of the current node in its parent's children list
    3. A list used as a stack of all ancestors. Each entry is a pair whose
       first item is a parent Node and second item is a child index.
"""
def getNodeDetails(self, node):
if isinstance(node, tuple): # It might be the root Node
parent, idx, parents = node
node = parent.childNodes[idx]
# testing node.type allows us not to import treebuilders.simpletree
if node.type in (1, 2): # Document or DocumentFragment
return (_base.DOCUMENT,)
elif node.type == 3: # DocumentType
return _base.DOCTYPE, node.name, node.publicId, node.systemId
elif node.type == 4: # TextNode
return _base.TEXT, node.value
elif node.type == 5: # Element
attrs = {}
for name, value in node.attributes.items():
if isinstance(name, tuple):
attrs[(name[2],name[1])] = value
else:
attrs[(None,name)] = value
return (_base.ELEMENT, node.namespace, node.name,
attrs, node.hasContent())
elif node.type == 6: # CommentNode
return _base.COMMENT, node.data
else:
            return _base.UNKNOWN, node.type
def getFirstChild(self, node):
if isinstance(node, tuple): # It might be the root Node
parent, idx, parents = node
parents.append((parent, idx))
node = parent.childNodes[idx]
else:
parents = []
assert node.hasContent(), "Node has no children"
return (node, 0, parents)
def getNextSibling(self, node):
assert isinstance(node, tuple), "Node is not a tuple: " + str(node)
parent, idx, parents = node
idx += 1
if len(parent.childNodes) > idx:
return (parent, idx, parents)
else:
return None
def getParentNode(self, node):
assert isinstance(node, tuple)
parent, idx, parents = node
if parents:
parent, idx = parents.pop()
return parent, idx, parents
else:
# HACK: We could return ``parent`` but None will stop the algorithm the same way
return None
| WebGL-master | resources/html5lib/src/html5lib/treewalkers/simpletree.py |
from xml.dom import Node
import gettext
_ = gettext.gettext
import _base
from html5lib.constants import voidElements
class TreeWalker(_base.NonRecursiveTreeWalker):
def getNodeDetails(self, node):
if node.nodeType == Node.DOCUMENT_TYPE_NODE:
return _base.DOCTYPE, node.name, node.publicId, node.systemId
elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
return _base.TEXT, node.nodeValue
elif node.nodeType == Node.ELEMENT_NODE:
attrs = {}
for attr in node.attributes.keys():
attr = node.getAttributeNode(attr)
attrs[(attr.namespaceURI,attr.localName)] = attr.value
return (_base.ELEMENT, node.namespaceURI, node.nodeName,
attrs, node.hasChildNodes())
elif node.nodeType == Node.COMMENT_NODE:
return _base.COMMENT, node.nodeValue
elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
return (_base.DOCUMENT,)
else:
return _base.UNKNOWN, node.nodeType
def getFirstChild(self, node):
return node.firstChild
def getNextSibling(self, node):
return node.nextSibling
def getParentNode(self, node):
return node.parentNode
| WebGL-master | resources/html5lib/src/html5lib/treewalkers/dom.py |
from htmlserializer import HTMLSerializer
class XHTMLSerializer(HTMLSerializer):
quote_attr_values = True
minimize_boolean_attributes = False
use_trailing_solidus = True
escape_lt_in_attrs = True
omit_optional_tags = False
escape_rcdata = True
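# A brief usage sketch (an illustrative addition, not part of the original
# module; assumes html5lib and its simpletree walker are importable). The
# overrides above keep optional tags, quote every attribute value, and emit
# void elements with a trailing solidus, e.g. <br />.
if __name__ == "__main__":
    import html5lib
    from html5lib import treewalkers
    doc = html5lib.parse(u"<p>a<br>b</p>")
    walker = treewalkers.getTreeWalker("simpletree")
    print XHTMLSerializer().render(walker(doc))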
| WebGL-master | resources/html5lib/src/html5lib/serializer/xhtmlserializer.py |
try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import ImmutableSet as frozenset
import gettext
_ = gettext.gettext
from html5lib.constants import voidElements, booleanAttributes, spaceCharacters
from html5lib.constants import rcdataElements, entities, xmlEntities
from html5lib import utils
from xml.sax.saxutils import escape
spaceCharacters = u"".join(spaceCharacters)
try:
from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
unicode_encode_errors = "strict"
else:
unicode_encode_errors = "htmlentityreplace"
from html5lib.constants import entities
encode_entity_map = {}
is_ucs4 = len(u"\U0010FFFF") == 1
for k, v in entities.items():
#skip multi-character entities
if ((is_ucs4 and len(v) > 1) or
(not is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = utils.surrogatePairToCodepoint(v)
else:
try:
v = ord(v)
except:
print v
raise
if not v in encode_entity_map or k.islower():
                # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if utils.isSurrogatePair(exc.object[index:min([exc.end, index+2])]):
codepoint = utils.surrogatePairToCodepoint(exc.object[index:index+2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;"%(hex(cp)[2:]))
return (u"".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error(unicode_encode_errors, htmlentityreplace_errors)
del register_error
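# Behaviour sketch for the handler registered above (an illustrative comment,
# not part of the original module): characters that cannot be encoded are
# replaced by a named entity where one exists, else a numeric reference:
#
#     u"caf\u00e9 \u2603".encode("ascii", unicode_encode_errors)
#     # -> 'caf&eacute; &#x2603;'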
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = False
quote_char = u'"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"minimize_boolean_attributes", "use_trailing_solidus",
"space_before_trailing_solidus", "omit_optional_tags",
"strip_whitespace", "inject_meta_charset", "escape_lt_in_attrs",
"escape_rcdata", "resolve_entities", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
          Whether to insert a meta element to define the character set of the
document.
quote_attr_values=True|False
Whether to quote attribute values that don't require quoting
per HTML5 parsing rules.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
          source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
"""
if kwargs.has_key('quote_char'):
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, unicode))
if self.encoding:
return string.encode(self.encoding, unicode_encode_errors)
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, unicode))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from html5lib.filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# XXX: WhitespaceFilter should be used before OptionalTagFilter
# for maximum efficiently of this latter filter
if self.strip_whitespace:
from html5lib.filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from html5lib.filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from html5lib.filters.optionaltags import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = u"<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += u' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += u" SYSTEM"
if token["systemId"]:
if token["systemId"].find(u'"') >= 0:
if token["systemId"].find(u"'") >= 0:
self.serializeError(_("System identifer contains both single and double quote characters"))
quote_char = u"'"
else:
quote_char = u'"'
doctype += u" %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += u">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError(_("Unexpected </ in CDATA"))
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict(u"<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
attributes = []
for (attr_namespace,attr_name),attr_value in sorted(token["data"].items()):
#TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(u' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple()) \
and k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict(u"=")
if self.quote_attr_values or not v:
quote_attr = True
else:
quote_attr = reduce(lambda x,y: x or (y in v),
spaceCharacters + u">\"'=", False)
v = v.replace(u"&", u"&")
if self.escape_lt_in_attrs: v = v.replace(u"<", u"<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if u"'" in v and u'"' not in v:
quote_char = u'"'
elif u'"' in v and u"'" not in v:
quote_char = u"'"
if quote_char == u"'":
v = v.replace(u"'", u"'")
else:
v = v.replace(u'"', u""")
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(u" /")
else:
yield self.encodeStrict(u"/")
yield self.encode(u">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
yield self.encodeStrict(u"</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError(_("Comment contains --"))
yield self.encodeStrict(u"<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if not key in entities:
self.serializeError(_("Entity %s not recognized" % name))
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = u"&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return "".join(list(self.serialize(treewalker, encoding)))
else:
return u"".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
"""Error in serialized tree"""
pass
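# A short usage sketch (an illustrative addition, not part of the original
# module; assumes html5lib is importable). The keyword options documented in
# HTMLSerializer.__init__ are passed straight through:
if __name__ == "__main__":
    import html5lib
    from html5lib import treewalkers
    doc = html5lib.parse(u"<p class='x'>a   b</p>")
    walker = treewalkers.getTreeWalker("simpletree")
    serializer = HTMLSerializer(strip_whitespace=True, quote_attr_values=True)
    print serializer.render(walker(doc))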
| WebGL-master | resources/html5lib/src/html5lib/serializer/htmlserializer.py |
from html5lib import treewalkers
from htmlserializer import HTMLSerializer
from xhtmlserializer import XHTMLSerializer
def serialize(input, tree="simpletree", format="html", encoding=None,
**serializer_opts):
# XXX: Should we cache this?
walker = treewalkers.getTreeWalker(tree)
if format == "html":
s = HTMLSerializer(**serializer_opts)
elif format == "xhtml":
s = XHTMLSerializer(**serializer_opts)
else:
raise ValueError, "type must be either html or xhtml"
return s.render(walker(input), encoding)
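# Convenience sketch (an illustrative addition, not part of the original
# module; assumes html5lib is importable): serialize() wires up the walker
# and serializer in one call, forwarding serializer options.
if __name__ == "__main__":
    import html5lib
    doc = html5lib.parse(u"<p>Hello</p>")
    print serialize(doc, tree="simpletree", format="html",
                    omit_optional_tags=False)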
| WebGL-master | resources/html5lib/src/html5lib/serializer/__init__.py |
from html5lib.constants import scopingElements, tableInsertModeElements, namespaces
try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import Set as set
from sets import ImmutableSet as frozenset
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
Marker = None
class Node(object):
def __init__(self, name):
"""Node representing an item in the tree.
name - The tag name associated with the node
parent - The parent of the current node (or None for the document node)
value - The value of the current node (applies to text nodes and
comments
attributes - a dict holding name, value pairs for attributes of the node
childNodes - a list of child nodes of the current node. This must
include all elements but not necessarily other node types
_flags - A list of miscellaneous flags that can be set on the node
"""
self.name = name
self.parent = None
self.value = None
self.attributes = {}
self.childNodes = []
self._flags = []
def __unicode__(self):
attributesStr = " ".join(["%s=\"%s\""%(name, value)
for name, value in
self.attributes.iteritems()])
if attributesStr:
return "<%s %s>"%(self.name,attributesStr)
else:
return "<%s>"%(self.name)
def __repr__(self):
return "<%s>" % (self.name)
def appendChild(self, node):
"""Insert node as a child of the current node
"""
raise NotImplementedError
def insertText(self, data, insertBefore=None):
"""Insert data as text in the current node, positioned before the
start of node insertBefore or to the end of the node's text.
"""
raise NotImplementedError
def insertBefore(self, node, refNode):
"""Insert node as a child of the current node, before refNode in the
list of child nodes. Raises ValueError if refNode is not a child of
the current node"""
raise NotImplementedError
def removeChild(self, node):
"""Remove node from the children of the current node
"""
raise NotImplementedError
def reparentChildren(self, newParent):
"""Move all the children of the current node to newParent.
This is needed so that trees that don't store text as nodes move the
text in the correct way
"""
#XXX - should this method be made more general?
for child in self.childNodes:
newParent.appendChild(child)
self.childNodes = []
def cloneNode(self):
"""Return a shallow copy of the current node i.e. a node with the same
name and attributes but with no parent or child nodes
"""
raise NotImplementedError
def hasContent(self):
"""Return true if the node has children or text, false otherwise
"""
raise NotImplementedError
class ActiveFormattingElements(list):
def append(self, node):
equalCount = 0
if node != Marker:
for element in self[::-1]:
if element == Marker:
break
if self.nodesEqual(element, node):
equalCount += 1
if equalCount == 3:
self.remove(element)
break
list.append(self, node)
def nodesEqual(self, node1, node2):
if not node1.nameTuple == node2.nameTuple:
return False
if not node1.attributes == node2.attributes:
return False
return True
class TreeBuilder(object):
"""Base treebuilder implementation
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
"""
#Document class
documentClass = None
#The class to use for creating a node
elementClass = None
#The class to use for creating comments
commentClass = None
#The class to use for creating doctypes
doctypeClass = None
#Fragment class
fragmentClass = None
def __init__(self, namespaceHTMLElements):
if namespaceHTMLElements:
self.defaultNamespace = "http://www.w3.org/1999/xhtml"
else:
self.defaultNamespace = None
self.reset()
def reset(self):
self.openElements = []
self.activeFormattingElements = ActiveFormattingElements()
#XXX - rename these to headElement, formElement
self.headPointer = None
self.formPointer = None
self.insertFromTable = False
self.document = self.documentClass()
def elementInScope(self, target, variant=None):
#If we pass a node in we match that. if we pass a string
#match any node with that name
exactNode = hasattr(target, "nameTuple")
listElementsMap = {
None:(scopingElements, False),
"button":(scopingElements | set([(namespaces["html"], "button")]), False),
"list":(scopingElements | set([(namespaces["html"], "ol"),
(namespaces["html"], "ul")]), False),
"table":(set([(namespaces["html"], "html"),
(namespaces["html"], "table")]), False),
"select":(set([(namespaces["html"], "optgroup"),
(namespaces["html"], "option")]), True)
}
listElements, invert = listElementsMap[variant]
for node in reversed(self.openElements):
if (node.name == target and not exactNode or
node == target and exactNode):
return True
elif (invert ^ (node.nameTuple in listElements)):
return False
assert False # We should never reach this point
def reconstructActiveFormattingElements(self):
# Within this algorithm the order of steps described in the
# specification is not quite the same as the order of steps in the
# code. It should still do the same though.
# Step 1: stop the algorithm when there's nothing to do.
if not self.activeFormattingElements:
return
# Step 2 and step 3: we start with the last element. So i is -1.
i = len(self.activeFormattingElements) - 1
entry = self.activeFormattingElements[i]
if entry == Marker or entry in self.openElements:
return
# Step 6
while entry != Marker and entry not in self.openElements:
if i == 0:
#This will be reset to 0 below
i = -1
break
i -= 1
# Step 5: let entry be one earlier in the list.
entry = self.activeFormattingElements[i]
while True:
# Step 7
i += 1
# Step 8
entry = self.activeFormattingElements[i]
clone = entry.cloneNode() #Mainly to get a new copy of the attributes
# Step 9
element = self.insertElement({"type":"StartTag",
"name":clone.name,
"namespace":clone.namespace,
"data":clone.attributes})
# Step 10
self.activeFormattingElements[i] = element
# Step 11
if element == self.activeFormattingElements[-1]:
break
def clearActiveFormattingElements(self):
entry = self.activeFormattingElements.pop()
while self.activeFormattingElements and entry != Marker:
entry = self.activeFormattingElements.pop()
def elementInActiveFormattingElements(self, name):
"""Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false"""
for item in self.activeFormattingElements[::-1]:
# Check for Marker first because if it's a Marker it doesn't have a
# name attribute.
if item == Marker:
break
elif item.name == name:
return item
return False
def insertRoot(self, token):
element = self.createElement(token)
self.openElements.append(element)
self.document.appendChild(element)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = self.doctypeClass(name, publicId, systemId)
self.document.appendChild(doctype)
def insertComment(self, token, parent=None):
if parent is None:
parent = self.openElements[-1]
parent.appendChild(self.commentClass(token["data"]))
def createElement(self, token):
"""Create an element but don't insert it anywhere"""
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
return element
def _getInsertFromTable(self):
return self._insertFromTable
def _setInsertFromTable(self, value):
"""Switch the function used to insert an element from the
normal one to the misnested table one and back again"""
self._insertFromTable = value
if value:
self.insertElement = self.insertElementTable
else:
self.insertElement = self.insertElementNormal
insertFromTable = property(_getInsertFromTable, _setInsertFromTable)
def insertElementNormal(self, token):
name = token["name"]
assert type(name) == unicode, "Element %s not unicode"%name
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
self.openElements[-1].appendChild(element)
self.openElements.append(element)
return element
def insertElementTable(self, token):
"""Create an element and insert it into the tree"""
element = self.createElement(token)
if self.openElements[-1].name not in tableInsertModeElements:
return self.insertElementNormal(token)
else:
#We should be in the InTable mode. This means we want to do
#special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
if insertBefore is None:
parent.appendChild(element)
else:
parent.insertBefore(element, insertBefore)
self.openElements.append(element)
return element
def insertText(self, data, parent=None):
"""Insert text data."""
if parent is None:
parent = self.openElements[-1]
if (not self.insertFromTable or (self.insertFromTable and
self.openElements[-1].name
not in tableInsertModeElements)):
parent.insertText(data)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
parent.insertText(data, insertBefore)
def getTableMisnestedNodePosition(self):
"""Get the foster parent element, and sibling to insert before
(or None) when inserting a misnested table node"""
# The foster parent element is the one which comes before the most
# recently opened table element
# XXX - this is really inelegant
lastTable=None
fosterParent = None
insertBefore = None
for elm in self.openElements[::-1]:
if elm.name == "table":
lastTable = elm
break
if lastTable:
# XXX - we should really check that this parent is actually a
# node here
if lastTable.parent:
fosterParent = lastTable.parent
insertBefore = lastTable
else:
fosterParent = self.openElements[
self.openElements.index(lastTable) - 1]
else:
fosterParent = self.openElements[0]
return fosterParent, insertBefore
def generateImpliedEndTags(self, exclude=None):
name = self.openElements[-1].name
# XXX td, th and tr are not actually needed
if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt"))
and name != exclude):
self.openElements.pop()
# XXX This is not entirely what the specification says. We should
# investigate it more closely.
self.generateImpliedEndTags(exclude)
def getDocument(self):
"Return the final tree"
return self.document
def getFragment(self):
"Return the final fragment"
#assert self.innerHTML
fragment = self.fragmentClass()
self.openElements[0].reparentChildren(fragment)
return fragment
def testSerializer(self, node):
"""Serialize the subtree of node in the format required by unit tests
node - the node from which to start serializing"""
raise NotImplementedError
| WebGL-master | resources/html5lib/src/html5lib/treebuilders/_base.py |
import warnings
import re
import _base
from html5lib.constants import DataLossWarning
import html5lib.constants as constants
import etree as etree_builders
from html5lib import ihatexml
try:
import lxml.etree as etree
except ImportError:
pass
fullTree = True
tag_regexp = re.compile("{([^}]*)}(.*)")
"""Module for supporting the lxml.etree library. The idea here is to use as much
of the native library as possible, without using fragile hacks like custom element
names that break between releases. The downside of this is that we cannot represent
all possible trees; specifically the following are known to cause problems:
Text or comments as siblings of the root element
Doctypes with no name
When any of these things occur, we emit a DataLossWarning
"""
class DocumentType(object):
def __init__(self, name, publicId, systemId):
self.name = name
self.publicId = publicId
self.systemId = systemId
class Document(object):
def __init__(self):
self._elementTree = None
self._childNodes = []
def appendChild(self, element):
self._elementTree.getroot().addnext(element._element)
def _getChildNodes(self):
return self._childNodes
childNodes = property(_getChildNodes)
def testSerializer(element):
rv = []
finalText = None
filter = ihatexml.InfosetFilter()
def serializeElement(element, indent=0):
if not hasattr(element, "tag"):
if hasattr(element, "getroot"):
#Full tree case
rv.append("#document")
if element.docinfo.internalDTD:
if not (element.docinfo.public_id or
element.docinfo.system_url):
dtd_str = "<!DOCTYPE %s>"%element.docinfo.root_name
else:
dtd_str = """<!DOCTYPE %s "%s" "%s">"""%(
element.docinfo.root_name,
element.docinfo.public_id,
element.docinfo.system_url)
rv.append("|%s%s"%(' '*(indent+2), dtd_str))
next_element = element.getroot()
while next_element.getprevious() is not None:
next_element = next_element.getprevious()
while next_element is not None:
serializeElement(next_element, indent+2)
next_element = next_element.getnext()
elif isinstance(element, basestring):
#Text in a fragment
rv.append("|%s\"%s\""%(' '*indent, element))
else:
#Fragment case
rv.append("#document-fragment")
for next_element in element:
serializeElement(next_element, indent+2)
elif type(element.tag) == type(etree.Comment):
rv.append("|%s<!-- %s -->"%(' '*indent, element.text))
else:
nsmatch = etree_builders.tag_regexp.match(element.tag)
if nsmatch is not None:
ns = nsmatch.group(1)
tag = nsmatch.group(2)
prefix = constants.prefixes[ns]
rv.append("|%s<%s %s>"%(' '*indent, prefix,
filter.fromXmlName(tag)))
else:
rv.append("|%s<%s>"%(' '*indent,
filter.fromXmlName(element.tag)))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.iteritems():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
name = filter.fromXmlName(name)
prefix = constants.prefixes[ns]
attr_string = "%s %s"%(prefix, name)
else:
attr_string = filter.fromXmlName(name)
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' '*(indent+2), name, value))
if element.text:
rv.append("|%s\"%s\"" %(' '*(indent+2), element.text))
indent += 2
for child in element.getchildren():
serializeElement(child, indent)
if hasattr(element, "tail") and element.tail:
rv.append("|%s\"%s\"" %(' '*(indent-2), element.tail))
serializeElement(element, 0)
if finalText is not None:
rv.append("|%s\"%s\""%(' '*2, finalText))
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
finalText = None
def serializeElement(element):
if not hasattr(element, "tag"):
if element.docinfo.internalDTD:
if element.docinfo.doctype:
dtd_str = element.docinfo.doctype
else:
dtd_str = "<!DOCTYPE %s>"%element.docinfo.root_name
rv.append(dtd_str)
serializeElement(element.getroot())
elif type(element.tag) == type(etree.Comment):
rv.append("<!--%s-->"%(element.text,))
else:
#This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>"%(element.tag,))
else:
attr = " ".join(["%s=\"%s\""%(name, value)
for name, value in element.attrib.iteritems()])
rv.append("<%s %s>"%(element.tag, attr))
if element.text:
rv.append(element.text)
for child in element.getchildren():
serializeElement(child)
rv.append("</%s>"%(element.tag,))
if hasattr(element, "tail") and element.tail:
rv.append(element.tail)
serializeElement(element)
if finalText is not None:
rv.append("%s\""%(' '*2, finalText))
return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = None
commentClass = None
fragmentClass = Document
def __init__(self, namespaceHTMLElements, fullTree = False):
builder = etree_builders.getETreeModule(etree, fullTree=fullTree)
filter = self.filter = ihatexml.InfosetFilter()
self.namespaceHTMLElements = namespaceHTMLElements
class Attributes(dict):
def __init__(self, element, value={}):
self._element = element
dict.__init__(self, value)
for key, value in self.iteritems():
if isinstance(key, tuple):
name = "{%s}%s"%(key[2], filter.coerceAttribute(key[1]))
else:
name = filter.coerceAttribute(key)
self._element._element.attrib[name] = value
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
if isinstance(key, tuple):
name = "{%s}%s"%(key[2], filter.coerceAttribute(key[1]))
else:
name = filter.coerceAttribute(key)
self._element._element.attrib[name] = value
class Element(builder.Element):
def __init__(self, name, namespace):
name = filter.coerceElement(name)
builder.Element.__init__(self, name, namespace=namespace)
self._attributes = Attributes(self)
def _setName(self, name):
self._name = filter.coerceElement(name)
self._element.tag = self._getETreeTag(
self._name, self._namespace)
def _getName(self):
return filter.fromXmlName(self._name)
name = property(_getName, _setName)
def _getAttributes(self):
return self._attributes
def _setAttributes(self, attributes):
self._attributes = Attributes(self, attributes)
attributes = property(_getAttributes, _setAttributes)
def insertText(self, data, insertBefore=None):
data = filter.coerceCharacters(data)
builder.Element.insertText(self, data, insertBefore)
def appendChild(self, child):
builder.Element.appendChild(self, child)
class Comment(builder.Comment):
def __init__(self, data):
data = filter.coerceComment(data)
builder.Comment.__init__(self, data)
def _setData(self, data):
data = filter.coerceComment(data)
self._element.text = data
def _getData(self):
return self._element.text
data = property(_getData, _setData)
self.elementClass = Element
        self.commentClass = Comment  # use the filtering Comment subclass defined above
#self.fragmentClass = builder.DocumentFragment
_base.TreeBuilder.__init__(self, namespaceHTMLElements)
def reset(self):
_base.TreeBuilder.reset(self)
self.insertComment = self.insertCommentInitial
self.initial_comments = []
self.doctype = None
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._elementTree
else:
return self.document._elementTree.getroot()
def getFragment(self):
fragment = []
element = self.openElements[0]._element
if element.text:
fragment.append(element.text)
fragment.extend(element.getchildren())
if element.tail:
fragment.append(element.tail)
return fragment
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
if not name or ihatexml.nonXmlNameBMPRegexp.search(name) or name[0] == '"':
warnings.warn("lxml cannot represent null or non-xml doctype", DataLossWarning)
doctype = self.doctypeClass(name, publicId, systemId)
self.doctype = doctype
def insertCommentInitial(self, data, parent=None):
self.initial_comments.append(data)
def insertRoot(self, token):
"""Create the document root"""
#Because of the way libxml2 works, it doesn't seem to be possible to
#alter information like the doctype after the tree has been parsed.
#Therefore we need to use the built-in parser to create our initial
#tree, after which we can add elements like normal
docStr = ""
if self.doctype and self.doctype.name and not self.doctype.name.startswith('"'):
docStr += "<!DOCTYPE %s"%self.doctype.name
if (self.doctype.publicId is not None or
self.doctype.systemId is not None):
docStr += ' PUBLIC "%s" "%s"'%(self.doctype.publicId or "",
self.doctype.systemId or "")
docStr += ">"
docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>"
try:
root = etree.fromstring(docStr)
except etree.XMLSyntaxError:
print docStr
raise
#Append the initial comments:
for comment_token in self.initial_comments:
root.addprevious(etree.Comment(comment_token["data"]))
#Create the root document and add the ElementTree to it
self.document = self.documentClass()
self.document._elementTree = root.getroottree()
# Give the root element the right name
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s"%(namespace, name)
root.tag = etree_tag
#Add the root element to the internal child/open data structures
root_element = self.elementClass(name, namespace)
root_element._element = root
self.document._childNodes.append(root_element)
self.openElements.append(root_element)
#Reset to the default insert comment function
self.insertComment = super(TreeBuilder, self).insertComment
| WebGL-master | resources/html5lib/src/html5lib/treebuilders/etree_lxml.py |
"""A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.simpletree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
The supplied simpletree module provides a python-only implementation
of a full treebuilder and is a useful reference for the semantics of
the various methods.
"""
treeBuilderCache = {}
import sys
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are "simpletree", "dom", "etree" and "beautifulsoup"
"simpletree" - a built-in DOM-ish tree type with support for some
more pythonic idioms.
"dom" - A generic builder for DOM implementations, defaulting to
a xml.dom.minidom based implementation for the sake of
backwards compatibility (as releases up until 0.10 had a
builder called "dom" that was a minidom implemenation).
"etree" - A generic builder for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
"beautifulsoup" - Beautiful soup (if installed)
implementation - (Currently applies to the "etree" and "dom" tree types). A
module implementing the tree type e.g.
xml.etree.ElementTree or lxml.etree."""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
import dom
# XXX: Keep backwards compatibility by using minidom if no implementation is given
if implementation is None:
from xml.dom import minidom
implementation = minidom
# XXX: NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "simpletree":
import simpletree
treeBuilderCache[treeType] = simpletree.TreeBuilder
elif treeType == "beautifulsoup":
import soup
treeBuilderCache[treeType] = soup.TreeBuilder
elif treeType == "lxml":
import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
# Come up with a sane default
if implementation is None:
try:
import xml.etree.cElementTree as ET
except ImportError:
try:
import xml.etree.ElementTree as ET
except ImportError:
try:
import cElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
implementation = ET
import etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """%treeType)
return treeBuilderCache.get(treeType)
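# A minimal usage sketch (assumes the html5lib package itself is importable);
# "etree" resolves through the implementation fallback chain above, and the
# parsed result is the root element of the built tree.
if __name__ == "__main__":
    import html5lib
    builder = getTreeBuilder("etree")
    parser = html5lib.HTMLParser(tree=builder)
    document = parser.parse("<p>Hello <b>world")
    print document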
| WebGL-master | resources/html5lib/src/html5lib/treebuilders/__init__.py |
import warnings
warnings.warn("BeautifulSoup 3.x (as of 3.1) is not fully compatible with html5lib and support will be removed in the future", DeprecationWarning)
from BeautifulSoup import BeautifulSoup, Tag, NavigableString, Comment, Declaration
import _base
from html5lib.constants import namespaces, DataLossWarning
class AttrList(object):
def __init__(self, element):
self.element = element
self.attrs = dict(self.element.attrs)
def __iter__(self):
return self.attrs.items().__iter__()
def __setitem__(self, name, value):
self.element[name] = value
def items(self):
return self.attrs.items()
def keys(self):
return self.attrs.keys()
def __getitem__(self, name):
return self.attrs[name]
def __contains__(self, name):
return name in self.attrs.keys()
def __eq__(self, other):
if len(self.keys()) != len(other.keys()):
return False
for item in self.keys():
if item not in other:
return False
if self[item] != other[item]:
return False
return True
class Element(_base.Node):
def __init__(self, element, soup, namespace):
_base.Node.__init__(self, element.name)
self.element = element
self.soup = soup
self.namespace = namespace
def _nodeIndex(self, node, refNode):
# Finds a node by identity rather than equality
for index in range(len(self.element.contents)):
if id(self.element.contents[index]) == id(refNode.element):
return index
return None
def appendChild(self, node):
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[-1].__class__ == NavigableString):
# Concatenate new text onto old text node
# (TODO: This has O(n^2) performance, for input like "a</a>a</a>a</a>...")
newStr = NavigableString(self.element.contents[-1]+node.element)
# Remove the old text node
# (Can't simply use .extract() by itself, because it fails if
# an equal text node exists within the parent node)
oldElement = self.element.contents[-1]
del self.element.contents[-1]
oldElement.parent = None
oldElement.extract()
self.element.insert(len(self.element.contents), newStr)
else:
self.element.insert(len(self.element.contents), node.element)
node.parent = self
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes:
for name, value in attributes.items():
self.element[name] = value
attributes = property(getAttributes, setAttributes)
def insertText(self, data, insertBefore=None):
text = TextNode(NavigableString(data), self.soup)
if insertBefore:
self.insertBefore(text, insertBefore)
else:
self.appendChild(text)
def insertBefore(self, node, refNode):
index = self._nodeIndex(node, refNode)
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[index-1].__class__ == NavigableString):
# (See comments in appendChild)
newStr = NavigableString(self.element.contents[index-1]+node.element)
oldNode = self.element.contents[index-1]
del self.element.contents[index-1]
oldNode.parent = None
oldNode.extract()
self.element.insert(index-1, newStr)
else:
self.element.insert(index, node.element)
node.parent = self
def removeChild(self, node):
index = self._nodeIndex(node.parent, node)
del node.parent.element.contents[index]
node.element.parent = None
node.element.extract()
node.parent = None
def reparentChildren(self, newParent):
while self.element.contents:
child = self.element.contents[0]
child.extract()
if isinstance(child, Tag):
newParent.appendChild(Element(child, self.soup, namespaces["html"]))
else:
newParent.appendChild(TextNode(child, self.soup))
def cloneNode(self):
node = Element(Tag(self.soup, self.element.name), self.soup, self.namespace)
for key,value in self.attributes:
node.attributes[key] = value
return node
def hasContent(self):
return self.element.contents
def getNameTuple(self):
if self.namespace == None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TextNode(Element):
def __init__(self, element, soup):
_base.Node.__init__(self, None)
self.element = element
self.soup = soup
def cloneNode(self):
raise NotImplementedError
class TreeBuilder(_base.TreeBuilder):
def __init__(self, namespaceHTMLElements):
if namespaceHTMLElements:
warnings.warn("BeautifulSoup cannot represent elements in any namespace", DataLossWarning)
_base.TreeBuilder.__init__(self, namespaceHTMLElements)
def documentClass(self):
self.soup = BeautifulSoup("")
return Element(self.soup, self.soup, None)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
if publicId:
self.soup.insert(0, Declaration("DOCTYPE %s PUBLIC \"%s\" \"%s\""%(name, publicId, systemId or "")))
elif systemId:
self.soup.insert(0, Declaration("DOCTYPE %s SYSTEM \"%s\""%
(name, systemId)))
else:
self.soup.insert(0, Declaration("DOCTYPE %s"%name))
def elementClass(self, name, namespace):
if namespace is not None:
warnings.warn("BeautifulSoup cannot represent elements in any namespace", DataLossWarning)
return Element(Tag(self.soup, name), self.soup, namespace)
def commentClass(self, data):
return TextNode(Comment(data), self.soup)
def fragmentClass(self):
self.soup = BeautifulSoup("")
self.soup.name = "[document_fragment]"
return Element(self.soup, self.soup, None)
def appendChild(self, node):
self.soup.insert(len(self.soup.contents), node.element)
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
return self.soup
def getFragment(self):
return _base.TreeBuilder.getFragment(self).element
def testSerializer(element):
import re
rv = []
def serializeElement(element, indent=0):
if isinstance(element, Declaration):
doctype_regexp = r'DOCTYPE\s+(?P<name>[^\s]*)( PUBLIC "(?P<publicId>.*)" "(?P<systemId1>.*)"| SYSTEM "(?P<systemId2>.*)")?'
m = re.compile(doctype_regexp).match(element.string)
assert m is not None, "DOCTYPE did not match expected format"
name = m.group('name')
publicId = m.group('publicId')
if publicId is not None:
systemId = m.group('systemId1') or ""
else:
systemId = m.group('systemId2')
if publicId is not None or systemId is not None:
rv.append("""|%s<!DOCTYPE %s "%s" "%s">"""%
(' '*indent, name, publicId or "", systemId or ""))
else:
rv.append("|%s<!DOCTYPE %s>"%(' '*indent, name))
elif isinstance(element, BeautifulSoup):
if element.name == "[document_fragment]":
rv.append("#document-fragment")
else:
rv.append("#document")
elif isinstance(element, Comment):
rv.append("|%s<!-- %s -->"%(' '*indent, element.string))
elif isinstance(element, unicode):
rv.append("|%s\"%s\"" %(' '*indent, element))
else:
rv.append("|%s<%s>"%(' '*indent, element.name))
if element.attrs:
for name, value in sorted(element.attrs):
rv.append('|%s%s="%s"' % (' '*(indent+2), name, value))
indent += 2
if hasattr(element, "contents"):
for child in element.contents:
serializeElement(child, indent)
serializeElement(element, 0)
return "\n".join(rv)
| WebGL-master | resources/html5lib/src/html5lib/treebuilders/soup.py |
try:
from types import ModuleType
except ImportError:
from new import module as ModuleType
import re
import types
import _base
from html5lib import ihatexml
from html5lib import constants
from html5lib.constants import namespaces
tag_regexp = re.compile("{([^}]*)}(.*)")
moduleCache = {}
def getETreeModule(ElementTreeImplementation, fullTree=False):
name = "_" + ElementTreeImplementation.__name__+"builder"
if name in moduleCache:
return moduleCache[name]
else:
mod = ModuleType("_" + ElementTreeImplementation.__name__+"builder")
objs = getETreeBuilder(ElementTreeImplementation, fullTree)
mod.__dict__.update(objs)
moduleCache[name] = mod
return mod
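# Each ElementTree implementation gets its own synthetic module here so that
# the classes built by getETreeBuilder stay bound, via closure, to exactly one
# implementation; moduleCache keeps repeated lookups cheap.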
def getETreeBuilder(ElementTreeImplementation, fullTree=False):
ElementTree = ElementTreeImplementation
class Element(_base.Node):
def __init__(self, name, namespace=None):
self._name = name
self._namespace = namespace
self._element = ElementTree.Element(self._getETreeTag(name,
namespace))
if namespace is None:
self.nameTuple = namespaces["html"], self._name
else:
self.nameTuple = self._namespace, self._name
self.parent = None
self._childNodes = []
self._flags = []
def _getETreeTag(self, name, namespace):
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s"%(namespace, name)
return etree_tag
def _setName(self, name):
self._name = name
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getName(self):
return self._name
name = property(_getName, _setName)
def _setNamespace(self, namespace):
self._namespace = namespace
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getNamespace(self):
return self._namespace
namespace = property(_getNamespace, _setNamespace)
def _getAttributes(self):
return self._element.attrib
def _setAttributes(self, attributes):
#Delete existing attributes first
#XXX - there may be a better way to do this...
for key in self._element.attrib.keys():
del self._element.attrib[key]
for key, value in attributes.iteritems():
if isinstance(key, tuple):
name = "{%s}%s"%(key[2], key[1])
else:
name = key
self._element.set(name, value)
attributes = property(_getAttributes, _setAttributes)
def _getChildNodes(self):
return self._childNodes
def _setChildNodes(self, value):
del self._element[:]
self._childNodes = []
for element in value:
self.insertChild(element)
childNodes = property(_getChildNodes, _setChildNodes)
def hasContent(self):
"""Return true if the node has children or text"""
return bool(self._element.text or len(self._element))
def appendChild(self, node):
self._childNodes.append(node)
self._element.append(node._element)
node.parent = self
def insertBefore(self, node, refNode):
index = list(self._element).index(refNode._element)
self._element.insert(index, node._element)
node.parent = self
def removeChild(self, node):
self._element.remove(node._element)
node.parent=None
def insertText(self, data, insertBefore=None):
if not(len(self._element)):
if not self._element.text:
self._element.text = ""
self._element.text += data
elif insertBefore is None:
#Insert the text as the tail of the last child element
if not self._element[-1].tail:
self._element[-1].tail = ""
self._element[-1].tail += data
else:
#Insert the text before the specified node
children = list(self._element)
index = children.index(insertBefore._element)
if index > 0:
if not self._element[index-1].tail:
self._element[index-1].tail = ""
self._element[index-1].tail += data
else:
if not self._element.text:
self._element.text = ""
self._element.text += data
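# Note: ElementTree has no text-node type. Text before the first child lives
# in the parent's .text and text after a child lives in that child's .tail,
# which is why the branches above append to .text or .tail rather than
# inserting text nodes.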
def cloneNode(self):
element = type(self)(self.name, self.namespace)
for name, value in self.attributes.iteritems():
element.attributes[name] = value
return element
def reparentChildren(self, newParent):
if newParent.childNodes:
newParent.childNodes[-1]._element.tail += self._element.text
else:
if not newParent._element.text:
newParent._element.text = ""
if self._element.text is not None:
newParent._element.text += self._element.text
self._element.text = ""
_base.Node.reparentChildren(self, newParent)
class Comment(Element):
def __init__(self, data):
#Use the superclass constructor to set all properties on the
#wrapper element
self._element = ElementTree.Comment(data)
self.parent = None
self._childNodes = []
self._flags = []
def _getData(self):
return self._element.text
def _setData(self, value):
self._element.text = value
data = property(_getData, _setData)
class DocumentType(Element):
def __init__(self, name, publicId, systemId):
Element.__init__(self, "<!DOCTYPE>")
self._element.text = name
self.publicId = publicId
self.systemId = systemId
def _getPublicId(self):
return self._element.get(u"publicId", "")
def _setPublicId(self, value):
if value is not None:
self._element.set(u"publicId", value)
publicId = property(_getPublicId, _setPublicId)
def _getSystemId(self):
return self._element.get(u"systemId", "")
def _setSystemId(self, value):
if value is not None:
self._element.set(u"systemId", value)
systemId = property(_getSystemId, _setSystemId)
class Document(Element):
def __init__(self):
Element.__init__(self, "<DOCUMENT_ROOT>")
class DocumentFragment(Element):
def __init__(self):
Element.__init__(self, "<DOCUMENT_FRAGMENT>")
def testSerializer(element):
rv = []
finalText = None
def serializeElement(element, indent=0):
if not(hasattr(element, "tag")):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append( """<!DOCTYPE %s "%s" "%s">"""%(
element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>"%(element.text,))
elif element.tag == "<DOCUMENT_ROOT>":
rv.append("#document")
if element.text:
rv.append("|%s\"%s\""%(' '*(indent+2), element.text))
if element.tail:
finalText = element.tail
elif element.tag == ElementTree.Comment:
rv.append("|%s<!-- %s -->"%(' '*indent, element.text))
else:
assert type(element.tag) in types.StringTypes, "Expected unicode, got %s"%type(element.tag)
nsmatch = tag_regexp.match(element.tag)
if nsmatch is None:
name = element.tag
else:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
name = "%s %s"%(prefix, name)
rv.append("|%s<%s>"%(' '*indent, name))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.iteritems():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
attr_string = "%s %s"%(prefix, name)
else:
attr_string = name
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' '*(indent+2), name, value))
if element.text:
rv.append("|%s\"%s\"" %(' '*(indent+2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if element.tail:
rv.append("|%s\"%s\"" %(' '*(indent-2), element.tail))
serializeElement(element, 0)
if finalText is not None:
rv.append("|%s\"%s\""%(' '*2, finalText))
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
finalText = None
filter = ihatexml.InfosetFilter()
def serializeElement(element):
if not hasattr(element, "tag"):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append( """<!DOCTYPE %s PUBLIC "%s" "%s">"""%(
element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>"%(element.text,))
elif element.tag == "<DOCUMENT_ROOT>":
if element.text:
rv.append(element.text)
if element.tail:
finalText = element.tail
for child in element:
serializeElement(child)
elif element.tag == ElementTree.Comment:
rv.append("<!--%s-->"%(element.text,))
else:
#This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>"%(filter.fromXmlName(element.tag),))
else:
attr = " ".join(["%s=\"%s\""%(
filter.fromXmlName(name), value)
for name, value in element.attrib.iteritems()])
rv.append("<%s %s>"%(element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>"%(element.tag,))
if element.tail:
rv.append(element.tail)
serializeElement(element)
if finalText is not None:
rv.append("%s\""%(' '*2, finalText))
return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = Element
commentClass = Comment
fragmentClass = DocumentFragment
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._element
else:
if self.defaultNamespace is not None:
return self.document._element.find(
"{%s}html"%self.defaultNamespace)
else:
return self.document._element.find("html")
def getFragment(self):
return _base.TreeBuilder.getFragment(self)._element
return locals()
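# A minimal usage sketch (assumes the standard-library ElementTree); Element
# and tostring come from the synthetic module built by getETreeModule, and
# _element below is the wrapped etree node.
if __name__ == "__main__":
    import xml.etree.ElementTree as ET
    mod = getETreeModule(ET)
    el = mod.Element("div")
    el.insertText("hello")
    print mod.tostring(el._element)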
| WebGL-master | resources/html5lib/src/html5lib/treebuilders/etree.py |
import _base
from html5lib.constants import voidElements, namespaces, prefixes
from xml.sax.saxutils import escape
# Really crappy basic implementation of a DOM-core like thing
class Node(_base.Node):
type = -1
def __init__(self, name):
self.name = name
self.parent = None
self.value = None
self.childNodes = []
self._flags = []
def __iter__(self):
for node in self.childNodes:
yield node
for item in node:
yield item
def __unicode__(self):
return self.name
def toxml(self):
raise NotImplementedError
def printTree(self, indent=0):
tree = '\n|%s%s' % (' '* indent, unicode(self))
for child in self.childNodes:
tree += child.printTree(indent + 2)
return tree
def appendChild(self, node):
assert isinstance(node, Node)
if (isinstance(node, TextNode) and self.childNodes and
isinstance(self.childNodes[-1], TextNode)):
self.childNodes[-1].value += node.value
else:
self.childNodes.append(node)
node.parent = self
def insertText(self, data, insertBefore=None):
assert isinstance(data, unicode), "data %s is of type %s expected unicode"%(repr(data), type(data))
if insertBefore is None:
self.appendChild(TextNode(data))
else:
self.insertBefore(TextNode(data), insertBefore)
def insertBefore(self, node, refNode):
index = self.childNodes.index(refNode)
if (isinstance(node, TextNode) and index > 0 and
isinstance(self.childNodes[index - 1], TextNode)):
self.childNodes[index - 1].value += node.value
else:
self.childNodes.insert(index, node)
node.parent = self
def removeChild(self, node):
try:
self.childNodes.remove(node)
except:
# XXX
raise
node.parent = None
def cloneNode(self):
raise NotImplementedError
def hasContent(self):
"""Return true if the node has children or text"""
return bool(self.childNodes)
def getNameTuple(self):
if self.namespace == None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class Document(Node):
type = 1
def __init__(self):
Node.__init__(self, None)
def __str__(self):
return "#document"
def __unicode__(self):
return str(self)
def appendChild(self, child):
Node.appendChild(self, child)
def toxml(self, encoding="utf=8"):
result = ""
for child in self.childNodes:
result += child.toxml()
return result.encode(encoding)
def hilite(self, encoding="utf-8"):
result = "<pre>"
for child in self.childNodes:
result += child.hilite()
return result.encode(encoding) + "</pre>"
def printTree(self):
tree = unicode(self)
for child in self.childNodes:
tree += child.printTree(2)
return tree
def cloneNode(self):
return Document()
class DocumentFragment(Document):
type = 2
def __str__(self):
return "#document-fragment"
def __unicode__(self):
return str(self)
def cloneNode(self):
return DocumentFragment()
class DocumentType(Node):
type = 3
def __init__(self, name, publicId, systemId):
Node.__init__(self, name)
self.publicId = publicId
self.systemId = systemId
def __unicode__(self):
if self.publicId or self.systemId:
publicId = self.publicId or ""
systemId = self.systemId or ""
return """<!DOCTYPE %s "%s" "%s">"""%(
self.name, publicId, systemId)
else:
return u"<!DOCTYPE %s>" % self.name
toxml = __unicode__
def hilite(self):
return '<code class="markup doctype"><!DOCTYPE %s></code>' % self.name
def cloneNode(self):
return DocumentType(self.name, self.publicId, self.systemId)
class TextNode(Node):
type = 4
def __init__(self, value):
Node.__init__(self, None)
self.value = value
def __unicode__(self):
return u"\"%s\"" % self.value
def toxml(self):
return escape(self.value)
hilite = toxml
def cloneNode(self):
return TextNode(self.value)
class Element(Node):
type = 5
def __init__(self, name, namespace=None):
Node.__init__(self, name)
self.namespace = namespace
self.attributes = {}
def __unicode__(self):
if self.namespace == None:
return u"<%s>" % self.name
else:
return u"<%s %s>"%(prefixes[self.namespace], self.name)
def toxml(self):
result = '<' + self.name
if self.attributes:
for name,value in self.attributes.iteritems():
result += u' %s="%s"' % (name, escape(value,{'"':'"'}))
if self.childNodes:
result += '>'
for child in self.childNodes:
result += child.toxml()
result += u'</%s>' % self.name
else:
result += u'/>'
return result
def hilite(self):
result = '&lt;<code class="markup element-name">%s</code>' % self.name
if self.attributes:
for name, value in self.attributes.iteritems():
result += ' <code class="markup attribute-name">%s</code>=<code class="markup attribute-value">"%s"</code>' % (name, escape(value, {'"': '&quot;'}))
if self.childNodes:
result += "&gt;"
for child in self.childNodes:
result += child.hilite()
elif self.name in voidElements:
return result + "&gt;"
return result + '&lt;/<code class="markup element-name">%s</code>&gt;' % self.name
def printTree(self, indent):
tree = '\n|%s%s' % (' '*indent, unicode(self))
indent += 2
if self.attributes:
for name, value in sorted(self.attributes.iteritems()):
if isinstance(name, tuple):
name = "%s %s"%(name[0], name[1])
tree += '\n|%s%s="%s"' % (' ' * indent, name, value)
for child in self.childNodes:
tree += child.printTree(indent)
return tree
def cloneNode(self):
newNode = Element(self.name)
if hasattr(self, 'namespace'):
newNode.namespace = self.namespace
for attr, value in self.attributes.iteritems():
newNode.attributes[attr] = value
return newNode
class CommentNode(Node):
type = 6
def __init__(self, data):
Node.__init__(self, None)
self.data = data
def __unicode__(self):
return "<!-- %s -->" % self.data
def toxml(self):
return "<!--%s-->" % self.data
def hilite(self):
return '<code class="markup comment"><!--%s--></code>' % escape(self.data)
def cloneNode(self):
return CommentNode(self.data)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = Element
commentClass = CommentNode
fragmentClass = DocumentFragment
def testSerializer(self, node):
return node.printTree()
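# A minimal usage sketch (assumes html5lib is importable alongside this
# module): parse into a simpletree and print the test-style dump.
if __name__ == "__main__":
    import html5lib
    parser = html5lib.HTMLParser(tree=TreeBuilder)
    document = parser.parse("<!DOCTYPE html><p class=a>hi")
    print document.printTree()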
| WebGL-master | resources/html5lib/src/html5lib/treebuilders/simpletree.py |
from xml.dom import minidom, Node, XML_NAMESPACE, XMLNS_NAMESPACE
try:
from types import ModuleType
except ImportError:
from new import module as ModuleType
import re
import weakref
import _base
from html5lib import constants, ihatexml
from html5lib.constants import namespaces
moduleCache = {}
def getDomModule(DomImplementation):
name = "_" + DomImplementation.__name__+"builder"
if name in moduleCache:
return moduleCache[name]
else:
mod = ModuleType(name)
objs = getDomBuilder(DomImplementation)
mod.__dict__.update(objs)
moduleCache[name] = mod
return mod
def getDomBuilder(DomImplementation):
Dom = DomImplementation
class AttrList(object):
def __init__(self, element):
self.element = element
def __iter__(self):
return self.element.attributes.items().__iter__()
def __setitem__(self, name, value):
self.element.setAttribute(name, value)
def __len__(self):
return len(self.element.attributes.items())
def items(self):
return [(item[0], item[1]) for item in
self.element.attributes.items()]
def keys(self):
return self.element.attributes.keys()
def __getitem__(self, name):
return self.element.getAttribute(name)
def __contains__(self, name):
if isinstance(name, tuple):
raise NotImplementedError
else:
return self.element.hasAttribute(name)
class NodeBuilder(_base.Node):
def __init__(self, element):
_base.Node.__init__(self, element.nodeName)
self.element = element
namespace = property(lambda self:hasattr(self.element, "namespaceURI")
and self.element.namespaceURI or None)
def appendChild(self, node):
node.parent = self
self.element.appendChild(node.element)
def insertText(self, data, insertBefore=None):
text = self.element.ownerDocument.createTextNode(data)
if insertBefore:
self.element.insertBefore(text, insertBefore.element)
else:
self.element.appendChild(text)
def insertBefore(self, node, refNode):
self.element.insertBefore(node.element, refNode.element)
node.parent = self
def removeChild(self, node):
if node.element.parentNode == self.element:
self.element.removeChild(node.element)
node.parent = None
def reparentChildren(self, newParent):
while self.element.hasChildNodes():
child = self.element.firstChild
self.element.removeChild(child)
newParent.element.appendChild(child)
self.childNodes = []
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes:
for name, value in attributes.items():
if isinstance(name, tuple):
if name[0] is not None:
qualifiedName = (name[0] + ":" + name[1])
else:
qualifiedName = name[1]
self.element.setAttributeNS(name[2], qualifiedName,
value)
else:
self.element.setAttribute(
name, value)
attributes = property(getAttributes, setAttributes)
def cloneNode(self):
return NodeBuilder(self.element.cloneNode(False))
def hasContent(self):
return self.element.hasChildNodes()
def getNameTuple(self):
if self.namespace == None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TreeBuilder(_base.TreeBuilder):
def documentClass(self):
self.dom = Dom.getDOMImplementation().createDocument(None,None,None)
return weakref.proxy(self)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
domimpl = Dom.getDOMImplementation()
doctype = domimpl.createDocumentType(name, publicId, systemId)
self.document.appendChild(NodeBuilder(doctype))
if Dom == minidom:
doctype.ownerDocument = self.dom
def elementClass(self, name, namespace=None):
if namespace is None and self.defaultNamespace is None:
node = self.dom.createElement(name)
else:
node = self.dom.createElementNS(namespace, name)
return NodeBuilder(node)
def commentClass(self, data):
return NodeBuilder(self.dom.createComment(data))
def fragmentClass(self):
return NodeBuilder(self.dom.createDocumentFragment())
def appendChild(self, node):
self.dom.appendChild(node.element)
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
return self.dom
def getFragment(self):
return _base.TreeBuilder.getFragment(self).element
def insertText(self, data, parent=None):
if parent != self:
_base.TreeBuilder.insertText(self, data, parent)
else:
# HACK: allow text nodes as children of the document node
if hasattr(self.dom, '_child_node_types'):
if not Node.TEXT_NODE in self.dom._child_node_types:
self.dom._child_node_types=list(self.dom._child_node_types)
self.dom._child_node_types.append(Node.TEXT_NODE)
self.dom.appendChild(self.dom.createTextNode(data))
name = None
def testSerializer(element):
element.normalize()
rv = []
def serializeElement(element, indent=0):
if element.nodeType == Node.DOCUMENT_TYPE_NODE:
if element.name:
if element.publicId or element.systemId:
publicId = element.publicId or ""
systemId = element.systemId or ""
rv.append( """|%s<!DOCTYPE %s "%s" "%s">"""%(
' '*indent, element.name, publicId, systemId))
else:
rv.append("|%s<!DOCTYPE %s>"%(' '*indent, element.name))
else:
rv.append("|%s<!DOCTYPE >"%(' '*indent,))
elif element.nodeType == Node.DOCUMENT_NODE:
rv.append("#document")
elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
rv.append("#document-fragment")
elif element.nodeType == Node.COMMENT_NODE:
rv.append("|%s<!-- %s -->"%(' '*indent, element.nodeValue))
elif element.nodeType == Node.TEXT_NODE:
rv.append("|%s\"%s\"" %(' '*indent, element.nodeValue))
else:
if (hasattr(element, "namespaceURI") and
element.namespaceURI != None):
name = "%s %s"%(constants.prefixes[element.namespaceURI],
element.nodeName)
else:
name = element.nodeName
rv.append("|%s<%s>"%(' '*indent, name))
if element.hasAttributes():
attributes = []
for i in range(len(element.attributes)):
attr = element.attributes.item(i)
name = attr.nodeName
value = attr.value
ns = attr.namespaceURI
if ns:
name = "%s %s"%(constants.prefixes[ns], attr.localName)
else:
name = attr.nodeName
attributes.append((name, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' '*(indent+2), name, value))
indent += 2
for child in element.childNodes:
serializeElement(child, indent)
serializeElement(element, 0)
return "\n".join(rv)
def dom2sax(node, handler, nsmap={'xml':XML_NAMESPACE}):
if node.nodeType == Node.ELEMENT_NODE:
if not nsmap:
handler.startElement(node.nodeName, node.attributes)
for child in node.childNodes: dom2sax(child, handler, nsmap)
handler.endElement(node.nodeName)
else:
attributes = dict(node.attributes.itemsNS())
# gather namespace declarations
prefixes = []
for attrname in node.attributes.keys():
attr = node.getAttributeNode(attrname)
if (attr.namespaceURI == XMLNS_NAMESPACE or
(attr.namespaceURI == None and attr.nodeName.startswith('xmlns'))):
prefix = (attr.nodeName != 'xmlns' and attr.nodeName or None)
handler.startPrefixMapping(prefix, attr.nodeValue)
prefixes.append(prefix)
nsmap = nsmap.copy()
nsmap[prefix] = attr.nodeValue
del attributes[(attr.namespaceURI, attr.nodeName)]
# apply namespace declarations
for attrname in node.attributes.keys():
attr = node.getAttributeNode(attrname)
if attr.namespaceURI == None and ':' in attr.nodeName:
prefix = attr.nodeName.split(':')[0]
if nsmap.has_key(prefix):
del attributes[(attr.namespaceURI, attr.nodeName)]
attributes[(nsmap[prefix],attr.nodeName)]=attr.nodeValue
# SAX events
ns = node.namespaceURI or nsmap.get(None,None)
handler.startElementNS((ns,node.nodeName), node.nodeName, attributes)
for child in node.childNodes: dom2sax(child, handler, nsmap)
handler.endElementNS((ns, node.nodeName), node.nodeName)
for prefix in prefixes: handler.endPrefixMapping(prefix)
elif node.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
handler.characters(node.nodeValue)
elif node.nodeType == Node.DOCUMENT_NODE:
handler.startDocument()
for child in node.childNodes: dom2sax(child, handler, nsmap)
handler.endDocument()
elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
for child in node.childNodes: dom2sax(child, handler, nsmap)
else:
# ATTRIBUTE_NODE
# ENTITY_NODE
# PROCESSING_INSTRUCTION_NODE
# COMMENT_NODE
# DOCUMENT_TYPE_NODE
# NOTATION_NODE
pass
return locals()
# Keep backwards compatibility with things that directly load
# classes/functions from this module
for key, value in getDomModule(minidom).__dict__.items():
globals()[key] = value
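# A minimal usage sketch of dom2sax (assumes only the standard library):
# replay a minidom tree as SAX events into a ContentHandler.
if __name__ == "__main__":
    from xml.dom.minidom import parseString
    from xml.sax.handler import ContentHandler
    class ElementCounter(ContentHandler):
        def __init__(self):
            ContentHandler.__init__(self)
            self.count = 0
        def startElementNS(self, name, qname, attrs):
            self.count += 1
    handler = ElementCounter()
    dom2sax(parseString("<root><a/><b/></root>"), handler)
    print handler.count  # 3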
| WebGL-master | resources/html5lib/src/html5lib/treebuilders/dom.py |
import os
import sys
import traceback
import StringIO
import warnings
import re
warnings.simplefilter("error")
from support import html5lib_test_files as data_files
from support import TestData, convert, convertExpected
import html5lib
from html5lib import html5parser, treebuilders, constants
treeTypes = {"simpletree":treebuilders.getTreeBuilder("simpletree"),
"DOM":treebuilders.getTreeBuilder("dom")}
#Try whatever etree implementations are available, from a list that is
#"supposed" to work
try:
import xml.etree.ElementTree as ElementTree
treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
except ImportError:
try:
import elementtree.ElementTree as ElementTree
treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
except ImportError:
pass
try:
import xml.etree.cElementTree as cElementTree
treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
except ImportError:
try:
import cElementTree
treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
except ImportError:
pass
try:
try:
import lxml.html as lxml
except ImportError:
import lxml.etree as lxml
treeTypes['lxml'] = treebuilders.getTreeBuilder("lxml", lxml, fullTree=True)
except ImportError:
pass
try:
import BeautifulSoup
treeTypes["beautifulsoup"] = treebuilders.getTreeBuilder("beautifulsoup", fullTree=True)
except ImportError:
pass
#Try whatever dom implementations are available, from a list that is
#"supposed" to work
try:
import pxdom
treeTypes["pxdom"] = treebuilders.getTreeBuilder("dom", pxdom)
except ImportError:
pass
#Run the parse error checks
checkParseErrors = False
#XXX - There should just be one function here but for some reason the testcase
#format differs from the treedump format by a single space character
def convertTreeDump(data):
return "\n".join(convert(3)(data).split("\n")[1:])
namespaceExpected = re.compile(r"^(\s*)<(\S+)>", re.M).sub
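# Tree dumps from testSerializer prefix lines with "|" plus two spaces of
# indent ("|  <html>"), while the expected data in the .dat files uses "|"
# plus one space ("| <html>"); convert(3) above and convertExpected
# (convert(2)) normalise both to the bare content before comparison.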
def runParserTest(innerHTML, input, expected, errors, treeClass,
namespaceHTMLElements):
#XXX - move this out into the setup function
#concatenate all consecutive character tokens into a single token
try:
p = html5parser.HTMLParser(tree = treeClass,
namespaceHTMLElements=namespaceHTMLElements)
except constants.DataLossWarning:
return
try:
if innerHTML:
document = p.parseFragment(input, innerHTML)
else:
try:
document = p.parse(input)
except constants.DataLossWarning:
return
except:
errorMsg = u"\n".join([u"\n\nInput:", input, u"\nExpected:", expected,
u"\nTraceback:", traceback.format_exc()])
assert False, errorMsg.encode("utf8")
output = convertTreeDump(p.tree.testSerializer(document))
expected = convertExpected(expected)
if namespaceHTMLElements:
expected = namespaceExpected(r"\1<html \2>", expected)
errorMsg = u"\n".join([u"\n\nInput:", input, u"\nExpected:", expected,
u"\nReceived:", output])
assert expected == output, errorMsg.encode("utf8")
errStr = [u"Line: %i Col: %i %s"%(line, col,
constants.E[errorcode] % datavars if isinstance(datavars, dict) else (datavars,)) for
((line,col), errorcode, datavars) in p.errors]
errorMsg2 = u"\n".join([u"\n\nInput:", input,
u"\nExpected errors (" + str(len(errors)) + u"):\n" + u"\n".join(errors),
u"\nActual errors (" + str(len(p.errors)) + u"):\n" + u"\n".join(errStr)])
if checkParseErrors:
assert len(p.errors) == len(errors), errorMsg2.encode("utf-8")
def test_parser():
sys.stderr.write('Testing tree builders '+ " ".join(treeTypes.keys()) + "\n")
files = data_files('tree-construction')
for filename in files:
testName = os.path.basename(filename).replace(".dat","")
tests = TestData(filename, "data")
for index, test in enumerate(tests):
input, errors, innerHTML, expected = [test[key] for key in
'data', 'errors',
'document-fragment',
'document']
if errors:
errors = errors.split("\n")
for treeName, treeCls in treeTypes.iteritems():
for namespaceHTMLElements in (True, False):
yield (runParserTest, innerHTML, input, expected, errors, treeCls,
namespaceHTMLElements)
| WebGL-master | resources/html5lib/src/html5lib/tests/test_parser.py |
import sys
import os
import glob
import unittest
#Allow us to import the parent module
os.chdir(os.path.split(os.path.abspath(__file__))[0])
sys.path.insert(0, os.path.abspath(os.curdir))
sys.path.insert(0, os.path.abspath(os.pardir))
sys.path.insert(0, os.path.join(os.path.abspath(os.pardir), "src"))
def buildTestSuite():
suite = unittest.TestSuite()
for testcase in glob.glob('test_*.py'):
if testcase in ("test_tokenizer.py", "test_parser.py", "test_parser2.py"):
module = os.path.splitext(testcase)[0]
suite.addTest(__import__(module).buildTestSuite())
return suite
def main():
results = unittest.TextTestRunner().run(buildTestSuite())
return results
if __name__ == "__main__":
results = main()
if not results.wasSuccessful():
sys.exit(1)
| WebGL-master | resources/html5lib/src/html5lib/tests/runparsertests.py |
import os
import sys
import StringIO
import unittest
import warnings
warnings.simplefilter("error")
from support import html5lib_test_files, TestData, convertExpected
from html5lib import html5parser, treewalkers, treebuilders, constants
from html5lib.filters.lint import Filter as LintFilter, LintError
def PullDOMAdapter(node):
from xml.dom import Node
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, COMMENT, CHARACTERS
if node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
for childNode in node.childNodes:
for event in PullDOMAdapter(childNode):
yield event
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
raise NotImplementedError("DOCTYPE nodes are not supported by PullDOM")
elif node.nodeType == Node.COMMENT_NODE:
yield COMMENT, node
elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
yield CHARACTERS, node
elif node.nodeType == Node.ELEMENT_NODE:
yield START_ELEMENT, node
for childNode in node.childNodes:
for event in PullDOMAdapter(childNode):
yield event
yield END_ELEMENT, node
else:
raise NotImplementedError("Node type not supported: " + str(node.nodeType))
treeTypes = {
"simpletree": {"builder": treebuilders.getTreeBuilder("simpletree"),
"walker": treewalkers.getTreeWalker("simpletree")},
"DOM": {"builder": treebuilders.getTreeBuilder("dom"),
"walker": treewalkers.getTreeWalker("dom")},
"PullDOM": {"builder": treebuilders.getTreeBuilder("dom"),
"adapter": PullDOMAdapter,
"walker": treewalkers.getTreeWalker("pulldom")},
}
#Try whatever etree implementations are available, from a list that is
#"supposed" to work
try:
import xml.etree.ElementTree as ElementTree
treeTypes['ElementTree'] = \
{"builder": treebuilders.getTreeBuilder("etree", ElementTree),
"walker": treewalkers.getTreeWalker("etree", ElementTree)}
except ImportError:
try:
import elementtree.ElementTree as ElementTree
treeTypes['ElementTree'] = \
{"builder": treebuilders.getTreeBuilder("etree", ElementTree),
"walker": treewalkers.getTreeWalker("etree", ElementTree)}
except ImportError:
pass
try:
import xml.etree.cElementTree as ElementTree
treeTypes['cElementTree'] = \
{"builder": treebuilders.getTreeBuilder("etree", ElementTree),
"walker": treewalkers.getTreeWalker("etree", ElementTree)}
except ImportError:
try:
import cElementTree as ElementTree
treeTypes['cElementTree'] = \
{"builder": treebuilders.getTreeBuilder("etree", ElementTree),
"walker": treewalkers.getTreeWalker("etree", ElementTree)}
except ImportError:
pass
try:
import lxml.etree as ElementTree
# treeTypes['lxml_as_etree'] = \
# {"builder": treebuilders.getTreeBuilder("etree", ElementTree),
# "walker": treewalkers.getTreeWalker("etree", ElementTree)}
treeTypes['lxml_native'] = \
{"builder": treebuilders.getTreeBuilder("lxml"),
"walker": treewalkers.getTreeWalker("lxml")}
except ImportError:
pass
try:
import BeautifulSoup
treeTypes["beautifulsoup"] = \
{"builder": treebuilders.getTreeBuilder("beautifulsoup"),
"walker": treewalkers.getTreeWalker("beautifulsoup")}
except ImportError:
pass
#Try whatever DOM implementations are available, from a list that is
#"supposed" to work
try:
import pxdom
treeTypes['pxdom'] = \
{"builder": treebuilders.getTreeBuilder("dom", pxdom),
"walker": treewalkers.getTreeWalker("dom")}
except ImportError:
pass
try:
from genshi.core import QName, Attrs
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
def GenshiAdapter(tree):
text = None
for token in treewalkers.getTreeWalker("simpletree")(tree):
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
if text is None:
text = token["data"]
else:
text += token["data"]
elif text is not None:
yield TEXT, text, (None, -1, -1)
text = None
if type in ("StartTag", "EmptyTag"):
if token["namespace"]:
name = u"{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
yield (START,
(QName(name),
Attrs([(QName(attr),value) for attr,value in token["data"]])),
(None, -1, -1))
if type == "EmptyTag":
type = "EndTag"
if type == "EndTag":
yield END, QName(token["name"]), (None, -1, -1)
elif type == "Comment":
yield COMMENT, token["data"], (None, -1, -1)
elif type == "Doctype":
yield DOCTYPE, (token["name"], token["publicId"],
token["systemId"]), (None, -1, -1)
else:
pass # FIXME: What to do?
if text is not None:
yield TEXT, text, (None, -1, -1)
#treeTypes["genshi"] = \
# {"builder": treebuilders.getTreeBuilder("simpletree"),
# "adapter": GenshiAdapter,
# "walker": treewalkers.getTreeWalker("genshi")}
except ImportError:
pass
def concatenateCharacterTokens(tokens):
charactersToken = None
for token in tokens:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
if charactersToken is None:
charactersToken = {"type": "Characters", "data": token["data"]}
else:
charactersToken["data"] += token["data"]
else:
if charactersToken is not None:
yield charactersToken
charactersToken = None
yield token
if charactersToken is not None:
yield charactersToken
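# Adjacent character tokens are merged above because different walkers may
# split a text run arbitrarily, while the expected serializations use a
# single token per contiguous run of text.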
def convertTokens(tokens):
output = []
indent = 0
for token in concatenateCharacterTokens(tokens):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
if (token["namespace"] and
token["namespace"] != constants.namespaces["html"]):
if token["namespace"] in constants.prefixes:
name = constants.prefixes[token["namespace"]]
else:
name = token["namespace"]
name += u" " + token["name"]
else:
name = token["name"]
output.append(u"%s<%s>" % (" "*indent, name))
indent += 2
attrs = token["data"]
if attrs:
#TODO: Remove this if statement, attrs should always exist
for (namespace,name),value in sorted(attrs.items()):
if namespace:
if namespace in constants.prefixes:
outputname = constants.prefixes[namespace]
else:
outputname = namespace
outputname += u" " + name
else:
outputname = name
output.append(u"%s%s=\"%s\"" % (" "*indent, outputname, value))
if type == "EmptyTag":
indent -= 2
elif type == "EndTag":
indent -= 2
elif type == "Comment":
output.append("%s<!-- %s -->" % (" "*indent, token["data"]))
elif type == "Doctype":
if token["name"]:
if token["publicId"]:
output.append("""%s<!DOCTYPE %s "%s" "%s">"""%
(" "*indent, token["name"],
token["publicId"],
token["systemId"] and token["systemId"] or ""))
elif token["systemId"]:
output.append("""%s<!DOCTYPE %s "" "%s">"""%
(" "*indent, token["name"],
token["systemId"]))
else:
output.append("%s<!DOCTYPE %s>"%(" "*indent,
token["name"]))
else:
output.append("%s<!DOCTYPE >" % (" "*indent,))
elif type in ("Characters", "SpaceCharacters"):
output.append("%s\"%s\"" % (" "*indent, token["data"]))
else:
pass # TODO: what to do with errors?
return u"\n".join(output)
import re
attrlist = re.compile(r"^(\s+)\w+=.*(\n\1\w+=.*)+",re.M)
def sortattrs(x):
lines = x.group(0).split("\n")
lines.sort()
return "\n".join(lines)
class TokenTestCase(unittest.TestCase):
def test_all_tokens(self):
expected = [
{'data': {}, 'type': 'StartTag', 'namespace': u'http://www.w3.org/1999/xhtml', 'name': u'html'},
{'data': {}, 'type': 'StartTag', 'namespace': u'http://www.w3.org/1999/xhtml', 'name': u'head'},
{'data': {}, 'type': 'EndTag', 'namespace': u'http://www.w3.org/1999/xhtml', 'name': u'head'},
{'data': {}, 'type': 'StartTag', 'namespace': u'http://www.w3.org/1999/xhtml', 'name': u'body'},
{'data': u'a', 'type': 'Characters'},
{'data': {}, 'type': 'StartTag', 'namespace': u'http://www.w3.org/1999/xhtml', 'name': u'div'},
{'data': u'b', 'type': 'Characters'},
{'data': {}, 'type': 'EndTag', 'namespace': u'http://www.w3.org/1999/xhtml', 'name': u'div'},
{'data': u'c', 'type': 'Characters'},
{'data': {}, 'type': 'EndTag', 'namespace': u'http://www.w3.org/1999/xhtml', 'name': u'body'},
{'data': {}, 'type': 'EndTag', 'namespace': u'http://www.w3.org/1999/xhtml', 'name': u'html'}
]
for treeName, treeCls in treeTypes.iteritems():
p = html5parser.HTMLParser(tree = treeCls["builder"])
document = p.parse("<html><head></head><body>a<div>b</div>c</body></html>")
document = treeCls.get("adapter", lambda x: x)(document)
output = treeCls["walker"](document)
for expectedToken, outputToken in zip(expected, output):
self.assertEquals(expectedToken, outputToken)
def run_test(innerHTML, input, expected, errors, treeClass):
try:
p = html5parser.HTMLParser(tree = treeClass["builder"])
if innerHTML:
document = p.parseFragment(StringIO.StringIO(input), innerHTML)
else:
document = p.parse(StringIO.StringIO(input))
except constants.DataLossWarning:
#Ignore testcases we know we don't pass
return
document = treeClass.get("adapter", lambda x: x)(document)
try:
output = convertTokens(treeClass["walker"](document))
output = attrlist.sub(sortattrs, output)
expected = attrlist.sub(sortattrs, convertExpected(expected))
assert expected == output, "\n".join([
"", "Input:", input,
"", "Expected:", expected,
"", "Received:", output
])
except NotImplementedError:
pass # Amnesty for those that confess...
def test_treewalker():
sys.stdout.write('Testing tree walkers '+ " ".join(treeTypes.keys()) + "\n")
for treeName, treeCls in treeTypes.iteritems():
files = html5lib_test_files('tree-construction')
for filename in files:
testName = os.path.basename(filename).replace(".dat","")
tests = TestData(filename, "data")
for index, test in enumerate(tests):
(input, errors,
innerHTML, expected) = [test[key] for key in ("data", "errors",
"document-fragment",
"document")]
errors = errors.split("\n")
yield run_test, innerHTML, input, expected, errors, treeCls
| WebGL-master | resources/html5lib/src/html5lib/tests/test_treewalkers.py |
import support
from html5lib import html5parser
from html5lib.constants import namespaces
from html5lib.treebuilders import dom
import unittest
# tests that aren't autogenerated from text files
class MoreParserTests(unittest.TestCase):
def test_assertDoctypeCloneable(self):
parser = html5parser.HTMLParser(tree=dom.TreeBuilder)
doc = parser.parse('<!DOCTYPE HTML>')
self.assert_(doc.cloneNode(True))
def test_line_counter(self):
# http://groups.google.com/group/html5lib-discuss/browse_frm/thread/f4f00e4a2f26d5c0
parser = html5parser.HTMLParser(tree=dom.TreeBuilder)
parser.parse("<pre>\nx\n>\n</pre>")
def test_namespace_html_elements_0(self):
parser = html5parser.HTMLParser(namespaceHTMLElements=True)
doc = parser.parse("<html></html>")
self.assert_(doc.childNodes[0].namespace == namespaces["html"])
def test_namespace_html_elements_1(self):
parser = html5parser.HTMLParser(namespaceHTMLElements=False)
doc = parser.parse("<html></html>")
self.assert_(doc.childNodes[0].namespace == None)
def buildTestSuite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == '__main__':
main()
| WebGL-master | resources/html5lib/src/html5lib/tests/test_parser2.py |
import unittest
from html5lib.filters.whitespace import Filter
from html5lib.constants import spaceCharacters
spaceCharacters = u"".join(spaceCharacters)
class TestCase(unittest.TestCase):
def runTest(self, input, expected):
output = list(Filter(input))
errorMsg = "\n".join(["\n\nInput:", str(input),
"\nExpected:", str(expected),
"\nReceived:", str(output)])
self.assertEquals(output, expected, errorMsg)
def runTestUnmodifiedOutput(self, input):
self.runTest(input, input)
def testPhrasingElements(self):
self.runTestUnmodifiedOutput(
[{"type": u"Characters", "data": u"This is a " },
{"type": u"StartTag", "name": u"span", "data": [] },
{"type": u"Characters", "data": u"phrase" },
{"type": u"EndTag", "name": u"span", "data": []},
{"type": u"SpaceCharacters", "data": u" " },
{"type": u"Characters", "data": u"with" },
{"type": u"SpaceCharacters", "data": u" " },
{"type": u"StartTag", "name": u"em", "data": [] },
{"type": u"Characters", "data": u"emphasised text" },
{"type": u"EndTag", "name": u"em", "data": []},
{"type": u"Characters", "data": u" and an " },
{"type": u"StartTag", "name": u"img", "data": [[u"alt", u"image"]] },
{"type": u"Characters", "data": u"." }])
def testLeadingWhitespace(self):
self.runTest(
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"SpaceCharacters", "data": spaceCharacters},
{"type": u"Characters", "data": u"foo"},
{"type": u"EndTag", "name": u"p", "data": []}],
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"SpaceCharacters", "data": u" "},
{"type": u"Characters", "data": u"foo"},
{"type": u"EndTag", "name": u"p", "data": []}])
def testLeadingWhitespaceAsCharacters(self):
self.runTest(
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": spaceCharacters + u"foo"},
{"type": u"EndTag", "name": u"p", "data": []}],
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": u" foo"},
{"type": u"EndTag", "name": u"p", "data": []}])
def testTrailingWhitespace(self):
self.runTest(
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": u"foo"},
{"type": u"SpaceCharacters", "data": spaceCharacters},
{"type": u"EndTag", "name": u"p", "data": []}],
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": u"foo"},
{"type": u"SpaceCharacters", "data": u" "},
{"type": u"EndTag", "name": u"p", "data": []}])
def testTrailingWhitespaceAsCharacters(self):
self.runTest(
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": u"foo" + spaceCharacters},
{"type": u"EndTag", "name": u"p", "data": []}],
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": u"foo "},
{"type": u"EndTag", "name": u"p", "data": []}])
def testWhitespace(self):
self.runTest(
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": u"foo" + spaceCharacters + "bar"},
{"type": u"EndTag", "name": u"p", "data": []}],
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": u"foo bar"},
{"type": u"EndTag", "name": u"p", "data": []}])
def testLeadingWhitespaceInPre(self):
self.runTestUnmodifiedOutput(
[{"type": u"StartTag", "name": u"pre", "data": []},
{"type": u"SpaceCharacters", "data": spaceCharacters},
{"type": u"Characters", "data": u"foo"},
{"type": u"EndTag", "name": u"pre", "data": []}])
def testLeadingWhitespaceAsCharactersInPre(self):
self.runTestUnmodifiedOutput(
[{"type": u"StartTag", "name": u"pre", "data": []},
{"type": u"Characters", "data": spaceCharacters + u"foo"},
{"type": u"EndTag", "name": u"pre", "data": []}])
def testTrailingWhitespaceInPre(self):
self.runTestUnmodifiedOutput(
[{"type": u"StartTag", "name": u"pre", "data": []},
{"type": u"Characters", "data": u"foo"},
{"type": u"SpaceCharacters", "data": spaceCharacters},
{"type": u"EndTag", "name": u"pre", "data": []}])
def testTrailingWhitespaceAsCharactersInPre(self):
self.runTestUnmodifiedOutput(
[{"type": u"StartTag", "name": u"pre", "data": []},
{"type": u"Characters", "data": u"foo" + spaceCharacters},
{"type": u"EndTag", "name": u"pre", "data": []}])
def testWhitespaceInPre(self):
self.runTestUnmodifiedOutput(
[{"type": u"StartTag", "name": u"pre", "data": []},
{"type": u"Characters", "data": u"foo" + spaceCharacters + "bar"},
{"type": u"EndTag", "name": u"pre", "data": []}])
def buildTestSuite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == "__main__":
main()
| WebGL-master | resources/html5lib/src/html5lib/tests/test_whitespace_filter.py |
import os
import sys
import codecs
import glob
base_path = os.path.split(__file__)[0]
if os.path.exists(os.path.join(base_path, 'testdata')):
#release
test_dir = os.path.join(base_path, 'testdata')
else:
#development
test_dir = os.path.abspath(
os.path.join(base_path,
os.path.pardir, os.path.pardir,
os.path.pardir, 'testdata'))
assert os.path.exists(test_dir), "Test data not found"
#import the development html5lib
sys.path.insert(0, os.path.abspath(os.path.join(base_path,
os.path.pardir,
os.path.pardir)))
import html5lib
from html5lib import html5parser, treebuilders
del base_path
#Build a dict of available trees
treeTypes = {"simpletree":treebuilders.getTreeBuilder("simpletree"),
"DOM":treebuilders.getTreeBuilder("dom")}
#Try whatever etree implementations are available, from a list that is
#"supposed" to work
try:
import xml.etree.ElementTree as ElementTree
treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
except ImportError:
try:
import elementtree.ElementTree as ElementTree
treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
except ImportError:
pass
try:
import xml.etree.cElementTree as cElementTree
treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
except ImportError:
try:
import cElementTree
treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
except ImportError:
pass
try:
import lxml.etree as lxml
treeTypes['lxml'] = treebuilders.getTreeBuilder("etree", lxml, fullTree=True)
except ImportError:
pass
try:
import BeautifulSoup
treeTypes["beautifulsoup"] = treebuilders.getTreeBuilder("beautifulsoup", fullTree=True)
except ImportError:
pass
def html5lib_test_files(subdirectory, files='*.dat'):
return glob.glob(os.path.join(test_dir,subdirectory,files))
class DefaultDict(dict):
def __init__(self, default, *args, **kwargs):
self.default = default
dict.__init__(self, *args, **kwargs)
def __getitem__(self, key):
return dict.get(self, key, self.default)
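# A small usage sketch (illustrative values, not part of the test suite):
# missing keys fall back to the default instead of raising KeyError, which
# lets the TestData parser below probe optional sections safely.
#   d = DefaultDict(None, data="foo")
#   d["data"]    -> "foo"
#   d["missing"] -> None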
class TestData(object):
def __init__(self, filename, newTestHeading="data"):
self.f = codecs.open(filename, encoding="utf8")
self.newTestHeading = newTestHeading
def __iter__(self):
data = DefaultDict(None)
key=None
for line in self.f:
heading = self.isSectionHeading(line)
if heading:
if data and heading == self.newTestHeading:
#Remove trailing newline
data[key] = data[key][:-1]
yield self.normaliseOutput(data)
data = DefaultDict(None)
key = heading
data[key]=""
elif key is not None:
data[key] += line
if data:
yield self.normaliseOutput(data)
    def isSectionHeading(self, line):
        """If the current line is a test section heading, return the heading;
        otherwise return False."""
if line.startswith("#"):
return line[1:].strip()
else:
return False
def normaliseOutput(self, data):
#Remove trailing newlines
for key,value in data.iteritems():
if value.endswith("\n"):
data[key] = value[:-1]
return data
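# A minimal usage sketch, assuming a hypothetical .dat file in the usual
# html5lib layout: TestData starts a new test at each "#data" heading and
# yields one dict per test, keyed by the "#"-prefixed section headings.
#
#   #data
#   <p>hello
#   #document
#   | <p>
#   |   "hello"
#
#   for test in TestData("tree-construction/sample.dat"):
#       markup, expected_tree = test["data"], test["document"]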
def convert(stripChars):
    def convertData(data):
        """Convert the output of str(document) to the format used in the test cases"""
data = data.split("\n")
rv = []
for line in data:
if line.startswith("|"):
rv.append(line[stripChars:])
else:
rv.append(line)
return "\n".join(rv)
return convertData
convertExpected = convert(2)
| WebGL-master | resources/html5lib/src/html5lib/tests/support.py |
import sys
import os
import unittest
import cStringIO
import warnings
import re
try:
import json
except ImportError:
import simplejson as json
from support import html5lib_test_files
from html5lib.tokenizer import HTMLTokenizer
from html5lib import constants
class TokenizerTestParser(object):
def __init__(self, initialState, lastStartTag=None):
self.tokenizer = HTMLTokenizer
self._state = initialState
self._lastStartTag = lastStartTag
def parse(self, stream, encoding=None, innerHTML=False):
tokenizer = self.tokenizer(stream, encoding)
self.outputTokens = []
tokenizer.state = getattr(tokenizer, self._state)
if self._lastStartTag is not None:
tokenizer.currentToken = {"type": "startTag",
"name":self._lastStartTag}
types = dict((v,k) for k,v in constants.tokenTypes.iteritems())
for token in tokenizer:
getattr(self, 'process%s' % types[token["type"]])(token)
return self.outputTokens
def processDoctype(self, token):
self.outputTokens.append([u"DOCTYPE", token["name"], token["publicId"],
token["systemId"], token["correct"]])
def processStartTag(self, token):
self.outputTokens.append([u"StartTag", token["name"],
dict(token["data"][::-1]), token["selfClosing"]])
def processEmptyTag(self, token):
if token["name"] not in constants.voidElements:
self.outputTokens.append(u"ParseError")
self.outputTokens.append([u"StartTag", token["name"], dict(token["data"][::-1])])
def processEndTag(self, token):
self.outputTokens.append([u"EndTag", token["name"],
token["selfClosing"]])
def processComment(self, token):
self.outputTokens.append([u"Comment", token["data"]])
def processSpaceCharacters(self, token):
self.outputTokens.append([u"Character", token["data"]])
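        # After the first space-character token, treat all later space tokens
        # exactly like ordinary character tokens by rebinding the handler on
        # this instance.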
self.processSpaceCharacters = self.processCharacters
def processCharacters(self, token):
self.outputTokens.append([u"Character", token["data"]])
def processEOF(self, token):
pass
def processParseError(self, token):
self.outputTokens.append([u"ParseError", token["data"]])
def concatenateCharacterTokens(tokens):
outputTokens = []
for token in tokens:
        if "ParseError" not in token and token[0] == "Character":
            if (outputTokens and "ParseError" not in outputTokens[-1] and
                outputTokens[-1][0] == "Character"):
outputTokens[-1][1] += token[1]
else:
outputTokens.append(token)
else:
outputTokens.append(token)
return outputTokens
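# For instance (illustrative tokens): consecutive Character tokens collapse
# into one, while a ParseError entry breaks the run:
#   [[u"Character", u"a"], [u"Character", u"b"], u"ParseError", [u"Character", u"c"]]
#   -> [[u"Character", u"ab"], u"ParseError", [u"Character", u"c"]]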
def normalizeTokens(tokens):
# TODO: convert tests to reflect arrays
for i, token in enumerate(tokens):
if token[0] == u'ParseError':
tokens[i] = token[0]
return tokens
def tokensMatch(expectedTokens, receivedTokens, ignoreErrorOrder,
                ignoreErrors=False):
    """Test whether the test has passed or failed.
    If the ignoreErrorOrder flag is set to true, we don't test the
    relative positions of parse errors and non-parse errors.
    """
checkSelfClosing= False
for token in expectedTokens:
if (token[0] == "StartTag" and len(token) == 4
or token[0] == "EndTag" and len(token) == 3):
checkSelfClosing = True
break
if not checkSelfClosing:
for token in receivedTokens:
if token[0] == "StartTag" or token[0] == "EndTag":
token.pop()
if not ignoreErrorOrder and not ignoreErrors:
return expectedTokens == receivedTokens
else:
        #Sort the tokens into two groups: non-parse errors and parse errors
tokens = {"expected":[[],[]], "received":[[],[]]}
for tokenType, tokenList in zip(tokens.keys(),
(expectedTokens, receivedTokens)):
for token in tokenList:
if token != "ParseError":
tokens[tokenType][0].append(token)
else:
if not ignoreErrors:
tokens[tokenType][1].append(token)
return tokens["expected"] == tokens["received"]
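# For example (illustrative tokens): with ignoreErrorOrder=True these match,
# because parse errors and other tokens are bucketed separately before the
# comparison:
#   expected = ["ParseError", [u"Character", u"a"]]
#   received = [[u"Character", u"a"], "ParseError"]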
def unescape_test(test):
def decode(inp):
return inp.decode("unicode-escape")
test["input"] = decode(test["input"])
for token in test["output"]:
if token == "ParseError":
continue
else:
token[1] = decode(token[1])
if len(token) > 2:
                # .items() snapshots the attribute dict so it can be mutated
                # safely while iterating (iterating the dict directly here
                # would misbehave).
                for key, value in token[2].items():
del token[2][key]
token[2][decode(key)] = decode(value)
return test
def runTokenizerTest(test):
#XXX - move this out into the setup function
#concatenate all consecutive character tokens into a single token
if 'doubleEscaped' in test:
test = unescape_test(test)
expected = concatenateCharacterTokens(test['output'])
if 'lastStartTag' not in test:
test['lastStartTag'] = None
outBuffer = cStringIO.StringIO()
stdout = sys.stdout
sys.stdout = outBuffer
parser = TokenizerTestParser(test['initialState'],
test['lastStartTag'])
tokens = parser.parse(test['input'])
tokens = concatenateCharacterTokens(tokens)
received = normalizeTokens(tokens)
errorMsg = u"\n".join(["\n\nInitial state:",
test['initialState'] ,
"\nInput:", unicode(test['input']),
"\nExpected:", unicode(expected),
"\nreceived:", unicode(tokens)])
errorMsg = errorMsg.encode("utf-8")
ignoreErrorOrder = test.get('ignoreErrorOrder', False)
assert tokensMatch(expected, received, ignoreErrorOrder), errorMsg
def _doCapitalize(match):
return match.group(1).upper()
_capitalizeRe = re.compile(r"\W+(\w)").sub
def capitalize(s):
s = s.lower()
s = _capitalizeRe(_doCapitalize, s)
return s
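# e.g. capitalize("Data state") -> "dataState", matching the tokenizer's
# camelCase state-attribute names looked up via getattr() in
# TokenizerTestParser.parse above.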
def test_tokenizer():
for filename in html5lib_test_files('tokenizer', '*.test'):
tests = json.load(file(filename))
testName = os.path.basename(filename).replace(".test","")
if 'tests' in tests:
for index,test in enumerate(tests['tests']):
#Skip tests with a self closing flag
skip = False
if 'initialStates' not in test:
test["initialStates"] = ["Data state"]
for initialState in test["initialStates"]:
test["initialState"] = capitalize(initialState)
yield runTokenizerTest, test
| WebGL-master | resources/html5lib/src/html5lib/tests/test_tokenizer.py |
import sys
import unittest
from html5lib.filters.formfiller import SimpleFilter
class FieldStorage(dict):
def getlist(self, name):
l = self[name]
if isinstance(l, list):
return l
elif isinstance(l, tuple) or hasattr(l, '__iter__'):
return list(l)
return [l]
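# getlist normalizes however the value was stored (illustrative data):
#   FieldStorage({"foo": "bar"}).getlist("foo")      -> ["bar"]
#   FieldStorage({"foo": ["a", "b"]}).getlist("foo") -> ["a", "b"]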
class TestCase(unittest.TestCase):
def runTest(self, input, formdata, expected):
try:
output = list(SimpleFilter(input, formdata))
except NotImplementedError, nie:
# Amnesty for those that confess...
print >>sys.stderr, "Not implemented:", str(nie)
else:
errorMsg = "\n".join(["\n\nInput:", str(input),
"\nForm data:", str(formdata),
"\nExpected:", str(expected),
"\nReceived:", str(output)])
self.assertEquals(output, expected, errorMsg)
def testSingleTextInputWithValue(self):
self.runTest(
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"text"), (u"name", u"foo"), (u"value", u"quux")]}],
FieldStorage({"foo": "bar"}),
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"text"), (u"name", u"foo"), (u"value", u"bar")]}])
def testSingleTextInputWithoutValue(self):
self.runTest(
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"text"), (u"name", u"foo")]}],
FieldStorage({"foo": "bar"}),
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"text"), (u"name", u"foo"), (u"value", u"bar")]}])
def testSingleCheckbox(self):
self.runTest(
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"bar")]}],
FieldStorage({"foo": "bar"}),
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"bar"), (u"checked", u"")]}])
def testSingleCheckboxShouldBeUnchecked(self):
self.runTest(
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"quux")]}],
FieldStorage({"foo": "bar"}),
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"quux")]}])
def testSingleCheckboxCheckedByDefault(self):
self.runTest(
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"bar"), (u"checked", u"")]}],
FieldStorage({"foo": "bar"}),
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"bar"), (u"checked", u"")]}])
def testSingleCheckboxCheckedByDefaultShouldBeUnchecked(self):
self.runTest(
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"quux"), (u"checked", u"")]}],
FieldStorage({"foo": "bar"}),
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"quux")]}])
def testSingleTextareaWithValue(self):
self.runTest(
[{"type": u"StartTag", "name": u"textarea", "data": [(u"name", u"foo")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"textarea", "data": []}],
FieldStorage({"foo": "bar"}),
[{"type": u"StartTag", "name": u"textarea", "data": [(u"name", u"foo")]},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"textarea", "data": []}])
def testSingleTextareaWithoutValue(self):
self.runTest(
[{"type": u"StartTag", "name": u"textarea", "data": [(u"name", u"foo")]},
{"type": u"EndTag", "name": u"textarea", "data": []}],
FieldStorage({"foo": "bar"}),
[{"type": u"StartTag", "name": u"textarea", "data": [(u"name", u"foo")]},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"textarea", "data": []}])
def testSingleSelectWithValue(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "bar"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar"), (u"selected", u"")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectWithValueShouldBeUnselected(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "quux"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectWithoutValue(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "bar"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"selected", u"")]},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectWithoutValueShouldBeUnselected(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "quux"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectTwoOptionsWithValue(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "bar"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar"), (u"selected", u"")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectTwoOptionsWithValueShouldBeUnselected(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"baz")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "quux"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"baz")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectTwoOptionsWithoutValue(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "bar"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"selected", u"")]},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectTwoOptionsWithoutValueShouldBeUnselected(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"baz"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "quux"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"baz"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectMultiple(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo"), (u"multiple", u"")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": ["bar", "quux"]}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo"), (u"multiple", u"")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar"), (u"selected", u"")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux"), (u"selected", u"")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testTwoSelect(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []},
{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": ["bar", "quux"]}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar"), (u"selected", u"")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []},
{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux"), (u"selected", u"")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def buildTestSuite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == "__main__":
main()
| WebGL-master | resources/html5lib/src/html5lib/tests/test_formfiller.py |
import os
import unittest
from support import html5lib_test_files
try:
import json
except ImportError:
import simplejson as json
import html5lib
from html5lib import html5parser, serializer, constants
from html5lib.treewalkers._base import TreeWalker
optionals_loaded = []
try:
from lxml import etree
optionals_loaded.append("lxml")
except ImportError:
pass
default_namespace = constants.namespaces["html"]
class JsonWalker(TreeWalker):
def __iter__(self):
for token in self.tree:
type = token[0]
if type == "StartTag":
if len(token) == 4:
namespace, name, attrib = token[1:4]
else:
namespace = default_namespace
name, attrib = token[1:3]
yield self.startTag(namespace, name, self._convertAttrib(attrib))
elif type == "EndTag":
if len(token) == 3:
namespace, name = token[1:3]
else:
namespace = default_namespace
name = token[1]
yield self.endTag(namespace, name)
elif type == "EmptyTag":
if len(token) == 4:
namespace, name, attrib = token[1:]
else:
namespace = default_namespace
name, attrib = token[1:]
for token in self.emptyTag(namespace, name, self._convertAttrib(attrib)):
yield token
elif type == "Comment":
yield self.comment(token[1])
elif type in ("Characters", "SpaceCharacters"):
for token in self.text(token[1]):
yield token
elif type == "Doctype":
if len(token) == 4:
yield self.doctype(token[1], token[2], token[3])
elif len(token) == 3:
yield self.doctype(token[1], token[2])
else:
yield self.doctype(token[1])
else:
raise ValueError("Unknown token type: " + type)
def _convertAttrib(self, attribs):
"""html5lib tree-walkers use a dict of (namespace, name): value for
attributes, but JSON cannot represent this. Convert from the format
in the serializer tests (a list of dicts with "namespace", "name",
and "value" as keys) to html5lib's tree-walker format."""
attrs = {}
for attrib in attribs:
name = (attrib["namespace"], attrib["name"])
assert(name not in attrs)
attrs[name] = attrib["value"]
return attrs
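# A small illustration (hypothetical attribute): the serializer-test format
#   [{"namespace": None, "name": u"id", "value": u"x"}]
# becomes the tree-walker format
#   {(None, u"id"): u"x"}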
def serialize_html(input, options):
options = dict([(str(k),v) for k,v in options.iteritems()])
return serializer.HTMLSerializer(**options).render(JsonWalker(input),options.get("encoding",None))
def serialize_xhtml(input, options):
options = dict([(str(k),v) for k,v in options.iteritems()])
return serializer.XHTMLSerializer(**options).render(JsonWalker(input),options.get("encoding",None))
def make_test(input, expected, xhtml, options):
result = serialize_html(input, options)
if len(expected) == 1:
assert expected[0] == result, "Expected:\n%s\nActual:\n%s\nOptions\nxhtml:False\n%s"%(expected[0], result, str(options))
elif result not in expected:
assert False, "Expected: %s, Received: %s" % (expected, result)
if not xhtml:
return
result = serialize_xhtml(input, options)
if len(xhtml) == 1:
assert xhtml[0] == result, "Expected:\n%s\nActual:\n%s\nOptions\nxhtml:True\n%s"%(xhtml[0], result, str(options))
elif result not in xhtml:
assert False, "Expected: %s, Received: %s" % (xhtml, result)
class EncodingTestCase(unittest.TestCase):
def throwsWithLatin1(self, input):
self.assertRaises(UnicodeEncodeError, serialize_html, input, {"encoding": "iso-8859-1"})
def testDoctypeName(self):
self.throwsWithLatin1([["Doctype", u"\u0101"]])
def testDoctypePublicId(self):
self.throwsWithLatin1([["Doctype", u"potato", u"\u0101"]])
def testDoctypeSystemId(self):
self.throwsWithLatin1([["Doctype", u"potato", u"potato", u"\u0101"]])
def testCdataCharacters(self):
self.assertEquals("<style>ā", serialize_html([["StartTag", "http://www.w3.org/1999/xhtml", "style", {}],
["Characters", u"\u0101"]],
{"encoding": "iso-8859-1"}))
def testCharacters(self):
self.assertEquals("ā", serialize_html([["Characters", u"\u0101"]],
{"encoding": "iso-8859-1"}))
def testStartTagName(self):
self.throwsWithLatin1([["StartTag", u"http://www.w3.org/1999/xhtml", u"\u0101", []]])
def testEmptyTagName(self):
self.throwsWithLatin1([["EmptyTag", u"http://www.w3.org/1999/xhtml", u"\u0101", []]])
def testAttributeName(self):
self.throwsWithLatin1([["StartTag", u"http://www.w3.org/1999/xhtml", u"span", [{"namespace": None, "name": u"\u0101", "value": u"potato"}]]])
def testAttributeValue(self):
self.assertEquals("<span potato=ā>", serialize_html([["StartTag", u"http://www.w3.org/1999/xhtml", u"span",
[{"namespace": None, "name": u"potato", "value": u"\u0101"}]]],
{"encoding": "iso-8859-1"}))
def testEndTagName(self):
self.throwsWithLatin1([["EndTag", u"http://www.w3.org/1999/xhtml", u"\u0101"]])
def testComment(self):
self.throwsWithLatin1([["Comment", u"\u0101"]])
if "lxml" in optionals_loaded:
class LxmlTestCase(unittest.TestCase):
def setUp(self):
self.parser = etree.XMLParser(resolve_entities=False)
self.treewalker = html5lib.getTreeWalker("lxml")
self.serializer = serializer.HTMLSerializer()
def testEntityReplacement(self):
doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>β</html>"""
tree = etree.fromstring(doc, parser = self.parser).getroottree()
result = serializer.serialize(tree, tree="lxml", omit_optional_tags=False)
self.assertEquals(u"""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>\u03B2</html>""", result)
def testEntityXML(self):
doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>></html>"""
tree = etree.fromstring(doc, parser = self.parser).getroottree()
result = serializer.serialize(tree, tree="lxml", omit_optional_tags=False)
self.assertEquals(u"""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>></html>""", result)
def testEntityNoResolve(self):
doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>β</html>"""
tree = etree.fromstring(doc, parser = self.parser).getroottree()
result = serializer.serialize(tree, tree="lxml", omit_optional_tags=False,
resolve_entities=False)
self.assertEquals(u"""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>β</html>""", result)
def test_serializer():
for filename in html5lib_test_files('serializer', '*.test'):
tests = json.load(file(filename))
test_name = os.path.basename(filename).replace('.test','')
for index, test in enumerate(tests['tests']):
xhtml = test.get("xhtml", test["expected"])
if test_name == 'optionaltags':
xhtml = None
yield make_test, test["input"], test["expected"], xhtml, test.get("options", {})
| WebGL-master | resources/html5lib/src/html5lib/tests/test_serializer.py |
import os
import unittest
from support import html5lib_test_files, TestData, test_dir
from html5lib import HTMLParser, inputstream
import re, unittest
class Html5EncodingTestCase(unittest.TestCase):
def test_codec_name(self):
self.assertEquals(inputstream.codecName("utf-8"), "utf-8")
self.assertEquals(inputstream.codecName("utf8"), "utf-8")
self.assertEquals(inputstream.codecName(" utf8 "), "utf-8")
self.assertEquals(inputstream.codecName("ISO_8859--1"), "windows-1252")
def buildTestSuite():
for filename in html5lib_test_files("encoding"):
test_name = os.path.basename(filename).replace('.dat',''). \
replace('-','')
tests = TestData(filename, "data")
for idx, test in enumerate(tests):
def encodingTest(self, data=test['data'],
encoding=test['encoding']):
p = HTMLParser()
t = p.parse(data, useChardet=False)
                errorMessage = ("Input:\n%s\nExpected:\n%s\nReceived:\n%s\n" %
(data, repr(encoding.lower()),
repr(p.tokenizer.stream.charEncoding)))
self.assertEquals(encoding.lower(),
p.tokenizer.stream.charEncoding[0],
errorMessage)
setattr(Html5EncodingTestCase, 'test_%s_%d' % (test_name, idx+1),
encodingTest)
try:
import chardet
def test_chardet(self):
data = open(os.path.join(test_dir, "encoding" , "chardet", "test_big5.txt")).read()
encoding = inputstream.HTMLInputStream(data).charEncoding
assert encoding[0].lower() == "big5"
setattr(Html5EncodingTestCase, 'test_chardet', test_chardet)
except ImportError:
print "chardet not found, skipping chardet tests"
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == "__main__":
main()
| WebGL-master | resources/html5lib/src/html5lib/tests/test_encoding.py |
import sys
import os
parent_path = os.path.abspath(os.path.join(os.path.split(__file__)[0], ".."))
if not parent_path in sys.path:
sys.path.insert(0, parent_path)
del parent_path
from runtests import buildTestSuite
import support
| WebGL-master | resources/html5lib/src/html5lib/tests/__init__.py |
import sys
import os
if __name__ == '__main__':
#Allow us to import from the src directory
os.chdir(os.path.split(os.path.abspath(__file__))[0])
sys.path.insert(0, os.path.abspath(os.path.join(os.pardir, "src")))
from tokenizer import HTMLTokenizer
class HTMLParser(object):
""" Fake parser to test tokenizer output """
def parse(self, stream, output=True):
tokenizer = HTMLTokenizer(stream)
for token in tokenizer:
if output:
print token
if __name__ == "__main__":
x = HTMLParser()
if len(sys.argv) > 1:
if len(sys.argv) > 2:
import hotshot, hotshot.stats
prof = hotshot.Profile('stats.prof')
prof.runcall(x.parse, sys.argv[1], False)
prof.close()
stats = hotshot.stats.load('stats.prof')
stats.strip_dirs()
stats.sort_stats('time')
stats.print_stats()
else:
x.parse(sys.argv[1])
else:
print """Usage: python mockParser.py filename [stats]
If stats is specified, the hotshot profiler will run and output the
stats instead.
"""
| WebGL-master | resources/html5lib/src/html5lib/tests/mockParser.py |
import os
import sys
import unittest
try:
import json
except ImportError:
import simplejson as json
from html5lib import html5parser, sanitizer, constants
def runSanitizerTest(name, expected, input):
expected = ''.join([token.toxml() for token in html5parser.HTMLParser().
parseFragment(expected).childNodes])
expected = json.loads(json.dumps(expected))
assert expected == sanitize_html(input)
def sanitize_html(stream):
return ''.join([token.toxml() for token in
html5parser.HTMLParser(tokenizer=sanitizer.HTMLSanitizer).
parseFragment(stream).childNodes])
def test_should_handle_astral_plane_characters():
assert u"<p>\U0001d4b5 \U0001d538</p>" == sanitize_html("<p>𝒵 𝔸</p>")
def test_sanitizer():
for tag_name in sanitizer.HTMLSanitizer.allowed_elements:
if tag_name in ['caption', 'col', 'colgroup', 'optgroup', 'option', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'tr']:
continue ### TODO
if tag_name != tag_name.lower():
continue ### TODO
if tag_name == 'image':
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<img title=\"1\"/>foo <bad>bar</bad> baz",
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name,tag_name))
elif tag_name == 'br':
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<br title=\"1\"/>foo <bad>bar</bad> baz<br/>",
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name,tag_name))
elif tag_name in constants.voidElements:
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<%s title=\"1\"/>foo <bad>bar</bad> baz" % tag_name,
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name,tag_name))
else:
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<%s title=\"1\">foo <bad>bar</bad> baz</%s>" % (tag_name,tag_name),
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name,tag_name))
for tag_name in sanitizer.HTMLSanitizer.allowed_elements:
tag_name = tag_name.upper()
yield (runSanitizerTest, "test_should_forbid_%s_tag" % tag_name,
"<%s title=\"1\">foo <bad>bar</bad> baz</%s>" % (tag_name,tag_name),
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name,tag_name))
for attribute_name in sanitizer.HTMLSanitizer.allowed_attributes:
if attribute_name != attribute_name.lower(): continue ### TODO
if attribute_name == 'style': continue
yield (runSanitizerTest, "test_should_allow_%s_attribute" % attribute_name,
"<p %s=\"foo\">foo <bad>bar</bad> baz</p>" % attribute_name,
"<p %s='foo'>foo <bad>bar</bad> baz</p>" % attribute_name)
for attribute_name in sanitizer.HTMLSanitizer.allowed_attributes:
attribute_name = attribute_name.upper()
yield (runSanitizerTest, "test_should_forbid_%s_attribute" % attribute_name,
"<p>foo <bad>bar</bad> baz</p>",
"<p %s='display: none;'>foo <bad>bar</bad> baz</p>" % attribute_name)
for protocol in sanitizer.HTMLSanitizer.allowed_protocols:
yield (runSanitizerTest, "test_should_allow_%s_uris" % protocol,
"<a href=\"%s\">foo</a>" % protocol,
"""<a href="%s">foo</a>""" % protocol)
for protocol in sanitizer.HTMLSanitizer.allowed_protocols:
yield (runSanitizerTest, "test_should_allow_uppercase_%s_uris" % protocol,
"<a href=\"%s\">foo</a>" % protocol,
"""<a href="%s">foo</a>""" % protocol)
| WebGL-master | resources/html5lib/src/html5lib/tests/test_sanitizer.py |
import sys
import os
import json
import re
import html5lib
import support
import test_parser
import test_tokenizer
p = html5lib.HTMLParser()
unnamespaceExpected = re.compile(r"^(\|\s*)<html ([^>]+)>", re.M).sub
def main(out_path):
if not os.path.exists(out_path):
        sys.stderr.write("Path %s does not exist\n" % out_path)
sys.exit(1)
for filename in support.html5lib_test_files('tokenizer', '*.test'):
run_file(filename, out_path)
def run_file(filename, out_path):
try:
tests_data = json.load(file(filename))
except ValueError:
sys.stderr.write("Failed to load %s\n"%filename)
return
name = os.path.splitext(os.path.split(filename)[1])[0]
output_file = open(os.path.join(out_path, "tokenizer_%s.dat"%name), "w")
if 'tests' in tests_data:
for test_data in tests_data['tests']:
if 'initialStates' not in test_data:
test_data["initialStates"] = ["Data state"]
for initial_state in test_data["initialStates"]:
if initial_state != "Data state":
#don't support this yet
continue
test = make_test(test_data)
output_file.write(test)
output_file.close()
def make_test(test_data):
if 'doubleEscaped' in test_data:
test_data = test_tokenizer.unescape_test(test_data)
rv = []
rv.append("#data")
rv.append(test_data["input"].encode("utf8"))
rv.append("#errors")
tree = p.parse(test_data["input"])
output = p.tree.testSerializer(tree)
output = "\n".join(("| "+ line[3:]) if line.startswith("| ") else line
for line in output.split("\n"))
output = unnamespaceExpected(r"\1<\2>", output)
rv.append(output.encode("utf8"))
rv.append("")
return "\n".join(rv)
if __name__ == "__main__":
main(sys.argv[1])
| WebGL-master | resources/html5lib/src/html5lib/tests/tokenizertotree.py |
import sys
import os
import glob
import unittest
def buildTestSuite():
suite = unittest.TestSuite()
for testcase in glob.glob('test_*.py'):
module = os.path.splitext(testcase)[0]
suite.addTest(__import__(module).buildTestSuite())
return suite
def main():
results = unittest.TextTestRunner().run(buildTestSuite())
return results
if __name__ == "__main__":
results = main()
if not results.wasSuccessful():
sys.exit(1)
| WebGL-master | resources/html5lib/src/html5lib/tests/runtests.py |
import support
import unittest, codecs
from html5lib.inputstream import HTMLInputStream
class HTMLInputStreamShortChunk(HTMLInputStream):
_defaultChunkSize = 2
class HTMLInputStreamTest(unittest.TestCase):
def test_char_ascii(self):
stream = HTMLInputStream("'", encoding='ascii')
self.assertEquals(stream.charEncoding[0], 'ascii')
self.assertEquals(stream.char(), "'")
def test_char_null(self):
stream = HTMLInputStream("\x00")
self.assertEquals(stream.char(), u'\ufffd')
def test_char_utf8(self):
stream = HTMLInputStream(u'\u2018'.encode('utf-8'), encoding='utf-8')
self.assertEquals(stream.charEncoding[0], 'utf-8')
self.assertEquals(stream.char(), u'\u2018')
def test_char_win1252(self):
stream = HTMLInputStream(u"\xa9\xf1\u2019".encode('windows-1252'))
self.assertEquals(stream.charEncoding[0], 'windows-1252')
self.assertEquals(stream.char(), u"\xa9")
self.assertEquals(stream.char(), u"\xf1")
self.assertEquals(stream.char(), u"\u2019")
def test_bom(self):
stream = HTMLInputStream(codecs.BOM_UTF8 + "'")
self.assertEquals(stream.charEncoding[0], 'utf-8')
self.assertEquals(stream.char(), "'")
def test_utf_16(self):
stream = HTMLInputStream((' '*1025).encode('utf-16'))
self.assert_(stream.charEncoding[0] in ['utf-16-le', 'utf-16-be'], stream.charEncoding)
self.assertEquals(len(stream.charsUntil(' ', True)), 1025)
def test_newlines(self):
stream = HTMLInputStreamShortChunk(codecs.BOM_UTF8 + "a\nbb\r\nccc\rddddxe")
self.assertEquals(stream.position(), (1, 0))
self.assertEquals(stream.charsUntil('c'), u"a\nbb\n")
self.assertEquals(stream.position(), (3, 0))
self.assertEquals(stream.charsUntil('x'), u"ccc\ndddd")
self.assertEquals(stream.position(), (4, 4))
self.assertEquals(stream.charsUntil('e'), u"x")
self.assertEquals(stream.position(), (4, 5))
def test_newlines2(self):
size = HTMLInputStream._defaultChunkSize
stream = HTMLInputStream("\r" * size + "\n")
self.assertEquals(stream.charsUntil('x'), "\n" * size)
def test_position(self):
stream = HTMLInputStreamShortChunk(codecs.BOM_UTF8 + "a\nbb\nccc\nddde\nf\ngh")
self.assertEquals(stream.position(), (1, 0))
self.assertEquals(stream.charsUntil('c'), u"a\nbb\n")
self.assertEquals(stream.position(), (3, 0))
stream.unget(u"\n")
self.assertEquals(stream.position(), (2, 2))
self.assertEquals(stream.charsUntil('c'), u"\n")
self.assertEquals(stream.position(), (3, 0))
stream.unget(u"\n")
self.assertEquals(stream.position(), (2, 2))
self.assertEquals(stream.char(), u"\n")
self.assertEquals(stream.position(), (3, 0))
self.assertEquals(stream.charsUntil('e'), u"ccc\nddd")
self.assertEquals(stream.position(), (4, 3))
self.assertEquals(stream.charsUntil('h'), u"e\nf\ng")
self.assertEquals(stream.position(), (6, 1))
def test_position2(self):
stream = HTMLInputStreamShortChunk("abc\nd")
self.assertEquals(stream.position(), (1, 0))
self.assertEquals(stream.char(), u"a")
self.assertEquals(stream.position(), (1, 1))
self.assertEquals(stream.char(), u"b")
self.assertEquals(stream.position(), (1, 2))
self.assertEquals(stream.char(), u"c")
self.assertEquals(stream.position(), (1, 3))
self.assertEquals(stream.char(), u"\n")
self.assertEquals(stream.position(), (2, 0))
self.assertEquals(stream.char(), u"d")
self.assertEquals(stream.position(), (2, 1))
def buildTestSuite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == '__main__':
main()
| WebGL-master | resources/html5lib/src/html5lib/tests/test_stream.py |
def f1():
x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
x += y + z
def f2():
x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
x = x + y + z
def f3():
x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
x = "".join((x, y, z))
def f4():
x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
x = "%s%s%s" % (x, y, z)
import timeit
for x in xrange(4):
statement = "f%s" % (x + 1)
t = timeit.Timer(statement, "from __main__ import " + statement)
r = t.repeat(3, 1000000)
print r, min(r)
| WebGL-master | resources/html5lib/src/html5lib/tests/performance/concatenation.py |
'''
Created on 23 Dec 2014
@author: Alberto Lopez
Mobica LTD
'''
'''
Python script to compile JavaScript files with the Closure Compiler (compiler.jar)
locally in the selected directory.
STEPS:
- Paste a copy of compiler.jar, available at https://developers.google.com/closure/compiler/docs/gettingstarted_app
- Execute/paste this script in the folder that contains the JavaScript files.
FEATURES:
1) By default, this script compiles each JavaScript file in the current folder at the three available
Closure Compiler levels: WHITESPACE_ONLY, SIMPLE_OPTIMIZATIONS and ADVANCED_OPTIMIZATIONS.
These compilation levels are represented in the main program as:
- whitespace= True (True when this compilation level is undertaken)
- simple= True (True when this compilation level is undertaken)
- advanced= True (True when this compilation level is undertaken)
2) Each compiled JavaScript file generates a .txt file with the corresponding Closure Compiler output.
3) Using this script and the Closure Compiler application (closure.jar), the warning and error output is always
stricter than the output obtained from the Closure Compiler service UI (http://closure-compiler.appspot.com/home).
4) If there are no errors in the compiled JavaScript, a copy of the compressed JavaScript is written to the
corresponding section of the generated .txt file.
5) This script generates no .js output files, to avoid recompiling generated .js files in the local directory
while running this script.
'''
#!python3
import re
import os
import subprocess
import threading
from sys import stdout, stderr
#simpleCompilationCmdInput= "java -jar compiler.jar --compilation_level SIMPLE --js glu-draw.js --js_output_file gluDrawCompiled.js --externs js-test-pre.js --warning_level VERBOSE"
def getShadersJavaScript(whitespaceCompilation, simpleCompilation, advancedCompilation):
directory=os.getcwd()
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith(".js"):
compileJavaScript(file, whitespaceCompilation, simpleCompilation, advancedCompilation)
def compileJavaScript(file, whitespaceCompilation, simpleCompilation, advancedCompilation):
print("RUNNING CLOSURE COMPILER OUTPUT for " +file+" ...")
outputCompilerFile= file.strip()[0:-3]
outputCompilerFile= outputCompilerFile +".txt"
with open(outputCompilerFile, "w") as out_file:
out_file.write("CLOSURE COMPILER OUTPUT " + "\n")
out_file.write("JavaScript shader file: " + file + "\n")
out_file.write("Output file from CLOSURE COMPILER: " + outputCompilerFile + "\n")
out_file.flush()
if whitespaceCompilation==True:
cmdInput= "java -jar compiler.jar --compilation_level WHITESPACE_ONLY --js "+file+" --warning_level VERBOSE"
with open(outputCompilerFile, "a") as out_file:
out_file.write("\n"+ "------------------------------------------" + "\n")
out_file.write("COMPILATION LEVEL: WHITESPACE_ONLY " + "\n")
out_file.flush()
writeOutputAmendFile(outputCompilerFile, cmdInput)
if simpleCompilation==True:
cmdInput= "java -jar compiler.jar --compilation_level SIMPLE_OPTIMIZATIONS --js "+file+" --warning_level VERBOSE"
with open(outputCompilerFile, "a") as out_file:
out_file.write("\n"+"\n"+ "------------------------------------------" + "\n")
out_file.write("COMPILATION LEVEL: SIMPLE_OPTIMIZATIONS" + "\n")
out_file.flush()
writeOutputAmendFile(outputCompilerFile, cmdInput)
if advancedCompilation==True:
cmdInput= "java -jar compiler.jar --compilation_level ADVANCED_OPTIMIZATIONS --js "+file+" --warning_level VERBOSE"
with open(outputCompilerFile, "a") as out_file:
out_file.write("\n"+"\n"+ "------------------------------------------" + "\n")
out_file.write("COMPILATION LEVEL: ADVANCED_OPTIMIZATIONS" + "\n")
out_file.flush()
writeOutputAmendFile(outputCompilerFile, cmdInput)
print("JavaScript " +file + " SUCCESSFULLY COMPILED!")
print("Output saved in " +outputCompilerFile + " in current working directory " + os.getcwd() + "\n")
def writeOutputAmendFile(outputCompilerFile, cmdInput):
with open(outputCompilerFile, "ab") as out_file:
proc = subprocess.Popen(cmdInput, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while proc.poll() is None:
line = proc.stdout.readline()
out_file.write(line)
out_file.flush()
proc.wait()
#proc = subprocess.Popen(simpleCompilationCmdInput, shell=True)
#main program
whitespace= True
simple= True
advanced= True
getShadersJavaScript(whitespace, simple, advanced)
print("------ END EXECUTION Python script: compiler-shaders-local.py ------" + "\n")
| WebGL-master | sdk/tests/deqp/compile-shaders-local.py |
import os
import re
# Generate an HTML file for each .test file in the current directory
#
TEST_LIST_FILE = '00_test_list.txt'
TEMPLATE = 'template.html'
def genHTML(template, test):
    contents = re.sub('___TEST_NAME___', "'" + test + "'", template)
    filename = test + '.html'
    print "Generating " + filename
    with open(filename, 'w') as f:
        f.write(contents)
    return filename
def process_test_files(template):
    generated = []
    files = os.listdir(os.getcwd())
    for file in files:
        found = re.search('(^[^.].*)\.test$', file)
        if found:
            generated.append(genHTML(template, found.group(1)))
    return generated
def readTemplate():
    contents = None
    with open(TEMPLATE, 'r') as f:
        contents = f.read()
    return contents
template = readTemplate()
if template:
    test_list = process_test_files(template)
    print "Generating " + TEST_LIST_FILE
    with open(TEST_LIST_FILE, 'w') as f:
        for item in test_list:
            f.write(item + '\n')
else:
    print "Couldn't find template file: " + TEMPLATE
| WebGL-master | sdk/tests/deqp/genHTMLfromTest.py |
#!/usr/bin/python
"""generates tests from OpenGL ES 2.0 .run/.test files."""
import os
import os.path
import sys
import re
import json
import shutil
from optparse import OptionParser
from xml.dom.minidom import parse
if sys.version < '2.6':
print 'Wrong Python Version !!!: Need >= 2.6'
sys.exit(1)
# Each shader test generates up to 3 512x512 images.
# A 512x512 image takes 1 MB of memory, so set this
# number appropriately for the platform with the
# least memory. At 8, that means at least 24 MB is
# needed to run a test set.
MAX_TESTS_PER_SET = 8
VERBOSE = False
FILTERS = [
re.compile("GL/"),
]
LICENSE = """
/*
** Copyright (c) 2012 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a
** copy of this software and/or associated documentation files (the
** "Materials"), to deal in the Materials without restriction, including
** without limitation the rights to use, copy, modify, merge, publish,
** distribute, sublicense, and/or sell copies of the Materials, and to
** permit persons to whom the Materials are furnished to do so, subject to
** the following conditions:
**
** The above copyright notice and this permission notice shall be included
** in all copies or substantial portions of the Materials.
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
*/
"""
COMMENT_RE = re.compile("/\*\n\*\*\s+Copyright.*?\*/",
re.IGNORECASE | re.DOTALL)
REMOVE_COPYRIGHT_RE = re.compile("\/\/\s+Copyright.*?\n",
re.IGNORECASE | re.DOTALL)
MATRIX_RE = re.compile("Matrix(\\d)")
VALID_UNIFORM_TYPES = [
"uniform1f",
"uniform1fv",
"uniform1fv",
"uniform1i",
"uniform1iv",
"uniform1iv",
"uniform2f",
"uniform2fv",
"uniform2fv",
"uniform2i",
"uniform2iv",
"uniform2iv",
"uniform3f",
"uniform3fv",
"uniform3fv",
"uniform3i",
"uniform3iv",
"uniform3iv",
"uniform4f",
"uniform4fv",
"uniform4fv",
"uniform4i",
"uniform4iv",
"uniform4ivy",
"uniformMatrix2fv",
"uniformMatrix2fv",
"uniformMatrix3fv",
"uniformMatrix3fv",
"uniformMatrix4fv",
"uniformMatrix4fv",
]
SUBSTITUTIONS = [
("uniformmat3fv", "uniformMatrix3fv"),
("uniformmat4fv", "uniformMatrix4fv"),
]
def Log(msg):
global VERBOSE
if VERBOSE:
print msg
def TransposeMatrix(values, dim):
size = dim * dim
count = len(values) / size
for m in range(0, count):
offset = m * size
for i in range(0, dim):
for j in range(i + 1, dim):
t = values[offset + i * dim + j]
values[offset + i * dim + j] = values[offset + j * dim + i]
values[offset + j * dim + i] = t
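# Example (one 2x2 matrix transposed in place; illustrative values):
#   values = [1.0, 2.0, 3.0, 4.0]
#   TransposeMatrix(values, 2)   # values is now [1.0, 3.0, 2.0, 4.0]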
def GetValidTypeName(type_name):
global VALID_UNIFORM_TYPES
global SUBSTITUTIONS
for subst in SUBSTITUTIONS:
type_name = type_name.replace(subst[0], subst[1])
if not type_name in VALID_UNIFORM_TYPES:
print "unknown type name: ", type_name
raise SyntaxError
return type_name
def WriteOpen(filename):
dirname = os.path.dirname(filename)
if len(dirname) > 0 and not os.path.exists(dirname):
os.makedirs(dirname)
return open(filename, "wb")
class TxtWriter():
def __init__(self, filename):
self.filename = filename
self.lines = []
def Write(self, line):
self.lines.append(line)
def Close(self):
if len(self.lines) > 0:
Log("Writing: %s" % self.filename)
f = WriteOpen(self.filename)
f.write("# this file is auto-generated. DO NOT EDIT.\n")
f.write("".join(self.lines))
f.close()
def ReadFileAsLines(filename):
f = open(filename, "r")
lines = f.readlines()
f.close()
return [line.strip() for line in lines]
def ReadFile(filename):
f = open(filename, "r")
content = f.read()
f.close()
return content.replace("\r\n", "\n")
def Chunkify(list, chunk_size):
  """Divides a list into chunks of chunk_size"""
return [list[i:i + chunk_size] for i in range(0, len(list), chunk_size)]
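# e.g. Chunkify([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]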
def GetText(nodelist):
  """Gets the text from a list of nodes"""
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def GetElementText(node, name):
"""Gets the text of an element"""
elements = node.getElementsByTagName(name)
if len(elements) > 0:
return GetText(elements[0].childNodes)
else:
return None
def GetBoolElement(node, name):
text = GetElementText(node, name)
return text.lower() == "true"
def GetModel(node):
"""Gets the model"""
model = GetElementText(node, "model")
if model and len(model.strip()) == 0:
elements = node.getElementsByTagName("model")
if len(elements) > 0:
model = GetElementText(elements[0], "filename")
return model
def RelativizePaths(base, paths, template):
  """Converts paths to relative paths"""
rels = []
for p in paths:
#print "---"
#print "base: ", os.path.abspath(base)
#print "path: ", os.path.abspath(p)
relpath = os.path.relpath(os.path.abspath(p), os.path.dirname(os.path.abspath(base))).replace("\\", "/")
#print "rel : ", relpath
rels.append(template % relpath)
return "\n".join(rels)
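# For example (hypothetical paths): with base "out/page.html", paths
# ["res/style.css"] and template '<link href="%s" />', each emitted tag's
# href is rewritten relative to the directory containing out/page.html.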
def CopyFile(filename, src, dst):
s = os.path.abspath(os.path.join(os.path.dirname(src), filename))
d = os.path.abspath(os.path.join(os.path.dirname(dst), filename))
dst_dir = os.path.dirname(d)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
shutil.copyfile(s, d)
def CopyShader(filename, src, dst):
s = os.path.abspath(os.path.join(os.path.dirname(src), filename))
d = os.path.abspath(os.path.join(os.path.dirname(dst), filename))
text = ReadFile(s)
# By agreement with the Khronos OpenGL working group we are allowed
# to open source only the .vert and .frag files from the OpenGL ES 2.0
# conformance tests. All other files from the OpenGL ES 2.0 conformance
# tests are not included.
marker = "insert-copyright-here"
new_text = COMMENT_RE.sub(marker, text)
if new_text == text:
print "no matching license found:", s
raise RuntimeError
new_text = REMOVE_COPYRIGHT_RE.sub("", new_text)
new_text = new_text.replace(marker, LICENSE)
f = WriteOpen(d)
f.write(new_text)
f.close()
def IsOneOf(string, regexs):
for regex in regexs:
if re.match(regex, string):
return True
return False
def CheckForUnknownTags(valid_tags, node, depth=1):
"""do a hacky check to make sure we're not missing something."""
for child in node.childNodes:
if child.localName and not IsOneOf(child.localName, valid_tags[0]):
print "unsupported tag:", child.localName
print "depth:", depth
raise SyntaxError
else:
if len(valid_tags) > 1:
CheckForUnknownTags(valid_tags[1:], child, depth + 1)
def IsFileWeWant(filename):
for f in FILTERS:
if f.search(filename):
return True
return False
class TestReader():
"""class to read and parse tests"""
def __init__(self, basepath):
self.tests = []
self.modes = {}
self.patterns = {}
self.basepath = basepath
def Print(self, msg):
if self.verbose:
print msg
def MakeOutPath(self, filename):
relpath = os.path.relpath(os.path.abspath(filename), os.path.dirname(os.path.abspath(self.basepath)))
return relpath
  def ReadTests(self, filename):
    """Reads and parses a .run file."""
Log("reading %s" % filename)
outname = self.MakeOutPath(filename + ".txt")
f = TxtWriter(outname)
dirname = os.path.dirname(filename)
lines = ReadFileAsLines(filename)
count = 0
tests_data = []
for line in lines:
if len(line) > 0 and not line.startswith("#"):
fname = os.path.join(dirname, line)
if line.endswith(".run"):
if self.ReadTests(fname):
f.Write(line + ".txt\n")
count += 1
elif line.endswith(".test"):
tests_data.extend(self.ReadTest(fname))
else:
print "Error in %s:%d:%s" % (filename, count, line)
raise SyntaxError()
if len(tests_data):
global MAX_TESTS_PER_SET
sets = Chunkify(tests_data, MAX_TESTS_PER_SET)
id = 1
for set in sets:
suffix = "_%03d_to_%03d" % (id, id + len(set) - 1)
test_outname = self.MakeOutPath(filename + suffix + ".html")
if os.path.basename(test_outname).startswith("input.run"):
dname = os.path.dirname(test_outname)
folder_name = os.path.basename(dname)
test_outname = os.path.join(dname, folder_name + suffix + ".html")
self.WriteTests(filename, test_outname, {"tests":set})
f.Write(os.path.basename(test_outname) + "\n")
id += len(set)
count += 1
f.Close()
return count
  def ReadTest(self, filename):
    """Reads and parses a .test file."""
Log("reading %s" % filename)
dom = parse(filename)
tests = dom.getElementsByTagName("test")
tests_data = []
outname = self.MakeOutPath(filename + ".html")
for test in tests:
if not IsFileWeWant(filename):
self.CopyShaders(test, filename, outname)
else:
test_data = self.ProcessTest(test, filename, outname, len(tests_data))
if test_data:
tests_data.append(test_data)
return tests_data
def ProcessTest(self, test, filename, outname, id):
"""Process a test"""
mode = test.getAttribute("mode")
pattern = test.getAttribute("pattern")
self.modes[mode] = 1
self.patterns[pattern] = 1
Log ("%d: mode: %s pattern: %s" % (id, mode, pattern))
method = getattr(self, 'Process_' + pattern)
test_data = method(test, filename, outname)
if test_data:
test_data["pattern"] = pattern
return test_data
def WriteTests(self, filename, outname, tests_data):
Log("Writing %s" % outname)
template = """<!DOCTYPE html>
<!-- this file is auto-generated. DO NOT EDIT.
%(license)s
-->
<html>
<head>
<meta charset="utf-8">
<title>WebGL GLSL conformance test: %(title)s</title>
%(css)s
%(scripts)s
</head>
<body>
<canvas id="example" width="500" height="500" style="width: 16px; height: 16px;"></canvas>
<div id="description"></div>
<div id="console"></div>
</body>
<script>
"use strict";
OpenGLESTestRunner.run(%(tests_data)s);
var successfullyParsed = true;
</script>
</html>
"""
css = [
"../../resources/js-test-style.css",
"../resources/ogles-tests.css",
]
scripts = [
"../../resources/js-test-pre.js",
"../resources/webgl-test-utils.js",
"ogles-utils.js",
]
css_html = RelativizePaths(outname, css, '<link rel="stylesheet" href="%s" />')
scripts_html = RelativizePaths(outname, scripts, '<script src="%s"></script>')
f = WriteOpen(outname)
f.write(template % {
"license": LICENSE,
"css": css_html,
"scripts": scripts_html,
"title": os.path.basename(outname),
"tests_data": json.dumps(tests_data, indent=2)
})
f.close()
def CopyShaders(self, test, filename, outname):
"""For tests we don't actually support yet, at least copy the shaders"""
shaders = test.getElementsByTagName("shader")
for shader in shaders:
for name in ["vertshader", "fragshader"]:
s = GetElementText(shader, name)
if s and s != "empty":
CopyShader(s, filename, outname)
#
# pattern handlers.
#
def Process_compare(self, test, filename, outname):
global MATRIX_RE
valid_tags = [
["shader", "model", "glstate"],
["uniform", "vertshader", "fragshader", "filename", "depthrange"],
["name", "count", "transpose", "uniform*", "near", "far"],
]
CheckForUnknownTags(valid_tags, test)
# parse the test
shaders = test.getElementsByTagName("shader")
shaderInfos = []
for shader in shaders:
v = GetElementText(shader, "vertshader")
f = GetElementText(shader, "fragshader")
CopyShader(v, filename, outname)
CopyShader(f, filename, outname)
info = {
"vertexShader": v,
"fragmentShader": f,
}
shaderInfos.append(info)
uniformElems = shader.getElementsByTagName("uniform")
if len(uniformElems) > 0:
uniforms = {}
info["uniforms"] = uniforms
for uniformElem in uniformElems:
uniform = {"count": 1}
for child in uniformElem.childNodes:
if child.localName == None:
pass
elif child.localName == "name":
uniforms[GetText(child.childNodes)] = uniform
elif child.localName == "count":
uniform["count"] = int(GetText(child.childNodes))
elif child.localName == "transpose":
uniform["transpose"] = (GetText(child.childNodes) == "true")
else:
if "type" in uniform:
print "utype was:", uniform["type"], " found ", child.localName
raise SyntaxError
type_name = GetValidTypeName(child.localName)
uniform["type"] = type_name
valueText = GetText(child.childNodes).replace(",", " ")
uniform["value"] = [float(t) for t in valueText.split()]
m = MATRIX_RE.search(type_name)
if m:
# Why are these backward from the API?!?!?
TransposeMatrix(uniform["value"], int(m.group(1)))
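    # The .test files appear to list the program under test first and the
    # reference program second; hence the swapped indices below.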
data = {
"name": os.path.basename(outname),
"model": GetModel(test),
"referenceProgram": shaderInfos[1],
"testProgram": shaderInfos[0],
}
gl_states = test.getElementsByTagName("glstate")
if len(gl_states) > 0:
state = {}
data["state"] = state
for gl_state in gl_states:
for state_name in gl_state.childNodes:
if state_name.localName:
values = {}
for field in state_name.childNodes:
if field.localName:
values[field.localName] = GetText(field.childNodes)
state[state_name.localName] = values
return data
def Process_shaderload(self, test, filename, outname):
"""no need for shaderload tests"""
self.CopyShaders(test, filename, outname)
def Process_extension(self, test, filename, outname):
"""no need for extension tests"""
self.CopyShaders(test, filename, outname)
def Process_createtests(self, test, filename, outname):
Log("createtests Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_GL2Test(self, test, filename, outname):
Log("GL2Test Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_uniformquery(self, test, filename, outname):
Log("uniformquery Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_egl_image_external(self, test, filename, outname):
"""no need for egl_image_external tests"""
self.CopyShaders(test, filename, outname)
def Process_dismount(self, test, filename, outname):
Log("dismount Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_build(self, test, filename, outname):
"""don't need build tests"""
valid_tags = [
["shader", "compstat", "linkstat"],
["vertshader", "fragshader"],
]
CheckForUnknownTags(valid_tags, test)
shader = test.getElementsByTagName("shader")
if not shader:
return None
vs = GetElementText(shader[0], "vertshader")
fs = GetElementText(shader[0], "fragshader")
if vs and vs != "empty":
CopyShader(vs, filename, outname)
if fs and fs != "empty":
CopyShader(fs, filename, outname)
data = {
"name": os.path.basename(outname),
"compstat": bool(GetBoolElement(test, "compstat")),
"linkstat": bool(GetBoolElement(test, "linkstat")),
"testProgram": {
"vertexShader": vs,
"fragmentShader": fs,
},
}
attach = test.getElementsByTagName("attach")
if len(attach) > 0:
data["attachError"] = GetElementText(attach[0], "attacherror")
return data
def Process_coverage(self, test, filename, outname):
Log("coverage Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_attributes(self, test, filename, outname):
Log("attributes Not implemented: %s" % filename)
self.CopyShaders(test, filename, outname)
def Process_fixed(self, test, filename, outname):
"""no need for fixed function tests"""
self.CopyShaders(test, filename, outname)
def main(argv):
"""This is the main function."""
global VERBOSE
parser = OptionParser()
parser.add_option(
"-v", "--verbose", action="store_true",
help="prints more output.")
(options, args) = parser.parse_args(args=argv)
  if len(args) < 1:
    parser.error("missing required argument: path to the tests file")
os.chdir(os.path.dirname(__file__) or '.')
VERBOSE = options.verbose
filename = args[0]
test_reader = TestReader(filename)
test_reader.ReadTests(filename)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| WebGL-master | sdk/tests/conformance/ogles/process-ogles2-tests.py |
#!/usr/bin/env python
us = [float(f) for f in file('mine.txt')]
them = [float(f) for f in file('them.txt')]
assert len(us) == len(them)
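# Values come in x/y/z triples, six vertices per round; decode the flat index
# into (round, vertex, coordinate). The 2/65536 tolerance looks like two ULPs
# of 16.16 fixed point (an assumption), and note that only positive
# deviations (ours larger) are reported.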
for i, (a, b) in enumerate(zip(us, them)):
diff = a - b
if diff > 2./65536:
vert, coord = divmod(i, 3)
round, vert = divmod(vert, 6)
print "%d:%d:%d" % (round, vert, coord), i, diff, a, b
| WebGL-master | sdk/demos/google/san-angeles/gles/bob/compare.py |
"""Modulus Project Configurator"""
| modulus-toolchain-master | mpc/__init__.py |
from bottle import run, request, post, get
import json
@get("/info")
def info():
return {"a": 3}
@post("/infer")
def infer():
postdata = json.loads(request.body.read())
print(postdata) # this goes to log file only, not to client
return "Hi {name}".format(name=postdata["name"])
run(host="localhost", port=8081, debug=True)
| modulus-toolchain-master | mpc/rest_api.py |
| modulus-toolchain-master | mpc/rest_server/__init__.py |
import sys, os
from subprocess import Popen, PIPE, check_call, run
CMD = "python -m mpc.rest_server.launcher_subprocess".split()
stageid = sys.argv[1]
path = f"infer"
print(os.getcwd(), path)
print(CMD)
p = Popen(CMD, stderr=PIPE, stdin=PIPE, stdout=PIPE, universal_newlines=True)
p.stdin.write(f"load-infer-function {path}\n")
p.stdin.flush()
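# Echo the subprocess's startup output until it reports the module loaded.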
ll, l = "", ""
while not l.startswith(f"[mpc] loaded {path}"):
l = p.stdout.readline()
ll += l
print(l, end="")
p.stdin.write(f"info\n")
p.stdin.flush()
import json
from pprint import pprint
info = json.loads(p.stdout.readline())
info["load-infer"] = ll
pprint(info)
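
# Hypothetical next step (payload and result names are illustrative): request
# an inference; the subprocess replies with the paths of the saved results.
#
#   p.stdin.write('infer {"x": np.array([0.5]).reshape(-1, 1)}\n')
#   p.stdin.flush()
#   print(p.stdout.readline())  # e.g. "wrote /dev/shm/mpc/<output>.npy"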
| modulus-toolchain-master | mpc/rest_server/start.py |
"""
This module receives commands on sys.stdin, one line at a time, and loads and
drives infer() functions.

It runs inside the sub-process dedicated to an infer.py (a stage-specific
sub-process).

Valid commands:
load-infer-function [path]
info
infer
exit
"""
import os
import sys
import importlib
import time

import numpy as np
n = 0
while True:
n += 1
line = sys.stdin.readline()
if line == "":
sys.exit(0)
cmd = line.split()[0]
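    # Everything after the command word; [:-1] drops the trailing newline.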
rest = " ".join(line.split(" ")[1:])[:-1]
if cmd == "exit":
sys.exit()
if cmd == "load-infer-function":
path = " ".join(line.split(" ")[1:])[:-1]
t0 = time.time()
m = importlib.import_module(path)
t1 = time.time()
print(f"[mpc] loaded {path} in {t1-t0:.3f} s", flush=True)
if cmd == "info":
import json
# d = {"m.__file__": m.__file__, "m.infer.__doc__": m.infer.__doc__}
# print(json.dumps(d), flush=True)
print(json.dumps(m.info), flush=True)
if cmd == "infer":
        r = m.infer(**eval(rest))
        # Results go back as .npy files in RAM-backed shared memory; ensure
        # the directory exists before the first save.
        os.makedirs("/dev/shm/mpc", exist_ok=True)
        for k, v in r.items():
            fname = f"/dev/shm/mpc/{k}.npy"
            np.save(file=fname, arr=v)
            print("wrote", fname, flush=True)
# load-infer-function infer
# infer {"Q": np.array([1]).reshape(-1, 1),"x": np.array([1]).reshape(-1, 1),"y": np.array([1]).reshape(-1, 1), "a": np.array([1]).reshape(-1, 1), "b": np.array([1]).reshape(-1, 1), "q": np.array([1]).reshape(-1, 1), "airT": np.array([1]).reshape(-1, 1)}
| modulus-toolchain-master | mpc/rest_server/launcher_subprocess.py |
import ipyleaflet as ipl
class BaseMap:
def __init__(self):
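        # Leaflet's Simple CRS maps coordinates 1:1 rather than using map
        # tiles, which suits non-geographic (flat/image) layers.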
self._m = ipl.Map(layers=[], crs=ipl.projections.Simple)
| modulus-toolchain-master | mpc/nvapp/mapcomponent.py |
| modulus-toolchain-master | mpc/nvapp/__init__.py |
import os, sys
import importlib
# RELOAD_PATHS = ["/home/pavel/work/repos/L2RPN/individual/pavel/topo-sim/"]
def should_reload(m, RELOAD_PATHS):
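    """Return True if module m appears to be loaded from one of RELOAD_PATHS."""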
if m is None:
return False
try:
for rp in RELOAD_PATHS:
if "__file__" in dir(m) and rp in m.__file__:
return True
    except Exception:
        pass
return False
def reload(mod, RELOAD_PATHS):
# prepare list of modules that should be reloaded (by removing them)
dellst = []
for mname, m in sys.modules.items():
if should_reload(m, RELOAD_PATHS): # and mod != m:
dellst += [mname]
for mname in dellst:
if mname in sys.modules:
del sys.modules[mname]
    # Now re-import by name. importlib.reload(mod) would reuse the evicted
    # module object, so a fresh import_module on the module's name is used.
    mod = importlib.import_module(mod.__name__)
return mod
def load_template(filename):
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
def reload_module(modname: str):
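    """Import modname if needed, then hot-reload it and its directory siblings."""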
    if modname not in sys.modules:
        if "." in modname:
            # Import the top-level package first; split(".")[0] (rather than
            # tuple unpacking) also handles names nested more than one level.
            importlib.import_module(modname.split(".")[0])
        mod = importlib.import_module(modname)
    else:
        mod = sys.modules[modname]
RELOAD_PATHS = [os.path.dirname(mod.__file__) + "/"]
    try:
        mod = reload(mod, RELOAD_PATHS)
    except Exception:
        # Fall back to a plain import if the targeted reload fails.
        mod = importlib.import_module(modname)
return mod
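
# A small usage sketch (assumption: any importable dotted name works the same
# way). This evicts the package's modules from sys.modules and re-imports
# them fresh from disk.
if __name__ == "__main__":
    print(reload_module("json"))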
| modulus-toolchain-master | mpc/nvapp/common.py |