repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (1 class: "py")
---|---|---|---|---|---|---|
DAAISy | DAAISy-main/dependencies/FD/src/translate/sas_tasks.py |
SAS_FILE_VERSION = 3
DEBUG = False
class SASTask:
"""Planning task in finite-domain representation.
The user is responsible for making sure that the data fits a
number of structural restrictions. For example, conditions should
generally be sorted and mention each variable at most once. See
the validate methods for details."""
def __init__(self, variables, mutexes, init, goal,
operators, axioms, metric):
self.variables = variables
self.mutexes = mutexes
self.init = init
self.goal = goal
self.operators = sorted(operators, key=lambda op: (
op.name, op.prevail, op.pre_post))
self.axioms = sorted(axioms, key=lambda axiom: (
axiom.condition, axiom.effect))
self.metric = metric
if DEBUG:
self.validate()
def validate(self):
"""Fail an assertion if the task is invalid.
A task is valid if all its components are valid. Valid tasks
are almost in a kind of "canonical form", but not quite. For
example, operators and axioms are permitted to be listed in
any order, even though it would be possible to require some
kind of canonical sorting.
Note that we require that all derived variables are binary.
This is stricter than what later parts of the planner are
supposed to handle, but some parts of the translator rely on
this. We might want to consider making this a general
requirement throughout the planner.
Note also that there is *no* general rule on what the init (=
fallback) value of a derived variable is. For example, in
PSR-Large #1, it can be either 0 or 1. While it is "usually"
1, code should not rely on this.
"""
self.variables.validate()
for mutex in self.mutexes:
mutex.validate(self.variables)
self.init.validate(self.variables)
self.goal.validate(self.variables)
for op in self.operators:
op.validate(self.variables)
for axiom in self.axioms:
axiom.validate(self.variables, self.init)
assert self.metric is False or self.metric is True, self.metric
def dump(self):
print("variables:")
self.variables.dump()
print("%d mutex groups:" % len(self.mutexes))
for mutex in self.mutexes:
print("group:")
mutex.dump()
print("init:")
self.init.dump()
print("goal:")
self.goal.dump()
print("%d operators:" % len(self.operators))
for operator in self.operators:
operator.dump()
print("%d axioms:" % len(self.axioms))
for axiom in self.axioms:
axiom.dump()
print("metric: %s" % self.metric)
def output(self, stream):
print("begin_version", file=stream)
print(SAS_FILE_VERSION, file=stream)
print("end_version", file=stream)
print("begin_metric", file=stream)
print(int(self.metric), file=stream)
print("end_metric", file=stream)
self.variables.output(stream)
print(len(self.mutexes), file=stream)
for mutex in self.mutexes:
mutex.output(stream)
self.init.output(stream)
self.goal.output(stream)
print(len(self.operators), file=stream)
for op in self.operators:
op.output(stream)
print(len(self.axioms), file=stream)
for axiom in self.axioms:
axiom.output(stream)
def get_encoding_size(self):
task_size = 0
task_size += self.variables.get_encoding_size()
for mutex in self.mutexes:
task_size += mutex.get_encoding_size()
task_size += self.goal.get_encoding_size()
for op in self.operators:
task_size += op.get_encoding_size()
for axiom in self.axioms:
task_size += axiom.get_encoding_size()
return task_size
class SASVariables:
def __init__(self, ranges, axiom_layers, value_names):
self.ranges = ranges
self.axiom_layers = axiom_layers
self.value_names = value_names
def validate(self):
"""Validate variables.
All variables must have range at least 2, and derived
variables must have range exactly 2. See comment on derived
variables in the docstring of SASTask.validate.
"""
assert len(self.ranges) == len(self.axiom_layers) == len(
self.value_names)
for (var_range, layer, var_value_names) in zip(
self.ranges, self.axiom_layers, self.value_names):
assert var_range == len(var_value_names)
assert var_range >= 2
assert layer == -1 or layer >= 0
if layer != -1:
assert var_range == 2
def validate_fact(self, fact):
"""Assert that fact is a valid (var, value) pair."""
var, value = fact
assert 0 <= var < len(self.ranges)
assert 0 <= value < self.ranges[var]
def validate_condition(self, condition):
"""Assert that the condition (list of facts) is sorted, mentions each
variable at most once, and only consists of valid facts."""
last_var = -1
for (var, value) in condition:
self.validate_fact((var, value))
assert var > last_var
last_var = var
def dump(self):
for var, (rang, axiom_layer) in enumerate(
zip(self.ranges, self.axiom_layers)):
if axiom_layer != -1:
axiom_str = " [axiom layer %d]" % axiom_layer
else:
axiom_str = ""
print("v%d in {%s}%s" % (var, list(range(rang)), axiom_str))
def output(self, stream):
print(len(self.ranges), file=stream)
for var, (rang, axiom_layer, values) in enumerate(zip(
self.ranges, self.axiom_layers, self.value_names)):
print("begin_variable", file=stream)
print("var%d" % var, file=stream)
print(axiom_layer, file=stream)
print(rang, file=stream)
assert rang == len(values), (rang, values)
for value in values:
print(value, file=stream)
print("end_variable", file=stream)
def get_encoding_size(self):
# A variable with range k has encoding size k + 1 to also give the
# variable itself some weight.
return len(self.ranges) + sum(self.ranges)
class SASMutexGroup:
def __init__(self, facts):
self.facts = sorted(facts)
def validate(self, variables):
"""Assert that the facts in the mutex group are sorted and unique
and that they are all valid."""
for fact in self.facts:
variables.validate_fact(fact)
assert self.facts == sorted(set(self.facts))
def dump(self):
for var, val in self.facts:
print("v%d: %d" % (var, val))
def output(self, stream):
print("begin_mutex_group", file=stream)
print(len(self.facts), file=stream)
for var, val in self.facts:
print(var, val, file=stream)
print("end_mutex_group", file=stream)
def get_encoding_size(self):
return len(self.facts)
class SASInit:
def __init__(self, values):
self.values = values
def validate(self, variables):
"""Validate initial state.
Assert that the initial state contains the correct number of
values and that all values are in range.
"""
assert len(self.values) == len(variables.ranges)
for fact in enumerate(self.values):
variables.validate_fact(fact)
def dump(self):
for var, val in enumerate(self.values):
print("v%d: %d" % (var, val))
def output(self, stream):
print("begin_state", file=stream)
for val in self.values:
print(val, file=stream)
print("end_state", file=stream)
class SASGoal:
def __init__(self, pairs):
self.pairs = sorted(pairs)
def validate(self, variables):
"""Assert that the goal is nonempty and a valid condition."""
assert self.pairs
variables.validate_condition(self.pairs)
def dump(self):
for var, val in self.pairs:
print("v%d: %d" % (var, val))
def output(self, stream):
print("begin_goal", file=stream)
print(len(self.pairs), file=stream)
for var, val in self.pairs:
print(var, val, file=stream)
print("end_goal", file=stream)
def get_encoding_size(self):
return len(self.pairs)
class SASOperator:
def __init__(self, name, prevail, pre_post, cost):
self.name = name
self.prevail = sorted(prevail)
self.pre_post = self._canonical_pre_post(pre_post)
self.cost = cost
def _canonical_pre_post(self, pre_post):
# Return a sorted and uniquified version of pre_post. We would
# like to just use sorted(set(pre_post)), but this fails because
# the effect conditions are a list and hence not hashable.
def tuplify(entry):
var, pre, post, cond = entry
return var, pre, post, tuple(cond)
def listify(entry):
var, pre, post, cond = entry
return var, pre, post, list(cond)
pre_post = map(tuplify, pre_post)
pre_post = sorted(set(pre_post))
pre_post = list(map(listify, pre_post))
return pre_post
def validate(self, variables):
"""Validate the operator.
Assert that
1. Prevail conditions are valid conditions (i.e., sorted and
all referring to different variables)
2. The pre_post list is sorted by (var, pre, post, cond), and the
same (var, pre, post, cond) 4-tuple is not repeated.
3. Effect conditions are valid conditions and do not contain variables
from the pre- or prevail conditions.
4. Variables occurring in pre_post rules do not have a prevail
condition.
5. Preconditions in pre_post are -1 or valid facts.
6. Effects are valid facts.
7. Effect variables are non-derived.
8. If a variable has multiple pre_post rules, then pre is
identical in all these rules.
9. There is at least one effect.
10. Costs are non-negative integers.
Odd things that are *not* illegal:
- The effect in a pre_post rule may be identical to the
precondition or to an effect condition of that effect.
TODO/open question:
- It is currently not very clear what the semantics of operators
should be when effects "conflict", i.e., when multiple effects
trigger and want to set a given variable to two different
values. In the case where both are unconditional effects, we
should make sure that our representation doesn't actually
contain two such effects, but when at least one of them is
conditional, things are not so easy.
To make our life simpler when generating SAS+ tasks from
PDDL tasks, it probably makes most sense to generalize the
PDDL rule in this case: there is a value order where certain
values "win" over others in this situation. It probably
makes sense to say the "highest" values should win in this
case, because that's consistent with the PDDL rules if we
say false = 0 and true = 1. It is also consistent with our sort
order of effects: we get the right result if we just apply the
effects in sequence.
But whatever we end up deciding, we need to be clear about it,
document it and make sure that all of our code knows the rules
and follows them.
"""
variables.validate_condition(self.prevail)
assert self.pre_post == self._canonical_pre_post(self.pre_post)
prevail_vars = {var for (var, value) in self.prevail}
pre_values = {}
for var, pre, post, cond in self.pre_post:
variables.validate_condition(cond)
assert var not in prevail_vars
if pre != -1:
variables.validate_fact((var, pre))
variables.validate_fact((var, post))
assert variables.axiom_layers[var] == -1
if var in pre_values:
assert pre_values[var] == pre
else:
pre_values[var] = pre
for var, pre, post, cond in self.pre_post:
for cvar, cval in cond:
assert(cvar not in pre_values or pre_values[cvar] == -1)
assert(cvar not in prevail_vars)
assert self.pre_post
assert self.cost >= 0 and self.cost == int(self.cost)
def dump(self):
print(self.name)
print("Prevail:")
for var, val in self.prevail:
print(" v%d: %d" % (var, val))
print("Pre/Post:")
for var, pre, post, cond in self.pre_post:
if cond:
cond_str = " [%s]" % ", ".join(
["%d: %d" % tuple(c) for c in cond])
else:
cond_str = ""
print(" v%d: %d -> %d%s" % (var, pre, post, cond_str))
def output(self, stream):
print("begin_operator", file=stream)
print(self.name[1:-1], file=stream)
print(len(self.prevail), file=stream)
for var, val in self.prevail:
print(var, val, file=stream)
print(len(self.pre_post), file=stream)
for var, pre, post, cond in self.pre_post:
print(len(cond), end=' ', file=stream)
for cvar, cval in cond:
print(cvar, cval, end=' ', file=stream)
print(var, pre, post, file=stream)
print(self.cost, file=stream)
print("end_operator", file=stream)
def get_encoding_size(self):
size = 1 + len(self.prevail)
for var, pre, post, cond in self.pre_post:
size += 1 + len(cond)
if pre != -1:
size += 1
return size
def get_applicability_conditions(self):
"""Return the combined applicability conditions
(prevail conditions and preconditions) of the operator.
Returns a sorted list of (var, value) pairs. This is
guaranteed to contain at most one fact per variable and
must hence be non-contradictory."""
conditions = {}
for var, val in self.prevail:
assert var not in conditions
conditions[var] = val
for var, pre, post, cond in self.pre_post:
if pre != -1:
assert var not in conditions or conditions[var] == pre
conditions[var] = pre
return sorted(conditions.items())
class SASAxiom:
def __init__(self, condition, effect):
self.condition = sorted(condition)
self.effect = effect
assert self.effect[1] in (0, 1)
for _, val in condition:
assert val >= 0, condition
def validate(self, variables, init):
"""Validate the axiom.
Assert that the axiom condition is a valid condition, that the
effect is a valid fact, that the effect variable is a derived
variable, and that the layering condition is satisfied.
See the docstring of SASTask.validate for information on the
restriction on derived variables. The layering condition boils
down to:
1. Axioms always set the "non-init" value of the derived
variable.
2. Derived variables in the condition must have a lower or
equal layer to derived variables appearing in the effect.
3. Conditions with equal layer are only allowed when the
condition uses the "non-init" value of that variable.
TODO/bug: rule #1 is currently disabled because we currently
have axioms that violate it. This is likely due to the
"extended domain transition graphs" described in the Fast
Downward paper, Section 5.1. However, we eventually want to
change this. See issue454. For cases where rule #1 is violated,
"non-init" should be "init" in rule #3.
"""
variables.validate_condition(self.condition)
variables.validate_fact(self.effect)
eff_var, eff_value = self.effect
eff_layer = variables.axiom_layers[eff_var]
assert eff_layer >= 0
eff_init_value = init.values[eff_var]
## The following rule is currently commented out because of
## the TODO/bug mentioned in the docstring.
# assert eff_value != eff_init_value
for cond_var, cond_value in self.condition:
cond_layer = variables.axiom_layers[cond_var]
if cond_layer != -1:
assert cond_layer <= eff_layer
if cond_layer == eff_layer:
cond_init_value = init.values[cond_var]
## Once the TODO/bug above is addressed, the
## following four lines can be simplified because
## we are guaranteed to land in the "if" branch.
if eff_value != eff_init_value:
assert cond_value != cond_init_value
else:
assert cond_value == cond_init_value
def dump(self):
print("Condition:")
for var, val in self.condition:
print(" v%d: %d" % (var, val))
print("Effect:")
var, val = self.effect
print(" v%d: %d" % (var, val))
def output(self, stream):
print("begin_rule", file=stream)
print(len(self.condition), file=stream)
for var, val in self.condition:
print(var, val, file=stream)
var, val = self.effect
print(var, 1 - val, val, file=stream)
print("end_rule", file=stream)
def get_encoding_size(self):
return 1 + len(self.condition)
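# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original Fast Downward source):
# builds a minimal one-variable task and serializes it in the SAS+ format
# written by SASTask.output() above. All names and values below are made up
# for demonstration purposes.
if __name__ == "__main__":
    import io
    variables = SASVariables(
        ranges=[2], axiom_layers=[-1],
        value_names=[["Atom at(robot, a)", "Atom at(robot, b)"]])
    move = SASOperator(name="(move a b)", prevail=[],
                       pre_post=[(0, 0, 1, [])], cost=1)
    task = SASTask(variables, mutexes=[], init=SASInit([0]),
                   goal=SASGoal([(0, 1)]), operators=[move], axioms=[],
                   metric=False)
    print("encoding size:", task.get_encoding_size())
    print("applicability of move:", move.get_applicability_conditions())
    buf = io.StringIO()
    task.output(buf)
    print(buf.getvalue())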
| 18,064 | 36.792887 | 78 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/sccs.py |
"""Tarjan's algorithm for maximal strongly connected components.
We provide two versions of the algorithm for different graph
representations.
Since the original recursive version exceeds python's maximal
recursion depth on some planning instances, this is an iterative
version with an explicit recursion stack (iter_stack).
Note that the derived graph where each SCC is a single "supernode" is
necessarily acyclic. The SCCs returned by the algorithm are in a
topological sort order with respect to this derived DAG.
"""
from collections import defaultdict
__all__ = ["get_sccs_adjacency_list", "get_sccs_adjacency_dict"]
def get_sccs_adjacency_list(adjacency_list):
"""Compute SCCs for a graph represented as an adjacency list.
`adjacency_list` is a list (or similar data structure) whose
indices correspond to the graph nodes. For example, if
`len(adjacency_list)` is N, the graph nodes are {0, ..., N-1}.
For every node `u`, `adjacency_list[u]` is the list (or similar data
structure) of successors of `u`.
Returns a list of lists that defines a partition of {0, ..., N-1},
where each block in the partition is an SCC of the graph, and
the partition is given in topological sort order."""
return StronglyConnectedComponentComputation(adjacency_list).get_result()
def get_sccs_adjacency_dict(adjacency_dict):
"""Compute SCCs for a graph represented as an adjacency dict.
`adjacency_dict` is a dictionary whose keys are the vertices of
the graph.
For every node `u`, adjacency_dict[u]` is the list (or similar
data structure) of successors of `u`.
Returns a list of lists that defines a partition of the graph
nodes, where each block in the partition is an SCC of the graph,
and the partition is given in topological sort order."""
node_to_index = {}
index_to_node = []
for index, node in enumerate(adjacency_dict):
node_to_index[node] = index
index_to_node.append(node)
adjacency_list = []
for index, node in enumerate(index_to_node):
successors = adjacency_dict[node]
successor_indices = [node_to_index[v] for v in successors]
adjacency_list.append(successor_indices)
result_indices = get_sccs_adjacency_list(adjacency_list)
result = []
for block_indices in result_indices:
block = [index_to_node[index] for index in block_indices]
result.append(block)
return result
class StronglyConnectedComponentComputation:
def __init__(self, unweighted_graph):
self.graph = unweighted_graph
self.BEGIN, self.CONTINUE, self.RETURN = 0, 1, 2 # "recursion" handling
def get_result(self):
self.indices = dict()
self.lowlinks = defaultdict(lambda: -1)
self.stack_indices = dict()
self.current_index = 0
self.stack = []
self.sccs = []
for i in range(len(self.graph)):
if i not in self.indices:
self.visit(i)
self.sccs.reverse()
return self.sccs
def visit(self, vertex):
iter_stack = [(vertex, None, None, self.BEGIN)]
while iter_stack:
v, w, succ_index, state = iter_stack.pop()
if state == self.BEGIN:
self.current_index += 1
self.indices[v] = self.current_index
self.lowlinks[v] = self.current_index
self.stack_indices[v] = len(self.stack)
self.stack.append(v)
iter_stack.append((v, None, 0, self.CONTINUE))
elif state == self.CONTINUE:
successors = self.graph[v]
if succ_index == len(successors):
if self.lowlinks[v] == self.indices[v]:
stack_index = self.stack_indices[v]
scc = self.stack[stack_index:]
del self.stack[stack_index:]
for n in scc:
del self.stack_indices[n]
self.sccs.append(scc)
else:
w = successors[succ_index]
if w not in self.indices:
iter_stack.append((v, w, succ_index, self.RETURN))
iter_stack.append((w, None, None, self.BEGIN))
else:
if w in self.stack_indices:
self.lowlinks[v] = min(self.lowlinks[v],
self.indices[w])
iter_stack.append(
(v, None, succ_index + 1, self.CONTINUE))
elif state == self.RETURN:
self.lowlinks[v] = min(self.lowlinks[v], self.lowlinks[w])
iter_stack.append((v, None, succ_index + 1, self.CONTINUE))
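# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original Fast Downward source).
# In the made-up graph below, nodes 0 and 1 form a cycle and hence one SCC;
# node 2 only has edges *into* that cycle, so its singleton SCC precedes it
# in the returned topological order.
if __name__ == "__main__":
    print(get_sccs_adjacency_list([[1], [0], [0, 1]]))
    # -> [[2], [0, 1]]
    print(get_sccs_adjacency_dict({"a": ["b"], "b": ["a"], "c": ["a"]}))
    # -> [['c'], ['a', 'b']]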
| 4,835 | 38 | 79 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/pddl/f_expression.py |
class FunctionalExpression:
def __init__(self, parts):
self.parts = tuple(parts)
def dump(self, indent=" "):
print("%s%s" % (indent, self._dump()))
for part in self.parts:
part.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def instantiate(self, var_mapping, init_facts):
raise ValueError("Cannot instantiate condition: not normalized")
class NumericConstant(FunctionalExpression):
parts = ()
def __init__(self, value):
if value != int(value):
raise ValueError("Fractional numbers are not supported")
self.value = int(value)
def __eq__(self, other):
return (self.__class__ == other.__class__ and self.value == other.value)
def __str__(self):
return "%s %s" % (self.__class__.__name__, self.value)
def _dump(self):
return str(self)
def instantiate(self, var_mapping, init_facts):
return self
class PrimitiveNumericExpression(FunctionalExpression):
parts = ()
def __init__(self, symbol, args):
self.symbol = symbol
self.args = tuple(args)
self.hash = hash((self.__class__, self.symbol, self.args))
def __hash__(self):
return self.hash
def __eq__(self, other):
return (self.__class__ == other.__class__ and self.symbol == other.symbol
and self.args == other.args)
def __str__(self):
return "%s %s(%s)" % ("PNE", self.symbol, ", ".join(map(str, self.args)))
def dump(self, indent=" "):
print("%s%s" % (indent, self._dump()))
def _dump(self):
return str(self)
def instantiate(self, var_mapping, init_assignments):
args = [var_mapping.get(arg, arg) for arg in self.args]
pne = PrimitiveNumericExpression(self.symbol, args)
assert self.symbol != "total-cost"
# We know this expression is constant. Substitute it by corresponding
# initialization from task.
result = init_assignments.get(pne)
assert result is not None, "Could not find instantiation for PNE: %r" % (str(pne),)
return result
class FunctionAssignment:
def __init__(self, fluent, expression):
self.fluent = fluent
self.expression = expression
def __str__(self):
return "%s %s %s" % (self.__class__.__name__, self.fluent, self.expression)
def dump(self, indent=" "):
print("%s%s" % (indent, self._dump()))
self.fluent.dump(indent + " ")
self.expression.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def instantiate(self, var_mapping, init_facts):
if not (isinstance(self.expression, PrimitiveNumericExpression) or
isinstance(self.expression, NumericConstant)):
raise ValueError("Cannot instantiate assignment: not normalized")
# We know that this assignment is a cost effect of an action (for initial state
# assignments, "instantiate" is not called). Hence, we know that the fluent is
# the 0-ary "total-cost", which does not need to be instantiated.
assert self.fluent.symbol == "total-cost"
fluent = self.fluent
expression = self.expression.instantiate(var_mapping, init_facts)
return self.__class__(fluent, expression)
class Assign(FunctionAssignment):
def __str__(self):
return "%s := %s" % (self.fluent, self.expression)
class Increase(FunctionAssignment):
pass
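# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original Fast Downward source):
# a "(increase (total-cost) 5)" action-cost effect instantiated with an empty
# variable mapping. The numeric constant instantiates to itself, so the
# resulting assignment is unchanged.
if __name__ == "__main__":
    total_cost = PrimitiveNumericExpression("total-cost", [])
    cost_effect = Increase(total_cost, NumericConstant(5))
    print(cost_effect.instantiate({}, {}))
    # -> Increase PNE total-cost() NumericConstant 5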
| 3,485 | 40.011765 | 91 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/pddl/pddl_types.py |
# Renamed from types.py to avoid clash with stdlib module.
# In the future, use explicitly relative imports or absolute
# imports as a better solution.
import itertools
def _get_type_predicate_name(type_name):
# PDDL allows mixing types and predicates, but some PDDL files
# have name collisions between types and predicates. We want to
# support both the case where such name collisions occur and the
# case where types are used as predicates.
#
# We internally give types predicate names that cannot be confused
# with non-type predicates. When the input uses a PDDL type as a
# predicate, we automatically map it to this internal name.
return "type@%s" % type_name
class Type:
def __init__(self, name, basetype_name=None):
self.name = name
self.basetype_name = basetype_name
def __str__(self):
return self.name
def __repr__(self):
return "Type(%s, %s)" % (self.name, self.basetype_name)
def get_predicate_name(self):
return _get_type_predicate_name(self.name)
class TypedObject:
def __init__(self, name, type_name):
self.name = name
self.type_name = type_name
def __hash__(self):
return hash((self.name, self.type_name))
def __eq__(self, other):
return self.name == other.name and self.type_name == other.type_name
def __ne__(self, other):
return not self == other
def __str__(self):
return "%s: %s" % (self.name, self.type_name)
def __repr__(self):
return "<TypedObject %s: %s>" % (self.name, self.type_name)
def uniquify_name(self, type_map, renamings):
if self.name not in type_map:
type_map[self.name] = self.type_name
return self
for counter in itertools.count(1):
new_name = self.name + str(counter)
if new_name not in type_map:
renamings[self.name] = new_name
type_map[new_name] = self.type_name
return TypedObject(new_name, self.type_name)
def get_atom(self):
# TODO: Resolve cyclic import differently.
from . import conditions
predicate_name = _get_type_predicate_name(self.type_name)
return conditions.Atom(predicate_name, [self.name])
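# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original Fast Downward source):
# types get an internal predicate name that cannot clash with user-defined
# predicates, and uniquify_name renames a colliding parameter name.
if __name__ == "__main__":
    print(Type("truck", "object").get_predicate_name())   # -> type@truck
    type_map, renamings = {}, {}
    first = TypedObject("?x", "truck").uniquify_name(type_map, renamings)
    second = TypedObject("?x", "location").uniquify_name(type_map, renamings)
    print(first, "|", second, "|", renamings)
    # -> ?x: truck | ?x1: location | {'?x': '?x1'}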
| 2,290 | 31.267606 | 76 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/pddl/effects.py |
from . import conditions
def cartesian_product(*sequences):
# TODO: Also exists in tools.py outside the pddl package (defined slightly
# differently). Not good. Need proper import paths.
if not sequences:
yield ()
else:
for tup in cartesian_product(*sequences[1:]):
for item in sequences[0]:
yield (item,) + tup
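# Illustrative note (not part of the original source): unlike
# itertools.product, this generator varies the *first* sequence fastest, e.g.
#     list(cartesian_product([1, 2], ["a", "b"]))
#     == [(1, "a"), (2, "a"), (1, "b"), (2, "b")]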
class Effect:
def __init__(self, parameters, condition, literal):
self.parameters = parameters
self.condition = condition
self.literal = literal
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.parameters == other.parameters and
self.condition == other.condition and
self.literal == other.literal)
def dump(self):
indent = " "
if self.parameters:
print("%sforall %s" % (indent, ", ".join(map(str, self.parameters))))
indent += " "
if self.condition != conditions.Truth():
print("%sif" % indent)
self.condition.dump(indent + " ")
print("%sthen" % indent)
indent += " "
print("%s%s" % (indent, self.literal))
def copy(self):
return Effect(self.parameters, self.condition, self.literal)
def uniquify_variables(self, type_map):
renamings = {}
self.parameters = [par.uniquify_name(type_map, renamings)
for par in self.parameters]
self.condition = self.condition.uniquify_variables(type_map, renamings)
self.literal = self.literal.rename_variables(renamings)
def instantiate(self, var_mapping, init_facts, fluent_facts,
objects_by_type, result):
if self.parameters:
var_mapping = var_mapping.copy() # Will modify this.
object_lists = [objects_by_type.get(par.type_name, [])
for par in self.parameters]
for object_tuple in cartesian_product(*object_lists):
for (par, obj) in zip(self.parameters, object_tuple):
var_mapping[par.name] = obj
self._instantiate(var_mapping, init_facts, fluent_facts, result)
else:
self._instantiate(var_mapping, init_facts, fluent_facts, result)
def _instantiate(self, var_mapping, init_facts, fluent_facts, result):
condition = []
try:
self.condition.instantiate(var_mapping, init_facts, fluent_facts, condition)
except conditions.Impossible:
return
effects = []
self.literal.instantiate(var_mapping, init_facts, fluent_facts, effects)
assert len(effects) <= 1
if effects:
result.append((condition, effects[0]))
def relaxed(self):
if self.literal.negated:
return None
else:
return Effect(self.parameters, self.condition.relaxed(), self.literal)
def simplified(self):
return Effect(self.parameters, self.condition.simplified(), self.literal)
class ConditionalEffect:
def __init__(self, condition, effect):
if isinstance(effect, ConditionalEffect):
self.condition = conditions.Conjunction([condition, effect.condition])
self.effect = effect.effect
else:
self.condition = condition
self.effect = effect
def dump(self, indent=" "):
print("%sif" % (indent))
self.condition.dump(indent + " ")
print("%sthen" % (indent))
self.effect.dump(indent + " ")
def normalize(self):
norm_effect = self.effect.normalize()
if isinstance(norm_effect, ConjunctiveEffect):
new_effects = []
for effect in norm_effect.effects:
assert isinstance(effect, SimpleEffect) or isinstance(effect, ConditionalEffect)
new_effects.append(ConditionalEffect(self.condition, effect))
return ConjunctiveEffect(new_effects)
elif isinstance(norm_effect, UniversalEffect):
child = norm_effect.effect
cond_effect = ConditionalEffect(self.condition, child)
return UniversalEffect(norm_effect.parameters, cond_effect)
else:
return ConditionalEffect(self.condition, norm_effect)
def extract_cost(self):
return None, self
class UniversalEffect:
def __init__(self, parameters, effect):
if isinstance(effect, UniversalEffect):
self.parameters = parameters + effect.parameters
self.effect = effect.effect
else:
self.parameters = parameters
self.effect = effect
def dump(self, indent=" "):
print("%sforall %s" % (indent, ", ".join(map(str, self.parameters))))
self.effect.dump(indent + " ")
def normalize(self):
norm_effect = self.effect.normalize()
if isinstance(norm_effect, ConjunctiveEffect):
new_effects = []
for effect in norm_effect.effects:
assert isinstance(effect, SimpleEffect) or isinstance(effect, ConditionalEffect)\
or isinstance(effect, UniversalEffect)
new_effects.append(UniversalEffect(self.parameters, effect))
return ConjunctiveEffect(new_effects)
else:
return UniversalEffect(self.parameters, norm_effect)
def extract_cost(self):
return None, self
class ConjunctiveEffect:
def __init__(self, effects):
flattened_effects = []
for effect in effects:
if isinstance(effect, ConjunctiveEffect):
flattened_effects += effect.effects
else:
flattened_effects.append(effect)
self.effects = flattened_effects
def dump(self, indent=" "):
print("%sand" % (indent))
for eff in self.effects:
eff.dump(indent + " ")
def normalize(self):
new_effects = []
for effect in self.effects:
new_effects.append(effect.normalize())
return ConjunctiveEffect(new_effects)
def extract_cost(self):
new_effects = []
cost_effect = None
for effect in self.effects:
if isinstance(effect, CostEffect):
cost_effect = effect
else:
new_effects.append(effect)
return cost_effect, ConjunctiveEffect(new_effects)
class SimpleEffect:
def __init__(self, effect):
self.effect = effect
def dump(self, indent=" "):
print("%s%s" % (indent, self.effect))
def normalize(self):
return self
def extract_cost(self):
return None, self
class CostEffect:
def __init__(self, effect):
self.effect = effect
def dump(self, indent=" "):
print("%s%s" % (indent, self.effect))
def normalize(self):
return self
def extract_cost(self):
# This only happens if an action has no effect apart from the cost effect.
return self, None
| 7,027 | 38.483146 | 97 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/pddl/functions.py |
class Function:
def __init__(self, name, arguments, type_name):
self.name = name
self.arguments = arguments
if type_name != "number":
raise SystemExit("Error: object fluents not supported\n" +
"(function %s has type %s)" % (name, type_name))
self.type_name = type_name
def __str__(self):
result = "%s(%s)" % (self.name, ", ".join(map(str, self.arguments)))
if self.type_name:
result += ": %s" % self.type_name
return result
| 542 | 35.2 | 77 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/pddl/actions.py |
import copy
from . import conditions
class Action:
def __init__(self, name, parameters, num_external_parameters,
precondition, effects, cost):
assert 0 <= num_external_parameters <= len(parameters)
self.name = name
self.parameters = parameters
# num_external_parameters denotes how many of the parameters
# are "external", i.e., should be part of the grounded action
# name. Usually all parameters are external, but "invisible"
# parameters can be created when compiling away existential
# quantifiers in conditions.
self.num_external_parameters = num_external_parameters
self.precondition = precondition
self.effects = effects
self.cost = cost
self.uniquify_variables() # TODO: uniquify variables in cost?
def __repr__(self):
return "<Action %r at %#x>" % (self.name, id(self))
def dump(self):
print("%s(%s)" % (self.name, ", ".join(map(str, self.parameters))))
print("Precondition:")
self.precondition.dump()
print("Effects:")
for eff in self.effects:
eff.dump()
print("Cost:")
if self.cost:
self.cost.dump()
else:
print(" None")
def uniquify_variables(self):
self.type_map = {par.name: par.type_name for par in self.parameters}
self.precondition = self.precondition.uniquify_variables(self.type_map)
for effect in self.effects:
effect.uniquify_variables(self.type_map)
def relaxed(self):
new_effects = []
for eff in self.effects:
relaxed_eff = eff.relaxed()
if relaxed_eff:
new_effects.append(relaxed_eff)
return Action(self.name, self.parameters, self.num_external_parameters,
self.precondition.relaxed().simplified(),
new_effects, self.cost)
def untyped(self):
# We do not actually remove the types from the parameter lists,
# just additionally incorporate them into the conditions.
# Maybe not very nice.
result = copy.copy(self)
parameter_atoms = [par.to_untyped_strips() for par in self.parameters]
new_precondition = self.precondition.untyped()
result.precondition = conditions.Conjunction(parameter_atoms + [new_precondition])
result.effects = [eff.untyped() for eff in self.effects]
return result
def instantiate(self, var_mapping, init_facts, init_assignments,
fluent_facts, objects_by_type, metric):
"""Return a PropositionalAction which corresponds to the instantiation of
this action with the arguments in var_mapping. Only fluent parts of the
conditions (those in fluent_facts) are included. init_facts are evaluated
while instantiating.
Precondition and effect conditions must be normalized for this to work.
Returns None if var_mapping does not correspond to a valid instantiation
(because it has impossible preconditions or an empty effect list.)"""
arg_list = [var_mapping[par.name]
for par in self.parameters[:self.num_external_parameters]]
name = "(%s %s)" % (self.name, " ".join(arg_list))
precondition = []
try:
self.precondition.instantiate(var_mapping, init_facts,
fluent_facts, precondition)
except conditions.Impossible:
return None
effects = []
for eff in self.effects:
eff.instantiate(var_mapping, init_facts, fluent_facts,
objects_by_type, effects)
if effects:
if metric:
if self.cost is None:
cost = 0
else:
cost = int(self.cost.instantiate(
var_mapping, init_assignments).expression.value)
else:
cost = 1
return PropositionalAction(name, precondition, effects, cost)
else:
return None
class PropositionalAction:
def __init__(self, name, precondition, effects, cost):
self.name = name
self.precondition = precondition
self.add_effects = []
self.del_effects = []
for condition, effect in effects:
if not effect.negated:
self.add_effects.append((condition, effect))
# Warning: This is O(N^2), could be turned into O(N).
# But that might actually harm performance, since there are
# usually few effects.
# TODO: Measure this in critical domains, then use sets if acceptable.
for condition, effect in effects:
if effect.negated and (condition, effect.negate()) not in self.add_effects:
self.del_effects.append((condition, effect.negate()))
self.cost = cost
def __repr__(self):
return "<PropositionalAction %r at %#x>" % (self.name, id(self))
def dump(self):
print(self.name)
for fact in self.precondition:
print("PRE: %s" % fact)
for cond, fact in self.add_effects:
print("ADD: %s -> %s" % (", ".join(map(str, cond)), fact))
for cond, fact in self.del_effects:
print("DEL: %s -> %s" % (", ".join(map(str, cond)), fact))
print("cost:", self.cost)
| 5,426 | 39.804511 | 90 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/pddl/predicates.py |
class Predicate:
def __init__(self, name, arguments):
self.name = name
self.arguments = arguments
def __str__(self):
return "%s(%s)" % (self.name, ", ".join(map(str, self.arguments)))
def get_arity(self):
return len(self.arguments)
| 278 | 24.363636 | 74 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/pddl/__init__.py |
from .pddl_types import Type
from .pddl_types import TypedObject
from .tasks import Task
from .tasks import Requirements
from .predicates import Predicate
from .functions import Function
from .actions import Action
from .actions import PropositionalAction
from .axioms import Axiom
from .axioms import PropositionalAxiom
from .conditions import Literal
from .conditions import Atom
from .conditions import NegatedAtom
from .conditions import Falsity
from .conditions import Truth
from .conditions import Conjunction
from .conditions import Disjunction
from .conditions import UniversalCondition
from .conditions import ExistentialCondition
from .effects import ConditionalEffect
from .effects import ConjunctiveEffect
from .effects import CostEffect
from .effects import Effect
from .effects import SimpleEffect
from .effects import UniversalEffect
from .f_expression import Assign
from .f_expression import Increase
from .f_expression import NumericConstant
from .f_expression import PrimitiveNumericExpression
| 1,020 | 25.868421 | 52 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/pddl/tasks.py |
from . import axioms
from . import predicates
class Task:
def __init__(self, domain_name, task_name, requirements,
types, objects, predicates, functions, init, goal,
actions, axioms, use_metric):
self.domain_name = domain_name
self.task_name = task_name
self.requirements = requirements
self.types = types
self.objects = objects
self.predicates = predicates
self.functions = functions
self.init = init
self.goal = goal
self.actions = actions
self.axioms = axioms
self.axiom_counter = 0
self.use_min_cost_metric = use_metric
def add_axiom(self, parameters, condition):
name = "new-axiom@%d" % self.axiom_counter
self.axiom_counter += 1
axiom = axioms.Axiom(name, parameters, len(parameters), condition)
self.predicates.append(predicates.Predicate(name, parameters))
self.axioms.append(axiom)
return axiom
def dump(self):
print("Problem %s: %s [%s]" % (
self.domain_name, self.task_name, self.requirements))
print("Types:")
for type in self.types:
print(" %s" % type)
print("Objects:")
for obj in self.objects:
print(" %s" % obj)
print("Predicates:")
for pred in self.predicates:
print(" %s" % pred)
print("Functions:")
for func in self.functions:
print(" %s" % func)
print("Init:")
for fact in self.init:
print(" %s" % fact)
print("Goal:")
self.goal.dump()
print("Actions:")
for action in self.actions:
action.dump()
if self.axioms:
print("Axioms:")
for axiom in self.axioms:
axiom.dump()
class Requirements:
def __init__(self, requirements):
self.requirements = requirements
for req in requirements:
assert req in (
":strips", ":adl", ":typing", ":negation", ":equality",
":negative-preconditions", ":disjunctive-preconditions",
":existential-preconditions", ":universal-preconditions",
":quantified-preconditions", ":conditional-effects",
":derived-predicates", ":action-costs"), req
def __str__(self):
return ", ".join(self.requirements)
| 2,414 | 33.014085 | 74 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/pddl/axioms.py |
from . import conditions
class Axiom:
def __init__(self, name, parameters, num_external_parameters, condition):
# For an explanation of num_external_parameters, see the
# related Action class. Note that num_external_parameters
# always equals the arity of the derived predicate.
assert 0 <= num_external_parameters <= len(parameters)
self.name = name
self.parameters = parameters
self.num_external_parameters = num_external_parameters
self.condition = condition
self.uniquify_variables()
def dump(self):
args = map(str, self.parameters[:self.num_external_parameters])
print("Axiom %s(%s)" % (self.name, ", ".join(args)))
self.condition.dump()
def uniquify_variables(self):
self.type_map = {par.name: par.type_name for par in self.parameters}
self.condition = self.condition.uniquify_variables(self.type_map)
def instantiate(self, var_mapping, init_facts, fluent_facts):
# The comments for Action.instantiate apply accordingly.
arg_list = [self.name] + [
var_mapping[par.name]
for par in self.parameters[:self.num_external_parameters]]
name = "(%s)" % " ".join(arg_list)
condition = []
try:
self.condition.instantiate(var_mapping, init_facts, fluent_facts, condition)
except conditions.Impossible:
return None
effect_args = [var_mapping.get(arg.name, arg.name)
for arg in self.parameters[:self.num_external_parameters]]
effect = conditions.Atom(self.name, effect_args)
return PropositionalAxiom(name, condition, effect)
class PropositionalAxiom:
def __init__(self, name, condition, effect):
self.name = name
self.condition = condition
self.effect = effect
def clone(self):
return PropositionalAxiom(self.name, list(self.condition), self.effect)
def dump(self):
if self.effect.negated:
print("not", end=' ')
print(self.name)
for fact in self.condition:
print("PRE: %s" % fact)
print("EFF: %s" % self.effect)
@property
def key(self):
return (self.name, self.condition, self.effect)
def __lt__(self, other):
return self.key < other.key
def __le__(self, other):
return self.key <= other.key
def __eq__(self, other):
return self.key == other.key
def __repr__(self):
return '<PropositionalAxiom %s %s -> %s>' % (
self.name, self.condition, self.effect)
| 2,609 | 32.896104 | 88 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/pddl/conditions.py |
# Conditions (of any type) are immutable, because they need to
# be hashed occasionally. Immutability also allows more efficient comparison
# based on a precomputed hash value.
#
# Careful: Most other classes (e.g. Effects, Axioms, Actions) are not!
class Condition:
def __init__(self, parts):
self.parts = tuple(parts)
self.hash = hash((self.__class__, self.parts))
def __hash__(self):
return self.hash
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self.hash < other.hash
def __le__(self, other):
return self.hash <= other.hash
def dump(self, indent=" "):
print("%s%s" % (indent, self._dump()))
for part in self.parts:
part.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def _postorder_visit(self, method_name, *args):
part_results = [part._postorder_visit(method_name, *args)
for part in self.parts]
method = getattr(self, method_name, self._propagate)
return method(part_results, *args)
def _propagate(self, parts, *args):
return self.change_parts(parts)
def simplified(self):
return self._postorder_visit("_simplified")
def relaxed(self):
return self._postorder_visit("_relaxed")
def untyped(self):
return self._postorder_visit("_untyped")
def uniquify_variables(self, type_map, renamings={}):
# Cannot use _postorder_visit because this requires preorder
# for quantified effects.
if not self.parts:
return self
else:
return self.__class__([part.uniquify_variables(type_map, renamings)
for part in self.parts])
def to_untyped_strips(self):
raise ValueError("Not a STRIPS condition: %s" % self.__class__.__name__)
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
raise ValueError("Cannot instantiate condition: not normalized")
def free_variables(self):
result = set()
for part in self.parts:
result |= part.free_variables()
return result
def has_disjunction(self):
for part in self.parts:
if part.has_disjunction():
return True
return False
def has_existential_part(self):
for part in self.parts:
if part.has_existential_part():
return True
return False
def has_universal_part(self):
for part in self.parts:
if part.has_universal_part():
return True
return False
class ConstantCondition(Condition):
# Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
__hash__ = Condition.__hash__
parts = ()
def __init__(self):
self.hash = hash(self.__class__)
def change_parts(self, parts):
return self
def __eq__(self, other):
return self.__class__ is other.__class__
class Impossible(Exception):
pass
class Falsity(ConstantCondition):
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
raise Impossible()
def negate(self):
return Truth()
class Truth(ConstantCondition):
def to_untyped_strips(self):
return []
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
pass
def negate(self):
return Falsity()
class JunctorCondition(Condition):
# Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
__hash__ = Condition.__hash__
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
self.__class__ is other.__class__ and
self.parts == other.parts)
def change_parts(self, parts):
return self.__class__(parts)
class Conjunction(JunctorCondition):
def _simplified(self, parts):
result_parts = []
for part in parts:
if isinstance(part, Conjunction):
result_parts += part.parts
elif isinstance(part, Falsity):
return Falsity()
elif not isinstance(part, Truth):
result_parts.append(part)
if not result_parts:
return Truth()
if len(result_parts) == 1:
return result_parts[0]
return Conjunction(result_parts)
def to_untyped_strips(self):
result = []
for part in self.parts:
result += part.to_untyped_strips()
return result
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
assert not result, "Condition not simplified"
for part in self.parts:
part.instantiate(var_mapping, init_facts, fluent_facts, result)
def negate(self):
return Disjunction([p.negate() for p in self.parts])
class Disjunction(JunctorCondition):
def _simplified(self, parts):
result_parts = []
for part in parts:
if isinstance(part, Disjunction):
result_parts += part.parts
elif isinstance(part, Truth):
return Truth()
elif not isinstance(part, Falsity):
result_parts.append(part)
if not result_parts:
return Falsity()
if len(result_parts) == 1:
return result_parts[0]
return Disjunction(result_parts)
def negate(self):
return Conjunction([p.negate() for p in self.parts])
def has_disjunction(self):
return True
class QuantifiedCondition(Condition):
# Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
__hash__ = Condition.__hash__
def __init__(self, parameters, parts):
self.parameters = tuple(parameters)
self.parts = tuple(parts)
self.hash = hash((self.__class__, self.parameters, self.parts))
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
self.__class__ is other.__class__ and
self.parameters == other.parameters and
self.parts == other.parts)
def _dump(self, indent=" "):
arglist = ", ".join(map(str, self.parameters))
return "%s %s" % (self.__class__.__name__, arglist)
def _simplified(self, parts):
if isinstance(parts[0], ConstantCondition):
return parts[0]
else:
return self._propagate(parts)
def uniquify_variables(self, type_map, renamings={}):
renamings = dict(renamings) # Create a copy.
new_parameters = [par.uniquify_name(type_map, renamings)
for par in self.parameters]
new_parts = (self.parts[0].uniquify_variables(type_map, renamings),)
return self.__class__(new_parameters, new_parts)
def free_variables(self):
result = Condition.free_variables(self)
for par in self.parameters:
result.discard(par.name)
return result
def change_parts(self, parts):
return self.__class__(self.parameters, parts)
class UniversalCondition(QuantifiedCondition):
def _untyped(self, parts):
type_literals = [par.get_atom().negate() for par in self.parameters]
return UniversalCondition(self.parameters,
[Disjunction(type_literals + parts)])
def negate(self):
return ExistentialCondition(self.parameters, [p.negate() for p in self.parts])
def has_universal_part(self):
return True
class ExistentialCondition(QuantifiedCondition):
def _untyped(self, parts):
type_literals = [par.get_atom() for par in self.parameters]
return ExistentialCondition(self.parameters,
[Conjunction(type_literals + parts)])
def negate(self):
return UniversalCondition(self.parameters, [p.negate() for p in self.parts])
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
assert not result, "Condition not simplified"
self.parts[0].instantiate(var_mapping, init_facts, fluent_facts, result)
def has_existential_part(self):
return True
class Literal(Condition):
# Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
__hash__ = Condition.__hash__
parts = []
__slots__ = ["predicate", "args", "hash"]
def __init__(self, predicate, args):
self.predicate = predicate
self.args = tuple(args)
self.hash = hash((self.__class__, self.predicate, self.args))
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
self.__class__ is other.__class__ and
self.predicate == other.predicate and
self.args == other.args)
def __ne__(self, other):
return not self == other
@property
def key(self):
return str(self.predicate), self.args
def __lt__(self, other):
return self.key < other.key
def __le__(self, other):
return self.key <= other.key
def __str__(self):
return "%s %s(%s)" % (self.__class__.__name__, self.predicate,
", ".join(map(str, self.args)))
def __repr__(self):
return '<%s>' % self
def _dump(self):
return str(self)
def change_parts(self, parts):
return self
def uniquify_variables(self, type_map, renamings={}):
return self.rename_variables(renamings)
def rename_variables(self, renamings):
new_args = tuple(renamings.get(arg, arg) for arg in self.args)
return self.__class__(self.predicate, new_args)
def replace_argument(self, position, new_arg):
new_args = list(self.args)
new_args[position] = new_arg
return self.__class__(self.predicate, new_args)
def free_variables(self):
return {arg for arg in self.args if arg[0] == "?"}
class Atom(Literal):
negated = False
def to_untyped_strips(self):
return [self]
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
args = [var_mapping.get(arg, arg) for arg in self.args]
atom = Atom(self.predicate, args)
if atom in fluent_facts:
result.append(atom)
elif atom not in init_facts:
raise Impossible()
def negate(self):
return NegatedAtom(self.predicate, self.args)
def positive(self):
return self
class NegatedAtom(Literal):
negated = True
def _relaxed(self, parts):
return Truth()
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
args = [var_mapping.get(arg, arg) for arg in self.args]
atom = Atom(self.predicate, args)
if atom in fluent_facts:
result.append(NegatedAtom(self.predicate, args))
elif atom in init_facts:
raise Impossible()
def negate(self):
return Atom(self.predicate, self.args)
positive = negate
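# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original Fast Downward source):
# simplified() flattens nested junctors and drops constant parts, while
# negate() pushes negation inward (NNF-style).
if __name__ == "__main__":
    at_a = Atom("at", ["?x", "a"])
    at_b = Atom("at", ["?x", "b"])
    cond = Conjunction([at_a, Conjunction([at_b, Truth()])])
    cond.simplified().dump()            # Conjunction of the two atoms
    cond.negate().simplified().dump()   # Disjunction of the two negated atoms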
| 11,103 | 36.897611 | 86 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/tests/test_normalization.py |
from io import StringIO
import pddl
from pddl_to_prolog import Rule, PrologProgram
def test_normalization():
prog = PrologProgram()
prog.add_fact(pddl.Atom("at", ["foo", "bar"]))
prog.add_fact(pddl.Atom("truck", ["bollerwagen"]))
prog.add_fact(pddl.Atom("truck", ["segway"]))
prog.add_rule(Rule([pddl.Atom("truck", ["?X"])], pddl.Atom("at", ["?X", "?Y"])))
prog.add_rule(Rule([pddl.Atom("truck", ["X"]), pddl.Atom("location", ["?Y"])],
pddl.Atom("at", ["?X", "?Y"])))
prog.add_rule(Rule([pddl.Atom("truck", ["?X"]), pddl.Atom("location", ["?Y"])],
pddl.Atom("at", ["?X", "?X"])))
prog.add_rule(Rule([pddl.Atom("p", ["?Y", "?Z", "?Y", "?Z"])],
pddl.Atom("q", ["?Y", "?Y"])))
prog.add_rule(Rule([], pddl.Atom("foo", [])))
prog.add_rule(Rule([], pddl.Atom("bar", ["X"])))
prog.normalize()
output = StringIO()
prog.dump(file=output)
sorted_output = "\n".join(sorted(output.getvalue().splitlines()))
assert sorted_output == """\
Atom @object(bar).
Atom @object(bollerwagen).
Atom @object(foo).
Atom @object(segway).
Atom at(foo, bar).
Atom bar(X).
Atom foo().
Atom truck(bollerwagen).
Atom truck(segway).
none Atom at(?X, ?X@0) :- Atom truck(?X), Atom location(?Y), Atom =(?X, ?X@0).
none Atom at(?X, ?Y) :- Atom truck(?X), Atom @object(?Y).
none Atom at(?X, ?Y) :- Atom truck(X), Atom location(?Y), Atom @object(?X).
none Atom q(?Y, ?Y@0) :- Atom p(?Y, ?Z, ?Y, ?Z), Atom =(?Y, ?Y@0), Atom =(?Y, ?Y@1), Atom =(?Z, ?Z@2)."""
| 1,535 | 39.421053 | 105 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/tests/__init__.py | | 0 | 0 | 0 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/tests/test_scripts.py |
import os.path
import subprocess
import sys
DIR = os.path.dirname(os.path.abspath(__file__))
TRANSLATE_DIR = os.path.dirname(DIR)
REPO = os.path.abspath(os.path.join(DIR, "..", "..", ".."))
BENCHMARKS = os.path.join(REPO, "misc", "tests", "benchmarks")
DOMAIN = os.path.join(BENCHMARKS, "gripper", "domain.pddl")
PROBLEM = os.path.join(BENCHMARKS, "gripper", "prob01.pddl")
SCRIPTS = [
"build_model.py",
"graph.py",
"instantiate.py",
"invariant_finder.py",
"normalize.py",
"pddl_to_prolog.py",
"translate.py",
]
def test_scripts():
for script in SCRIPTS:
script = os.path.join(TRANSLATE_DIR, script)
folder, filename = os.path.split(script)
assert subprocess.check_call([sys.executable, filename, DOMAIN, PROBLEM], cwd=folder) == 0
| 790 | 29.423077 | 98 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/pddl_parser/lisp_parser.py |
__all__ = ["ParseError", "parse_nested_list"]
class ParseError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
# Basic functions for parsing PDDL (Lisp) files.
def parse_nested_list(input_file):
tokens = tokenize(input_file)
next_token = next(tokens)
if next_token != "(":
raise ParseError("Expected '(', got %s." % next_token)
result = list(parse_list_aux(tokens))
for tok in tokens: # Check that generator is exhausted.
raise ParseError("Unexpected token: %s." % tok)
return result
def tokenize(input):
for line in input:
line = line.split(";", 1)[0] # Strip comments.
try:
line.encode("ascii")
except UnicodeEncodeError:
raise ParseError("Non-ASCII character outside comment: %s" %
line[0:-1])
line = line.replace("(", " ( ").replace(")", " ) ").replace("?", " ?")
for token in line.split():
yield token.lower()
def parse_list_aux(tokenstream):
# Leading "(" has already been swallowed.
while True:
try:
token = next(tokenstream)
except StopIteration:
raise ParseError("Missing ')'")
if token == ")":
return
elif token == "(":
yield list(parse_list_aux(tokenstream))
else:
yield token
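# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original Fast Downward source):
# the parser accepts any iterable of lines (a file object or a plain list)
# and returns nested Python lists of lower-cased tokens.
if __name__ == "__main__":
    example = ["(define (domain gripper) ; comments are stripped",
               "  (:predicates (at ?x ?y)))"]
    print(parse_nested_list(example))
    # -> ['define', ['domain', 'gripper'], [':predicates', ['at', '?x', '?y']]]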
| 1,422 | 30.622222 | 78 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/pddl_parser/pddl_file.py |
import options
from . import lisp_parser
from . import parsing_functions
file_open = open
def parse_pddl_file(type, filename):
try:
# The builtin open function is shadowed by this module's open function.
# We use the Latin-1 encoding (which allows a superset of ASCII, of the
# Latin-* encodings and of UTF-8) to allow special characters in
# comments. In all other parts, we later validate that only ASCII is
# used.
return lisp_parser.parse_nested_list(file_open(filename,
encoding='ISO-8859-1'))
except OSError as e:
raise SystemExit("Error: Could not read file: %s\nReason: %s." %
(e.filename, e))
except lisp_parser.ParseError as e:
raise SystemExit("Error: Could not parse %s file: %s\nReason: %s." %
(type, filename, e))
def open(domain_filename=None, task_filename=None):
task_filename = task_filename or options.task
domain_filename = domain_filename or options.domain
domain_pddl = parse_pddl_file("domain", domain_filename)
task_pddl = parse_pddl_file("task", task_filename)
return parsing_functions.parse_task(domain_pddl, task_pddl)
| 1,255 | 35.941176 | 79 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/pddl_parser/__init__.py |
from .pddl_file import open
| 28 | 13.5 | 27 | py |
DAAISy | DAAISy-main/dependencies/FD/src/translate/pddl_parser/parsing_functions.py |
import sys
import graph
import pddl
def parse_typed_list(alist, only_variables=False,
constructor=pddl.TypedObject,
default_type="object"):
result = []
while alist:
try:
separator_position = alist.index("-")
except ValueError:
items = alist
_type = default_type
alist = []
else:
items = alist[:separator_position]
_type = alist[separator_position + 1]
alist = alist[separator_position + 2:]
for item in items:
assert not only_variables or item.startswith("?"), \
"Expected item to be a variable: %s in (%s)" % (
item, " ".join(items))
entry = constructor(item, _type)
result.append(entry)
return result
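# Illustrative note (not part of the original source): for example,
#     parse_typed_list(["?x", "?y", "-", "truck", "?z"])
# returns TypedObjects ?x: truck, ?y: truck and ?z: object; items after the
# last "-" separator (here only "?z") fall back to the default type "object".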
def set_supertypes(type_list):
# TODO: This is a two-stage construction, which is perhaps
# not a great idea. Might need more thought in the future.
type_name_to_type = {}
child_types = []
for type in type_list:
type.supertype_names = []
type_name_to_type[type.name] = type
if type.basetype_name:
child_types.append((type.name, type.basetype_name))
for (desc_name, anc_name) in graph.transitive_closure(child_types):
type_name_to_type[desc_name].supertype_names.append(anc_name)
def parse_predicate(alist):
name = alist[0]
arguments = parse_typed_list(alist[1:], only_variables=True)
return pddl.Predicate(name, arguments)
def parse_function(alist, type_name):
name = alist[0]
arguments = parse_typed_list(alist[1:])
return pddl.Function(name, arguments, type_name)
def parse_condition(alist, type_dict, predicate_dict):
condition = parse_condition_aux(alist, False, type_dict, predicate_dict)
return condition.uniquify_variables({}).simplified()
def parse_condition_aux(alist, negated, type_dict, predicate_dict):
"""Parse a PDDL condition. The condition is translated into NNF on the fly."""
tag = alist[0]
if tag in ("and", "or", "not", "imply"):
args = alist[1:]
if tag == "imply":
assert len(args) == 2
if tag == "not":
assert len(args) == 1
return parse_condition_aux(
args[0], not negated, type_dict, predicate_dict)
elif tag in ("forall", "exists"):
parameters = parse_typed_list(alist[1])
args = alist[2:]
assert len(args) == 1
else:
return parse_literal(alist, type_dict, predicate_dict, negated=negated)
if tag == "imply":
parts = [parse_condition_aux(
args[0], not negated, type_dict, predicate_dict),
parse_condition_aux(
args[1], negated, type_dict, predicate_dict)]
tag = "or"
else:
parts = [parse_condition_aux(part, negated, type_dict, predicate_dict)
for part in args]
if tag == "and" and not negated or tag == "or" and negated:
return pddl.Conjunction(parts)
elif tag == "or" and not negated or tag == "and" and negated:
return pddl.Disjunction(parts)
elif tag == "forall" and not negated or tag == "exists" and negated:
return pddl.UniversalCondition(parameters, parts)
elif tag == "exists" and not negated or tag == "forall" and negated:
return pddl.ExistentialCondition(parameters, parts)
def parse_literal(alist, type_dict, predicate_dict, negated=False):
if alist[0] == "not":
assert len(alist) == 2
alist = alist[1]
negated = not negated
pred_id, arity = _get_predicate_id_and_arity(
alist[0], type_dict, predicate_dict)
if arity != len(alist) - 1:
raise SystemExit("predicate used with wrong arity: (%s)"
% " ".join(alist))
if negated:
return pddl.NegatedAtom(pred_id, alist[1:])
else:
return pddl.Atom(pred_id, alist[1:])
SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH = False
def _get_predicate_id_and_arity(text, type_dict, predicate_dict):
global SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH
the_type = type_dict.get(text)
the_predicate = predicate_dict.get(text)
if the_type is None and the_predicate is None:
raise SystemExit("Undeclared predicate: %s" % text)
elif the_predicate is not None:
if the_type is not None and not SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH:
msg = ("Warning: name clash between type and predicate %r.\n"
"Interpreting as predicate in conditions.") % text
print(msg, file=sys.stderr)
SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH = True
return the_predicate.name, the_predicate.get_arity()
else:
assert the_type is not None
return the_type.get_predicate_name(), 1
def parse_effects(alist, result, type_dict, predicate_dict):
"""Parse a PDDL effect (any combination of simple, conjunctive, conditional, and universal)."""
tmp_effect = parse_effect(alist, type_dict, predicate_dict)
normalized = tmp_effect.normalize()
cost_eff, rest_effect = normalized.extract_cost()
add_effect(rest_effect, result)
if cost_eff:
return cost_eff.effect
else:
return None
def add_effect(tmp_effect, result):
"""tmp_effect has the following structure:
[ConjunctiveEffect] [UniversalEffect] [ConditionalEffect] SimpleEffect."""
if isinstance(tmp_effect, pddl.ConjunctiveEffect):
for effect in tmp_effect.effects:
add_effect(effect, result)
return
else:
parameters = []
condition = pddl.Truth()
if isinstance(tmp_effect, pddl.UniversalEffect):
parameters = tmp_effect.parameters
if isinstance(tmp_effect.effect, pddl.ConditionalEffect):
condition = tmp_effect.effect.condition
assert isinstance(tmp_effect.effect.effect, pddl.SimpleEffect)
effect = tmp_effect.effect.effect.effect
else:
assert isinstance(tmp_effect.effect, pddl.SimpleEffect)
effect = tmp_effect.effect.effect
elif isinstance(tmp_effect, pddl.ConditionalEffect):
condition = tmp_effect.condition
assert isinstance(tmp_effect.effect, pddl.SimpleEffect)
effect = tmp_effect.effect.effect
else:
assert isinstance(tmp_effect, pddl.SimpleEffect)
effect = tmp_effect.effect
assert isinstance(effect, pddl.Literal)
# Check for contradictory effects
condition = condition.simplified()
new_effect = pddl.Effect(parameters, condition, effect)
contradiction = pddl.Effect(parameters, condition, effect.negate())
if contradiction not in result:
result.append(new_effect)
else:
# We use add-after-delete semantics, keep positive effect
if isinstance(contradiction.literal, pddl.NegatedAtom):
result.remove(contradiction)
result.append(new_effect)
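# Editor's note: illustrative sketch only, showing the add-after-delete
# semantics implemented above: if an effect lists both (not (p a)) and
# (p a), only the positive (add) effect survives. The unary predicate is an
# assumption made for this example.
def _example_add_after_delete():
    predicate_dict = {
        "p": pddl.Predicate("p", [pddl.TypedObject("?x", "object")])}
    result = []
    parse_effects(["and", ["not", ["p", "a"]], ["p", "a"]],
                  result, {}, predicate_dict)
    # Expected: result holds a single pddl.Effect whose literal is the
    # positive atom (p a).
    return result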
def parse_effect(alist, type_dict, predicate_dict):
tag = alist[0]
if tag == "and":
return pddl.ConjunctiveEffect(
[parse_effect(eff, type_dict, predicate_dict) for eff in alist[1:]])
elif tag == "forall":
assert len(alist) == 3
parameters = parse_typed_list(alist[1])
effect = parse_effect(alist[2], type_dict, predicate_dict)
return pddl.UniversalEffect(parameters, effect)
elif tag == "when":
assert len(alist) == 3
condition = parse_condition(
alist[1], type_dict, predicate_dict)
effect = parse_effect(alist[2], type_dict, predicate_dict)
return pddl.ConditionalEffect(condition, effect)
elif tag == "increase":
assert len(alist) == 3
assert alist[1] == ['total-cost']
assignment = parse_assignment(alist)
return pddl.CostEffect(assignment)
else:
# We pass in {} instead of type_dict here because types must
# be static predicates, so cannot be the target of an effect.
return pddl.SimpleEffect(parse_literal(alist, {}, predicate_dict))
def parse_expression(exp):
if isinstance(exp, list):
functionsymbol = exp[0]
return pddl.PrimitiveNumericExpression(functionsymbol, exp[1:])
elif exp.replace(".", "").isdigit():
return pddl.NumericConstant(float(exp))
elif exp[0] == "-":
raise ValueError("Negative numbers are not supported")
else:
return pddl.PrimitiveNumericExpression(exp, [])
def parse_assignment(alist):
assert len(alist) == 3
op = alist[0]
head = parse_expression(alist[1])
exp = parse_expression(alist[2])
if op == "=":
return pddl.Assign(head, exp)
elif op == "increase":
return pddl.Increase(head, exp)
else:
assert False, "Assignment operator not supported."
def parse_action(alist, type_dict, predicate_dict):
iterator = iter(alist)
action_tag = next(iterator)
assert action_tag == ":action"
name = next(iterator)
parameters_tag_opt = next(iterator)
if parameters_tag_opt == ":parameters":
parameters = parse_typed_list(next(iterator),
only_variables=True)
precondition_tag_opt = next(iterator)
else:
parameters = []
precondition_tag_opt = parameters_tag_opt
if precondition_tag_opt == ":precondition":
precondition_list = next(iterator)
if not precondition_list:
# Note that :precondition () is allowed in PDDL.
precondition = pddl.Conjunction([])
else:
precondition = parse_condition(
precondition_list, type_dict, predicate_dict)
effect_tag = next(iterator)
else:
precondition = pddl.Conjunction([])
effect_tag = precondition_tag_opt
assert effect_tag == ":effect"
effect_list = next(iterator)
eff = []
if effect_list:
try:
cost = parse_effects(
effect_list, eff, type_dict, predicate_dict)
except ValueError as e:
raise SystemExit("Error in Action %s\nReason: %s." % (name, e))
for rest in iterator:
assert False, rest
if eff:
return pddl.Action(name, parameters, len(parameters),
precondition, eff, cost)
else:
return None
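# Editor's note: illustrative sketch only, showing the token layout that
# parse_action expects once the lisp parser has turned an (:action ...) block
# into nested lists. The single-predicate dictionary is an assumption made
# for this example.
def _example_parse_action():
    predicate_dict = {
        "clear": pddl.Predicate("clear", [pddl.TypedObject("?x", "object")])}
    alist = [":action", "wipe",
             ":parameters", ["?x"],
             ":precondition", ["clear", "?x"],
             ":effect", ["not", ["clear", "?x"]]]
    # Expected: a pddl.Action named "wipe" with one parameter, the atom
    # (clear ?x) as precondition and a single delete effect.
    return parse_action(alist, {}, predicate_dict)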
def parse_axiom(alist, type_dict, predicate_dict):
assert len(alist) == 3
assert alist[0] == ":derived"
predicate = parse_predicate(alist[1])
condition = parse_condition(
alist[2], type_dict, predicate_dict)
return pddl.Axiom(predicate.name, predicate.arguments,
len(predicate.arguments), condition)
def parse_task(domain_pddl, task_pddl):
domain_name, domain_requirements, types, type_dict, constants, predicates, predicate_dict, functions, actions, axioms \
= parse_domain_pddl(domain_pddl)
task_name, task_domain_name, task_requirements, objects, init, goal, use_metric = parse_task_pddl(task_pddl, type_dict, predicate_dict)
assert domain_name == task_domain_name
requirements = pddl.Requirements(sorted(set(
domain_requirements.requirements +
task_requirements.requirements)))
objects = constants + objects
check_for_duplicates(
[o.name for o in objects],
errmsg="error: duplicate object %r",
finalmsg="please check :constants and :objects definitions")
init += [pddl.Atom("=", (obj.name, obj.name)) for obj in objects]
return pddl.Task(
domain_name, task_name, requirements, types, objects,
predicates, functions, init, goal, actions, axioms, use_metric)
def parse_domain_pddl(domain_pddl):
iterator = iter(domain_pddl)
define_tag = next(iterator)
assert define_tag == "define"
domain_line = next(iterator)
assert domain_line[0] == "domain" and len(domain_line) == 2
yield domain_line[1]
## We allow an arbitrary order of the requirement, types, constants,
## predicates and functions specification. The PDDL BNF is more strict on
## this, so we print a warning if it is violated.
requirements = pddl.Requirements([":strips"])
the_types = [pddl.Type("object")]
constants, the_predicates, the_functions = [], [], []
correct_order = [":requirements", ":types", ":constants", ":predicates",
":functions"]
seen_fields = []
first_action = None
for opt in iterator:
field = opt[0]
if field not in correct_order:
first_action = opt
break
if field in seen_fields:
raise SystemExit("Error in domain specification\n" +
"Reason: two '%s' specifications." % field)
if (seen_fields and
correct_order.index(seen_fields[-1]) > correct_order.index(field)):
msg = "\nWarning: %s specification not allowed here (cf. PDDL BNF)" % field
print(msg, file=sys.stderr)
seen_fields.append(field)
if field == ":requirements":
requirements = pddl.Requirements(opt[1:])
elif field == ":types":
the_types.extend(parse_typed_list(
opt[1:], constructor=pddl.Type))
elif field == ":constants":
constants = parse_typed_list(opt[1:])
elif field == ":predicates":
the_predicates = [parse_predicate(entry)
for entry in opt[1:]]
the_predicates += [pddl.Predicate("=", [
pddl.TypedObject("?x", "object"),
pddl.TypedObject("?y", "object")])]
elif field == ":functions":
the_functions = parse_typed_list(
opt[1:],
constructor=parse_function,
default_type="number")
set_supertypes(the_types)
yield requirements
yield the_types
type_dict = {type.name: type for type in the_types}
yield type_dict
yield constants
yield the_predicates
predicate_dict = {pred.name: pred for pred in the_predicates}
yield predicate_dict
yield the_functions
entries = []
if first_action is not None:
entries.append(first_action)
entries.extend(iterator)
the_axioms = []
the_actions = []
for entry in entries:
if entry[0] == ":derived":
axiom = parse_axiom(entry, type_dict, predicate_dict)
the_axioms.append(axiom)
else:
action = parse_action(entry, type_dict, predicate_dict)
if action is not None:
the_actions.append(action)
yield the_actions
yield the_axioms
def parse_task_pddl(task_pddl, type_dict, predicate_dict):
iterator = iter(task_pddl)
define_tag = next(iterator)
assert define_tag == "define"
problem_line = next(iterator)
assert problem_line[0] == "problem" and len(problem_line) == 2
yield problem_line[1]
domain_line = next(iterator)
assert domain_line[0] == ":domain" and len(domain_line) == 2
yield domain_line[1]
requirements_opt = next(iterator)
if requirements_opt[0] == ":requirements":
requirements = requirements_opt[1:]
objects_opt = next(iterator)
else:
requirements = []
objects_opt = requirements_opt
yield pddl.Requirements(requirements)
if objects_opt[0] == ":objects":
yield parse_typed_list(objects_opt[1:])
init = next(iterator)
else:
yield []
init = objects_opt
assert init[0] == ":init"
initial = []
initial_true = set()
initial_false = set()
initial_assignments = dict()
for fact in init[1:]:
if fact[0] == "=":
try:
assignment = parse_assignment(fact)
except ValueError as e:
raise SystemExit("Error in initial state specification\n" +
"Reason: %s." % e)
if not isinstance(assignment.expression,
pddl.NumericConstant):
raise SystemExit("Illegal assignment in initial state " +
"specification:\n%s" % assignment)
if assignment.fluent in initial_assignments:
prev = initial_assignments[assignment.fluent]
if assignment.expression == prev.expression:
print("Warning: %s is specified twice" % assignment,
"in initial state specification")
else:
raise SystemExit("Error in initial state specification\n" +
"Reason: conflicting assignment for " +
"%s." % assignment.fluent)
else:
initial_assignments[assignment.fluent] = assignment
initial.append(assignment)
elif fact[0] == "not":
atom = pddl.Atom(fact[1][0], fact[1][1:])
check_atom_consistency(atom, initial_false, initial_true, False)
initial_false.add(atom)
else:
atom = pddl.Atom(fact[0], fact[1:])
check_atom_consistency(atom, initial_true, initial_false)
initial_true.add(atom)
initial.extend(initial_true)
yield initial
goal = next(iterator)
assert goal[0] == ":goal" and len(goal) == 2
yield parse_condition(goal[1], type_dict, predicate_dict)
use_metric = False
for entry in iterator:
if entry[0] == ":metric":
if entry[1] == "minimize" and entry[2][0] == "total-cost":
use_metric = True
else:
assert False, "Unknown metric."
yield use_metric
for entry in iterator:
assert False, entry
def check_atom_consistency(atom, same_truth_value, other_truth_value, atom_is_true=True):
if atom in other_truth_value:
raise SystemExit("Error in initial state specification\n" +
"Reason: %s is true and false." % atom)
if atom in same_truth_value:
if not atom_is_true:
atom = atom.negate()
print("Warning: %s is specified twice in initial state specification" % atom)
def check_for_duplicates(elements, errmsg, finalmsg):
seen = set()
errors = []
for element in elements:
if element in seen:
errors.append(errmsg % element)
else:
seen.add(element)
if errors:
raise SystemExit("\n".join(errors) + "\n" + finalmsg)
| 18,654 | 36.38477 | 139 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/main.py
|
import logging
import os
import sys
from . import aliases
from . import arguments
from . import cleanup
from . import run_components
from . import __version__
def main():
args = arguments.parse_args()
logging.basicConfig(level=getattr(logging, args.log_level.upper()),
format="%(levelname)-8s %(message)s",
stream=sys.stdout)
logging.debug("processed args: %s" % args)
if args.version:
print(__version__)
sys.exit()
if args.show_aliases:
aliases.show_aliases()
sys.exit()
if args.cleanup:
cleanup.cleanup_temporary_files(args)
sys.exit()
exitcode = None
for component in args.components:
if component == "translate":
(exitcode, continue_execution) = run_components.run_translate(args)
elif component == "search":
(exitcode, continue_execution) = run_components.run_search(args)
if not args.keep_sas_file:
print("Remove intermediate file {}".format(args.sas_file))
os.remove(args.sas_file)
elif component == "validate":
(exitcode, continue_execution) = run_components.run_validate(args)
else:
assert False, "Error: unhandled component: {}".format(component)
print("{component} exit code: {exitcode}".format(**locals()))
print()
if not continue_execution:
print("Driver aborting after {}".format(component))
break
# Exit with the exit code of the last component that ran successfully.
# This means for example that if no plan was found, validate is not run,
# and therefore the return code is that of the search.
sys.exit(exitcode)
if __name__ == "__main__":
main()
| 1,793 | 30.473684 | 79 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/tests.py
|
"""
Test module for Fast Downward driver script. Run with
py.test driver/tests.py
"""
import os
import subprocess
import pytest
from .aliases import ALIASES, PORTFOLIOS
from .arguments import EXAMPLES
from . import limits
from . import returncodes
from .util import REPO_ROOT_DIR, find_domain_filename
def translate():
"""Create translated task."""
cmd = ["./fast-downward.py", "--translate",
"misc/tests/benchmarks/gripper/prob01.pddl"]
subprocess.check_call(cmd, cwd=REPO_ROOT_DIR)
def cleanup():
subprocess.check_call(["./fast-downward.py", "--cleanup"],
cwd=REPO_ROOT_DIR)
def run_driver(cmd):
cleanup()
translate()
return subprocess.check_call(cmd, cwd=REPO_ROOT_DIR)
def test_commandline_args():
for description, cmd in EXAMPLES:
cmd = [x.strip('"') for x in cmd]
run_driver(cmd)
def test_aliases():
for alias, config in ALIASES.items():
cmd = ["./fast-downward.py", "--alias", alias, "output.sas"]
run_driver(cmd)
def test_show_aliases():
run_driver(["./fast-downward.py", "--show-aliases"])
def test_portfolios():
for name, portfolio in PORTFOLIOS.items():
cmd = ["./fast-downward.py", "--portfolio", portfolio,
"--search-time-limit", "30m", "output.sas"]
run_driver(cmd)
def test_hard_time_limit():
def preexec_fn():
limits.set_time_limit(10)
cmd = [
"./fast-downward.py", "--translate", "--translate-time-limit",
"10s", "misc/tests/benchmarks/gripper/prob01.pddl"]
subprocess.check_call(cmd, preexec_fn=preexec_fn, cwd=REPO_ROOT_DIR)
cmd = [
"./fast-downward.py", "--translate", "--translate-time-limit",
"20s", "misc/tests/benchmarks/gripper/prob01.pddl"]
with pytest.raises(subprocess.CalledProcessError) as exception_info:
subprocess.check_call(cmd, preexec_fn=preexec_fn, cwd=REPO_ROOT_DIR)
assert exception_info.value.returncode == returncodes.DRIVER_INPUT_ERROR
def test_automatic_domain_file_name_computation():
benchmarks_dir = os.path.join(REPO_ROOT_DIR, "benchmarks")
for dirpath, dirnames, filenames in os.walk(benchmarks_dir):
for filename in filenames:
if "domain" not in filename:
assert find_domain_filename(os.path.join(dirpath, filename))
| 2,354 | 27.373494 | 76 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/arguments.py
|
import argparse
import os.path
import re
import sys
from . import aliases
from . import returncodes
from . import util
DESCRIPTION = """Fast Downward driver script.
Input files can be either a PDDL problem file (with an optional PDDL domain
file), in which case the driver runs both planner components (translate and
search), or a SAS+ translator output file, in which case the driver runs just
the search component. You can override this default behaviour by selecting
components manually with the flags below. The first component to be run
determines the required input files:
--translate: [DOMAIN] PROBLEM
--search: TRANSLATE_OUTPUT
Arguments given before the specified input files are interpreted by the driver
script ("driver options"). Arguments given after the input files are passed on
to the planner components ("component options"). In exceptional cases where no
input files are needed, use "--" to separate driver from component options. In
even more exceptional cases where input files begin with "--", use "--" to
separate driver options from input files and also to separate input files from
component options.
By default, component options are passed to the search component. Use
"--translate-options" or "--search-options" within the component options to
override the default for the following options, until overridden again. (See
below for examples.)"""
LIMITS_HELP = """You can limit the time or memory for individual components
or the whole planner. The effective limit for each component is the minimum
of the component, overall, external soft, and external hard limits.
Limits are given in seconds or MiB. You can change the unit by using the
suffixes s, m, h and K, M, G.
By default, all limits are inactive. Only external limits (e.g. set with
ulimit) are respected.
Portfolios require that a time limit is in effect. Portfolio configurations
that exceed their time or memory limit are aborted, and the next
configuration is run."""
EXAMPLE_PORTFOLIO = os.path.relpath(
aliases.PORTFOLIOS["seq-opt-fdss-1"], start=util.REPO_ROOT_DIR)
EXAMPLES = [
("Translate and find a plan with A* + LM-Cut:",
["./fast-downward.py", "misc/tests/benchmarks/gripper/prob01.pddl",
"--search", '"astar(lmcut())"']),
("Translate and run no search:",
["./fast-downward.py", "--translate",
"misc/tests/benchmarks/gripper/prob01.pddl"]),
("Run predefined configuration (LAMA-2011) on translated task:",
["./fast-downward.py", "--alias", "seq-sat-lama-2011", "output.sas"]),
("Run a portfolio on a translated task:",
["./fast-downward.py", "--portfolio", EXAMPLE_PORTFOLIO,
"--search-time-limit", "30m", "output.sas"]),
("Run the search component in debug mode (with assertions enabled) "
"and validate the resulting plan:",
["./fast-downward.py", "--debug", "output.sas", "--search", '"astar(ipdb())"']),
("Pass options to translator and search components:",
["./fast-downward.py", "misc/tests/benchmarks/gripper/prob01.pddl",
"--translate-options", "--full-encoding",
"--search-options", "--search", '"astar(lmcut())"']),
("Find a plan and validate it:",
["./fast-downward.py", "--validate",
"misc/tests/benchmarks/gripper/prob01.pddl",
"--search", '"astar(cegar())"']),
]
EPILOG = """component options:
--translate-options OPTION1 OPTION2 ...
--search-options OPTION1 OPTION2 ...
pass OPTION1 OPTION2 ... to specified planner component
(default: pass component options to search)
Examples:
%s
""" % "\n\n".join("%s\n%s" % (desc, " ".join(cmd)) for desc, cmd in EXAMPLES)
COMPONENTS_PLUS_OVERALL = ["translate", "search", "validate", "overall"]
DEFAULT_SAS_FILE = "output.sas"
"""
Function to emulate the behavior of ArgumentParser.error, but with our
custom exit codes instead of 2.
"""
def print_usage_and_exit_with_driver_input_error(parser, msg):
parser.print_usage()
returncodes.exit_with_driver_input_error("{}: error: {}".format(os.path.basename(sys.argv[0]), msg))
class RawHelpFormatter(argparse.HelpFormatter):
"""Preserve newlines and spacing."""
def _fill_text(self, text, width, indent):
return ''.join([indent + line for line in text.splitlines(True)])
def _format_args(self, action, default_metavar):
"""Show explicit help for remaining args instead of "..."."""
if action.nargs == argparse.REMAINDER:
return "INPUT_FILE1 [INPUT_FILE2] [COMPONENT_OPTION ...]"
else:
return argparse.HelpFormatter._format_args(self, action, default_metavar)
def _rindex(seq, element):
"""Like list.index, but gives the index of the *last* occurrence."""
seq = list(reversed(seq))
reversed_index = seq.index(element)
return len(seq) - 1 - reversed_index
def _split_off_filenames(planner_args):
"""Given the list of arguments to be passed on to the planner
components, split it into a prefix of filenames and a suffix of
options. Returns a pair (filenames, options).
If a "--" separator is present, the last such separator serves as
the border between filenames and options. The separator itself is
not returned. (This implies that "--" can be a filename, but never
an option to a planner component.)
If no such separator is present, the first argument that begins
with "-" and consists of at least two characters starts the list
of options, and all previous arguments are filenames."""
if "--" in planner_args:
separator_pos = _rindex(planner_args, "--")
num_filenames = separator_pos
del planner_args[separator_pos]
else:
num_filenames = 0
for arg in planner_args:
# We treat "-" by itself as a filename because by common
# convention it denotes stdin or stdout, and we might want
# to support this later.
if arg.startswith("-") and arg != "-":
break
num_filenames += 1
return planner_args[:num_filenames], planner_args[num_filenames:]
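# Editor's note: illustrative sketch only, spelling out the two splitting
# rules described in the docstring above.
def _example_split_off_filenames():
    # Without "--", the first token that looks like an option starts the
    # option suffix:
    #   (["domain.pddl", "task.pddl"], ["--search", "astar(lmcut())"])
    without_separator = _split_off_filenames(
        ["domain.pddl", "task.pddl", "--search", "astar(lmcut())"])
    # With "--", everything before the (last) separator counts as a filename,
    # so here the filename list is empty: ([], ["--search", "astar(lmcut())"])
    with_separator = _split_off_filenames(
        ["--", "--search", "astar(lmcut())"])
    return without_separator, with_separator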
def _split_planner_args(parser, args):
"""Partition args.planner_args, the list of arguments for the
planner components, into args.filenames, args.translate_options
and args.search_options. Modifies args directly and removes the original
args.planner_args list."""
args.filenames, options = _split_off_filenames(args.planner_args)
args.translate_options = []
args.search_options = []
curr_options = args.search_options
for option in options:
if option == "--translate-options":
curr_options = args.translate_options
elif option == "--search-options":
curr_options = args.search_options
else:
curr_options.append(option)
def _check_mutex_args(parser, args, required=False):
for pos, (name1, is_specified1) in enumerate(args):
for name2, is_specified2 in args[pos + 1:]:
if is_specified1 and is_specified2:
print_usage_and_exit_with_driver_input_error(
parser, "cannot combine %s with %s" % (name1, name2))
if required and not any(is_specified for _, is_specified in args):
print_usage_and_exit_with_driver_input_error(
parser, "exactly one of {%s} has to be specified" %
", ".join(name for name, _ in args))
def _looks_like_search_input(filename):
with open(filename) as input_file:
first_line = next(input_file, "").rstrip()
return first_line == "begin_version"
def _set_components_automatically(parser, args):
"""Guess which planner components to run based on the specified
filenames and set args.components accordingly. Currently
implements some simple heuristics:
1. If there is exactly one input file and it looks like a
Fast-Downward-generated file, run search only.
2. Otherwise, run all components."""
if len(args.filenames) == 1 and _looks_like_search_input(args.filenames[0]):
args.components = ["search"]
else:
args.components = ["translate", "search"]
def _set_components_and_inputs(parser, args):
"""Set args.components to the planner components to be run and set
args.translate_inputs and args.search_input to the correct input
filenames.
Rules:
1. If any --run-xxx option is specified, then the union
of the specified components is run.
2. If nothing is specified, use automatic rules. See
separate function."""
args.components = []
if args.translate or args.run_all:
args.components.append("translate")
if args.search or args.run_all:
args.components.append("search")
if not args.components:
_set_components_automatically(parser, args)
# We implicitly activate validation in debug mode. However, for
# validation we need the PDDL input files and a plan, therefore both
# components must be active.
if args.validate or (args.debug and len(args.components) == 2):
args.components.append("validate")
args.translate_inputs = []
assert args.components
first = args.components[0]
num_files = len(args.filenames)
# When passing --help to any of the components (or -h to the
# translator), we don't require input filenames and silently
# swallow any that are provided. This is undocumented to avoid
# cluttering the driver's --help output.
if first == "translate":
if "--help" in args.translate_options or "-h" in args.translate_options:
args.translate_inputs = []
elif num_files == 1:
task_file, = args.filenames
domain_file = util.find_domain_filename(task_file)
args.translate_inputs = [domain_file, task_file]
elif num_files == 2:
args.translate_inputs = args.filenames
else:
print_usage_and_exit_with_driver_input_error(
parser, "translator needs one or two input files")
elif first == "search":
if "--help" in args.search_options:
args.search_input = None
elif num_files == 1:
args.search_input, = args.filenames
else:
print_usage_and_exit_with_driver_input_error(
parser, "search needs exactly one input file")
else:
assert False, first
def _set_translator_output_options(parser, args):
if any("--sas-file" in opt for opt in args.translate_options):
print_usage_and_exit_with_driver_input_error(
parser, "Cannot pass the \"--sas-file\" option to translate.py from the "
"fast-downward.py script. Pass it directly to fast-downward.py instead.")
args.search_input = args.sas_file
args.translate_options += ["--sas-file", args.search_input]
def _get_time_limit_in_seconds(limit, parser):
match = re.match(r"^(\d+)(s|m|h)?$", limit, flags=re.I)
if not match:
print_usage_and_exit_with_driver_input_error(parser, "malformed time limit parameter: {}".format(limit))
time = int(match.group(1))
suffix = match.group(2)
if suffix is not None:
suffix = suffix.lower()
if suffix == "m":
time *= 60
elif suffix == "h":
time *= 3600
return time
def _get_memory_limit_in_bytes(limit, parser):
match = re.match(r"^(\d+)(k|m|g)?$", limit, flags=re.I)
if not match:
print_usage_and_exit_with_driver_input_error(parser, "malformed memory limit parameter: {}".format(limit))
memory = int(match.group(1))
suffix = match.group(2)
if suffix is not None:
suffix = suffix.lower()
if suffix == "k":
memory *= 1024
elif suffix is None or suffix == "m":
memory *= 1024 * 1024
elif suffix == "g":
memory *= 1024 * 1024 * 1024
return memory
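# Editor's note: illustrative sketch only. The parser argument is consulted
# solely on malformed input, so passing None is safe for the well-formed
# limit strings used in this example.
def _example_limit_parsing():
    seconds = _get_time_limit_in_seconds("90m", None)   # expected: 5400
    num_bytes = _get_memory_limit_in_bytes("2G", None)  # expected: 2 * 1024**3
    return seconds, num_bytes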
def set_time_limit_in_seconds(parser, args, component):
param = component + "_time_limit"
limit = getattr(args, param)
if limit is not None:
setattr(args, param, _get_time_limit_in_seconds(limit, parser))
def set_memory_limit_in_bytes(parser, args, component):
param = component + "_memory_limit"
limit = getattr(args, param)
if limit is not None:
setattr(args, param, _get_memory_limit_in_bytes(limit, parser))
def _convert_limits_to_ints(parser, args):
for component in COMPONENTS_PLUS_OVERALL:
set_time_limit_in_seconds(parser, args, component)
set_memory_limit_in_bytes(parser, args, component)
def parse_args():
parser = argparse.ArgumentParser(
description=DESCRIPTION, epilog=EPILOG,
formatter_class=RawHelpFormatter,
add_help=False)
help_options = parser.add_argument_group(
title=("driver options that show information and exit "
"(don't run planner)"))
# We manually add the help option because we want to control
# how it is grouped in the output.
help_options.add_argument(
"-h", "--help",
action="help", default=argparse.SUPPRESS,
help="show this help message and exit")
help_options.add_argument(
"-v", "--version", action="store_true",
help="print version number and exit")
help_options.add_argument(
"--show-aliases", action="store_true",
help="show the known aliases (see --alias) and exit")
components = parser.add_argument_group(
title=("driver options selecting the planner components to be run\n"
"(may select several; default: auto-select based on input file(s))"))
components.add_argument(
"--run-all", action="store_true",
help="run all components of the planner")
components.add_argument(
"--translate", action="store_true",
help="run translator component")
components.add_argument(
"--search", action="store_true",
help="run search component")
limits = parser.add_argument_group(
title="time and memory limits", description=LIMITS_HELP)
for component in COMPONENTS_PLUS_OVERALL:
limits.add_argument("--{}-time-limit".format(component))
limits.add_argument("--{}-memory-limit".format(component))
driver_other = parser.add_argument_group(
title="other driver options")
driver_other.add_argument(
"--alias",
help="run a config with an alias (e.g. seq-sat-lama-2011)")
driver_other.add_argument(
"--build",
help="BUILD can be a predefined build name like release "
"(default) and debug, a custom build name, or the path to "
"a directory holding the planner binaries. The driver "
"first looks for the planner binaries under 'BUILD'. If "
"this path does not exist, it tries the directory "
"'<repo>/builds/BUILD/bin', where the build script creates "
"them by default.")
driver_other.add_argument(
"--debug", action="store_true",
help="alias for --build=debug --validate")
driver_other.add_argument(
"--validate", action="store_true",
help='validate plans (implied by --debug); needs "validate" (VAL) on PATH')
driver_other.add_argument(
"--log-level", choices=["debug", "info", "warning"],
default="info",
help="set log level (most verbose: debug; least verbose: warning; default: %(default)s)")
driver_other.add_argument(
"--plan-file", metavar="FILE", default="sas_plan",
help="write plan(s) to FILE (default: %(default)s; anytime configurations append .1, .2, ...)")
driver_other.add_argument(
"--sas-file", metavar="FILE",
help="intermediate file for storing the translator output "
"(implies --keep-sas-file, default: {})".format(DEFAULT_SAS_FILE))
driver_other.add_argument(
"--keep-sas-file", action="store_true",
help="keep translator output file (implied by --sas-file, default: "
"delete file if translator and search component are active)")
driver_other.add_argument(
"--portfolio", metavar="FILE",
help="run a portfolio specified in FILE")
driver_other.add_argument(
"--portfolio-bound", metavar="VALUE", default=None, type=int,
help="exclusive bound on plan costs (only supported for satisficing portfolios)")
driver_other.add_argument(
"--portfolio-single-plan", action="store_true",
help="abort satisficing portfolio after finding the first plan")
driver_other.add_argument(
"--cleanup", action="store_true",
help="clean up temporary files (translator output and plan files) and exit")
parser.add_argument(
"planner_args", nargs=argparse.REMAINDER,
help="file names and options passed on to planner components")
# Using argparse.REMAINDER relies on the fact that the first
# argument that doesn't belong to the driver doesn't look like an
# option, i.e., doesn't start with "-". This is usually satisfied
# because the argument is a filename; in exceptional cases, "--"
# can be used as an explicit separator. For example, "./fast-downward.py --
# --help" passes "--help" to the search code.
args = parser.parse_args()
if args.sas_file:
args.keep_sas_file = True
else:
args.sas_file = DEFAULT_SAS_FILE
if args.build and args.debug:
print_usage_and_exit_with_driver_input_error(
parser, "The option --debug is an alias for --build=debug "
"--validate. Do no specify both --debug and --build.")
if not args.build:
if args.debug:
args.build = "debug"
else:
args.build = "release"
_split_planner_args(parser, args)
_check_mutex_args(parser, [
("--alias", args.alias is not None),
("--portfolio", args.portfolio is not None),
("options for search component", bool(args.search_options))])
_set_translator_output_options(parser, args)
_convert_limits_to_ints(parser, args)
if args.alias:
try:
aliases.set_options_for_alias(args.alias, args)
except KeyError:
print_usage_and_exit_with_driver_input_error(
parser, "unknown alias: %r" % args.alias)
if args.portfolio_bound is not None and not args.portfolio:
print_usage_and_exit_with_driver_input_error(
parser, "--portfolio-bound may only be used for portfolios.")
if args.portfolio_bound is not None and args.portfolio_bound < 0:
print_usage_and_exit_with_driver_input_error(
parser, "--portfolio-bound must not be negative.")
if args.portfolio_single_plan and not args.portfolio:
print_usage_and_exit_with_driver_input_error(
parser, "--portfolio-single_plan may only be used for portfolios.")
if not args.version and not args.show_aliases and not args.cleanup:
_set_components_and_inputs(parser, args)
if "translate" not in args.components or "search" not in args.components:
args.keep_sas_file = True
return args
| 19,119 | 38.66805 | 114 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/limits.py
|
from . import returncodes
from . import util
try:
import resource
except ImportError:
resource = None
import sys
"""
Notes on limits: On Windows, the resource module does not exist and hence we
cannot enforce any limits there. Furthermore, while the module exists on macOS,
memory limits are not enforced by that OS and hence we do not support imposing
memory limits there.
"""
CANNOT_LIMIT_MEMORY_MSG = "Setting memory limits is not supported on your platform."
CANNOT_LIMIT_TIME_MSG = "Setting time limits is not supported on your platform."
def can_set_time_limit():
return resource is not None
def can_set_memory_limit():
return resource is not None and sys.platform != "darwin"
def set_time_limit(time_limit):
if time_limit is None:
return
if not can_set_time_limit():
raise NotImplementedError(CANNOT_LIMIT_TIME_MSG)
# Reaching the soft time limit leads to a (catchable) SIGXCPU signal,
# which we catch to gracefully exit. Reaching the hard limit leads to
# a SIGKILL, which is unpreventable. We set a hard limit one second
# higher than the soft limit to make sure we abort also in cases where
# the graceful shutdown doesn't work, or doesn't work reasonably
# quickly.
try:
resource.setrlimit(resource.RLIMIT_CPU, (time_limit, time_limit + 1))
except ValueError:
# If the previous call failed, we try again without the extra second.
# In particular, this is necessary if there already exists an external
# hard limit equal to time_limit.
resource.setrlimit(resource.RLIMIT_CPU, (time_limit, time_limit))
def set_memory_limit(memory):
"""*memory* must be given in bytes or None."""
if memory is None:
return
if not can_set_memory_limit():
raise NotImplementedError(CANNOT_LIMIT_MEMORY_MSG)
resource.setrlimit(resource.RLIMIT_AS, (memory, memory))
def convert_to_mb(num_bytes):
return num_bytes / (1024 * 1024)
def get_memory_limit(component_limit, overall_limit):
"""
Return the minimum of the component and overall limits or None if neither is set.
"""
limits = [limit for limit in [component_limit, overall_limit] if limit is not None]
return min(limits) if limits else None
def get_time_limit(component_limit, overall_limit):
"""
Return the minimum time limit imposed by the component and overall limits.
"""
limit = component_limit
if overall_limit is not None:
try:
elapsed_time = util.get_elapsed_time()
except NotImplementedError:
returncodes.exit_with_driver_unsupported_error(CANNOT_LIMIT_TIME_MSG)
else:
remaining_time = max(0, overall_limit - elapsed_time)
if limit is None or remaining_time < limit:
limit = remaining_time
return limit
| 2,854 | 32.197674 | 87 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/cleanup.py
|
from itertools import count
import os
def _try_remove(f):
try:
os.remove(f)
except OSError:
return False
return True
def cleanup_temporary_files(args):
_try_remove(args.sas_file)
_try_remove(args.plan_file)
for i in count(1):
if not _try_remove("%s.%s" % (args.plan_file, i)):
break
| 346 | 18.277778 | 58 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/returncodes.py
|
import sys
"""
We document Fast Downward exit codes at
http://www.fast-downward.org/ExitCodes. Please update this documentation when
making changes below.
"""
SUCCESS = 0
SEARCH_PLAN_FOUND_AND_OUT_OF_MEMORY = 1
SEARCH_PLAN_FOUND_AND_OUT_OF_TIME = 2
SEARCH_PLAN_FOUND_AND_OUT_OF_MEMORY_AND_TIME = 3
TRANSLATE_UNSOLVABLE = 10
SEARCH_UNSOLVABLE = 11
SEARCH_UNSOLVED_INCOMPLETE = 12
TRANSLATE_OUT_OF_MEMORY = 20
TRANSLATE_OUT_OF_TIME = 21
SEARCH_OUT_OF_MEMORY = 22
SEARCH_OUT_OF_TIME = 23
SEARCH_OUT_OF_MEMORY_AND_TIME = 24
TRANSLATE_CRITICAL_ERROR = 30
TRANSLATE_INPUT_ERROR = 31
SEARCH_CRITICAL_ERROR = 32
SEARCH_INPUT_ERROR = 33
SEARCH_UNSUPPORTED = 34
DRIVER_CRITICAL_ERROR = 35
DRIVER_INPUT_ERROR = 36
DRIVER_UNSUPPORTED = 37
def print_stderr(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def is_unrecoverable(exitcode):
# Exit codes in the range from 30 to 39 represent unrecoverable failures.
return 30 <= exitcode < 40
def exit_with_driver_critical_error(msg):
print_stderr(msg)
sys.exit(DRIVER_CRITICAL_ERROR)
def exit_with_driver_input_error(msg):
print_stderr(msg)
sys.exit(DRIVER_INPUT_ERROR)
def exit_with_driver_unsupported_error(msg):
print_stderr(msg)
sys.exit(DRIVER_UNSUPPORTED)
def generate_portfolio_exitcode(exitcodes):
"""A portfolio's exitcode is determined as follows:
There is exactly one type of unexpected exit code -> use it.
There are multiple types of unexpected exit codes -> SEARCH_CRITICAL_ERROR.
[..., SUCCESS, ...] -> SUCCESS
[..., SEARCH_UNSOLVABLE, ...] -> SEARCH_UNSOLVABLE
[..., SEARCH_UNSOLVED_INCOMPLETE, ...] -> SEARCH_UNSOLVED_INCOMPLETE
[..., SEARCH_OUT_OF_MEMORY, ..., SEARCH_OUT_OF_TIME, ...] -> SEARCH_OUT_OF_MEMORY_AND_TIME
[..., SEARCH_OUT_OF_TIME, ...] -> SEARCH_OUT_OF_TIME
[..., SEARCH_OUT_OF_MEMORY, ...] -> SEARCH_OUT_OF_MEMORY
"""
print("Exit codes: {}".format(exitcodes))
exitcodes = set(exitcodes)
unrecoverable_codes = [code for code in exitcodes if is_unrecoverable(code)]
# There are unrecoverable exit codes.
if unrecoverable_codes:
print("Error: Unexpected exit codes: {}".format(unrecoverable_codes))
if len(unrecoverable_codes) == 1:
return (unrecoverable_codes[0], False)
else:
return (SEARCH_CRITICAL_ERROR, False)
# At least one plan was found.
if SUCCESS in exitcodes:
if SEARCH_OUT_OF_MEMORY in exitcodes and SEARCH_OUT_OF_TIME in exitcodes:
return (SEARCH_PLAN_FOUND_AND_OUT_OF_MEMORY_AND_TIME, True)
elif SEARCH_OUT_OF_MEMORY in exitcodes:
return (SEARCH_PLAN_FOUND_AND_OUT_OF_MEMORY, True)
elif SEARCH_OUT_OF_TIME in exitcodes:
return (SEARCH_PLAN_FOUND_AND_OUT_OF_TIME, True)
else:
return (SUCCESS, True)
# A config proved unsolvability or did not find a plan.
for code in [SEARCH_UNSOLVABLE, SEARCH_UNSOLVED_INCOMPLETE]:
if code in exitcodes:
return (code, False)
# No plan was found due to hitting resource limits.
if SEARCH_OUT_OF_MEMORY in exitcodes and SEARCH_OUT_OF_TIME in exitcodes:
return (SEARCH_OUT_OF_MEMORY_AND_TIME, False)
elif SEARCH_OUT_OF_MEMORY in exitcodes:
return (SEARCH_OUT_OF_MEMORY, False)
elif SEARCH_OUT_OF_TIME in exitcodes:
return (SEARCH_OUT_OF_TIME, False)
assert False, "Error: Unhandled exit codes: {}".format(exitcodes)
| 3,461 | 31.35514 | 94 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/run_components.py
|
import errno
import logging
import os.path
import subprocess
import sys
from . import call
from . import limits
from . import portfolio_runner
from . import returncodes
from . import util
from .plan_manager import PlanManager
# TODO: We might want to turn translate into a module and call it with "python3 -m translate".
REL_TRANSLATE_PATH = os.path.join("translate", "translate.py")
if os.name == "posix":
REL_SEARCH_PATH = "downward"
VALIDATE = "validate"
elif os.name == "nt":
REL_SEARCH_PATH = "downward.exe"
VALIDATE = "validate.exe"
else:
returncodes.exit_with_driver_unsupported_error("Unsupported OS: " + os.name)
def get_executable(build, rel_path):
# First, consider 'build' to be a path directly to the binaries.
# The path can be absolute or relative to the current working
# directory.
build_dir = build
if not os.path.exists(build_dir):
# If build is not a full path to the binaries, it might be the
# name of a build in our standard directory structure.
# in this case, the binaries are in
# '<repo-root>/builds/<buildname>/bin'.
build_dir = os.path.join(util.BUILDS_DIR, build, "bin")
if not os.path.exists(build_dir):
returncodes.exit_with_driver_input_error(
"Could not find build '{build}' at {build_dir}. "
"Please run './build.py {build}'.".format(**locals()))
abs_path = os.path.join(build_dir, rel_path)
if not os.path.exists(abs_path):
returncodes.exit_with_driver_input_error(
"Could not find '{rel_path}' in build '{build}'. "
"Please run './build.py {build}'.".format(**locals()))
return abs_path
def run_translate(args):
logging.info("Running translator.")
time_limit = limits.get_time_limit(
args.translate_time_limit, args.overall_time_limit)
memory_limit = limits.get_memory_limit(
args.translate_memory_limit, args.overall_memory_limit)
translate = get_executable(args.build, REL_TRANSLATE_PATH)
assert sys.executable, "Path to interpreter could not be found"
cmd = [sys.executable] + [translate] + args.translate_inputs + args.translate_options
stderr, returncode = call.get_error_output_and_returncode(
"translator",
cmd,
time_limit=time_limit,
memory_limit=memory_limit)
# We collect stderr of the translator and print it here, unless
# the translator ran out of memory and all output in stderr is
# related to MemoryError.
do_print_on_stderr = True
if returncode == returncodes.TRANSLATE_OUT_OF_MEMORY:
output_related_to_memory_error = True
if not stderr:
output_related_to_memory_error = False
for line in stderr.splitlines():
if "MemoryError" not in line:
output_related_to_memory_error = False
break
if output_related_to_memory_error:
do_print_on_stderr = False
if do_print_on_stderr and stderr:
returncodes.print_stderr(stderr)
if returncode == 0:
return (0, True)
elif returncode == 1:
# Unlikely case that the translator crashed without raising an
# exception.
return (returncodes.TRANSLATE_CRITICAL_ERROR, False)
else:
# Pass on any other exit code, including in particular signals or
# exit codes such as running out of memory or time.
return (returncode, False)
def run_search(args):
logging.info("Running search (%s)." % args.build)
time_limit = limits.get_time_limit(
args.search_time_limit, args.overall_time_limit)
memory_limit = limits.get_memory_limit(
args.search_memory_limit, args.overall_memory_limit)
executable = get_executable(args.build, REL_SEARCH_PATH)
plan_manager = PlanManager(
args.plan_file,
portfolio_bound=args.portfolio_bound,
single_plan=args.portfolio_single_plan)
plan_manager.delete_existing_plans()
if args.portfolio:
assert not args.search_options
logging.info("search portfolio: %s" % args.portfolio)
return portfolio_runner.run(
args.portfolio, executable, args.search_input, plan_manager,
time_limit, memory_limit)
else:
if not args.search_options:
returncodes.exit_with_driver_input_error(
"search needs --alias, --portfolio, or search options")
if "--help" not in args.search_options:
args.search_options.extend(["--internal-plan-file", args.plan_file])
try:
call.check_call(
"search",
[executable] + args.search_options,
stdin=args.search_input,
time_limit=time_limit,
memory_limit=memory_limit)
except subprocess.CalledProcessError as err:
# TODO: if we ever add support for SEARCH_PLAN_FOUND_AND_* directly
# in the planner, this assertion no longer holds. Furthermore, we
# would need to return (err.returncode, True) if the returncode is
# in [0..10].
# Negative exit codes are allowed for passing out signals.
assert err.returncode >= 10 or err.returncode < 0, "got returncode < 10: {}".format(err.returncode)
return (err.returncode, False)
else:
return (0, True)
def run_validate(args):
logging.info("Running validate.")
num_files = len(args.filenames)
if num_files == 1:
task, = args.filenames
domain = util.find_domain_filename(task)
elif num_files == 2:
domain, task = args.filenames
else:
returncodes.exit_with_driver_input_error("validate needs one or two PDDL input files.")
plan_files = list(PlanManager(args.plan_file).get_existing_plans())
if not plan_files:
print("Not running validate since no plans found.")
return (0, True)
validate_inputs = [domain, task] + plan_files
try:
call.check_call(
"validate",
[VALIDATE] + validate_inputs,
time_limit=args.validate_time_limit,
memory_limit=args.validate_memory_limit)
except OSError as err:
if err.errno == errno.ENOENT:
returncodes.exit_with_driver_input_error("Error: {} not found. Is it on the PATH?".format(VALIDATE))
else:
returncodes.exit_with_driver_critical_error(err)
else:
return (0, True)
| 6,502 | 36.80814 | 112 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/call.py
|
"""Make subprocess calls with time and memory limits."""
from . import limits
from . import returncodes
import logging
import os
import shlex
import subprocess
import sys
def print_call_settings(nick, cmd, stdin, time_limit, memory_limit):
if stdin is not None:
stdin = shlex.quote(stdin)
logging.info("{} stdin: {}".format(nick, stdin))
if time_limit is not None:
time_limit = str(time_limit) + "s"
logging.info("{} time limit: {}".format(nick, time_limit))
if memory_limit is not None:
memory_limit = int(limits.convert_to_mb(memory_limit))
memory_limit = str(memory_limit) + " MB"
logging.info("{} memory limit: {}".format(nick, memory_limit))
escaped_cmd = [shlex.quote(x) for x in cmd]
if stdin is not None:
escaped_cmd.extend(["<", shlex.quote(stdin)])
logging.info("{} command line string: {}".format(nick, " ".join(escaped_cmd)))
def _get_preexec_function(time_limit, memory_limit):
def set_limits():
def _try_or_exit(function, description):
def fail(exception, exitcode):
returncodes.print_stderr("{} failed: {}".format(description, exception))
os._exit(exitcode)
try:
function()
except NotImplementedError as err:
fail(err, returncodes.DRIVER_UNSUPPORTED)
except OSError as err:
fail(err, returncodes.DRIVER_CRITICAL_ERROR)
except ValueError as err:
fail(err, returncodes.DRIVER_INPUT_ERROR)
_try_or_exit(lambda: limits.set_time_limit(time_limit), "Setting time limit")
_try_or_exit(lambda: limits.set_memory_limit(memory_limit), "Setting memory limit")
if time_limit is None and memory_limit is None:
return None
else:
return set_limits
def check_call(nick, cmd, stdin=None, time_limit=None, memory_limit=None):
print_call_settings(nick, cmd, stdin, time_limit, memory_limit)
kwargs = {"preexec_fn": _get_preexec_function(time_limit, memory_limit)}
sys.stdout.flush()
if stdin:
with open(stdin) as stdin_file:
return subprocess.check_call(cmd, stdin=stdin_file, **kwargs)
else:
return subprocess.check_call(cmd, **kwargs)
def get_error_output_and_returncode(nick, cmd, time_limit=None, memory_limit=None):
print_call_settings(nick, cmd, None, time_limit, memory_limit)
preexec_fn = _get_preexec_function(time_limit, memory_limit)
sys.stdout.flush()
p = subprocess.Popen(cmd, preexec_fn=preexec_fn, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
return stderr, p.returncode
| 2,669 | 33.675325 | 91 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/aliases.py
|
import os
from .util import DRIVER_DIR
PORTFOLIO_DIR = os.path.join(DRIVER_DIR, "portfolios")
ALIASES = {}
ALIASES["seq-sat-fd-autotune-1"] = [
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--evaluator", "hcea=cea()",
"--evaluator", "hcg=cg(transform=adapt_costs(plusone))",
"--evaluator", "hgc=goalcount()",
"--evaluator", "hAdd=add()",
"--search", """iterated([
lazy(alt([single(sum([g(),weight(hff,10)])),
single(sum([g(),weight(hff,10)]),pref_only=true)],
boost=2000),
preferred=[hff],reopen_closed=false,cost_type=one),
lazy(alt([single(sum([g(),weight(hAdd,7)])),
single(sum([g(),weight(hAdd,7)]),pref_only=true),
single(sum([g(),weight(hcg,7)])),
single(sum([g(),weight(hcg,7)]),pref_only=true),
single(sum([g(),weight(hcea,7)])),
single(sum([g(),weight(hcea,7)]),pref_only=true),
single(sum([g(),weight(hgc,7)])),
single(sum([g(),weight(hgc,7)]),pref_only=true)],
boost=1000),
preferred=[hcea,hgc],reopen_closed=false,cost_type=one),
lazy(alt([tiebreaking([sum([g(),weight(hAdd,3)]),hAdd]),
tiebreaking([sum([g(),weight(hAdd,3)]),hAdd],pref_only=true),
tiebreaking([sum([g(),weight(hcg,3)]),hcg]),
tiebreaking([sum([g(),weight(hcg,3)]),hcg],pref_only=true),
tiebreaking([sum([g(),weight(hcea,3)]),hcea]),
tiebreaking([sum([g(),weight(hcea,3)]),hcea],pref_only=true),
tiebreaking([sum([g(),weight(hgc,3)]),hgc]),
tiebreaking([sum([g(),weight(hgc,3)]),hgc],pref_only=true)],
boost=5000),
preferred=[hcea,hgc],reopen_closed=false,cost_type=normal),
eager(alt([tiebreaking([sum([g(),weight(hAdd,10)]),hAdd]),
tiebreaking([sum([g(),weight(hAdd,10)]),hAdd],pref_only=true),
tiebreaking([sum([g(),weight(hcg,10)]),hcg]),
tiebreaking([sum([g(),weight(hcg,10)]),hcg],pref_only=true),
tiebreaking([sum([g(),weight(hcea,10)]),hcea]),
tiebreaking([sum([g(),weight(hcea,10)]),hcea],pref_only=true),
tiebreaking([sum([g(),weight(hgc,10)]),hgc]),
tiebreaking([sum([g(),weight(hgc,10)]),hgc],pref_only=true)],
boost=500),
preferred=[hcea,hgc],reopen_closed=true,cost_type=normal)
],repeat_last=true,continue_on_fail=true)"""]
ALIASES["seq-sat-fd-autotune-2"] = [
"--evaluator", "hcea=cea(transform=adapt_costs(plusone))",
"--evaluator", "hcg=cg(transform=adapt_costs(one))",
"--evaluator", "hgc=goalcount(transform=adapt_costs(plusone))",
"--evaluator", "hff=ff()",
"--search", """iterated([
ehc(hcea,preferred=[hcea],preferred_usage=0,cost_type=normal),
lazy(alt([single(sum([weight(g(),2),weight(hff,3)])),
single(sum([weight(g(),2),weight(hff,3)]),pref_only=true),
single(sum([weight(g(),2),weight(hcg,3)])),
single(sum([weight(g(),2),weight(hcg,3)]),pref_only=true),
single(sum([weight(g(),2),weight(hcea,3)])),
single(sum([weight(g(),2),weight(hcea,3)]),pref_only=true),
single(sum([weight(g(),2),weight(hgc,3)])),
single(sum([weight(g(),2),weight(hgc,3)]),pref_only=true)],
boost=200),
preferred=[hcea,hgc],reopen_closed=false,cost_type=one),
lazy(alt([single(sum([g(),weight(hff,5)])),
single(sum([g(),weight(hff,5)]),pref_only=true),
single(sum([g(),weight(hcg,5)])),
single(sum([g(),weight(hcg,5)]),pref_only=true),
single(sum([g(),weight(hcea,5)])),
single(sum([g(),weight(hcea,5)]),pref_only=true),
single(sum([g(),weight(hgc,5)])),
single(sum([g(),weight(hgc,5)]),pref_only=true)],
boost=5000),
preferred=[hcea,hgc],reopen_closed=true,cost_type=normal),
lazy(alt([single(sum([g(),weight(hff,2)])),
single(sum([g(),weight(hff,2)]),pref_only=true),
single(sum([g(),weight(hcg,2)])),
single(sum([g(),weight(hcg,2)]),pref_only=true),
single(sum([g(),weight(hcea,2)])),
single(sum([g(),weight(hcea,2)]),pref_only=true),
single(sum([g(),weight(hgc,2)])),
single(sum([g(),weight(hgc,2)]),pref_only=true)],
boost=1000),
preferred=[hcea,hgc],reopen_closed=true,cost_type=one)
],repeat_last=true,continue_on_fail=true)"""]
def _get_lama(**kwargs):
return [
"--if-unit-cost",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),pref={pref})".format(**kwargs),
"--evaluator", "hff=ff()",
"--search", """iterated([
lazy_greedy([hff,hlm],preferred=[hff,hlm]),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1)
],repeat_last=true,continue_on_fail=true)""",
"--if-non-unit-cost",
"--evaluator",
"hlm1=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref={pref})".format(**kwargs),
"--evaluator", "hff1=ff(transform=adapt_costs(one))",
"--evaluator",
"hlm2=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone),pref={pref})".format(**kwargs),
"--evaluator", "hff2=ff(transform=adapt_costs(plusone))",
"--search", """iterated([
lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],
cost_type=one,reopen_closed=false),
lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],
reopen_closed=false),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1)
],repeat_last=true,continue_on_fail=true)""",
# Append --always to be on the safe side if we want to append
# additional options later.
"--always"]
ALIASES["seq-sat-lama-2011"] = _get_lama(pref="true")
ALIASES["lama"] = _get_lama(pref="false")
ALIASES["lama-first"] = [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""]
ALIASES["seq-opt-bjolp"] = [
"--evaluator",
"lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search",
"astar(lmc,lazy_evaluator=lmc)"]
ALIASES["seq-opt-lmcut"] = [
"--search", "astar(lmcut())"]
PORTFOLIOS = {}
for portfolio in os.listdir(PORTFOLIO_DIR):
name, ext = os.path.splitext(portfolio)
assert ext == ".py", portfolio
PORTFOLIOS[name.replace("_", "-")] = os.path.join(PORTFOLIO_DIR, portfolio)
def show_aliases():
for alias in sorted(list(ALIASES) + list(PORTFOLIOS)):
print(alias)
def set_options_for_alias(alias_name, args):
"""
If alias_name is an alias for a configuration, set args.search_options
to the corresponding command-line arguments. If it is an alias for a
portfolio, set args.portfolio to the path to the portfolio file.
Otherwise raise KeyError.
"""
assert not args.search_options
assert not args.portfolio
if alias_name in ALIASES:
args.search_options = [x.replace(" ", "").replace("\n", "")
for x in ALIASES[alias_name]]
elif alias_name in PORTFOLIOS:
args.portfolio = PORTFOLIOS[alias_name]
else:
raise KeyError(alias_name)
| 7,884 | 43.801136 | 115 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/portfolio_runner.py
|
""" Module for running planner portfolios.
Memory limits: We apply the same memory limit that is given to the
plan script to each planner call. Note that this setup does not work if
the sum of the memory usage of the Python process and the planner calls
is limited. In this case the Python process might get killed although
we would like to kill only the single planner call and continue with
the remaining configurations. If we ever want to support this scenario
we will have to reduce the memory limit of the planner calls by the
amount of memory that the Python process needs. On maia for example
this amounts to 128MB of reserved virtual memory. We can make Python
reserve less space by lowering the soft limit for virtual memory before
the process is started.
"""
__all__ = ["run"]
import os
import subprocess
from . import call
from . import limits
from . import returncodes
from . import util
DEFAULT_TIMEOUT = 1800
def adapt_heuristic_cost_type(arg, cost_type):
if cost_type == "normal":
transform = "no_transform()"
else:
transform = "adapt_costs({})".format(cost_type)
return arg.replace("H_COST_TRANSFORM", transform)
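# Editor's note: illustrative sketch only, showing how the H_COST_TRANSFORM
# placeholder used in portfolio configurations is instantiated.
def _example_adapt_heuristic_cost_type():
    # Expected: "hff=ff(transform=adapt_costs(one))"
    return adapt_heuristic_cost_type(
        "hff=ff(transform=H_COST_TRANSFORM)", "one")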
def adapt_args(args, search_cost_type, heuristic_cost_type, plan_manager):
g_bound = plan_manager.get_next_portfolio_cost_bound()
plan_counter = plan_manager.get_plan_counter()
print("g bound: %s" % g_bound)
print("next plan number: %d" % (plan_counter + 1))
for index, arg in enumerate(args):
if arg == "--evaluator" or arg == "--heuristic":
heuristic = args[index + 1]
heuristic = adapt_heuristic_cost_type(heuristic, heuristic_cost_type)
args[index + 1] = heuristic
elif arg == "--search":
search = args[index + 1]
if "bound=BOUND" not in search:
returncodes.exit_with_driver_critical_error(
"Satisficing portfolios need the string "
"\"bound=BOUND\" in each search configuration. "
"See the FDSS portfolios for examples.")
for name, value in [
("BOUND", g_bound),
("S_COST_TYPE", search_cost_type)]:
search = search.replace(name, str(value))
search = adapt_heuristic_cost_type(search, heuristic_cost_type)
args[index + 1] = search
break
def run_search(executable, args, sas_file, plan_manager, time, memory):
complete_args = [executable] + args + [
"--internal-plan-file", plan_manager.get_plan_prefix()]
print("args: %s" % complete_args)
try:
exitcode = call.check_call(
"search", complete_args, stdin=sas_file,
time_limit=time, memory_limit=memory)
except subprocess.CalledProcessError as err:
exitcode = err.returncode
print("exitcode: %d" % exitcode)
print()
return exitcode
def compute_run_time(timeout, configs, pos):
remaining_time = timeout - util.get_elapsed_time()
print("remaining time: {}".format(remaining_time))
relative_time = configs[pos][0]
remaining_relative_time = sum(config[0] for config in configs[pos:])
print("config {}: relative time {}, remaining {}".format(
pos, relative_time, remaining_relative_time))
# For the last config we have relative_time == remaining_relative_time, so
# we use all of the remaining time at the end.
return remaining_time * relative_time / remaining_relative_time
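# Editor's note: illustrative sketch only. It reproduces the proportional
# time-splitting arithmetic above with fixed numbers instead of the live
# clock: with 900 seconds remaining and relative times (1, 1, 2), config 0
# receives 900 * 1 / 4 = 225 seconds; time a config leaves unused is
# redistributed over the remaining relative shares in later calls.
def _example_time_split(remaining_time=900, relative_times=(1, 1, 2), pos=0):
    relative_time = relative_times[pos]
    remaining_relative_time = sum(relative_times[pos:])
    return remaining_time * relative_time / remaining_relative_time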
def run_sat_config(configs, pos, search_cost_type, heuristic_cost_type,
executable, sas_file, plan_manager, timeout, memory):
run_time = compute_run_time(timeout, configs, pos)
if run_time <= 0:
return None
_, args_template = configs[pos]
args = list(args_template)
adapt_args(args, search_cost_type, heuristic_cost_type, plan_manager)
if not plan_manager.abort_portfolio_after_first_plan():
args.extend([
"--internal-previous-portfolio-plans",
str(plan_manager.get_plan_counter())])
result = run_search(executable, args, sas_file, plan_manager, run_time, memory)
plan_manager.process_new_plans()
return result
def run_sat(configs, executable, sas_file, plan_manager, final_config,
final_config_builder, timeout, memory):
# If the configuration contains S_COST_TYPE or H_COST_TRANSFORM and the task
# has non-unit costs, we start by treating all costs as one. When we find
# a solution, we rerun the successful config with real costs.
heuristic_cost_type = "one"
search_cost_type = "one"
changed_cost_types = False
while configs:
configs_next_round = []
for pos, (relative_time, args) in enumerate(configs):
exitcode = run_sat_config(
configs, pos, search_cost_type, heuristic_cost_type,
executable, sas_file, plan_manager, timeout, memory)
if exitcode is None:
return
yield exitcode
if exitcode == returncodes.SEARCH_UNSOLVABLE:
return
if exitcode == returncodes.SUCCESS:
if plan_manager.abort_portfolio_after_first_plan():
return
configs_next_round.append((relative_time, args))
if (not changed_cost_types and can_change_cost_type(args) and
plan_manager.get_problem_type() == "general cost"):
print("Switch to real costs and repeat last run.")
changed_cost_types = True
search_cost_type = "normal"
heuristic_cost_type = "plusone"
exitcode = run_sat_config(
configs, pos, search_cost_type, heuristic_cost_type,
executable, sas_file, plan_manager, timeout, memory)
if exitcode is None:
return
yield exitcode
if exitcode == returncodes.SEARCH_UNSOLVABLE:
return
if final_config_builder:
print("Build final config.")
final_config = final_config_builder(args)
break
if final_config:
break
# Only run the successful configs in the next round.
configs = configs_next_round
if final_config:
print("Abort portfolio and run final config.")
exitcode = run_sat_config(
[(1, final_config)], 0, search_cost_type,
heuristic_cost_type, executable, sas_file, plan_manager,
timeout, memory)
if exitcode is not None:
yield exitcode
def run_opt(configs, executable, sas_file, plan_manager, timeout, memory):
for pos, (relative_time, args) in enumerate(configs):
run_time = compute_run_time(timeout, configs, pos)
exitcode = run_search(executable, args, sas_file, plan_manager,
run_time, memory)
yield exitcode
if exitcode in [returncodes.SUCCESS, returncodes.SEARCH_UNSOLVABLE]:
break
def can_change_cost_type(args):
return any("S_COST_TYPE" in part or "H_COST_TRANSFORM" in part for part in args)
def get_portfolio_attributes(portfolio):
attributes = {}
with open(portfolio, "rb") as portfolio_file:
content = portfolio_file.read()
try:
exec(content, attributes)
except Exception:
returncodes.exit_with_driver_critical_error(
"The portfolio %s could not be loaded. Maybe it still "
"uses the old portfolio syntax? See the FDSS portfolios "
"for examples using the new syntax." % portfolio)
if "CONFIGS" not in attributes:
returncodes.exit_with_driver_critical_error("portfolios must define CONFIGS")
if "OPTIMAL" not in attributes:
returncodes.exit_with_driver_critical_error("portfolios must define OPTIMAL")
return attributes
def run(portfolio, executable, sas_file, plan_manager, time, memory):
"""
Run the configs in the given portfolio file.
The portfolio is allowed to run for at most *time* seconds and may
use a maximum of *memory* bytes.
"""
attributes = get_portfolio_attributes(portfolio)
configs = attributes["CONFIGS"]
optimal = attributes["OPTIMAL"]
final_config = attributes.get("FINAL_CONFIG")
final_config_builder = attributes.get("FINAL_CONFIG_BUILDER")
if "TIMEOUT" in attributes:
returncodes.exit_with_driver_input_error(
"The TIMEOUT attribute in portfolios has been removed. "
"Please pass a time limit to fast-downward.py.")
if time is None:
if os.name == "nt":
returncodes.exit_with_driver_unsupported_error(limits.RESOURCE_MODULE_MISSING_MSG)
else:
returncodes.exit_with_driver_input_error(
"Portfolios need a time limit. Please pass --search-time-limit "
"or --overall-time-limit to fast-downward.py.")
timeout = util.get_elapsed_time() + time
if optimal:
exitcodes = run_opt(
configs, executable, sas_file, plan_manager, timeout, memory)
else:
exitcodes = run_sat(
configs, executable, sas_file, plan_manager, final_config,
final_config_builder, timeout, memory)
return returncodes.generate_portfolio_exitcode(exitcodes)
| 9,483 | 38.682008 | 94 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/plan_manager.py
|
import itertools
import os
import os.path
import re
from . import returncodes
_PLAN_INFO_REGEX = re.compile(r"; cost = (\d+) \((unit cost|general cost)\)\n")
def _read_last_line(filename):
line = None
with open(filename) as input_file:
for line in input_file:
pass
return line
def _parse_plan(plan_filename):
"""Parse a plan file and return a pair (cost, problem_type)
summarizing the salient information. Return (None, None) for
incomplete plans."""
last_line = _read_last_line(plan_filename) or ""
match = _PLAN_INFO_REGEX.match(last_line)
if match:
return int(match.group(1)), match.group(2)
else:
return None, None
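# Illustrative example (hypothetical plan file): if the plan file ends with
# the line "; cost = 12 (unit cost)", _parse_plan returns (12, "unit cost");
# a plan file without such a trailer line yields (None, None).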
class PlanManager:
def __init__(self, plan_prefix, portfolio_bound=None, single_plan=False):
self._plan_prefix = plan_prefix
self._plan_costs = []
self._problem_type = None
if portfolio_bound is None:
portfolio_bound = "infinity"
self._portfolio_bound = portfolio_bound
self._single_plan = single_plan
def get_plan_prefix(self):
return self._plan_prefix
def get_plan_counter(self):
return len(self._plan_costs)
def get_next_portfolio_cost_bound(self):
"""Return the next plan cost bound to be used in a portfolio planner.
Initially, this is the user-specified cost bound, or "infinity"
if the user specified no bound. Once a plan has been found, it
is the cost of the best plan found so far. (This is always the
last plan found because plans must decrease in cost.)
"""
if self._plan_costs:
return self._plan_costs[-1]
else:
return self._portfolio_bound
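    # Illustrative example: with no plans found yet and no user-specified
    # bound this returns "infinity"; after plans of cost 20 and then 15 have
    # been registered, it returns 15.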
def abort_portfolio_after_first_plan(self):
return self._single_plan
def get_problem_type(self):
if self._problem_type is None:
returncodes.exit_with_driver_critical_error("no plans found yet: cost type not set")
return self._problem_type
def process_new_plans(self):
"""Update information about plans after a planner run.
Read newly generated plans and store the relevant information.
If the last plan file is incomplete, delete it.
"""
had_incomplete_plan = False
for counter in itertools.count(self.get_plan_counter() + 1):
plan_filename = self._get_plan_file(counter)
def bogus_plan(msg):
returncodes.exit_with_driver_critical_error("%s: %s" % (plan_filename, msg))
if not os.path.exists(plan_filename):
break
if had_incomplete_plan:
bogus_plan("plan found after incomplete plan")
cost, problem_type = _parse_plan(plan_filename)
if cost is None:
had_incomplete_plan = True
print("%s is incomplete. Deleted the file." % plan_filename)
os.remove(plan_filename)
else:
print("plan manager: found new plan with cost %d" % cost)
if self._problem_type is None:
# This is the first plan we found.
self._problem_type = problem_type
else:
# Check if info from this plan matches previous info.
if self._problem_type != problem_type:
bogus_plan("problem type has changed")
if cost >= self._plan_costs[-1]:
bogus_plan("plan quality has not improved")
self._plan_costs.append(cost)
def get_existing_plans(self):
"""Yield all plans that match the given plan prefix."""
if os.path.exists(self._plan_prefix):
yield self._plan_prefix
for counter in itertools.count(start=1):
plan_filename = self._get_plan_file(counter)
if os.path.exists(plan_filename):
yield plan_filename
else:
break
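    # Illustrative example (hypothetical prefix): with plan prefix "sas_plan"
    # and files "sas_plan", "sas_plan.1" and "sas_plan.2" on disk, the
    # generator yields exactly those three names and stops at the first
    # missing numbered file.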
def delete_existing_plans(self):
"""Delete all plans that match the given plan prefix."""
for plan in self.get_existing_plans():
os.remove(plan)
def _get_plan_file(self, number):
return "%s.%d" % (self._plan_prefix, number)
| 4,288 | 33.869919 | 96 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/version.py
|
# This file is auto-generated by the scripts in misc/release.
# Do not modify it.
__version__ = "20.06+"
| 106 | 20.4 | 61 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/util.py
|
import os
from . import returncodes
DRIVER_DIR = os.path.abspath(os.path.dirname(__file__))
REPO_ROOT_DIR = os.path.dirname(DRIVER_DIR)
BUILDS_DIR = os.path.join(REPO_ROOT_DIR, "builds")
def get_elapsed_time():
"""
Return the CPU time taken by the python process and its child
processes.
"""
if os.name == "nt":
# The child time components of os.times() are 0 on Windows.
raise NotImplementedError("cannot use get_elapsed_time() on Windows")
return sum(os.times()[:4])
def find_domain_filename(task_filename):
"""
Find domain filename for the given task using automatic naming rules.
"""
dirname, basename = os.path.split(task_filename)
domain_basenames = [
"domain.pddl",
basename[:3] + "-domain.pddl",
"domain_" + basename,
"domain-" + basename,
]
for domain_basename in domain_basenames:
domain_filename = os.path.join(dirname, domain_basename)
if os.path.exists(domain_filename):
return domain_filename
returncodes.exit_with_driver_input_error(
"Error: Could not find domain file using automatic naming rules.")
| 1,166 | 26.785714 | 77 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/__init__.py
|
from .version import __version__
| 33 | 16 | 32 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/portfolios/seq_sat_fdss_1.py
|
OPTIMAL = False
CONFIGS = [
# alt_lazy_ff_cg
(49, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--search",
"lazy_greedy([hff,hcg],preferred=[hff,hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_ff_1
(171, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# alt_lazy_cea_cg
(27, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--search",
"lazy_greedy([hcea,hcg],preferred=[hcea,hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_wa3_ff_1
(340, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"lazy_wastar([h],w=3,preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# alt_eager_ff_cg
(76, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--search",
"eager_greedy([hff,hcg],preferred=[hff,hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_greedy_ff_1
(88, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# alt_eager_ff_add
(90, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hadd=add(transform=H_COST_TRANSFORM)", "--search",
"eager_greedy([hff,hadd],preferred=[hff,hadd],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_cea_1
(56, ["--evaluator", "h=cea(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# alt_eager_ff_cea_cg
(73, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([hff,hcea,hcg],preferred=[hff,hcea,hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_wa3_add_1
(50, ["--evaluator", "h=add(transform=H_COST_TRANSFORM)",
"--search",
"lazy_wastar([h],w=3,preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_greedy_cea_1
(84, ["--evaluator", "h=cea(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_wa3_add_1
(166, ["--evaluator", "h=add(transform=H_COST_TRANSFORM)",
"--search",
"eager(single(sum([g(),weight(h,3)])),preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_wa3_ff_1
(87, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"eager(single(sum([g(),weight(h,3)])),preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_wa3_cg_1
(73, ["--evaluator", "h=cg(transform=H_COST_TRANSFORM)",
"--search",
"lazy_wastar([h],w=3,preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_wa3_cg_1
(89, ["--evaluator", "h=cg(transform=H_COST_TRANSFORM)",
"--search",
"eager(single(sum([g(),weight(h,3)])),preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
]
FINAL_CONFIG = [
"--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"iterated([eager(single(sum([g(),weight(h,3)])),preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)],bound=BOUND,repeat_last=true)"]
| 3,544 | 48.236111 | 133 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/portfolios/seq_sat_fdss_2014.py
|
OPTIMAL = False
CONFIGS = [
# add_lm_lazy_greedy
(114, ["--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))", "--search",
"lazy_greedy([hadd,hlm],preferred=[hadd,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_lm_lazy_greedy
(187, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))", "--search",
"lazy_greedy([hff,hlm],preferred=[hff,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_lm_eager_greedy
(33, ["--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))", "--search",
"eager_greedy([hadd,hlm],preferred=[hadd,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_lm_eager_greedy
(35, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))", "--search",
"eager_greedy([hff,hlm],preferred=[hff,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_lm_lazy_greedy
(39, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))", "--search",
"lazy_greedy([hcea,hlm],preferred=[hcea,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_ff_eager_greedy
(120, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([hadd,hff],preferred=[hadd,hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_ff_eager_greedy
(40, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--search",
"eager_greedy([hcg,hff],preferred=[hcg,hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_ff_lazy_greedy
(17, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([hadd,hff],preferred=[hadd,hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_cg_lazy_greedy
(40, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([hadd,hcg],preferred=[hadd,hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_lm_lazy_wastar
(79, ["--evaluator", "hadd=add(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))", "--search",
"lazy_wastar([hadd,hlm],w=3,preferred=[hadd,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_lm_lazy_wastar
(159, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))", "--search",
"lazy_wastar([hff,hlm],w=3,preferred=[hff,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_lm_lazy_wastar
(39, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))", "--search",
"lazy_wastar([hcea,hlm],w=3,preferred=[hcea,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_lm_eager_greedy
(78, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))", "--search",
"eager_greedy([hcg,hlm],preferred=[hcg,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_ff_lazy_wastar
(39, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--evaluator", "hff=ff(transform=H_COST_TRANSFORM)", "--search",
"lazy_wastar([hcea,hff],w=3,preferred=[hcea,hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_lm_eager_wastar
(37, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))", "--search",
"eager(alt([single(sum([g(), weight(hcea, 3)])),single(sum([g(),weight(hcea,3)]),pref_only=true),single(sum([g(), weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hcea,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_ff_lazy_wastar
(40, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--evaluator", "hff=ff(transform=H_COST_TRANSFORM)", "--search",
"lazy_wastar([hcg,hff],w=3,preferred=[hcg,hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_lm_eager_wastar
(40, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))", "--search",
"eager(alt([single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hff,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_eager_wastar
(77, ["--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--search",
"eager(alt([single(sum([g(), weight(hadd, 3)])),single(sum([g(), weight(hadd,3)]),pref_only=true)]),preferred=[hadd],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_ff_eager_wastar
(40, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)", "--heuristic",
"hcea=cea(transform=H_COST_TRANSFORM)", "--search",
"eager(alt([single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true),single(sum([g(),weight(hcea,3)])),single(sum([g(),weight(hcea,3)]),pref_only=true)]),preferred=[hff,hcea],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_lm_eager_wastar
(78, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))", "--search",
"eager(alt([single(sum([g(),weight(hcg,3)])),single(sum([g(),weight(hcg,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hcg,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_eager_greedy
(40, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([hcea],preferred=[hcea],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_lm_lazy_wastar
(39, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))", "--search",
"lazy_wastar([hcg,hlm],w=3,preferred=[hcg,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_lazy_wastar
(40, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--search",
"lazy_wastar([hcea], w=3, preferred=[hcea],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_eager_wastar
(72, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--search",
"eager(alt([single(sum([g(), weight(hff, 3)])),single(sum([g(),weight(hff,3)]),pref_only=true)]),preferred=[hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_eager_wastar
(38, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--search",
"eager(alt([single(sum([g(), weight(hcg, 3)])),single(sum([g(),weight(hcg,3)]),pref_only=true)]),preferred=[hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_lazy_wastar
(38, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--search",
"lazy_wastar([hff], w=3, preferred=[hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_lazy_greedy
(116, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([hcg],preferred=[hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
]
# ff_lm_eager_wastar
FINAL_CONFIG = [
"--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))",
"--search",
"iterated([eager(alt([single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hff,hlm],cost_type=S_COST_TYPE,bound=BOUND)],bound=BOUND,repeat_last=true)"]
| 8,496 | 64.361538 | 277 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/portfolios/seq_sat_fdss_2.py
|
OPTIMAL = False
CONFIGS = [
# eager_greedy_ff
(330, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_ff
(411, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_greedy_cea
(213, ["--evaluator", "h=cea(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_cea
(57, ["--evaluator", "h=cea(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_greedy_add
(204, ["--evaluator", "h=add(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_greedy_cg
(208, ["--evaluator", "h=cg(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_cg
(109, ["--evaluator", "h=cg(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_add
(63, ["--evaluator", "h=add(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
]
def FINAL_CONFIG_BUILDER(successful_args):
# This assumes that CONFIGS only contains "simple" configurations.
new_args = list(successful_args)
for pos, arg in enumerate(successful_args):
if arg == "--search":
orig_search = successful_args[pos + 1]
sub_searches = []
for weight in (5, 3, 2, 1):
if orig_search.startswith("lazy"):
sub_search = \
"lazy_wastar([h],preferred=[h],w=%d,cost_type=S_COST_TYPE)" % weight
else:
sub_search = \
"eager(single(sum([g(),weight(h,%d)])),preferred=[h],cost_type=S_COST_TYPE)" % weight
sub_searches.append(sub_search)
sub_search_string = ",".join(sub_searches)
new_search = "iterated([%s],bound=BOUND,repeat_last=true)" % sub_search_string
new_args[pos + 1] = new_search
break
return new_args
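# Illustrative sketch of what FINAL_CONFIG_BUILDER produces (the input config
# is hypothetical): for a successful config whose search string is
#   "lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"
# the rewritten "--search" argument becomes
#   "iterated([lazy_wastar([h],preferred=[h],w=5,cost_type=S_COST_TYPE),"
#   "lazy_wastar([h],preferred=[h],w=3,cost_type=S_COST_TYPE),"
#   "lazy_wastar([h],preferred=[h],w=2,cost_type=S_COST_TYPE),"
#   "lazy_wastar([h],preferred=[h],w=1,cost_type=S_COST_TYPE)],"
#   "bound=BOUND,repeat_last=true)"
# (shown here split over several lines; the actual string has no whitespace).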
| 2,502 | 42.155172 | 109 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/portfolios/seq_sat_fdss_2018.py
|
"""
This is the "Fast Downward Stone Soup 2018" sequential portfolio that participated in the IPC 2018
satisficing and bounded-cost tracks. For more information, see the planner abstract:
Jendrik Seipp and Gabriele Röger.
Fast Downward Stone Soup 2018.
In Ninth International Planning Competition (IPC 2018), Deterministic Part, pp. 80-82. 2018.
https://ai.dmi.unibas.ch/papers/seipp-roeger-ipc2018.pdf
"""
OPTIMAL = False
CONFIGS = [
(26, [
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(hff),single(hff,pref_only=true),single(hlm),single(hlm,pref_only=true),type_based([hff,g()])],boost=1000),preferred=[hff,hlm],cost_type=one,reopen_closed=false,randomize_successors=true,preferred_successors_first=false,bound=BOUND)"]),
(25, [
"--landmarks",
"lmg=lm_rhw(only_causal_landmarks=false,disjunctive_landmarks=true,conjunctive_landmarks=true,no_orders=true)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([type_based([g()]),single(hlm),single(hlm,pref_only=true),single(hff),single(hff,pref_only=true)],boost=0),preferred=[hlm],reopen_closed=false,cost_type=plusone,bound=BOUND)"]),
(135, [
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(hff),single(hff,pref_only=true),single(hlm),single(hlm,pref_only=true)],boost=1000),preferred=[hff,hlm],cost_type=one,reopen_closed=false,randomize_successors=false,preferred_successors_first=true,bound=BOUND)"]),
(59, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(one))",
"--search",
"eager_greedy([hff,hlm],preferred=[hff,hlm],cost_type=one,bound=BOUND)"]),
(23, [
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(hff),single(hff,pref_only=true),single(hlm),single(hlm,pref_only=true)],boost=1000),preferred=[hff,hlm],cost_type=one,reopen_closed=false,randomize_successors=true,preferred_successors_first=true,bound=BOUND)"]),
(57, [
"--landmarks",
"lmg=lm_rhw(only_causal_landmarks=false,disjunctive_landmarks=true,conjunctive_landmarks=true,no_orders=true)",
"--evaluator",
"hcg=cg(transform=adapt_costs(plusone))",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([single(sum([g(),weight(hlm,10)])),single(sum([g(),weight(hlm,10)]),pref_only=true),single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true),single(sum([g(),weight(hcg,10)])),single(sum([g(),weight(hcg,10)]),pref_only=true)],boost=1000),preferred=[hlm,hcg],reopen_closed=false,cost_type=plusone,bound=BOUND)"]),
(17, [
"--evaluator",
"hcea=cea(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(one))",
"--search",
"lazy_greedy([hcea,hlm],preferred=[hcea,hlm],cost_type=one,bound=BOUND)"]),
(12, [
"--evaluator",
"hadd=add(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(one))",
"--search",
"lazy(alt([type_based([g()]),single(hadd),single(hadd,pref_only=true),single(hlm),single(hlm,pref_only=true)]),preferred=[hadd,hlm],cost_type=one,bound=BOUND)"]),
(26, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true)],boost=2000),preferred=[hff],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(28, [
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(one))",
"--search",
"eager(alt([type_based([g()]),single(hcg),single(hcg,pref_only=true),single(hlm),single(hlm,pref_only=true)]),preferred=[hcg,hlm],cost_type=one,bound=BOUND)"]),
(29, [
"--landmarks",
"lmg=lm_rhw(only_causal_landmarks=false,disjunctive_landmarks=true,conjunctive_landmarks=true,no_orders=false)",
"--evaluator",
"hcea=cea(transform=adapt_costs(plusone))",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([single(hlm),single(hlm,pref_only=true),single(hff),single(hff,pref_only=true),single(hcea),single(hcea,pref_only=true)],boost=0),preferred=[hlm,hcea],reopen_closed=false,cost_type=plusone,bound=BOUND)"]),
(88, [
"--evaluator",
"hcea=cea(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(one))",
"--search",
"lazy_wastar([hcea,hlm],w=3,preferred=[hcea,hlm],cost_type=one,bound=BOUND)"]),
(8, [
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true),single(sum([g(),weight(hcg,10)])),single(sum([g(),weight(hcg,10)]),pref_only=true)],boost=100),preferred=[hcg],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(54, [
"--evaluator",
"hgoalcount=goalcount(transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff()",
"--search",
"lazy(alt([single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true),single(sum([g(),weight(hgoalcount,10)])),single(sum([g(),weight(hgoalcount,10)]),pref_only=true)],boost=2000),preferred=[hff,hgoalcount],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(24, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(one))",
"--search",
"eager(alt([type_based([g()]),single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hff,hlm],cost_type=one,bound=BOUND)"]),
(29, [
"--landmarks",
"lmg=lm_rhw(reasonable_orders=false,only_causal_landmarks=false,disjunctive_landmarks=false,conjunctive_landmarks=true,no_orders=false)",
"--evaluator",
"hlm=lmcount(lmg,admissible=false,transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--evaluator",
"hblind=blind()",
"--search",
"lazy(alt([type_based([g()]),single(sum([g(),weight(hblind,2)])),single(sum([g(),weight(hblind,2)]),pref_only=true),single(sum([g(),weight(hlm,2)])),single(sum([g(),weight(hlm,2)]),pref_only=true),single(sum([g(),weight(hff,2)])),single(sum([g(),weight(hff,2)]),pref_only=true)],boost=4419),preferred=[hlm],reopen_closed=true,cost_type=one,bound=BOUND)"]),
(30, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy_wastar([hff],w=3,preferred=[hff],cost_type=one,bound=BOUND)"]),
(28, [
"--evaluator",
"hcg=cg(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([type_based([g()]),single(hcg),single(hcg,pref_only=true)],boost=0),preferred=[hcg],reopen_closed=true,cost_type=plusone,bound=BOUND)"]),
(58, [
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true),transform=adapt_costs(one))",
"--search",
"lazy(alt([type_based([g()]),single(sum([g(),weight(hcg,3)])),single(sum([g(),weight(hcg,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hcg,hlm],cost_type=one,bound=BOUND)"]),
(26, [
"--evaluator",
"hcea=cea(transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--evaluator",
"hblind=blind()",
"--search",
"eager(alt([single(sum([g(),weight(hblind,10)])),single(sum([g(),weight(hblind,10)]),pref_only=true),single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true),single(sum([g(),weight(hcea,10)])),single(sum([g(),weight(hcea,10)]),pref_only=true)],boost=536),preferred=[hff],reopen_closed=false,bound=BOUND)"]),
(27, [
"--evaluator",
"hcea=cea(transform=adapt_costs(one))",
"--search",
"eager_greedy([hcea],preferred=[hcea],cost_type=one,bound=BOUND)"]),
(50, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"eager(alt([single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true)]),preferred=[hff],cost_type=one,bound=BOUND)"]),
(28, [
"--evaluator",
"hgoalcount=goalcount(transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--evaluator",
"hblind=blind()",
"--evaluator",
"hcg=cg()",
"--search",
"lazy(alt([type_based([g()]),single(sum([weight(g(),2),weight(hblind,3)])),single(sum([weight(g(),2),weight(hblind,3)]),pref_only=true),single(sum([weight(g(),2),weight(hff,3)])),single(sum([weight(g(),2),weight(hff,3)]),pref_only=true),single(sum([weight(g(),2),weight(hcg,3)])),single(sum([weight(g(),2),weight(hcg,3)]),pref_only=true),single(sum([weight(g(),2),weight(hgoalcount,3)])),single(sum([weight(g(),2),weight(hgoalcount,3)]),pref_only=true)],boost=3662),preferred=[hff],reopen_closed=true,bound=BOUND)"]),
(29, [
"--evaluator",
"hgoalcount=goalcount(transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--evaluator",
"hblind=blind()",
"--evaluator",
"hcg=cg()",
"--search",
"lazy(alt([single(sum([weight(g(),2),weight(hblind,3)])),single(sum([weight(g(),2),weight(hblind,3)]),pref_only=true),single(sum([weight(g(),2),weight(hff,3)])),single(sum([weight(g(),2),weight(hff,3)]),pref_only=true),single(sum([weight(g(),2),weight(hcg,3)])),single(sum([weight(g(),2),weight(hcg,3)]),pref_only=true),single(sum([weight(g(),2),weight(hgoalcount,3)])),single(sum([weight(g(),2),weight(hgoalcount,3)]),pref_only=true)],boost=3662),preferred=[hff],reopen_closed=true,bound=BOUND)"]),
(21, [
"--evaluator",
"hcg=cg(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([single(sum([g(),weight(hcg,10)])),single(sum([g(),weight(hcg,10)]),pref_only=true)],boost=0),preferred=[hcg],reopen_closed=false,cost_type=plusone,bound=BOUND)"]),
(21, [
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--search",
"eager(alt([single(sum([g(),weight(hcg,3)])),single(sum([g(),weight(hcg,3)]),pref_only=true)]),preferred=[hcg],cost_type=one,bound=BOUND)"]),
(24, [
"--landmarks",
"lmg=lm_rhw(reasonable_orders=true,only_causal_landmarks=true,disjunctive_landmarks=true,conjunctive_landmarks=true,no_orders=false)",
"--evaluator",
"hblind=blind()",
"--evaluator",
"hadd=add()",
"--evaluator",
"hlm=lmcount(lmg,admissible=false,pref=true,transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff()",
"--search",
"lazy(alt([single(sum([weight(g(),2),weight(hblind,3)])),single(sum([weight(g(),2),weight(hblind,3)]),pref_only=true),single(sum([weight(g(),2),weight(hff,3)])),single(sum([weight(g(),2),weight(hff,3)]),pref_only=true),single(sum([weight(g(),2),weight(hlm,3)])),single(sum([weight(g(),2),weight(hlm,3)]),pref_only=true),single(sum([weight(g(),2),weight(hadd,3)])),single(sum([weight(g(),2),weight(hadd,3)]),pref_only=true)],boost=2474),preferred=[hadd],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(28, [
"--evaluator",
"hblind=blind()",
"--evaluator",
"hadd=add()",
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--evaluator",
"hhmax=hmax()",
"--search",
"eager(alt([tiebreaking([sum([g(),weight(hblind,7)]),hblind]),tiebreaking([sum([g(),weight(hhmax,7)]),hhmax]),tiebreaking([sum([g(),weight(hadd,7)]),hadd]),tiebreaking([sum([g(),weight(hcg,7)]),hcg])],boost=2142),preferred=[],reopen_closed=true,bound=BOUND)"]),
(28, [
"--evaluator",
"hadd=add(transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff()",
"--search",
"lazy(alt([tiebreaking([sum([weight(g(),4),weight(hff,5)]),hff]),tiebreaking([sum([weight(g(),4),weight(hff,5)]),hff],pref_only=true),tiebreaking([sum([weight(g(),4),weight(hadd,5)]),hadd]),tiebreaking([sum([weight(g(),4),weight(hadd,5)]),hadd],pref_only=true)],boost=2537),preferred=[hff,hadd],reopen_closed=true,bound=BOUND)"]),
(53, [
"--landmarks",
"lmg=lm_hm(only_causal_landmarks=false,disjunctive_landmarks=true,conjunctive_landmarks=false,no_orders=true,m=1)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,transform=transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([type_based([g()]),single(hlm),single(hlm,pref_only=true),single(hff),single(hff,pref_only=true)],boost=5000),preferred=[hlm],reopen_closed=false,bound=BOUND)"]),
(29, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(sum([weight(g(),2),weight(hff,3)])),single(sum([weight(g(),2),weight(hff,3)]),pref_only=true)],boost=5000),preferred=[hff],reopen_closed=true,cost_type=one,bound=BOUND)"]),
(27, [
"--evaluator",
"hblind=blind()",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"eager(alt([single(sum([g(),weight(hblind,2)])),single(sum([g(),weight(hff,2)]))],boost=4480),preferred=[],reopen_closed=true,bound=BOUND)"]),
(29, [
"--landmarks",
"lmg=lm_hm(only_causal_landmarks=false,disjunctive_landmarks=true,conjunctive_landmarks=false,no_orders=true,m=1)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true)",
"--evaluator",
"hff=ff()",
"--search",
"lazy(alt([type_based([g()]),single(hlm),single(hlm,pref_only=true),single(hff),single(hff,pref_only=true)],boost=1000),preferred=[hlm,hff],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(54, [
"--landmarks",
"lmg=lm_hm(only_causal_landmarks=false,disjunctive_landmarks=true,conjunctive_landmarks=true,no_orders=false,m=1)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true)",
"--evaluator",
"hff=ff()",
"--search",
"lazy(alt([tiebreaking([sum([g(),weight(hlm,10)]),hlm]),tiebreaking([sum([g(),weight(hlm,10)]),hlm],pref_only=true),tiebreaking([sum([g(),weight(hff,10)]),hff]),tiebreaking([sum([g(),weight(hff,10)]),hff],pref_only=true)],boost=200),preferred=[hlm],reopen_closed=true,cost_type=plusone,bound=BOUND)"]),
(87, [
"--landmarks",
"lmg=lm_hm(only_causal_landmarks=false,disjunctive_landmarks=true,conjunctive_landmarks=false,no_orders=true,m=1)",
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lmg,admissible=true)",
"--search",
"lazy(alt([single(hlm),single(hlm,pref_only=true),single(hcg),single(hcg,pref_only=true)],boost=0),preferred=[hcg],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(30, [
"--landmarks",
"lmg=lm_exhaust(reasonable_orders=false,only_causal_landmarks=false,disjunctive_landmarks=true,conjunctive_landmarks=true,no_orders=false)",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--evaluator",
"hhmax=hmax()",
"--evaluator",
"hblind=blind()",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,pref=false,transform=adapt_costs(one))",
"--search",
"lazy(alt([type_based([g()]),single(sum([g(),weight(hblind,3)])),single(sum([g(),weight(hblind,3)]),pref_only=true),single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true),single(sum([g(),weight(hhmax,3)])),single(sum([g(),weight(hhmax,3)]),pref_only=true)],boost=3052),preferred=[hff],reopen_closed=true,bound=BOUND)"]),
(56, [
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([tiebreaking([sum([g(),hff]),hff]),tiebreaking([sum([g(),hff]),hff],pref_only=true)],boost=432),preferred=[hff],reopen_closed=true,cost_type=one,bound=BOUND)"]),
(19, [
"--landmarks",
"lmg=lm_merged([lm_rhw(),lm_hm(m=1)],only_causal_landmarks=false,disjunctive_landmarks=false,conjunctive_landmarks=true,no_orders=false)",
"--evaluator",
"hff=ff()",
"--evaluator",
"hlm=lmcount(lmg,admissible=true)",
"--search",
"lazy(alt([single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true),single(sum([g(),weight(hlm,10)])),single(sum([g(),weight(hlm,10)]),pref_only=true)],boost=500),preferred=[hff],reopen_closed=false,cost_type=plusone,bound=BOUND)"]),
(56, [
"--landmarks",
"lmg=lm_exhaust(reasonable_orders=false,only_causal_landmarks=false,disjunctive_landmarks=true,conjunctive_landmarks=true,no_orders=false)",
"--evaluator",
"hgoalcount=goalcount(transform=adapt_costs(plusone))",
"--evaluator",
"hlm=lmcount(lmg,admissible=false)",
"--evaluator",
"hff=ff()",
"--evaluator",
"hblind=blind()",
"--search",
"eager(alt([tiebreaking([sum([weight(g(),8),weight(hblind,9)]),hblind]),tiebreaking([sum([weight(g(),8),weight(hlm,9)]),hlm]),tiebreaking([sum([weight(g(),8),weight(hff,9)]),hff]),tiebreaking([sum([weight(g(),8),weight(hgoalcount,9)]),hgoalcount])],boost=2005),preferred=[],reopen_closed=true,bound=BOUND)"]),
(24, [
"--landmarks",
"lmg=lm_zg(reasonable_orders=false,only_causal_landmarks=false,disjunctive_landmarks=true,conjunctive_landmarks=true,no_orders=true)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,pref=false)",
"--search",
"eager(single(sum([g(),weight(hlm,3)])),preferred=[],reopen_closed=true,cost_type=one,bound=BOUND)"]),
(81, [
"--landmarks",
"lmg=lm_hm(only_causal_landmarks=false,disjunctive_landmarks=true,conjunctive_landmarks=true,no_orders=true,m=1)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true)",
"--search",
"eager(single(sum([g(),weight(hlm,5)])),preferred=[],reopen_closed=true,cost_type=one,bound=BOUND)"]),
]
| 19,746 | 57.250737 | 525 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/portfolios/seq_opt_merge_and_shrink.py
|
OPTIMAL = True
CONFIGS = [
(800, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=true),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=infinity,threshold_before_merge=1))"]),
(1000, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=false),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=200000))"]),
]
| 706 | 46.133333 | 118 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/portfolios/seq_opt_fdss_1.py
|
OPTIMAL = True
CONFIGS = [
(175, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=true),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=infinity,threshold_before_merge=1))"]),
(432, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=false),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=200000))"]),
(455, ["--evaluator",
"lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search",
"astar(lmc,lazy_evaluator=lmc)"]),
(569, ["--search",
"astar(lmcut())"]),
]
| 930 | 43.333333 | 118 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/driver/portfolios/seq_opt_fdss_2.py
|
OPTIMAL = True
CONFIGS = [
(1, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=true),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=infinity,threshold_before_merge=1))"]),
(1, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=false),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=200000))"]),
(1, ["--evaluator",
"lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search",
"astar(lmc,lazy_evaluator=lmc)"]),
(1, ["--search",
"astar(lmcut())"]),
(1, ["--search",
"astar(blind())"]),
]
| 948 | 40.26087 | 116 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/tests/test-standard-configs.py
|
import os
import pipes
import subprocess
import sys
import pytest
import configs
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
BENCHMARKS_DIR = os.path.join(REPO, "misc", "tests", "benchmarks")
FAST_DOWNWARD = os.path.join(REPO, "fast-downward.py")
SAS_FILE = os.path.join(REPO, "test.sas")
PLAN_FILE = os.path.join(REPO, "test.plan")
TASK = os.path.join(BENCHMARKS_DIR, "miconic/s1-0.pddl")
CONFIGS = {}
CONFIGS.update(configs.default_configs_optimal(core=True, extended=True))
CONFIGS.update(configs.default_configs_satisficing(core=True, extended=True))
def escape_list(l):
return " ".join(pipes.quote(x) for x in l)
def run_plan_script(task, config, debug):
cmd = [sys.executable, FAST_DOWNWARD, "--plan-file", PLAN_FILE]
if debug:
cmd.append("--debug")
if "--alias" in config:
assert len(config) == 2, config
cmd += config + [task]
else:
cmd += [task] + config
print("\nRun: {}:".format(escape_list(cmd)))
sys.stdout.flush()
subprocess.check_call(cmd, cwd=REPO)
def translate(task):
subprocess.check_call([
sys.executable, FAST_DOWNWARD, "--sas-file", SAS_FILE, "--translate", task], cwd=REPO)
def cleanup():
os.remove(SAS_FILE)
os.remove(PLAN_FILE)
def setup_module(module):
translate(TASK)
@pytest.mark.parametrize("config", sorted(CONFIGS.values()))
@pytest.mark.parametrize("debug", [False, True])
def test_configs(config, debug):
run_plan_script(SAS_FILE, config, debug)
def teardown_module(module):
cleanup()
| 1,582 | 24.126984 | 94 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/tests/test-translator.py
|
#! /usr/bin/env python3
HELP = """\
Check that translator is deterministic.
Run the translator multiple times to test that the log and the output file are
the same for every run. Obviously, there might be false negatives, i.e.,
different runs might lead to the same nondeterministic results.
"""
import argparse
from collections import defaultdict
from distutils.spawn import find_executable
import itertools
import os
import re
import subprocess
import sys
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
DRIVER = os.path.join(REPO, "fast-downward.py")
def parse_args():
parser = argparse.ArgumentParser(description=HELP)
parser.add_argument(
"benchmarks_dir",
help="path to benchmark directory")
parser.add_argument(
"suite", nargs="*", default=["first"],
help='Use "all" to test all benchmarks, '
'"first" to test the first task of each domain (default), '
'or "<domain>:<problem>" to test individual tasks')
args = parser.parse_args()
args.benchmarks_dir = os.path.abspath(args.benchmarks_dir)
return args
def get_task_name(path):
return "-".join(path.split("/")[-2:])
def translate_task(task_file):
python = sys.executable
print("Translate {} with {}".format(get_task_name(task_file), python))
sys.stdout.flush()
cmd = [python, DRIVER, "--translate", task_file]
try:
output = subprocess.check_output(cmd)
except OSError as err:
sys.exit("Call failed: {}\n{}".format(" ".join(cmd), err))
output = str(output)
# Remove information that may differ between calls.
for pattern in [
r"\[.+s CPU, .+s wall-clock\]",
r"\d+ KB"]:
output = re.sub(pattern, "", output)
return output
def _get_all_tasks_by_domain(benchmarks_dir):
# Ignore domains where translating the first task takes too much time or memory.
# We also ignore citycar, which indeed reveals some nondeterminism in the
    # invariant synthesis. Fixing it would require sorting the actions, which
    # seems to be detrimental on some other domains.
blacklisted_domains = [
"agricola-sat18-strips",
"citycar-opt14-adl", # cf. issue875
"citycar-sat14-adl", # cf. issue875
"organic-synthesis-sat18-strips",
"organic-synthesis-split-opt18-strips",
"organic-synthesis-split-sat18-strips"]
tasks = defaultdict(list)
domains = [
name for name in os.listdir(benchmarks_dir)
if os.path.isdir(os.path.join(benchmarks_dir, name)) and
not name.startswith((".", "_")) and
name not in blacklisted_domains]
for domain in domains:
path = os.path.join(benchmarks_dir, domain)
tasks[domain] = [
os.path.join(benchmarks_dir, domain, f)
for f in sorted(os.listdir(path)) if "domain" not in f]
return sorted(tasks.values())
def get_tasks(args):
suite = []
for task in args.suite:
if task == "first":
# Add the first task of each domain.
suite.extend([tasks[0] for tasks in _get_all_tasks_by_domain(args.benchmarks_dir)])
elif task == "all":
# Add the whole benchmark suite.
suite.extend(itertools.chain.from_iterable(
tasks for tasks in _get_all_tasks_by_domain(args.benchmarks_dir)))
else:
# Add task from command line.
task = task.replace(":", "/")
suite.append(os.path.join(args.benchmarks_dir, task))
return sorted(set(suite))
def cleanup():
# We can't use the driver's cleanup function since the files are renamed.
for f in os.listdir("."):
if f.endswith(".sas"):
os.remove(f)
def write_combined_output(output_file, task):
log = translate_task(task)
with open(output_file, "w") as combined_output:
combined_output.write(log)
with open("output.sas") as output_sas:
combined_output.write(output_sas.read())
def main():
args = parse_args()
os.chdir(DIR)
cleanup()
subprocess.check_call(["./build.py", "translate"], cwd=REPO)
for task in get_tasks(args):
write_combined_output("base.sas", task)
for iteration in range(2):
write_combined_output("output{}.sas".format(iteration), task)
print("Compare translator output", flush=True)
files = ["base.sas", "output{}.sas".format(iteration)]
try:
subprocess.check_call(["diff", "-q"] + files)
except subprocess.CalledProcessError:
sys.exit(
"Error: Translator is nondeterministic for {task}.".format(**locals()))
print(flush=True)
cleanup()
if __name__ == "__main__":
main()
| 4,822 | 32.493056 | 95 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/tests/test-dependencies.py
|
#! /usr/bin/env python3
import os
import shutil
import subprocess
import sys
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
DOWNWARD_FILES = os.path.join(REPO, "src", "search", "DownwardFiles.cmake")
TEST_BUILD_CONFIGS = os.path.join(REPO, "test_build_configs.py")
BUILD = os.path.join(REPO, "build.py")
BUILDS = os.path.join(REPO, "builds")
paths_to_clean = [TEST_BUILD_CONFIGS]
def clean_up(paths_to_clean):
print("\nCleaning up")
for path in paths_to_clean:
print("Removing {path}".format(**locals()))
if os.path.isfile(path):
os.remove(path)
if os.path.isdir(path):
shutil.rmtree(path)
print("Done cleaning")
with open(DOWNWARD_FILES) as d:
content = d.readlines()
content = [line for line in content if '#' not in line]
content = [line for line in content if 'NAME' in line or 'CORE_PLUGIN' in line or 'DEPENDENCY_ONLY' in line]
plugins_to_be_tested = []
for line in content:
if 'NAME' in line:
plugins_to_be_tested.append(line.replace("NAME", "").strip())
if 'CORE_PLUGIN' in line or 'DEPENDENCY_ONLY' in line:
plugins_to_be_tested.pop()
with open(TEST_BUILD_CONFIGS, "w") as f:
for plugin in plugins_to_be_tested:
lowercase = plugin.lower()
line = "{lowercase} = [\"-DCMAKE_BUILD_TYPE=Debug\", \"-DDISABLE_PLUGINS_BY_DEFAULT=YES\"," \
" \"-DPLUGIN_{plugin}_ENABLED=True\"]\n".format(**locals())
f.write(line)
paths_to_clean.append(os.path.join(BUILDS, lowercase))
plugins_failed_test = []
for plugin in plugins_to_be_tested:
try:
subprocess.check_call([BUILD, plugin.lower()])
except subprocess.CalledProcessError:
plugins_failed_test.append(plugin)
if plugins_failed_test:
print("\nFailure:")
for plugin in plugins_failed_test:
print("{plugin} failed dependencies test".format(**locals()))
sys.exit(1)
else:
print("\nAll plugins have passed dependencies test")
clean_up(paths_to_clean)
| 2,043 | 30.9375 | 108 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/tests/test-memory-leaks.py
|
import errno
import os
import pipes
import re
import subprocess
import sys
import pytest
import configs
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
BENCHMARKS_DIR = os.path.join(REPO, "misc", "tests", "benchmarks")
FAST_DOWNWARD = os.path.join(REPO, "fast-downward.py")
BUILD_DIR = os.path.join(REPO, "builds", "release")
DOWNWARD_BIN = os.path.join(BUILD_DIR, "bin", "downward")
SAS_FILE = os.path.join(REPO, "test.sas")
PLAN_FILE = os.path.join(REPO, "test.plan")
VALGRIND_GCC5_SUPPRESSION_FILE = os.path.join(
REPO, "misc", "tests", "valgrind", "gcc5.supp")
DLOPEN_SUPPRESSION_FILE = os.path.join(
REPO, "misc", "tests", "valgrind", "dlopen.supp")
DL_CATCH_ERROR_SUPPRESSION_FILE = os.path.join(
REPO, "misc", "tests", "valgrind", "dl_catch_error.supp")
VALGRIND_ERROR_EXITCODE = 99
TASK = os.path.join(BENCHMARKS_DIR, "miconic/s1-0.pddl")
CONFIGS = {}
CONFIGS.update(configs.default_configs_optimal(core=True, extended=True))
CONFIGS.update(configs.default_configs_satisficing(core=True, extended=True))
def escape_list(l):
return " ".join(pipes.quote(x) for x in l)
def get_compiler_and_version():
output = subprocess.check_output(
["cmake", "-LA", "-N", "../../src/"], cwd=BUILD_DIR).decode("utf-8")
compiler = re.search(
"^DOWNWARD_CXX_COMPILER_ID:STRING=(.+)$", output, re.M).group(1)
version = re.search(
"^DOWNWARD_CXX_COMPILER_VERSION:STRING=(.+)$", output, re.M).group(1)
return compiler, version
COMPILER, COMPILER_VERSION = get_compiler_and_version()
SUPPRESSION_FILES = [
DLOPEN_SUPPRESSION_FILE,
DL_CATCH_ERROR_SUPPRESSION_FILE,
]
if COMPILER == "GNU" and COMPILER_VERSION.split(".")[0] == "5":
print("Using leak suppression file for GCC 5 "
"(see http://issues.fast-downward.org/issue703).")
SUPPRESSION_FILES.append(VALGRIND_GCC5_SUPPRESSION_FILE)
def run_plan_script(task, config):
assert "--alias" not in config, config
cmd = [
"valgrind",
"--leak-check=full",
"--error-exitcode={}".format(VALGRIND_ERROR_EXITCODE),
"--show-leak-kinds=all",
"--errors-for-leak-kinds=all",
"--track-origins=yes"]
for suppression_file in SUPPRESSION_FILES:
cmd.append("--suppressions={}".format(suppression_file))
cmd.extend([DOWNWARD_BIN] + config + ["--internal-plan-file", PLAN_FILE])
print("\nRun: {}".format(escape_list(cmd)))
sys.stdout.flush()
try:
subprocess.check_call(cmd, stdin=open(SAS_FILE), cwd=REPO)
except OSError as err:
if err.errno == errno.ENOENT:
pytest.fail(
"Could not find valgrind. Please install it "
"with \"sudo apt install valgrind\".")
except subprocess.CalledProcessError as err:
# Valgrind exits with
# - the exit code of the binary if it does not find leaks
# - VALGRIND_ERROR_EXITCODE if it finds leaks
# - 1 in case of usage errors
# Fortunately, we only use exit code 1 for portfolios.
if err.returncode == 1:
pytest.fail("failed to run valgrind")
elif err.returncode == VALGRIND_ERROR_EXITCODE:
pytest.fail("{config} leaks memory for {task}".format(**locals()))
def translate(task):
subprocess.check_call(
[sys.executable, FAST_DOWNWARD, "--sas-file", SAS_FILE, "--translate", task],
cwd=REPO)
def setup_module(_module):
translate(TASK)
@pytest.mark.parametrize("config", sorted(CONFIGS.values()))
def test_configs(config):
run_plan_script(SAS_FILE, config)
def teardown_module(_module):
os.remove(SAS_FILE)
os.remove(PLAN_FILE)
| 3,694 | 32.288288 | 85 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/tests/test-exitcodes.py
|
from collections import defaultdict
import os
import subprocess
import sys
import pytest
DIR = os.path.dirname(os.path.abspath(__file__))
REPO_BASE = os.path.dirname(os.path.dirname(DIR))
sys.path.insert(0, REPO_BASE)
from driver import returncodes
BENCHMARKS_DIR = os.path.join(REPO_BASE, "misc", "tests", "benchmarks")
DRIVER = os.path.join(REPO_BASE, "fast-downward.py")
TRANSLATE_TASKS = {
"small": "gripper/prob01.pddl",
"large": "satellite/p25-HC-pfile5.pddl",
}
TRANSLATE_TESTS = [
("small", [], [], defaultdict(lambda: returncodes.SUCCESS)),
# We cannot set time limits on Windows and thus expect DRIVER_UNSUPPORTED
# as exit code in this case.
("large", ["--translate-time-limit", "1s"], [], defaultdict(
lambda: returncodes.TRANSLATE_OUT_OF_TIME,
win32=returncodes.DRIVER_UNSUPPORTED)),
# We cannot set/enforce memory limits on Windows/macOS and thus expect
# DRIVER_UNSUPPORTED as exit code in those cases.
("large", ["--translate-memory-limit", "75M"], [], defaultdict(
lambda: returncodes.TRANSLATE_OUT_OF_MEMORY,
darwin=returncodes.DRIVER_UNSUPPORTED,
win32=returncodes.DRIVER_UNSUPPORTED)),
]
SEARCH_TASKS = {
"strips": "miconic/s1-0.pddl",
"axioms": "philosophers/p01-phil2.pddl",
"cond-eff": "miconic-simpleadl/s1-0.pddl",
"large": "satellite/p25-HC-pfile5.pddl",
}
MERGE_AND_SHRINK = ('astar(merge_and_shrink('
'merge_strategy=merge_stateless(merge_selector='
'score_based_filtering(scoring_functions=[goal_relevance,'
'dfp,total_order(atomic_ts_order=reverse_level,'
'product_ts_order=new_to_old,atomic_before_product=false)])),'
'shrink_strategy=shrink_bisimulation(greedy=false),'
'label_reduction=exact('
'before_shrinking=true,'
'before_merging=false),'
'max_states=50000,threshold_before_merge=1,verbosity=silent))')
SEARCH_TESTS = [
("strips", [], "astar(add())", defaultdict(lambda: returncodes.SUCCESS)),
("strips", [], "astar(hm())", defaultdict(lambda: returncodes.SUCCESS)),
("strips", [], "ehc(hm())", defaultdict(lambda: returncodes.SUCCESS)),
("strips", [], "astar(ipdb())", defaultdict(lambda: returncodes.SUCCESS)),
("strips", [], "astar(lmcut())", defaultdict(lambda: returncodes.SUCCESS)),
("strips", [], "astar(lmcount(lm_rhw(), admissible=false))",
defaultdict(lambda: returncodes.SUCCESS)),
("strips", [], "astar(lmcount(lm_rhw(), admissible=true))",
defaultdict(lambda: returncodes.SUCCESS)),
("strips", [], "astar(lmcount(lm_hm(), admissible=false))",
defaultdict(lambda: returncodes.SUCCESS)),
("strips", [], "astar(lmcount(lm_hm(), admissible=true))",
defaultdict(lambda: returncodes.SUCCESS)),
("strips", [], MERGE_AND_SHRINK, defaultdict(lambda: returncodes.SUCCESS)),
("axioms", [], "astar(add())", defaultdict(lambda: returncodes.SUCCESS)),
("axioms", [], "astar(hm())",
defaultdict(lambda: returncodes.SEARCH_UNSOLVED_INCOMPLETE)),
("axioms", [], "ehc(hm())",
defaultdict(lambda: returncodes.SEARCH_UNSOLVED_INCOMPLETE)),
("axioms", [], "astar(ipdb())",
defaultdict(lambda: returncodes.SEARCH_UNSUPPORTED)),
("axioms", [], "astar(lmcut())",
defaultdict(lambda: returncodes.SEARCH_UNSUPPORTED)),
("axioms", [], "astar(lmcount(lm_rhw(), admissible=false))",
defaultdict(lambda: returncodes.SUCCESS)),
("axioms", [], "astar(lmcount(lm_rhw(), admissible=true))",
defaultdict(lambda: returncodes.SEARCH_UNSUPPORTED)),
("axioms", [], "astar(lmcount(lm_zg(), admissible=false))",
defaultdict(lambda: returncodes.SUCCESS)),
("axioms", [], "astar(lmcount(lm_zg(), admissible=true))",
defaultdict(lambda: returncodes.SEARCH_UNSUPPORTED)),
# h^m landmark factory explicitly forbids axioms.
("axioms", [], "astar(lmcount(lm_hm(), admissible=false))",
defaultdict(lambda: returncodes.SEARCH_UNSUPPORTED)),
("axioms", [], "astar(lmcount(lm_hm(), admissible=true))",
defaultdict(lambda: returncodes.SEARCH_UNSUPPORTED)),
("axioms", [], "astar(lmcount(lm_exhaust(), admissible=false))",
defaultdict(lambda: returncodes.SUCCESS)),
("axioms", [], "astar(lmcount(lm_exhaust(), admissible=true))",
defaultdict(lambda: returncodes.SEARCH_UNSUPPORTED)),
("axioms", [], MERGE_AND_SHRINK,
defaultdict(lambda: returncodes.SEARCH_UNSUPPORTED)),
("cond-eff", [], "astar(add())",
defaultdict(lambda: returncodes.SUCCESS)),
("cond-eff", [], "astar(hm())",
defaultdict(lambda: returncodes.SUCCESS)),
("cond-eff", [], "astar(ipdb())",
defaultdict(lambda: returncodes.SEARCH_UNSUPPORTED)),
("cond-eff", [], "astar(lmcut())",
defaultdict(lambda: returncodes.SEARCH_UNSUPPORTED)),
("cond-eff", [], "astar(lmcount(lm_rhw(), admissible=false))",
defaultdict(lambda: returncodes.SUCCESS)),
("cond-eff", [], "astar(lmcount(lm_rhw(), admissible=true))",
defaultdict(lambda: returncodes.SUCCESS)),
("cond-eff", [], "astar(lmcount(lm_zg(), admissible=false))",
defaultdict(lambda: returncodes.SUCCESS)),
("cond-eff", [], "astar(lmcount(lm_zg(), admissible=true))",
defaultdict(lambda: returncodes.SUCCESS)),
("cond-eff", [], "astar(lmcount(lm_hm(), admissible=false))",
defaultdict(lambda: returncodes.SUCCESS)),
("cond-eff", [], "astar(lmcount(lm_hm(), admissible=true))",
defaultdict(lambda: returncodes.SEARCH_UNSUPPORTED)),
("cond-eff", [], "astar(lmcount(lm_exhaust(), admissible=false))",
defaultdict(lambda: returncodes.SUCCESS)),
("cond-eff", [], "astar(lmcount(lm_exhaust(), admissible=true))",
defaultdict(lambda: returncodes.SEARCH_UNSUPPORTED)),
("cond-eff", [], MERGE_AND_SHRINK,
defaultdict(lambda: returncodes.SUCCESS)),
# We cannot set/enforce memory limits on Windows/macOS and thus expect
# DRIVER_UNSUPPORTED as exit code in those cases.
("large", ["--search-memory-limit", "100M"], MERGE_AND_SHRINK,
defaultdict(lambda: returncodes.SEARCH_OUT_OF_MEMORY,
darwin=returncodes.DRIVER_UNSUPPORTED,
win32=returncodes.DRIVER_UNSUPPORTED)),
# We cannot set time limits on Windows and thus expect DRIVER_UNSUPPORTED
# as exit code in this case.
("large", ["--search-time-limit", "1s"], MERGE_AND_SHRINK,
defaultdict(lambda: returncodes.SEARCH_OUT_OF_TIME,
win32=returncodes.DRIVER_UNSUPPORTED)),
]
def translate(pddl_file, sas_file):
subprocess.check_call([
sys.executable, DRIVER, "--sas-file", sas_file, "--translate", pddl_file])
def cleanup():
subprocess.check_call([sys.executable, DRIVER, "--cleanup"])
def get_sas_file_name(task_type):
return "{}.sas".format(task_type)
def setup_module(_module):
for task_type, relpath in SEARCH_TASKS.items():
pddl_file = os.path.join(BENCHMARKS_DIR, relpath)
sas_file = get_sas_file_name(task_type)
translate(pddl_file, sas_file)
@pytest.mark.parametrize(
"task_type, driver_options, translate_options, expected", TRANSLATE_TESTS)
def test_translator_exit_codes(task_type, driver_options, translate_options, expected):
relpath = TRANSLATE_TASKS[task_type]
problem = os.path.join(BENCHMARKS_DIR, relpath)
cmd = ([sys.executable, DRIVER] + driver_options +
["--translate"] + translate_options + [problem])
print("\nRun {cmd}:".format(**locals()))
sys.stdout.flush()
exitcode = subprocess.call(cmd)
assert exitcode == expected[sys.platform]
cleanup()
@pytest.mark.parametrize(
"task_type, driver_options, search_options, expected", SEARCH_TESTS)
def test_search_exit_codes(task_type, driver_options, search_options, expected):
sas_file = get_sas_file_name(task_type)
cmd = ([sys.executable, DRIVER] + driver_options +
[sas_file, "--search", search_options])
print("\nRun {cmd}:".format(**locals()))
sys.stdout.flush()
exitcode = subprocess.call(cmd)
assert exitcode == expected[sys.platform]
cleanup()
def teardown_module(_module):
for task_type in SEARCH_TASKS:
os.remove(get_sas_file_name(task_type))
| 8,269 | 42.989362 | 87 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/tests/configs.py
|
def configs_optimal_core():
return {
# A*
"astar_blind": [
"--search",
"astar(blind)"],
"astar_h2": [
"--search",
"astar(hm(2))"],
"astar_ipdb": [
"--search",
"astar(ipdb)"],
"bjolp": [
"--evaluator",
"lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search",
"astar(lmc,lazy_evaluator=lmc)"],
"astar_lmcut": [
"--search",
"astar(lmcut)"],
"astar_hmax": [
"--search",
"astar(hmax)"],
"astar_merge_and_shrink_rl_fh": [
"--search",
"astar(merge_and_shrink("
"merge_strategy=merge_strategy=merge_precomputed("
"merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_fh(),"
"label_reduction=exact(before_shrinking=false,"
"before_merging=true),max_states=50000,verbosity=silent))"],
"astar_merge_and_shrink_dfp_bisim": [
"--search",
"astar(merge_and_shrink(merge_strategy=merge_stateless("
"merge_selector=score_based_filtering(scoring_functions=["
"goal_relevance,dfp,total_order("
"atomic_ts_order=reverse_level,product_ts_order=new_to_old,"
"atomic_before_product=false)])),"
"shrink_strategy=shrink_bisimulation(greedy=false),"
"label_reduction=exact(before_shrinking=true,"
"before_merging=false),max_states=50000,"
"threshold_before_merge=1,verbosity=silent))"],
"astar_merge_and_shrink_dfp_greedy_bisim": [
"--search",
"astar(merge_and_shrink(merge_strategy=merge_stateless("
"merge_selector=score_based_filtering(scoring_functions=["
"goal_relevance,dfp,total_order("
"atomic_ts_order=reverse_level,product_ts_order=new_to_old,"
"atomic_before_product=false)])),"
"shrink_strategy=shrink_bisimulation("
"greedy=true),"
"label_reduction=exact(before_shrinking=true,"
"before_merging=false),max_states=infinity,"
"threshold_before_merge=1,verbosity=silent))"],
"blind-sss-simple": [
"--search",
"astar(blind(), pruning=stubborn_sets_simple())"],
"blind-sss-ec": [
"--search", "astar(blind(), pruning=stubborn_sets_ec())"],
"blind-atom-centric-sss": [
"--search", "astar(blind(), pruning=atom_centric_stubborn_sets())"],
}
def configs_satisficing_core():
return {
# A*
"astar_goalcount": [
"--search",
"astar(goalcount)"],
# eager greedy
"eager_greedy_ff": [
"--evaluator",
"h=ff()",
"--search",
"eager_greedy([h],preferred=[h])"],
"eager_greedy_add": [
"--evaluator",
"h=add()",
"--search",
"eager_greedy([h],preferred=[h])"],
"eager_greedy_cg": [
"--evaluator",
"h=cg()",
"--search",
"eager_greedy([h],preferred=[h])"],
"eager_greedy_cea": [
"--evaluator",
"h=cea()",
"--search",
"eager_greedy([h],preferred=[h])"],
# lazy greedy
"lazy_greedy_ff": [
"--evaluator",
"h=ff()",
"--search",
"lazy_greedy([h],preferred=[h])"],
"lazy_greedy_add": [
"--evaluator",
"h=add()",
"--search",
"lazy_greedy([h],preferred=[h])"],
"lazy_greedy_cg": [
"--evaluator",
"h=cg()",
"--search",
"lazy_greedy([h],preferred=[h])"],
# LAMA first
"lama-first": [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""],
"lama-first-typed": [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(hff), single(hff, pref_only=true),"
"single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000),"
"preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true,"
"preferred_successors_first=false)"],
}
def configs_optimal_extended():
return {
"astar_lmcount_lm_merged_rhw_hm_no_order": [
"--evaluator",
"lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search",
"astar(lmc,lazy_evaluator=lmc)"],
"astar_cegar": [
"--search",
"astar(cegar())"],
"pdb": [
"--search",
"astar(pdb())"],
}
def configs_satisficing_extended():
return {
# eager greedy
"eager_greedy_alt_ff_cg": [
"--evaluator",
"hff=ff()",
"--evaluator",
"hcg=cg()",
"--search",
"eager_greedy([hff,hcg],preferred=[hff,hcg])"],
"eager_greedy_ff_no_pref": [
"--search",
"eager_greedy([ff()])"],
# lazy greedy
"lazy_greedy_alt_cea_cg": [
"--evaluator",
"hcea=cea()",
"--evaluator",
"hcg=cg()",
"--search",
"lazy_greedy([hcea,hcg],preferred=[hcea,hcg])"],
"lazy_greedy_ff_no_pref": [
"--search",
"lazy_greedy([ff()])"],
"lazy_greedy_cea": [
"--evaluator",
"h=cea()",
"--search",
"lazy_greedy([h],preferred=[h])"],
# lazy wA*
"lazy_wa3_ff": [
"--evaluator",
"h=ff()",
"--search",
"lazy_wastar([h],w=3,preferred=[h])"],
# eager wA*
"eager_wa3_cg": [
"--evaluator",
"h=cg()",
"--search",
"eager(single(sum([g(),weight(h,3)])),preferred=[h])"],
# ehc
"ehc_ff": [
"--search",
"ehc(ff())"],
# iterated
"iterated_wa_ff": [
"--evaluator",
"h=ff()",
"--search",
"iterated([lazy_wastar([h],w=10), lazy_wastar([h],w=5), lazy_wastar([h],w=3),"
"lazy_wastar([h],w=2), lazy_wastar([h],w=1)])"],
# pareto open list
"pareto_ff": [
"--evaluator",
"h=ff()",
"--search",
"eager(pareto([sum([g(), h]), h]), reopen_closed=true,"
"f_eval=sum([g(), h]))"],
}
def configs_optimal_lp():
return {
"divpot": ["--search", "astar(diverse_potentials())"],
"seq+lmcut": ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"],
}
def default_configs_optimal(core=True, extended=True):
configs = {}
if core:
configs.update(configs_optimal_core())
if extended:
configs.update(configs_optimal_extended())
return configs
def default_configs_satisficing(core=True, extended=True):
configs = {}
if core:
configs.update(configs_satisficing_core())
if extended:
configs.update(configs_satisficing_extended())
return configs
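# Minimal sketch (not used elsewhere; the driver path and SAS file name are
# illustrative assumptions) of how one of the config entries above could be
# combined into a full planner command line.
def _example_command_line(config_name="astar_blind", sas_file="output.sas"):
    import sys
    driver = "fast-downward.py" # assumed to be resolvable from the cwd
    return [sys.executable, driver, sas_file] + configs_optimal_core()[config_name]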
| 7,787 | 33.157895 | 114 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/autodoc/autodoc.py
|
#! /usr/bin/env python3
import argparse
import logging
import os
from os.path import dirname, join
import re
import subprocess
import sys
import time
import xmlrpc.client as xmlrpclib
import markup
# How many seconds to wait after a failed request. Will be doubled after each failed request.
# Don't lower this below ~5, or we may get locked out for an hour.
sleep_time = 10
BOT_USERNAME = "XmlRpcBot"
PASSWORD_FILE = ".downward-xmlrpc.secret" # relative to this source file or in the home directory
WIKI_URL = "http://www.fast-downward.org"
DOC_PREFIX = "Doc/"
# a list of characters allowed to be used in doc titles
TITLE_WHITE_LIST = r"[\w\+-]" # match 'word characters' (including '_'), '+', and '-'
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
REPO_ROOT_DIR = os.path.dirname(os.path.dirname(SCRIPT_DIR))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--build", default="release")
parser.add_argument("--dry-run", action="store_true")
return parser.parse_args()
def read_password():
path = join(dirname(__file__), PASSWORD_FILE)
if not os.path.exists(path):
path = os.path.expanduser(join('~', PASSWORD_FILE))
try:
with open(path) as password_file:
return password_file.read().strip()
except OSError:
logging.critical("Could not find password file %s!\nIs it present?"
% PASSWORD_FILE)
sys.exit(1)
def connect():
wiki = xmlrpclib.ServerProxy(WIKI_URL + "?action=xmlrpc2", allow_none=True)
auth_token = wiki.getAuthToken(BOT_USERNAME, read_password())
multi_call = xmlrpclib.MultiCall(wiki)
multi_call.applyAuthToken(auth_token)
return multi_call
def get_all_titles_from_wiki():
multi_call = connect()
multi_call.getAllPages()
response = list(multi_call())
assert(response[0] == 'SUCCESS' and len(response) == 2)
return response[1]
def get_pages_from_wiki(titles):
multi_call = connect()
for title in titles:
multi_call.getPage(title)
response = list(multi_call())
assert(response[0] == 'SUCCESS')
return dict(zip(titles, response[1:]))
def send_pages(pages):
multi_call = connect()
for page_name, page_text in pages:
multi_call.putPage(page_name, page_text)
return multi_call()
def attempt(func, *args):
global sleep_time
try:
result = func(*args)
except xmlrpclib.Fault as error:
# This usually means the page content did not change.
logging.exception("Error: %s\nShould not happen anymore." % error)
sys.exit(1)
except xmlrpclib.ProtocolError as err:
logging.warning("Error: %s\n"
"Will retry after %s seconds." % (err.errcode, sleep_time))
# Retry after sleeping.
time.sleep(sleep_time)
sleep_time *= 2
return attempt(func, *args)
except Exception:
logging.exception("Unexpected error: %s" % sys.exc_info()[0])
sys.exit(1)
else:
for entry in result:
logging.info(repr(entry))
logging.info("Call to %s successful." % func.__name__)
return result
def insert_wiki_links(text, titles):
def make_link(m, prefix=''):
anchor = m.group('anchor') or ''
link_name = m.group('link')
target = prefix + link_name
if anchor:
target += '#' + anchor
link_name = anchor
link_name = link_name.replace("_", " ")
# Leave out the prefix in the link name.
result = m.group('before') + "[[" + target + "|" + link_name + "]]" + m.group('after')
return result
def make_doc_link(m):
return make_link(m, prefix=DOC_PREFIX)
re_link = r"(?P<before>\W)(?P<link>%s)(#(?P<anchor>" + TITLE_WHITE_LIST + r"+))?(?P<after>\W)"
doctitles = [title[4:] for title in titles if title.startswith(DOC_PREFIX)]
for key in doctitles:
text = re.sub(re_link % key, make_doc_link, text)
othertitles = [title for title in titles
if not title.startswith(DOC_PREFIX) and title not in doctitles]
for key in othertitles:
text = re.sub(re_link % key, make_link, text)
return text
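# Minimal illustration of insert_wiki_links(); the titles and text below are
# made up for demonstration and do not refer to real wiki pages.
def _demo_insert_wiki_links():
    titles = ["Doc/Evaluator", "ReleaseNotes"]
    text = "See Evaluator and ReleaseNotes for details."
    # "Evaluator" becomes a link to "Doc/Evaluator"; "ReleaseNotes" links to
    # the page of the same name.
    return insert_wiki_links(text, titles)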
def build_planner(build):
subprocess.check_call(["./build.py", build, "downward"], cwd=REPO_ROOT_DIR)
def get_pages_from_planner(build):
out = subprocess.check_output(
["./fast-downward.py", "--build", build, "--search", "--", "--help", "--txt2tags"],
cwd=REPO_ROOT_DIR).decode("utf-8")
# Split the output into tuples (title, markup_text).
pagesplitter = re.compile(r'>>>>CATEGORY: ([\w\s]+?)<<<<(.+?)>>>>CATEGORYEND<<<<', re.DOTALL)
pages = dict()
for title, markup_text in pagesplitter.findall(out):
document = markup.Document(date='')
document.add_text("<<TableOfContents>>")
document.add_text(markup_text)
rendered_text = document.render("moin").strip()
pages[DOC_PREFIX + title] = rendered_text
return pages
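# Minimal illustration (with made-up marker text) of how the category
# splitter above extracts (title, markup) pairs from the --txt2tags output.
def _demo_pagesplitter():
    sample = ">>>>CATEGORY: Evaluator<<<< some markup >>>>CATEGORYEND<<<<"
    splitter = re.compile(
        r'>>>>CATEGORY: ([\w\s]+?)<<<<(.+?)>>>>CATEGORYEND<<<<', re.DOTALL)
    return splitter.findall(sample) # -> [('Evaluator', ' some markup ')]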
def get_changed_pages(old_doc_pages, new_doc_pages, all_titles):
def add_page(title, text):
# Check if this page is new or changed.
if old_doc_pages.get(title, '') != text:
print(title, "changed")
changed_pages.append([title, text])
else:
print(title, "unchanged")
changed_pages = []
overview_lines = []
for title, text in sorted(new_doc_pages.items()):
overview_lines.append(" * [[" + title + "]]")
text = insert_wiki_links(text, all_titles)
add_page(title, text)
overview_title = DOC_PREFIX + "Overview"
overview_text = "\n".join(overview_lines)
add_page(overview_title, overview_text)
return changed_pages
if __name__ == '__main__':
args = parse_args()
logging.info("building planner...")
build_planner(args.build)
logging.info("getting new pages from planner...")
new_doc_pages = get_pages_from_planner(args.build)
if args.dry_run:
for title, content in sorted(new_doc_pages.items()):
print("=" * 25, title, "=" * 25)
print(content)
print()
print()
sys.exit()
logging.info("getting existing page titles from wiki...")
old_titles = attempt(get_all_titles_from_wiki)
old_doc_titles = [title for title in old_titles if title.startswith(DOC_PREFIX)]
all_titles = set(old_titles) | set(new_doc_pages.keys())
logging.info("getting existing doc page contents from wiki...")
old_doc_pages = attempt(get_pages_from_wiki, old_doc_titles)
logging.info("looking for changed pages...")
changed_pages = get_changed_pages(old_doc_pages, new_doc_pages, all_titles)
if changed_pages:
logging.info("sending changed pages...")
attempt(send_pages, changed_pages)
else:
logging.info("no changes found")
missing_titles = set(old_doc_titles) - set(new_doc_pages.keys()) - {DOC_PREFIX + "Overview"}
if missing_titles:
sys.exit(
"There are pages in the wiki documentation "
"that are not created by Fast Downward:\n" +
"\n".join(sorted(missing_titles)))
print("Done")
| 7,162 | 35.176768 | 98 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/autodoc/markup.py
|
import logging
from external import txt2tags
def _get_config(target):
    # Start from txt2tags' default configuration and adjust it per target.
    config = txt2tags.ConfigMaster()._get_defaults()
# The Pre (and Post) processing config is a list of lists:
# [ [this, that], [foo, bar], [patt, replace] ]
config['postproc'] = []
config['preproc'] = []
if target in ['xhtml', 'html']:
config['encoding'] = 'UTF-8' # document encoding
config['toc'] = 0
config['css-inside'] = 1
config['css-sugar'] = 1
# Allow line breaks, r'\\\\' are 2 \ for regexes
config['postproc'].append([r'\\\\', r'<br />'])
# {{Roter Text|color:red}} -> <span style="color:red">Roter Text</span>
config['postproc'].append([r'\{\{(.*?)\|color:(.+?)\}\}',
r'<span style="color:\2">\1</span>'])
elif target == 'tex':
config['style'] = []
config['style'].append('color')
# Do not clear the title page
config['postproc'].append([r'\\clearpage', r''])
config['postproc'].append([r'usepackage{color}',
r'usepackage[usenames,dvipsnames]{color}'])
config['encoding'] = 'utf8'
config['preproc'].append(['€', 'Euro'])
# Latex only allows whitespace and underscores in filenames if
# the filename is surrounded by "...". This is in turn only possible
# if the extension is omitted
config['preproc'].append([r'\[""', r'["""'])
config['preproc'].append([r'""\.', r'""".'])
# For images we have to omit the file:// prefix
config['postproc'].append([r'includegraphics\{(.*)"file://',
r'includegraphics{"\1'])
# Allow line breaks, r'\\\\' are 2 \ for regexes
config['postproc'].append([r'\$\\backslash\$\$\\backslash\$', r'\\\\'])
# {{Roter Text|color:red}} -> \textcolor{red}{Roter Text}
config['postproc'].append([r'\\{\\{(.*?)\$\|\$color:(.+?)\\}\\}',
r'\\textcolor{\2}{\1}'])
elif target == 'txt':
# Allow line breaks, r'\\\\' are 2 \ for regexes
config['postproc'].append([r'\\\\', '\n'])
return config
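# Minimal sketch (with an illustrative input string) of how a single
# pre/postprocessing entry from the config above is meant to be applied:
# each entry is a [regex_pattern, replacement] pair, here the line-break
# rule that turns '\\' markup into '<br />'.
def _demo_apply_postproc_rule(text="first line\\\\second line"):
    import re
    pattern, replacement = r'\\\\', r'<br />'
    return re.sub(pattern, replacement, text)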
class Document:
def __init__(self, title='', author='', date='%%date(%Y-%m-%d)'):
self.title = title
self.author = author
self.date = date
self.text = ''
def add_text(self, text):
self.text += text + '\n'
def __str__(self):
return self.text
def render(self, target, options=None):
# We always want xhtml
if target == 'html':
target = 'xhtml'
# Bug in txt2tags: Titles are not escaped
if target == 'tex':
self.title = self.title.replace('_', r'\_')
# Here is the marked body text, it must be a list.
txt = self.text.split('\n')
# Set the three header fields
headers = [self.title, self.author, self.date]
config = _get_config(target)
config['outfile'] = txt2tags.MODULEOUT # results as list
config['target'] = target
if options is not None:
config.update(options)
# Let's do the conversion
try:
headers = txt2tags.doHeader(headers, config)
body, toc = txt2tags.convert(txt, config)
footer = txt2tags.doFooter(config)
toc = txt2tags.toc_tagger(toc, config)
toc = txt2tags.toc_formatter(toc, config)
full_doc = headers + toc + body + footer
finished = txt2tags.finish_him(full_doc, config)
result = '\n'.join(finished)
        # Txt2tags error, show the message to the user
except txt2tags.error as err:
logging.error(err)
result = err
# Unknown error, show the traceback to the user
except Exception:
result = txt2tags.getUnknownErrorMessage()
logging.error(result)
return result
if __name__ == '__main__':
doc = Document('MyTitle', 'Max Mustermann')
doc.add_text('{{Roter Text|color:red}}')
print(doc)
print()
print(doc.render('tex'))
| 4,237 | 30.626866 | 79 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/autodoc/external/txt2tags.py
|
#!/usr/bin/env python3
# txt2tags - generic text conversion tool
# http://txt2tags.org
#
# Copyright 2001-2010 Aurelio Jargas
# Copyright 2010-2019 Jendrik Seipp
#
# This file is based on txt2tags version 2.6. The changes compared to
# the original version are:
#
# * use spaces instead of tabs
# * support Python 3.6+ in addition to Python 2.7
# * don't escape underscores in tagged and raw LaTeX text
# * don't use locale-dependent str.capitalize()
# * support SVG images
#
# License: http://www.gnu.org/licenses/gpl-2.0.txt
# Subversion: http://svn.txt2tags.org
# Bug tracker: http://bugs.txt2tags.org
#
########################################################################
#
# BORING CODE EXPLANATION AHEAD
#
# Just read it if you wish to understand how the txt2tags code works.
#
########################################################################
#
# The code that [1] parses the marked text is separated from the
# code that [2] insert the target tags.
#
# [1] made by: def convert()
# [2] made by: class BlockMaster
#
# The structures of the marked text are identified and its contents are
# extracted into a data holder (Python lists and dictionaries).
#
# When parsing the source file, the blocks (para, lists, quote, table)
# are opened with BlockMaster right when they are found. Their contents,
# which span several lines, are fed into a special holder on the
# BlockMaster instance. Only when the block is closed are the target tags
# inserted, for the full block as a whole, in one pass. This way, we
# have much better control over blocks than with the previous
# line-by-line approach.
#
# In other words, whenever inside a block, the parser *holds* the tag
# insertion process, waiting until the full block is read. That was
# needed primarily to close paragraphs for the XHTML target, but it
# proved to be a very good addition, improving many other parts of the
# processing.
#
# -------------------------------------------------------------------
#
# These important classes are all documented:
# CommandLine, SourceDocument, ConfigMaster, ConfigLines.
#
# There is a RAW Config format and all kind of configuration is first
# converted to this format. Then a generic method parses it.
#
# These functions get information about the input file(s) and take
# care of the init processing:
# get_infiles_config(), process_source_file() and convert_this_files()
#
########################################################################
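# A deliberately tiny, generic sketch of the "hold the block, tag on close"
# idea described above. This is NOT the real BlockMaster: the names and tags
# are made up purely to illustrate buffering a block's contents and emitting
# the target tags in one pass once the block is closed.
class _BlockIdeaSketch:
    def __init__(self, open_tag, close_tag):
        self.open_tag, self.close_tag = open_tag, close_tag
        self.held_lines = []
    def hold(self, line):
        # While inside the block, lines are only collected, never tagged.
        self.held_lines.append(line)
    def close(self):
        # Tags are emitted for the whole block at once, in a single pass.
        return [self.open_tag] + self.held_lines + [self.close_tag]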
#XXX Python coding warning
# Avoid common mistakes:
# - do NOT use newlist=list instead newlist=list[:]
# - do NOT use newdic=dic instead newdic=dic.copy()
# - do NOT use dic[key] instead dic.get(key)
# - do NOT use del dic[key] without has_key() before
#XXX Smart Image Align doesn't work if the image is a link
# Can't fix that because the image is expanded together with the
# link, at the linkbank filling moment. Only the image is passed
# to parse_images(), not the full line, so it is always 'middle'.
#XXX Paragraph separation is not valid inside Quote
# Quote will not have <p></p> inside; instead it closes and reopens
# the <blockquote>. This is really awkward in CSS when defining a
# different background color. Still don't know how to fix it.
#XXX TODO (maybe)
# New mark or macro which expands to an anchor full title.
# It is necessary to parse the full document in this order:
# DONE 1st scan: HEAD: get all settings, including %!includeconf
# DONE 2nd scan: BODY: expand includes & apply %!preproc
# 3rd scan: BODY: read titles and compose TOC info
# 4th scan: BODY: full parsing, expanding [#anchor] 1st
# Steps 2 and 3 can be done together, with no tag adding.
# Two complete body scans will be *slow*; not sure if it's worth it.
# One solution may be to add the titles as postproc rules.
##############################################################################
# User config (1=ON, 0=OFF)
USE_I18N = 1 # use gettext for i18ned messages? (default is 1)
COLOR_DEBUG = 1 # show debug messages in colors? (default is 1)
BG_LIGHT = 0 # your terminal background color is light (default is 0)
HTML_LOWER = 0 # use lowercased HTML tags instead upper? (default is 0)
##############################################################################
# These are all the core Python modules used by txt2tags (KISS!)
import re, os, sys, time, getopt
# The CSV module is new in Python version 2.3
try:
import csv
except ImportError:
csv = None
# Program information
my_url = 'http://txt2tags.org'
my_name = 'txt2tags'
my_email = '[email protected]'
my_version = '2.6'
# i18n - just use if available
if USE_I18N:
try:
import gettext
# If your locale dir is different, change it here
cat = gettext.Catalog('txt2tags',localedir='/usr/share/locale/')
_ = cat.gettext
except:
_ = lambda x:x
else:
_ = lambda x:x
# FLAGS : the conversion related flags , may be used in %!options
# OPTIONS : the conversion related options, may be used in %!options
# ACTIONS : the other behavior modifiers, valid on command line only
# MACROS : the valid macros with their default values for formatting
# SETTINGS: global miscellaneous settings, valid on RC file only
# NO_TARGET: actions that don't require a target specification
# NO_MULTI_INPUT: actions that don't accept more than one input file
# CONFIG_KEYWORDS: the valid %!key:val keywords
#
# FLAGS and OPTIONS are configs that affect the converted document.
# They usually have also a --no-<option> to turn them OFF.
#
# ACTIONS are needed because, when processing multiple input files, strange
# behavior could result, such as using the command line interface for the
# first file and the GUI for the second. There is no --no-<action>.
# --version and --help inside %!options are also odd.
#
TARGETS = 'html xhtml sgml dbk tex lout man mgp wiki gwiki doku pmw moin pm6 txt art adoc creole'.split()
TARGETS.sort()
FLAGS = {'headers' :1 , 'enum-title' :0 , 'mask-email' :0 ,
'toc-only' :0 , 'toc' :0 , 'rc' :1 ,
'css-sugar' :0 , 'css-suggar' :0 , 'css-inside' :0 ,
'quiet' :0 , 'slides' :0 }
OPTIONS = {'target' :'', 'toc-level' :3 , 'style' :'',
'infile' :'', 'outfile' :'', 'encoding' :'',
'config-file':'', 'split' :0 , 'lang' :'',
'width' :0 , 'height' :0 , 'art-chars' :'',
'show-config-value':''}
ACTIONS = {'help' :0 , 'version' :0 , 'gui' :0 ,
'verbose' :0 , 'debug' :0 , 'dump-config':0 ,
'dump-source':0 , 'targets' :0}
MACROS = {'date' : '%Y%m%d', 'infile': '%f',
'mtime': '%Y%m%d', 'outfile': '%f'}
SETTINGS = {} # for future use
NO_TARGET = ['help', 'version', 'gui', 'toc-only', 'dump-config', 'dump-source', 'targets']
NO_MULTI_INPUT = ['gui','dump-config','dump-source']
CONFIG_KEYWORDS = [
'target', 'encoding', 'style', 'options', 'preproc','postproc',
'guicolors']
TARGET_NAMES = {
'html' : _('HTML page'),
'xhtml' : _('XHTML page'),
'sgml' : _('SGML document'),
'dbk' : _('DocBook document'),
'tex' : _('LaTeX document'),
'lout' : _('Lout document'),
'man' : _('UNIX Manual page'),
'mgp' : _('MagicPoint presentation'),
'wiki' : _('Wikipedia page'),
'gwiki' : _('Google Wiki page'),
'doku' : _('DokuWiki page'),
'pmw' : _('PmWiki page'),
'moin' : _('MoinMoin page'),
'pm6' : _('PageMaker document'),
'txt' : _('Plain Text'),
'art' : _('ASCII Art text'),
'adoc' : _('AsciiDoc document'),
'creole' : _('Creole 1.0 document')
}
DEBUG = 0 # do not edit here, please use --debug
VERBOSE = 0 # do not edit here, please use -v, -vv or -vvv
QUIET = 0 # do not edit here, please use --quiet
GUI = 0 # do not edit here, please use --gui
AUTOTOC = 1 # do not edit here, please use --no-toc or %%toc
DFT_TEXT_WIDTH = 72 # do not edit here, please use --width
DFT_SLIDE_WIDTH = 80 # do not edit here, please use --width
DFT_SLIDE_HEIGHT = 25 # do not edit here, please use --height
# ASCII Art config
AA_KEYS = 'corner border side bar1 bar2 level2 level3 level4 level5'.split()
AA_VALUES = '+-|-==-^"' # do not edit here, please use --art-chars
AA = dict(zip(AA_KEYS, AA_VALUES))
AA_COUNT = 0
AA_TITLE = ''
RC_RAW = []
CMDLINE_RAW = []
CONF = {}
BLOCK = None
TITLE = None
regex = {}
TAGS = {}
rules = {}
# Gui globals
askopenfilename = None
showinfo = None
showwarning = None
showerror = None
lang = 'english'
TARGET = ''
STDIN = STDOUT = '-'
MODULEIN = MODULEOUT = '-module-'
ESCCHAR = '\x00'
SEPARATOR = '\x01'
LISTNAMES = {'-':'list', '+':'numlist', ':':'deflist'}
LINEBREAK = {'default':'\n', 'win':'\r\n', 'mac':'\r'}
# Platform specific settings
LB = LINEBREAK.get(sys.platform[:3]) or LINEBREAK['default']
VERSIONSTR = _("%s version %s <%s>")%(my_name,my_version,my_url)
USAGE = '\n'.join([
'',
_("Usage: %s [OPTIONS] [infile.t2t ...]") % my_name,
'',
_(" --targets print a list of all the available targets and exit"),
_(" -t, --target=TYPE set target document type. currently supported:"),
' %s,' % ', '.join(TARGETS[:9]),
' %s' % ', '.join(TARGETS[9:]),
_(" -i, --infile=FILE set FILE as the input file name ('-' for STDIN)"),
_(" -o, --outfile=FILE set FILE as the output file name ('-' for STDOUT)"),
_(" --encoding=ENC set target file encoding (utf-8, iso-8859-1, etc)"),
_(" --toc add an automatic Table of Contents to the output"),
_(" --toc-level=N set maximum TOC level (depth) to N"),
_(" --toc-only print the Table of Contents and exit"),
_(" -n, --enum-title enumerate all titles as 1, 1.1, 1.1.1, etc"),
_(" --style=FILE use FILE as the document style (like HTML CSS)"),
_(" --css-sugar insert CSS-friendly tags for HTML/XHTML"),
_(" --css-inside insert CSS file contents inside HTML/XHTML headers"),
_(" -H, --no-headers suppress header and footer from the output"),
_(" --mask-email hide email from spam robots. [email protected] turns <x (a) y z>"),
_(" --slides format output as presentation slides (used by -t art)"),
_(" --width=N set the output's width to N columns (used by -t art)"),
_(" --height=N set the output's height to N rows (used by -t art)"),
_(" -C, --config-file=F read configuration from file F"),
_(" --gui invoke Graphical Tk Interface"),
_(" -q, --quiet quiet mode, suppress all output (except errors)"),
_(" -v, --verbose print informative messages during conversion"),
_(" -h, --help print this help information and exit"),
_(" -V, --version print program version and exit"),
_(" --dump-config print all the configuration found and exit"),
_(" --dump-source print the document source, with includes expanded"),
'',
_("Turn OFF options:"),
" --no-css-inside, --no-css-sugar, --no-dump-config, --no-dump-source,",
" --no-encoding, --no-enum-title, --no-headers, --no-infile,",
" --no-mask-email, --no-outfile, --no-quiet, --no-rc, --no-slides,",
" --no-style, --no-targets, --no-toc, --no-toc-only",
'',
_("Example:"),
" %s -t html --toc %s" % (my_name, _("file.t2t")),
'',
_("By default, converted output is saved to 'infile.<target>'."),
_("Use --outfile to force an output file name."),
_("If input file is '-', reads from STDIN."),
_("If output file is '-', dumps output to STDOUT."),
'',
my_url,
''
])
##############################################################################
# Here is all the target's templates
# You may edit them to fit your needs
# - the %(HEADERn)s strings represent the Header lines
# - the %(STYLE)s string is changed by --style contents
# - the %(ENCODING)s string is changed by --encoding contents
# - if any of the above is empty, the full line is removed
# - use %% to represent a literal %
#
HEADER_TEMPLATE = {
'art':"""
Fake template to respect the general process.
""",
'txt': """\
%(HEADER1)s
%(HEADER2)s
%(HEADER3)s
""",
'sgml': """\
<!doctype linuxdoc system>
<article>
<title>%(HEADER1)s
<author>%(HEADER2)s
<date>%(HEADER3)s
""",
'html': """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<HTML>
<HEAD>
<META NAME="generator" CONTENT="http://txt2tags.org">
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=%(ENCODING)s">
<LINK REL="stylesheet" TYPE="text/css" HREF="%(STYLE)s">
<TITLE>%(HEADER1)s</TITLE>
</HEAD><BODY BGCOLOR="white" TEXT="black">
<CENTER>
<H1>%(HEADER1)s</H1>
<FONT SIZE="4"><I>%(HEADER2)s</I></FONT><BR>
<FONT SIZE="4">%(HEADER3)s</FONT>
</CENTER>
""",
'htmlcss': """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<HTML>
<HEAD>
<META NAME="generator" CONTENT="http://txt2tags.org">
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=%(ENCODING)s">
<LINK REL="stylesheet" TYPE="text/css" HREF="%(STYLE)s">
<TITLE>%(HEADER1)s</TITLE>
</HEAD>
<BODY>
<DIV CLASS="header" ID="header">
<H1>%(HEADER1)s</H1>
<H2>%(HEADER2)s</H2>
<H3>%(HEADER3)s</H3>
</DIV>
""",
'xhtml': """\
<?xml version="1.0"
encoding="%(ENCODING)s"
?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"\
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(HEADER1)s</title>
<meta name="generator" content="http://txt2tags.org" />
<link rel="stylesheet" type="text/css" href="%(STYLE)s" />
</head>
<body bgcolor="white" text="black">
<div align="center">
<h1>%(HEADER1)s</h1>
<h2>%(HEADER2)s</h2>
<h3>%(HEADER3)s</h3>
</div>
""",
'xhtmlcss': """\
<?xml version="1.0"
encoding="%(ENCODING)s"
?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"\
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(HEADER1)s</title>
<meta name="generator" content="http://txt2tags.org" />
<link rel="stylesheet" type="text/css" href="%(STYLE)s" />
</head>
<body>
<div class="header" id="header">
<h1>%(HEADER1)s</h1>
<h2>%(HEADER2)s</h2>
<h3>%(HEADER3)s</h3>
</div>
""",
'dbk': """\
<?xml version="1.0"
encoding="%(ENCODING)s"
?>
<!DOCTYPE article PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN"\
"docbook/dtd/xml/4.5/docbookx.dtd">
<article lang="en">
<articleinfo>
<title>%(HEADER1)s</title>
<authorgroup>
<author><othername>%(HEADER2)s</othername></author>
</authorgroup>
<date>%(HEADER3)s</date>
</articleinfo>
""",
'man': """\
.TH "%(HEADER1)s" 1 "%(HEADER3)s" "%(HEADER2)s"
""",
# TODO style to <HR>
'pm6': """\
<PMTags1.0 win><C-COLORTABLE ("Preto" 1 0 0 0)
><@Normal=
<FONT "Times New Roman"><CCOLOR "Preto"><SIZE 11>
<HORIZONTAL 100><LETTERSPACE 0><CTRACK 127><CSSIZE 70><C+SIZE 58.3>
<C-POSITION 33.3><C+POSITION 33.3><P><CBASELINE 0><CNOBREAK 0><CLEADING -0.05>
<GGRID 0><GLEFT 7.2><GRIGHT 0><GFIRST 0><G+BEFORE 7.2><G+AFTER 0>
<GALIGNMENT "justify"><GMETHOD "proportional"><G& "ENGLISH">
<GPAIRS 12><G%% 120><GKNEXT 0><GKWIDOW 0><GKORPHAN 0><GTABS $>
<GHYPHENATION 2 34 0><GWORDSPACE 75 100 150><GSPACE -5 0 25>
><@Bullet=<@-PARENT "Normal"><FONT "Abadi MT Condensed Light">
<GLEFT 14.4><G+BEFORE 2.15><G%% 110><GTABS(25.2 l "")>
><@PreFormat=<@-PARENT "Normal"><FONT "Lucida Console"><SIZE 8><CTRACK 0>
<GLEFT 0><G+BEFORE 0><GALIGNMENT "left"><GWORDSPACE 100 100 100><GSPACE 0 0 0>
><@Title1=<@-PARENT "Normal"><FONT "Arial"><SIZE 14><B>
<GCONTENTS><GLEFT 0><G+BEFORE 0><GALIGNMENT "left">
><@Title2=<@-PARENT "Title1"><SIZE 12><G+BEFORE 3.6>
><@Title3=<@-PARENT "Title1"><SIZE 10><GLEFT 7.2><G+BEFORE 7.2>
><@Title4=<@-PARENT "Title3">
><@Title5=<@-PARENT "Title3">
><@Quote=<@-PARENT "Normal"><SIZE 10><I>>
%(HEADER1)s
%(HEADER2)s
%(HEADER3)s
""",
'mgp': """\
#!/usr/X11R6/bin/mgp -t 90
%%deffont "normal" xfont "utopia-medium-r", charset "iso8859-1"
%%deffont "normal-i" xfont "utopia-medium-i", charset "iso8859-1"
%%deffont "normal-b" xfont "utopia-bold-r" , charset "iso8859-1"
%%deffont "normal-bi" xfont "utopia-bold-i" , charset "iso8859-1"
%%deffont "mono" xfont "courier-medium-r", charset "iso8859-1"
%%default 1 size 5
%%default 2 size 8, fore "yellow", font "normal-b", center
%%default 3 size 5, fore "white", font "normal", left, prefix " "
%%tab 1 size 4, vgap 30, prefix " ", icon arc "red" 40, leftfill
%%tab 2 prefix " ", icon arc "orange" 40, leftfill
%%tab 3 prefix " ", icon arc "brown" 40, leftfill
%%tab 4 prefix " ", icon arc "darkmagenta" 40, leftfill
%%tab 5 prefix " ", icon arc "magenta" 40, leftfill
%%%%------------------------- end of headers -----------------------------
%%page
%%size 10, center, fore "yellow"
%(HEADER1)s
%%font "normal-i", size 6, fore "white", center
%(HEADER2)s
%%font "mono", size 7, center
%(HEADER3)s
""",
'moin': """\
'''%(HEADER1)s'''
''%(HEADER2)s''
%(HEADER3)s
""",
'gwiki': """\
*%(HEADER1)s*
%(HEADER2)s
_%(HEADER3)s_
""",
'adoc': """\
= %(HEADER1)s
%(HEADER2)s
%(HEADER3)s
""",
'doku': """\
===== %(HEADER1)s =====
**//%(HEADER2)s//**
//%(HEADER3)s//
""",
'pmw': """\
(:Title %(HEADER1)s:)
(:Description %(HEADER2)s:)
(:Summary %(HEADER3)s:)
""",
'wiki': """\
'''%(HEADER1)s'''
%(HEADER2)s
''%(HEADER3)s''
""",
'tex': \
r"""\documentclass{article}
\usepackage{graphicx}
\usepackage{paralist} %% needed for compact lists
\usepackage[normalem]{ulem} %% needed by strike
\usepackage[urlcolor=blue,colorlinks=true]{hyperref}
\usepackage[%(ENCODING)s]{inputenc} %% char encoding
\usepackage{%(STYLE)s} %% user defined
\title{%(HEADER1)s}
\author{%(HEADER2)s}
\begin{document}
\date{%(HEADER3)s}
\maketitle
\clearpage
""",
'lout': """\
@SysInclude { doc }
@Document
@InitialFont { Times Base 12p } # Times, Courier, Helvetica, ...
@PageOrientation { Portrait } # Portrait, Landscape
@ColumnNumber { 1 } # Number of columns (2, 3, ...)
@PageHeaders { Simple } # None, Simple, Titles, NoTitles
@InitialLanguage { English } # German, French, Portuguese, ...
@OptimizePages { Yes } # Yes/No smart page break feature
//
@Text @Begin
@Display @Heading { %(HEADER1)s }
@Display @I { %(HEADER2)s }
@Display { %(HEADER3)s }
#@NP # Break page after Headers
""",
'creole': """\
%(HEADER1)s
%(HEADER2)s
%(HEADER3)s
"""
# @SysInclude { tbl } # Tables support
# setup: @MakeContents { Yes } # show TOC
# setup: @SectionGap # break page at each section
}
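# Minimal sketch (with illustrative header values) of how the %(HEADERn)s
# placeholders in the templates above are filled via %-formatting; the real
# code also substitutes %(STYLE)s and %(ENCODING)s and removes lines whose
# value is empty.
def _demo_fill_txt_header():
    return HEADER_TEMPLATE['txt'] % {
        'HEADER1': 'My Title',
        'HEADER2': 'Author Name',
        'HEADER3': '2024-01-01',
    }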
##############################################################################
def getTags(config):
"Returns all the known tags for the specified target"
keys = """
title1 numtitle1
title2 numtitle2
title3 numtitle3
title4 numtitle4
title5 numtitle5
title1Open title1Close
title2Open title2Close
title3Open title3Close
title4Open title4Close
title5Open title5Close
blocktitle1Open blocktitle1Close
blocktitle2Open blocktitle2Close
blocktitle3Open blocktitle3Close
paragraphOpen paragraphClose
blockVerbOpen blockVerbClose
blockQuoteOpen blockQuoteClose blockQuoteLine
blockCommentOpen blockCommentClose
fontMonoOpen fontMonoClose
fontBoldOpen fontBoldClose
fontItalicOpen fontItalicClose
fontUnderlineOpen fontUnderlineClose
fontStrikeOpen fontStrikeClose
listOpen listClose
listOpenCompact listCloseCompact
listItemOpen listItemClose listItemLine
numlistOpen numlistClose
numlistOpenCompact numlistCloseCompact
numlistItemOpen numlistItemClose numlistItemLine
deflistOpen deflistClose
deflistOpenCompact deflistCloseCompact
deflistItem1Open deflistItem1Close
deflistItem2Open deflistItem2Close deflistItem2LinePrefix
bar1 bar2
url urlMark
email emailMark
img imgAlignLeft imgAlignRight imgAlignCenter
_imgAlignLeft _imgAlignRight _imgAlignCenter
tableOpen tableClose
_tableBorder _tableAlignLeft _tableAlignCenter
tableRowOpen tableRowClose tableRowSep
tableTitleRowOpen tableTitleRowClose
tableCellOpen tableCellClose tableCellSep
tableTitleCellOpen tableTitleCellClose tableTitleCellSep
_tableColAlignLeft _tableColAlignRight _tableColAlignCenter
_tableCellAlignLeft _tableCellAlignRight _tableCellAlignCenter
_tableCellColSpan tableColAlignSep
_tableCellMulticolOpen
_tableCellMulticolClose
bodyOpen bodyClose
cssOpen cssClose
tocOpen tocClose TOC
anchor
comment
pageBreak
EOD
""".split()
# TIP: \a represents the current text inside the mark
# TIP: ~A~, ~B~ and ~C~ are expanded to other tags parts
alltags = {
'art': {
'title1' : '\a' ,
'title2' : '\a' ,
'title3' : '\a' ,
'title4' : '\a' ,
'title5' : '\a' ,
'blockQuoteLine' : '\t' ,
'listItemOpen' : '- ' ,
'numlistItemOpen' : '\a. ' ,
'bar1' : aa_line(AA['bar1'], config['width']),
'bar2' : aa_line(AA['bar2'], config['width']),
'url' : '\a' ,
'urlMark' : '\a (\a)' ,
'email' : '\a' ,
'emailMark' : '\a (\a)' ,
'img' : '[\a]' ,
},
'txt': {
'title1' : ' \a' ,
'title2' : '\t\a' ,
'title3' : '\t\t\a' ,
'title4' : '\t\t\t\a' ,
'title5' : '\t\t\t\t\a',
'blockQuoteLine' : '\t' ,
'listItemOpen' : '- ' ,
'numlistItemOpen' : '\a. ' ,
'bar1' : '\a' ,
'url' : '\a' ,
'urlMark' : '\a (\a)' ,
'email' : '\a' ,
'emailMark' : '\a (\a)' ,
'img' : '[\a]' ,
},
'html': {
'paragraphOpen' : '<P>' ,
'paragraphClose' : '</P>' ,
'title1' : '~A~<H1>\a</H1>' ,
'title2' : '~A~<H2>\a</H2>' ,
'title3' : '~A~<H3>\a</H3>' ,
'title4' : '~A~<H4>\a</H4>' ,
'title5' : '~A~<H5>\a</H5>' ,
'anchor' : '<A NAME="\a"></A>\n',
'blockVerbOpen' : '<PRE>' ,
'blockVerbClose' : '</PRE>' ,
'blockQuoteOpen' : '<BLOCKQUOTE>' ,
'blockQuoteClose' : '</BLOCKQUOTE>' ,
'fontMonoOpen' : '<CODE>' ,
'fontMonoClose' : '</CODE>' ,
'fontBoldOpen' : '<B>' ,
'fontBoldClose' : '</B>' ,
'fontItalicOpen' : '<I>' ,
'fontItalicClose' : '</I>' ,
'fontUnderlineOpen' : '<U>' ,
'fontUnderlineClose' : '</U>' ,
'fontStrikeOpen' : '<S>' ,
'fontStrikeClose' : '</S>' ,
'listOpen' : '<UL>' ,
'listClose' : '</UL>' ,
'listItemOpen' : '<LI>' ,
'numlistOpen' : '<OL>' ,
'numlistClose' : '</OL>' ,
'numlistItemOpen' : '<LI>' ,
'deflistOpen' : '<DL>' ,
'deflistClose' : '</DL>' ,
'deflistItem1Open' : '<DT>' ,
'deflistItem1Close' : '</DT>' ,
'deflistItem2Open' : '<DD>' ,
'bar1' : '<HR NOSHADE SIZE=1>' ,
'bar2' : '<HR NOSHADE SIZE=5>' ,
'url' : '<A HREF="\a">\a</A>' ,
'urlMark' : '<A HREF="\a">\a</A>' ,
'email' : '<A HREF="mailto:\a">\a</A>' ,
'emailMark' : '<A HREF="mailto:\a">\a</A>' ,
'img' : '<IMG~A~ SRC="\a" BORDER="0" ALT="">',
'_imgAlignLeft' : ' ALIGN="left"' ,
'_imgAlignCenter' : ' ALIGN="middle"',
'_imgAlignRight' : ' ALIGN="right"' ,
'tableOpen' : '<TABLE~A~~B~ CELLPADDING="4">',
'tableClose' : '</TABLE>' ,
'tableRowOpen' : '<TR>' ,
'tableRowClose' : '</TR>' ,
'tableCellOpen' : '<TD~A~~S~>' ,
'tableCellClose' : '</TD>' ,
'tableTitleCellOpen' : '<TH~S~>' ,
'tableTitleCellClose' : '</TH>' ,
'_tableBorder' : ' BORDER="1"' ,
'_tableAlignCenter' : ' ALIGN="center"',
'_tableCellAlignRight' : ' ALIGN="right"' ,
'_tableCellAlignCenter': ' ALIGN="center"',
'_tableCellColSpan' : ' COLSPAN="\a"' ,
'cssOpen' : '<STYLE TYPE="text/css">',
'cssClose' : '</STYLE>' ,
'comment' : '<!-- \a -->' ,
'EOD' : '</BODY></HTML>'
},
#TIP xhtml inherits all HTML definitions (lowercased)
#TIP http://www.w3.org/TR/xhtml1/#guidelines
#TIP http://www.htmlref.com/samples/Chapt17/17_08.htm
'xhtml': {
'listItemClose' : '</li>' ,
'numlistItemClose' : '</li>' ,
'deflistItem2Close' : '</dd>' ,
'bar1' : '<hr class="light" />',
'bar2' : '<hr class="heavy" />',
'anchor' : '<a id="\a" name="\a"></a>\n',
'img' : '<img~A~ src="\a" border="0" alt=""/>',
},
'sgml': {
'paragraphOpen' : '<p>' ,
'title1' : '<sect>\a~A~<p>' ,
'title2' : '<sect1>\a~A~<p>' ,
'title3' : '<sect2>\a~A~<p>' ,
'title4' : '<sect3>\a~A~<p>' ,
'title5' : '<sect4>\a~A~<p>' ,
'anchor' : '<label id="\a">' ,
'blockVerbOpen' : '<tscreen><verb>' ,
'blockVerbClose' : '</verb></tscreen>' ,
'blockQuoteOpen' : '<quote>' ,
'blockQuoteClose' : '</quote>' ,
'fontMonoOpen' : '<tt>' ,
'fontMonoClose' : '</tt>' ,
'fontBoldOpen' : '<bf>' ,
'fontBoldClose' : '</bf>' ,
'fontItalicOpen' : '<em>' ,
'fontItalicClose' : '</em>' ,
'fontUnderlineOpen' : '<bf><em>' ,
'fontUnderlineClose' : '</em></bf>' ,
'listOpen' : '<itemize>' ,
'listClose' : '</itemize>' ,
'listItemOpen' : '<item>' ,
'numlistOpen' : '<enum>' ,
'numlistClose' : '</enum>' ,
'numlistItemOpen' : '<item>' ,
'deflistOpen' : '<descrip>' ,
'deflistClose' : '</descrip>' ,
'deflistItem1Open' : '<tag>' ,
'deflistItem1Close' : '</tag>' ,
'bar1' : '<!-- \a -->' ,
'url' : '<htmlurl url="\a" name="\a">' ,
'urlMark' : '<htmlurl url="\a" name="\a">' ,
'email' : '<htmlurl url="mailto:\a" name="\a">' ,
'emailMark' : '<htmlurl url="mailto:\a" name="\a">' ,
'img' : '<figure><ph vspace=""><img src="\a"></figure>',
'tableOpen' : '<table><tabular ca="~C~">' ,
'tableClose' : '</tabular></table>' ,
'tableRowSep' : '<rowsep>' ,
'tableCellSep' : '<colsep>' ,
'_tableColAlignLeft' : 'l' ,
'_tableColAlignRight' : 'r' ,
'_tableColAlignCenter' : 'c' ,
'comment' : '<!-- \a -->' ,
'TOC' : '<toc>' ,
'EOD' : '</article>'
},
'dbk': {
'paragraphOpen' : '<para>' ,
'paragraphClose' : '</para>' ,
'title1Open' : '~A~<sect1><title>\a</title>' ,
'title1Close' : '</sect1>' ,
'title2Open' : '~A~ <sect2><title>\a</title>' ,
'title2Close' : ' </sect2>' ,
'title3Open' : '~A~ <sect3><title>\a</title>' ,
'title3Close' : ' </sect3>' ,
'title4Open' : '~A~ <sect4><title>\a</title>' ,
'title4Close' : ' </sect4>' ,
'title5Open' : '~A~ <sect5><title>\a</title>',
'title5Close' : ' </sect5>' ,
'anchor' : '<anchor id="\a"/>\n' ,
'blockVerbOpen' : '<programlisting>' ,
'blockVerbClose' : '</programlisting>' ,
'blockQuoteOpen' : '<blockquote><para>' ,
'blockQuoteClose' : '</para></blockquote>' ,
'fontMonoOpen' : '<code>' ,
'fontMonoClose' : '</code>' ,
'fontBoldOpen' : '<emphasis role="bold">' ,
'fontBoldClose' : '</emphasis>' ,
'fontItalicOpen' : '<emphasis>' ,
'fontItalicClose' : '</emphasis>' ,
'fontUnderlineOpen' : '<emphasis role="underline">' ,
'fontUnderlineClose' : '</emphasis>' ,
# 'fontStrikeOpen' : '<emphasis role="strikethrough">' , # Don't know
# 'fontStrikeClose' : '</emphasis>' ,
'listOpen' : '<itemizedlist>' ,
'listClose' : '</itemizedlist>' ,
'listItemOpen' : '<listitem><para>' ,
'listItemClose' : '</para></listitem>' ,
'numlistOpen' : '<orderedlist numeration="arabic">' ,
'numlistClose' : '</orderedlist>' ,
'numlistItemOpen' : '<listitem><para>' ,
'numlistItemClose' : '</para></listitem>' ,
'deflistOpen' : '<variablelist>' ,
'deflistClose' : '</variablelist>' ,
'deflistItem1Open' : '<varlistentry><term>' ,
'deflistItem1Close' : '</term>' ,
'deflistItem2Open' : '<listitem><para>' ,
'deflistItem2Close' : '</para></listitem></varlistentry>' ,
# 'bar1' : '<>' , # Don't know
# 'bar2' : '<>' , # Don't know
'url' : '<ulink url="\a">\a</ulink>' ,
'urlMark' : '<ulink url="\a">\a</ulink>' ,
'email' : '<email>\a</email>' ,
'emailMark' : '<email>\a</email>' ,
'img' : '<mediaobject><imageobject><imagedata fileref="\a"/></imageobject></mediaobject>',
# '_imgAlignLeft' : '' , # Don't know
# '_imgAlignCenter' : '' , # Don't know
# '_imgAlignRight' : '' , # Don't know
# 'tableOpen' : '<informaltable><tgroup cols=""><tbody>', # Don't work, need to know number of cols
# 'tableClose' : '</tbody></tgroup></informaltable>' ,
# 'tableRowOpen' : '<row>' ,
# 'tableRowClose' : '</row>' ,
# 'tableCellOpen' : '<entry>' ,
# 'tableCellClose' : '</entry>' ,
# 'tableTitleRowOpen' : '<thead>' ,
# 'tableTitleRowClose' : '</thead>' ,
# '_tableBorder' : ' frame="all"' ,
# '_tableAlignCenter' : ' align="center"' ,
# '_tableCellAlignRight' : ' align="right"' ,
# '_tableCellAlignCenter': ' align="center"' ,
# '_tableCellColSpan' : ' COLSPAN="\a"' ,
'TOC' : '<index/>' ,
'comment' : '<!-- \a -->' ,
'EOD' : '</article>'
},
'tex': {
'title1' : '~A~\section*{\a}' ,
'title2' : '~A~\\subsection*{\a}' ,
'title3' : '~A~\\subsubsection*{\a}',
# title 4/5: DIRTY: para+BF+\\+\n
'title4' : '~A~\\paragraph{}\\textbf{\a}\\\\\n',
'title5' : '~A~\\paragraph{}\\textbf{\a}\\\\\n',
'numtitle1' : '\n~A~\section{\a}' ,
'numtitle2' : '~A~\\subsection{\a}' ,
'numtitle3' : '~A~\\subsubsection{\a}' ,
'anchor' : '\\hypertarget{\a}{}\n' ,
'blockVerbOpen' : '\\begin{verbatim}' ,
'blockVerbClose' : '\\end{verbatim}' ,
'blockQuoteOpen' : '\\begin{quotation}' ,
'blockQuoteClose' : '\\end{quotation}' ,
'fontMonoOpen' : '\\texttt{' ,
'fontMonoClose' : '}' ,
'fontBoldOpen' : '\\textbf{' ,
'fontBoldClose' : '}' ,
'fontItalicOpen' : '\\textit{' ,
'fontItalicClose' : '}' ,
'fontUnderlineOpen' : '\\underline{' ,
'fontUnderlineClose' : '}' ,
'fontStrikeOpen' : '\\sout{' ,
'fontStrikeClose' : '}' ,
'listOpen' : '\\begin{itemize}' ,
'listClose' : '\\end{itemize}' ,
'listOpenCompact' : '\\begin{compactitem}',
'listCloseCompact' : '\\end{compactitem}' ,
'listItemOpen' : '\\item ' ,
'numlistOpen' : '\\begin{enumerate}' ,
'numlistClose' : '\\end{enumerate}' ,
'numlistOpenCompact' : '\\begin{compactenum}',
'numlistCloseCompact' : '\\end{compactenum}' ,
'numlistItemOpen' : '\\item ' ,
'deflistOpen' : '\\begin{description}',
'deflistClose' : '\\end{description}' ,
'deflistOpenCompact' : '\\begin{compactdesc}',
'deflistCloseCompact' : '\\end{compactdesc}' ,
'deflistItem1Open' : '\\item[' ,
'deflistItem1Close' : ']' ,
'bar1' : '\\hrulefill{}' ,
'bar2' : '\\rule{\linewidth}{1mm}',
'url' : '\\htmladdnormallink{\a}{\a}',
'urlMark' : '\\htmladdnormallink{\a}{\a}',
'email' : '\\htmladdnormallink{\a}{mailto:\a}',
'emailMark' : '\\htmladdnormallink{\a}{mailto:\a}',
'img' : '\\includegraphics{\a}',
'tableOpen' : '\\begin{center}\\begin{tabular}{|~C~|}',
'tableClose' : '\\end{tabular}\\end{center}',
'tableRowOpen' : '\\hline ' ,
'tableRowClose' : ' \\\\' ,
'tableCellSep' : ' & ' ,
'_tableColAlignLeft' : 'l' ,
'_tableColAlignRight' : 'r' ,
'_tableColAlignCenter' : 'c' ,
'_tableCellAlignLeft' : 'l' ,
'_tableCellAlignRight' : 'r' ,
'_tableCellAlignCenter': 'c' ,
'_tableCellColSpan' : '\a' ,
'_tableCellMulticolOpen' : '\\multicolumn{\a}{|~C~|}{',
'_tableCellMulticolClose' : '}',
'tableColAlignSep' : '|' ,
'comment' : '% \a' ,
'TOC' : '\\tableofcontents',
'pageBreak' : '\\clearpage',
'EOD' : '\\end{document}'
},
'lout': {
'paragraphOpen' : '@LP' ,
'blockTitle1Open' : '@BeginSections' ,
'blockTitle1Close' : '@EndSections' ,
'blockTitle2Open' : ' @BeginSubSections' ,
'blockTitle2Close' : ' @EndSubSections' ,
'blockTitle3Open' : ' @BeginSubSubSections' ,
'blockTitle3Close' : ' @EndSubSubSections' ,
'title1Open' : '~A~@Section @Title { \a } @Begin',
'title1Close' : '@End @Section' ,
'title2Open' : '~A~ @SubSection @Title { \a } @Begin',
'title2Close' : ' @End @SubSection' ,
'title3Open' : '~A~ @SubSubSection @Title { \a } @Begin',
'title3Close' : ' @End @SubSubSection' ,
'title4Open' : '~A~@LP @LeftDisplay @B { \a }',
'title5Open' : '~A~@LP @LeftDisplay @B { \a }',
'anchor' : '@Tag { \a }\n' ,
'blockVerbOpen' : '@LP @ID @F @RawVerbatim @Begin',
'blockVerbClose' : '@End @RawVerbatim' ,
'blockQuoteOpen' : '@QD {' ,
'blockQuoteClose' : '}' ,
# enclosed inside {} to deal with joined**words**
'fontMonoOpen' : '{@F {' ,
'fontMonoClose' : '}}' ,
'fontBoldOpen' : '{@B {' ,
'fontBoldClose' : '}}' ,
'fontItalicOpen' : '{@II {' ,
'fontItalicClose' : '}}' ,
'fontUnderlineOpen' : '{@Underline{' ,
'fontUnderlineClose' : '}}' ,
# the full form is more readable, but could be BL EL LI NL TL DTI
'listOpen' : '@BulletList' ,
'listClose' : '@EndList' ,
'listItemOpen' : '@ListItem{' ,
'listItemClose' : '}' ,
'numlistOpen' : '@NumberedList' ,
'numlistClose' : '@EndList' ,
'numlistItemOpen' : '@ListItem{' ,
'numlistItemClose' : '}' ,
'deflistOpen' : '@TaggedList' ,
'deflistClose' : '@EndList' ,
'deflistItem1Open' : '@DropTagItem {' ,
'deflistItem1Close' : '}' ,
'deflistItem2Open' : '{' ,
'deflistItem2Close' : '}' ,
'bar1' : '@DP @FullWidthRule' ,
'url' : '{blue @Colour { \a }}' ,
'urlMark' : '\a ({blue @Colour { \a }})' ,
'email' : '{blue @Colour { \a }}' ,
'emailMark' : '\a ({blue Colour{ \a }})' ,
'img' : '~A~@IncludeGraphic { \a }' , # eps only!
'_imgAlignLeft' : '@LeftDisplay ' ,
'_imgAlignRight' : '@RightDisplay ' ,
'_imgAlignCenter' : '@CentredDisplay ' ,
# lout tables are *way* complicated, no support for now
#'tableOpen' : '~A~@Tbl~B~\naformat{ @Cell A | @Cell B } {',
#'tableClose' : '}' ,
#'tableRowOpen' : '@Rowa\n' ,
#'tableTitleRowOpen' : '@HeaderRowa' ,
#'tableCenterAlign' : '@CentredDisplay ' ,
#'tableCellOpen' : '\a {' , # A, B, ...
#'tableCellClose' : '}' ,
#'_tableBorder' : '\nrule {yes}' ,
'comment' : '# \a' ,
# @MakeContents must be on the config file
'TOC' : '@DP @ContentsGoesHere @DP',
'pageBreak' : '@NP' ,
'EOD' : '@End @Text'
},
# http://moinmo.in/SyntaxReference
'moin': {
'title1' : '= \a =' ,
'title2' : '== \a ==' ,
'title3' : '=== \a ===' ,
'title4' : '==== \a ====' ,
'title5' : '===== \a =====',
'blockVerbOpen' : '{{{' ,
'blockVerbClose' : '}}}' ,
'blockQuoteLine' : ' ' ,
'fontMonoOpen' : '{{{' ,
'fontMonoClose' : '}}}' ,
'fontBoldOpen' : "'''" ,
'fontBoldClose' : "'''" ,
'fontItalicOpen' : "''" ,
'fontItalicClose' : "''" ,
'fontUnderlineOpen' : '__' ,
'fontUnderlineClose' : '__' ,
'fontStrikeOpen' : '--(' ,
'fontStrikeClose' : ')--' ,
'listItemOpen' : ' * ' ,
'numlistItemOpen' : ' \a. ' ,
'deflistItem1Open' : ' ' ,
'deflistItem1Close' : '::' ,
'deflistItem2LinePrefix': ' :: ' ,
'bar1' : '----' ,
'bar2' : '--------' ,
'url' : '[[\a]]' ,
'urlMark' : '[[\a|\a]]' ,
'email' : '\a' ,
'emailMark' : '[[mailto:\a|\a]]',
'img' : '[\a]' ,
'tableRowOpen' : '||' ,
'tableCellOpen' : '~A~' ,
'tableCellClose' : '||' ,
'tableTitleCellClose' : '||' ,
'_tableCellAlignRight' : '<)>' ,
'_tableCellAlignCenter' : '<:>' ,
'comment' : '/* \a */' ,
'TOC' : '[[TableOfContents]]'
},
# http://code.google.com/p/support/wiki/WikiSyntax
'gwiki': {
'title1' : '= \a =' ,
'title2' : '== \a ==' ,
'title3' : '=== \a ===' ,
'title4' : '==== \a ====' ,
'title5' : '===== \a =====',
'blockVerbOpen' : '{{{' ,
'blockVerbClose' : '}}}' ,
'blockQuoteLine' : ' ' ,
'fontMonoOpen' : '{{{' ,
'fontMonoClose' : '}}}' ,
'fontBoldOpen' : '*' ,
'fontBoldClose' : '*' ,
'fontItalicOpen' : '_' , # underline == italic
'fontItalicClose' : '_' ,
'fontStrikeOpen' : '~~' ,
'fontStrikeClose' : '~~' ,
'listItemOpen' : ' * ' ,
'numlistItemOpen' : ' # ' ,
'url' : '\a' ,
'urlMark' : '[\a \a]' ,
'email' : 'mailto:\a' ,
'emailMark' : '[mailto:\a \a]',
'img' : '[\a]' ,
'tableRowOpen' : '|| ' ,
'tableRowClose' : ' ||' ,
'tableCellSep' : ' || ' ,
},
# http://powerman.name/doc/asciidoc
'adoc': {
'title1' : '== \a' ,
'title2' : '=== \a' ,
'title3' : '==== \a' ,
'title4' : '===== \a' ,
'title5' : '===== \a' ,
'blockVerbOpen' : '----' ,
'blockVerbClose' : '----' ,
'fontMonoOpen' : '+' ,
'fontMonoClose' : '+' ,
'fontBoldOpen' : '*' ,
'fontBoldClose' : '*' ,
'fontItalicOpen' : '_' ,
'fontItalicClose' : '_' ,
'listItemOpen' : '- ' ,
'listItemLine' : '\t' ,
'numlistItemOpen' : '. ' ,
'url' : '\a' ,
'urlMark' : '\a[\a]' ,
'email' : 'mailto:\a' ,
'emailMark' : 'mailto:\a[\a]' ,
'img' : 'image::\a[]' ,
},
# http://wiki.splitbrain.org/wiki:syntax
# Hint: <br> is \\ $
# Hint: You can add footnotes ((This is a footnote))
'doku': {
'title1' : '===== \a =====',
'title2' : '==== \a ====' ,
'title3' : '=== \a ===' ,
'title4' : '== \a ==' ,
'title5' : '= \a =' ,
# DokuWiki uses ' ' identation to mark verb blocks (see indentverbblock)
'blockQuoteLine' : '>' ,
'fontMonoOpen' : "''" ,
'fontMonoClose' : "''" ,
'fontBoldOpen' : "**" ,
'fontBoldClose' : "**" ,
'fontItalicOpen' : "//" ,
'fontItalicClose' : "//" ,
'fontUnderlineOpen' : "__" ,
'fontUnderlineClose' : "__" ,
'fontStrikeOpen' : '<del>' ,
'fontStrikeClose' : '</del>' ,
'listItemOpen' : ' * ' ,
'numlistItemOpen' : ' - ' ,
'bar1' : '----' ,
'url' : '[[\a]]' ,
'urlMark' : '[[\a|\a]]' ,
'email' : '[[\a]]' ,
'emailMark' : '[[\a|\a]]' ,
'img' : '{{\a}}' ,
'imgAlignLeft' : '{{\a }}' ,
'imgAlignRight' : '{{ \a}}' ,
'imgAlignCenter' : '{{ \a }}' ,
'tableTitleRowOpen' : '^ ' ,
'tableTitleRowClose' : ' ^' ,
'tableTitleCellSep' : ' ^ ' ,
'tableRowOpen' : '| ' ,
'tableRowClose' : ' |' ,
'tableCellSep' : ' | ' ,
# DokuWiki has no attributes. The content must be aligned!
# '_tableCellAlignRight' : '<)>' , # ??
# '_tableCellAlignCenter': '<:>' , # ??
# DokuWiki colspan is the same as txt2tags' with multiple |||
# 'comment' : '## \a' , # ??
# TOC is automatic
},
# http://www.pmwiki.org/wiki/PmWiki/TextFormattingRules
'pmw': {
'title1' : '~A~! \a ' ,
'title2' : '~A~!! \a ' ,
'title3' : '~A~!!! \a ' ,
'title4' : '~A~!!!! \a ' ,
'title5' : '~A~!!!!! \a ' ,
'blockQuoteOpen' : '->' ,
'blockQuoteClose' : '\n' ,
# In-text font
'fontLargeOpen' : "[+" ,
'fontLargeClose' : "+]" ,
'fontLargerOpen' : "[++" ,
'fontLargerClose' : "++]" ,
'fontSmallOpen' : "[-" ,
'fontSmallClose' : "-]" ,
'fontLargerOpen' : "[--" ,
'fontLargerClose' : "--]" ,
'fontMonoOpen' : "@@" ,
'fontMonoClose' : "@@" ,
'fontBoldOpen' : "'''" ,
'fontBoldClose' : "'''" ,
'fontItalicOpen' : "''" ,
'fontItalicClose' : "''" ,
'fontUnderlineOpen' : "{+" ,
'fontUnderlineClose' : "+}" ,
'fontStrikeOpen' : '{-' ,
'fontStrikeClose' : '-}' ,
# Lists
'listItemLine' : '*' ,
'numlistItemLine' : '#' ,
'deflistItem1Open' : ': ' ,
'deflistItem1Close' : ':' ,
'deflistItem2LineOpen' : '::' ,
'deflistItem2LineClose' : ':' ,
# Verbatim block
'blockVerbOpen' : '[@' ,
'blockVerbClose' : '@]' ,
'bar1' : '----' ,
# URL, email and anchor
'url' : '\a' ,
'urlMark' : '[[\a -> \a]]' ,
'email' : '\a' ,
'emailMark' : '[[\a -> mailto:\a]]',
'anchor' : '[[#\a]]\n' ,
# Image markup
'img' : '\a' ,
#'imgAlignLeft' : '{{\a }}' ,
#'imgAlignRight' : '{{ \a}}' ,
#'imgAlignCenter' : '{{ \a }}' ,
# Table attributes
'tableTitleRowOpen' : '||! ' ,
'tableTitleRowClose' : '||' ,
'tableTitleCellSep' : ' ||!' ,
'tableRowOpen' : '||' ,
'tableRowClose' : '||' ,
'tableCellSep' : ' ||' ,
},
# http://en.wikipedia.org/wiki/Help:Editing
'wiki': {
'title1' : '== \a ==' ,
'title2' : '=== \a ===' ,
'title3' : '==== \a ====' ,
'title4' : '===== \a =====' ,
'title5' : '====== \a ======',
'blockVerbOpen' : '<pre>' ,
'blockVerbClose' : '</pre>' ,
'blockQuoteOpen' : '<blockquote>' ,
'blockQuoteClose' : '</blockquote>' ,
'fontMonoOpen' : '<tt>' ,
'fontMonoClose' : '</tt>' ,
'fontBoldOpen' : "'''" ,
'fontBoldClose' : "'''" ,
'fontItalicOpen' : "''" ,
'fontItalicClose' : "''" ,
'fontUnderlineOpen' : '<u>' ,
'fontUnderlineClose' : '</u>' ,
'fontStrikeOpen' : '<s>' ,
'fontStrikeClose' : '</s>' ,
#XXX Mixed lists not working: *#* list inside numlist inside list
'listItemLine' : '*' ,
'numlistItemLine' : '#' ,
'deflistItem1Open' : '; ' ,
'deflistItem2LinePrefix': ': ' ,
'bar1' : '----' ,
'url' : '[\a]' ,
'urlMark' : '[\a \a]' ,
'email' : 'mailto:\a' ,
'emailMark' : '[mailto:\a \a]' ,
# [[Image:foo.png|right|Optional alt/caption text]] (right, left, center, none)
'img' : '[[Image:\a~A~]]' ,
'_imgAlignLeft' : '|left' ,
'_imgAlignCenter' : '|center' ,
'_imgAlignRight' : '|right' ,
# {| border="1" cellspacing="0" cellpadding="4" align="center"
'tableOpen' : '{|~A~~B~ cellpadding="4"',
'tableClose' : '|}' ,
'tableRowOpen' : '|-\n| ' ,
'tableTitleRowOpen' : '|-\n! ' ,
'tableCellSep' : ' || ' ,
'tableTitleCellSep' : ' !! ' ,
'_tableBorder' : ' border="1"' ,
'_tableAlignCenter' : ' align="center"' ,
'comment' : '<!-- \a -->' ,
'TOC' : '__TOC__' ,
},
# http://www.inference.phy.cam.ac.uk/mackay/mgp/SYNTAX
# http://en.wikipedia.org/wiki/MagicPoint
'mgp': {
'paragraphOpen' : '%font "normal", size 5' ,
'title1' : '%page\n\n\a\n' ,
'title2' : '%page\n\n\a\n' ,
'title3' : '%page\n\n\a\n' ,
'title4' : '%page\n\n\a\n' ,
'title5' : '%page\n\n\a\n' ,
'blockVerbOpen' : '%font "mono"' ,
'blockVerbClose' : '%font "normal"' ,
'blockQuoteOpen' : '%prefix " "' ,
'blockQuoteClose' : '%prefix " "' ,
'fontMonoOpen' : '\n%cont, font "mono"\n' ,
'fontMonoClose' : '\n%cont, font "normal"\n' ,
'fontBoldOpen' : '\n%cont, font "normal-b"\n' ,
'fontBoldClose' : '\n%cont, font "normal"\n' ,
'fontItalicOpen' : '\n%cont, font "normal-i"\n' ,
'fontItalicClose' : '\n%cont, font "normal"\n' ,
'fontUnderlineOpen' : '\n%cont, fore "cyan"\n' ,
'fontUnderlineClose' : '\n%cont, fore "white"\n' ,
'listItemLine' : '\t' ,
'numlistItemLine' : '\t' ,
'numlistItemOpen' : '\a. ' ,
'deflistItem1Open' : '\t\n%cont, font "normal-b"\n',
'deflistItem1Close' : '\n%cont, font "normal"\n' ,
'bar1' : '%bar "white" 5' ,
'bar2' : '%pause' ,
'url' : '\n%cont, fore "cyan"\n\a' +\
'\n%cont, fore "white"\n' ,
'urlMark' : '\a \n%cont, fore "cyan"\n\a'+\
'\n%cont, fore "white"\n' ,
'email' : '\n%cont, fore "cyan"\n\a' +\
'\n%cont, fore "white"\n' ,
'emailMark' : '\a \n%cont, fore "cyan"\n\a'+\
'\n%cont, fore "white"\n' ,
'img' : '~A~\n%newimage "\a"\n%left\n',
'_imgAlignLeft' : '\n%left' ,
'_imgAlignRight' : '\n%right' ,
'_imgAlignCenter' : '\n%center' ,
'comment' : '%% \a' ,
'pageBreak' : '%page\n\n\n' ,
'EOD' : '%%EOD'
},
# man groff_man ; man 7 groff
'man': {
'paragraphOpen' : '.P' ,
'title1' : '.SH \a' ,
'title2' : '.SS \a' ,
'title3' : '.SS \a' ,
'title4' : '.SS \a' ,
'title5' : '.SS \a' ,
'blockVerbOpen' : '.nf' ,
'blockVerbClose' : '.fi\n' ,
'blockQuoteOpen' : '.RS' ,
'blockQuoteClose' : '.RE' ,
'fontBoldOpen' : '\\fB' ,
'fontBoldClose' : '\\fR' ,
'fontItalicOpen' : '\\fI' ,
'fontItalicClose' : '\\fR' ,
'listOpen' : '.RS' ,
'listItemOpen' : '.IP \(bu 3\n',
'listClose' : '.RE' ,
'numlistOpen' : '.RS' ,
'numlistItemOpen' : '.IP \a. 3\n',
'numlistClose' : '.RE' ,
'deflistItem1Open' : '.TP\n' ,
'bar1' : '\n\n' ,
'url' : '\a' ,
'urlMark' : '\a (\a)',
'email' : '\a' ,
'emailMark' : '\a (\a)',
'img' : '\a' ,
'tableOpen' : '.TS\n~A~~B~tab(^); ~C~.',
'tableClose' : '.TE' ,
'tableRowOpen' : ' ' ,
'tableCellSep' : '^' ,
'_tableAlignCenter' : 'center, ',
'_tableBorder' : 'allbox, ',
'_tableColAlignLeft' : 'l' ,
'_tableColAlignRight' : 'r' ,
'_tableColAlignCenter' : 'c' ,
'comment' : '.\\" \a'
},
'pm6': {
'paragraphOpen' : '<@Normal:>' ,
'title1' : '<@Title1:>\a',
'title2' : '<@Title2:>\a',
'title3' : '<@Title3:>\a',
'title4' : '<@Title4:>\a',
'title5' : '<@Title5:>\a',
'blockVerbOpen' : '<@PreFormat:>' ,
'blockQuoteLine' : '<@Quote:>' ,
'fontMonoOpen' : '<FONT "Lucida Console"><SIZE 9>' ,
'fontMonoClose' : '<SIZE$><FONT$>',
'fontBoldOpen' : '<B>' ,
'fontBoldClose' : '<P>' ,
'fontItalicOpen' : '<I>' ,
'fontItalicClose' : '<P>' ,
'fontUnderlineOpen' : '<U>' ,
'fontUnderlineClose' : '<P>' ,
'listOpen' : '<@Bullet:>' ,
'listItemOpen' : '\x95\t' , # \x95 == ~U
'numlistOpen' : '<@Bullet:>' ,
'numlistItemOpen' : '\x95\t' ,
'bar1' : '\a' ,
'url' : '<U>\a<P>' , # underline
'urlMark' : '\a <U>\a<P>' ,
'email' : '\a' ,
'emailMark' : '\a \a' ,
'img' : '\a'
},
# http://www.wikicreole.org/wiki/AllMarkup
'creole': {
'title1' : '= \a =' ,
'title2' : '== \a ==' ,
'title3' : '=== \a ===' ,
'title4' : '==== \a ====' ,
'title5' : '===== \a =====',
'blockVerbOpen' : '{{{' ,
'blockVerbClose' : '}}}' ,
'blockQuoteLine' : ' ' ,
# 'fontMonoOpen' : '##' , # planned for 2.0,
# 'fontMonoClose' : '##' , # meanwhile we disable it
'fontBoldOpen' : '**' ,
'fontBoldClose' : '**' ,
'fontItalicOpen' : '//' ,
'fontItalicClose' : '//' ,
'fontUnderlineOpen' : '//' , # no underline in 1.0, planned for 2.0,
'fontUnderlineClose' : '//' , # meanwhile we can use italic (emphasized)
# 'fontStrikeOpen' : '--' , # planned for 2.0,
# 'fontStrikeClose' : '--' , # meanwhile we disable it
'listItemLine' : '*' ,
'numlistItemLine' : '#' ,
'deflistItem2LinePrefix': ':' ,
'bar1' : '----' ,
'url' : '[[\a]]' ,
'urlMark' : '[[\a|\a]]' ,
'img' : '{{\a}}' ,
'tableTitleRowOpen' : '|= ' ,
'tableTitleRowClose' : '|' ,
'tableTitleCellSep' : ' |= ' ,
'tableRowOpen' : '| ' ,
'tableRowClose' : ' |' ,
'tableCellSep' : ' | ' ,
# TODO: placeholder (mark for unknown syntax)
# if possible: http://www.wikicreole.org/wiki/Placeholder
}
}
# Exceptions for --css-sugar
if config['css-sugar'] and config['target'] in ('html','xhtml'):
# Change just HTML because XHTML inherits it
htmltags = alltags['html']
# Table with no cellpadding
htmltags['tableOpen'] = htmltags['tableOpen'].replace(' CELLPADDING="4"', '')
# DIVs
htmltags['tocOpen' ] = '<DIV CLASS="toc">'
htmltags['tocClose'] = '</DIV>'
htmltags['bodyOpen'] = '<DIV CLASS="body" ID="body">'
htmltags['bodyClose']= '</DIV>'
# Make the HTML -> XHTML inheritance
xhtml = alltags['html'].copy()
for key in xhtml.keys(): xhtml[key] = xhtml[key].lower()
# Some like HTML tags as lowercase, some don't... (headers out)
if HTML_LOWER: alltags['html'] = xhtml.copy()
xhtml.update(alltags['xhtml'])
alltags['xhtml'] = xhtml.copy()
# Compose the target tags dictionary
tags = {}
target_tags = alltags[config['target']].copy()
for key in keys: tags[key] = '' # create empty keys
for key in target_tags.keys():
tags[key] = maskEscapeChar(target_tags[key]) # populate
# Map strong line to pagebreak
if rules['mapbar2pagebreak'] and tags['pageBreak']:
tags['bar2'] = tags['pageBreak']
# Map strong line to separator if not defined
if not tags['bar2'] and tags['bar1']:
tags['bar2'] = tags['bar1']
return tags
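# Illustrative sketch (not executed): for config['target'] == 'doku' the
# 'doku' table above supplies e.g. fontBoldOpen '**' and urlMark
# '[[\a|\a]]' (\a marks where content is filled in), keys absent from the
# table stay '', and since 'doku' defines neither pageBreak nor bar2,
# tags['bar2'] falls back to tags['bar1'] ('----').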
##############################################################################
def getRules(config):
"Returns all the target-specific syntax rules"
ret = {}
allrules = [
# target rules (ON/OFF)
'linkable', # target supports external links
'tableable', # target supports tables
'imglinkable', # target supports images as links
'imgalignable', # target supports image alignment
'imgasdefterm', # target supports image as definition term
'autonumberlist', # target supports numbered lists natively
'autonumbertitle', # target supports numbered titles natively
'stylable', # target supports external style files
    'parainsidelist', # list items support paragraphs
'compactlist', # separate enclosing tags for compact lists
'spacedlistitem', # lists support blank lines between items
'listnotnested', # lists cannot be nested
'quotenotnested', # quotes cannot be nested
'verbblocknotescaped', # don't escape specials in verb block
'verbblockfinalescape', # do final escapes in verb block
'escapeurl', # escape special in link URL
'labelbeforelink', # label comes before the link on the tag
'onelinepara', # dump paragraph as a single long line
'tabletitlerowinbold', # manually bold any cell on table titles
'tablecellstrip', # strip extra spaces from each table cell
'tablecellspannable', # the table cells can have span attribute
'tablecellmulticol', # separate open+close tags for multicol cells
'barinsidequote', # bars are allowed inside quote blocks
'finalescapetitle', # perform final escapes on title lines
'autotocnewpagebefore', # break page before automatic TOC
'autotocnewpageafter', # break page after automatic TOC
'autotocwithbars', # automatic TOC surrounded by bars
'mapbar2pagebreak', # map the strong bar to a page break
'titleblocks', # titles must be on open/close section blocks
# Target code beautify (ON/OFF)
'indentverbblock', # add leading spaces to verb block lines
'breaktablecell', # break lines after any table cell
'breaktablelineopen', # break line after opening table line
'notbreaklistopen', # don't break line after opening a new list
'keepquoteindent', # don't remove the leading TABs on quotes
'keeplistindent', # don't remove the leading spaces on lists
'blankendautotoc', # append a blank line at the auto TOC end
'tagnotindentable', # tags must be placed at the line beginning
'spacedlistitemopen', # append a space after the list item open tag
'spacednumlistitemopen',# append a space after the numlist item open tag
'deflisttextstrip', # strip the contents of the deflist text
'blanksaroundpara', # put a blank line before and after paragraphs
'blanksaroundverb', # put a blank line before and after verb blocks
'blanksaroundquote', # put a blank line before and after quotes
'blanksaroundlist', # put a blank line before and after lists
'blanksaroundnumlist', # put a blank line before and after numlists
'blanksarounddeflist', # put a blank line before and after deflists
'blanksaroundtable', # put a blank line before and after tables
'blanksaroundbar', # put a blank line before and after bars
'blanksaroundtitle', # put a blank line before and after titles
'blanksaroundnumtitle', # put a blank line before and after numtitles
# Value settings
'listmaxdepth', # maximum depth for lists
'quotemaxdepth', # maximum depth for quotes
'tablecellaligntype', # type of table cell align: cell, column
]
rules_bank = {
'txt': {
'indentverbblock':1,
'spacedlistitem':1,
'parainsidelist':1,
'keeplistindent':1,
'barinsidequote':1,
'autotocwithbars':1,
'blanksaroundpara':1,
'blanksaroundverb':1,
'blanksaroundquote':1,
'blanksaroundlist':1,
'blanksaroundnumlist':1,
'blanksarounddeflist':1,
'blanksaroundtable':1,
'blanksaroundbar':1,
'blanksaroundtitle':1,
'blanksaroundnumtitle':1,
},
'art': {
#TIP art inherits all TXT rules
},
'html': {
'indentverbblock':1,
'linkable':1,
'stylable':1,
'escapeurl':1,
'imglinkable':1,
'imgalignable':1,
'imgasdefterm':1,
'autonumberlist':1,
'spacedlistitem':1,
'parainsidelist':1,
'tableable':1,
'tablecellstrip':1,
'breaktablecell':1,
'breaktablelineopen':1,
'keeplistindent':1,
'keepquoteindent':1,
'barinsidequote':1,
'autotocwithbars':1,
'tablecellspannable':1,
'tablecellaligntype':'cell',
# 'blanksaroundpara':1,
'blanksaroundverb':1,
# 'blanksaroundquote':1,
'blanksaroundlist':1,
'blanksaroundnumlist':1,
'blanksarounddeflist':1,
'blanksaroundtable':1,
'blanksaroundbar':1,
'blanksaroundtitle':1,
'blanksaroundnumtitle':1,
},
'xhtml': {
#TIP xhtml inherits all HTML rules
},
'sgml': {
'linkable':1,
'escapeurl':1,
'autonumberlist':1,
'spacedlistitem':1,
'tableable':1,
'tablecellstrip':1,
'blankendautotoc':1,
'quotenotnested':1,
'keeplistindent':1,
'keepquoteindent':1,
'barinsidequote':1,
'finalescapetitle':1,
'tablecellaligntype':'column',
'blanksaroundpara':1,
'blanksaroundverb':1,
'blanksaroundquote':1,
'blanksaroundlist':1,
'blanksaroundnumlist':1,
'blanksarounddeflist':1,
'blanksaroundtable':1,
'blanksaroundbar':1,
'blanksaroundtitle':1,
'blanksaroundnumtitle':1,
},
'dbk': {
'linkable':1,
'tableable':0, # activate when table tags are ready
'imglinkable':1,
'imgalignable':1,
'imgasdefterm':1,
'autonumberlist':1,
'autonumbertitle':1,
'parainsidelist':1,
'spacedlistitem':1,
'titleblocks':1,
},
'mgp': {
'tagnotindentable':1,
'spacedlistitem':1,
'imgalignable':1,
'autotocnewpagebefore':1,
'blanksaroundpara':1,
'blanksaroundverb':1,
# 'blanksaroundquote':1,
'blanksaroundlist':1,
'blanksaroundnumlist':1,
'blanksarounddeflist':1,
'blanksaroundtable':1,
'blanksaroundbar':1,
# 'blanksaroundtitle':1,
# 'blanksaroundnumtitle':1,
},
'tex': {
'stylable':1,
'escapeurl':1,
'autonumberlist':1,
'autonumbertitle':1,
'spacedlistitem':1,
'compactlist':1,
'parainsidelist':1,
'tableable':1,
'tablecellstrip':1,
'tabletitlerowinbold':1,
'verbblocknotescaped':1,
'keeplistindent':1,
'listmaxdepth':4, # deflist is 6
'quotemaxdepth':6,
'barinsidequote':1,
'finalescapetitle':1,
'autotocnewpageafter':1,
'mapbar2pagebreak':1,
'tablecellaligntype':'column',
'tablecellmulticol':1,
'blanksaroundpara':1,
'blanksaroundverb':1,
# 'blanksaroundquote':1,
'blanksaroundlist':1,
'blanksaroundnumlist':1,
'blanksarounddeflist':1,
'blanksaroundtable':1,
'blanksaroundbar':1,
'blanksaroundtitle':1,
'blanksaroundnumtitle':1,
},
'lout': {
'keepquoteindent':1,
'deflisttextstrip':1,
'escapeurl':1,
'verbblocknotescaped':1,
'imgalignable':1,
'mapbar2pagebreak':1,
'titleblocks':1,
'autonumberlist':1,
'parainsidelist':1,
'blanksaroundpara':1,
'blanksaroundverb':1,
# 'blanksaroundquote':1,
'blanksaroundlist':1,
'blanksaroundnumlist':1,
'blanksarounddeflist':1,
'blanksaroundtable':1,
'blanksaroundbar':1,
'blanksaroundtitle':1,
'blanksaroundnumtitle':1,
},
'moin': {
'spacedlistitem':1,
'linkable':1,
'keeplistindent':1,
'tableable':1,
'barinsidequote':1,
'tabletitlerowinbold':1,
'tablecellstrip':1,
'autotocwithbars':1,
'tablecellaligntype':'cell',
'deflisttextstrip':1,
'blanksaroundpara':1,
'blanksaroundverb':1,
# 'blanksaroundquote':1,
'blanksaroundlist':1,
'blanksaroundnumlist':1,
'blanksarounddeflist':1,
'blanksaroundtable':1,
# 'blanksaroundbar':1,
'blanksaroundtitle':1,
'blanksaroundnumtitle':1,
},
'gwiki': {
'spacedlistitem':1,
'linkable':1,
'keeplistindent':1,
'tableable':1,
'tabletitlerowinbold':1,
'tablecellstrip':1,
'autonumberlist':1,
'blanksaroundpara':1,
'blanksaroundverb':1,
# 'blanksaroundquote':1,
'blanksaroundlist':1,
'blanksaroundnumlist':1,
'blanksarounddeflist':1,
'blanksaroundtable':1,
# 'blanksaroundbar':1,
'blanksaroundtitle':1,
'blanksaroundnumtitle':1,
},
'adoc': {
'spacedlistitem':1,
'linkable':1,
'keeplistindent':1,
'autonumberlist':1,
'autonumbertitle':1,
'listnotnested':1,
'blanksaroundpara':1,
'blanksaroundverb':1,
'blanksaroundlist':1,
'blanksaroundnumlist':1,
'blanksarounddeflist':1,
'blanksaroundtable':1,
'blanksaroundtitle':1,
'blanksaroundnumtitle':1,
},
'doku': {
'indentverbblock':1, # DokuWiki uses ' ' to mark verb blocks
'spacedlistitem':1,
'linkable':1,
'keeplistindent':1,
'tableable':1,
'barinsidequote':1,
'tablecellstrip':1,
'autotocwithbars':1,
'autonumberlist':1,
'imgalignable':1,
'tablecellaligntype':'cell',
'blanksaroundpara':1,
'blanksaroundverb':1,
# 'blanksaroundquote':1,
'blanksaroundlist':1,
'blanksaroundnumlist':1,
'blanksarounddeflist':1,
'blanksaroundtable':1,
'blanksaroundbar':1,
'blanksaroundtitle':1,
'blanksaroundnumtitle':1,
},
'pmw': {
'indentverbblock':1,
'spacedlistitem':1,
'linkable':1,
'labelbeforelink':1,
# 'keeplistindent':1,
'tableable':1,
'barinsidequote':1,
'tablecellstrip':1,
'autotocwithbars':1,
'autonumberlist':1,
'spacedlistitemopen':1,
'spacednumlistitemopen':1,
'imgalignable':1,
'tabletitlerowinbold':1,
'tablecellaligntype':'cell',
'blanksaroundpara':1,
'blanksaroundverb':1,
'blanksaroundquote':1,
'blanksaroundlist':1,
'blanksaroundnumlist':1,
'blanksarounddeflist':1,
'blanksaroundtable':1,
'blanksaroundbar':1,
'blanksaroundtitle':1,
'blanksaroundnumtitle':1,
},
'wiki': {
'linkable':1,
'tableable':1,
'tablecellstrip':1,
'autotocwithbars':1,
'spacedlistitemopen':1,
'spacednumlistitemopen':1,
'deflisttextstrip':1,
'autonumberlist':1,
'imgalignable':1,
'blanksaroundpara':1,
'blanksaroundverb':1,
# 'blanksaroundquote':1,
'blanksaroundlist':1,
'blanksaroundnumlist':1,
'blanksarounddeflist':1,
'blanksaroundtable':1,
'blanksaroundbar':1,
'blanksaroundtitle':1,
'blanksaroundnumtitle':1,
},
'man': {
'spacedlistitem':1,
'tagnotindentable':1,
'tableable':1,
'tablecellaligntype':'column',
'tabletitlerowinbold':1,
'tablecellstrip':1,
'barinsidequote':1,
'parainsidelist':0,
'blanksaroundpara':1,
'blanksaroundverb':1,
# 'blanksaroundquote':1,
'blanksaroundlist':1,
'blanksaroundnumlist':1,
'blanksarounddeflist':1,
'blanksaroundtable':1,
# 'blanksaroundbar':1,
'blanksaroundtitle':1,
'blanksaroundnumtitle':1,
},
'pm6': {
'keeplistindent':1,
'verbblockfinalescape':1,
#TODO add support for these
# maybe set a JOINNEXT char and do it on addLineBreaks()
'notbreaklistopen':1,
'barinsidequote':1,
'autotocwithbars':1,
'onelinepara':1,
'blanksaroundpara':1,
'blanksaroundverb':1,
# 'blanksaroundquote':1,
'blanksaroundlist':1,
'blanksaroundnumlist':1,
'blanksarounddeflist':1,
# 'blanksaroundtable':1,
# 'blanksaroundbar':1,
'blanksaroundtitle':1,
'blanksaroundnumtitle':1,
},
'creole': {
'linkable':1,
'tableable':1,
'imglinkable':1,
'tablecellstrip':1,
'autotocwithbars':1,
'spacedlistitemopen':1,
'spacednumlistitemopen':1,
'deflisttextstrip':1,
'verbblocknotescaped':1,
'blanksaroundpara':1,
'blanksaroundverb':1,
'blanksaroundquote':1,
'blanksaroundlist':1,
'blanksaroundnumlist':1,
'blanksarounddeflist':1,
'blanksaroundtable':1,
'blanksaroundbar':1,
'blanksaroundtitle':1,
},
}
# Exceptions for --css-sugar
if config['css-sugar'] and config['target'] in ('html','xhtml'):
rules_bank['html']['indentverbblock'] = 0
rules_bank['html']['autotocwithbars'] = 0
# Get the target specific rules
if config['target'] == 'xhtml':
myrules = rules_bank['html'].copy() # inheritance
myrules.update(rules_bank['xhtml']) # get XHTML specific
elif config['target'] == 'art':
myrules = rules_bank['txt'].copy() # inheritance
if config['slides']:
myrules['blanksaroundtitle'] = 0
myrules['blanksaroundnumtitle'] = 0
else:
myrules = rules_bank[config['target']].copy()
# Populate return dictionary
for key in allrules: ret[key] = 0 # reset all
ret.update(myrules) # get rules
return ret
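# Illustrative sketch (not executed): for config['target'] == 'xhtml' the
# returned dict starts with every rule above reset to 0, then gets the
# 'html' bank overlaid (inheritance) plus the empty 'xhtml' entry, so e.g.
# ret['linkable'] == 1 and ret['tablecellaligntype'] == 'cell', while
# rules not listed for HTML (such as 'listmaxdepth') remain 0.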
##############################################################################
def getRegexes():
"Returns all the regexes used to find the t2t marks"
bank = {
'blockVerbOpen':
re.compile(r'^```\s*$'),
'blockVerbClose':
re.compile(r'^```\s*$'),
'blockRawOpen':
re.compile(r'^"""\s*$'),
'blockRawClose':
re.compile(r'^"""\s*$'),
'blockTaggedOpen':
re.compile(r"^'''\s*$"),
'blockTaggedClose':
re.compile(r"^'''\s*$"),
'blockCommentOpen':
re.compile(r'^%%%\s*$'),
'blockCommentClose':
re.compile(r'^%%%\s*$'),
'quote':
re.compile(r'^\t+'),
'1lineVerb':
re.compile(r'^``` (?=.)'),
'1lineRaw':
re.compile(r'^""" (?=.)'),
'1lineTagged':
re.compile(r"^''' (?=.)"),
# mono, raw, bold, italic, underline:
# - marks must be glued with the contents, no boundary spaces
        # - they are greedy, so ****bold**** turns into <b>**bold**</b>
'fontMono':
re.compile( r'``([^\s](|.*?[^\s])`*)``'),
'raw':
re.compile( r'""([^\s](|.*?[^\s])"*)""'),
'tagged':
re.compile( r"''([^\s](|.*?[^\s])'*)''"),
'fontBold':
re.compile(r'\*\*([^\s](|.*?[^\s])\**)\*\*'),
'fontItalic':
re.compile( r'//([^\s](|.*?[^\s])/*)//'),
'fontUnderline':
re.compile( r'__([^\s](|.*?[^\s])_*)__'),
'fontStrike':
re.compile( r'--([^\s](|.*?[^\s])-*)--'),
'list':
re.compile(r'^( *)(-) (?=[^ ])'),
'numlist':
re.compile(r'^( *)(\+) (?=[^ ])'),
'deflist':
re.compile(r'^( *)(:) (.*)$'),
'listclose':
re.compile(r'^( *)([-+:])\s*$'),
'bar':
re.compile(r'^(\s*)([_=-]{20,})\s*$'),
'table':
re.compile(r'^ *\|\|? '),
'blankline':
re.compile(r'^\s*$'),
'comment':
re.compile(r'^%'),
# Auxiliary tag regexes
'_imgAlign' : re.compile(r'~A~', re.I),
'_tableAlign' : re.compile(r'~A~', re.I),
'_anchor' : re.compile(r'~A~', re.I),
'_tableBorder' : re.compile(r'~B~', re.I),
'_tableColAlign' : re.compile(r'~C~', re.I),
'_tableCellColSpan': re.compile(r'~S~', re.I),
'_tableCellAlign' : re.compile(r'~A~', re.I),
}
# Special char to place data on TAGs contents (\a == bell)
bank['x'] = re.compile('\a')
# %%macroname [ (formatting) ]
bank['macros'] = re.compile(r'%%%%(?P<name>%s)\b(\((?P<fmt>.*?)\))?' % (
'|'.join(MACROS.keys())), re.I)
# %%TOC special macro for TOC positioning
bank['toc'] = re.compile(r'^ *%%toc\s*$', re.I)
# Almost complicated title regexes ;)
titskel = r'^ *(?P<id>%s)(?P<txt>%s)\1(\[(?P<label>[\w-]*)\])?\s*$'
bank[ 'title'] = re.compile(titskel%('[=]{1,5}','[^=](|.*[^=])'))
bank['numtitle'] = re.compile(titskel%('[+]{1,5}','[^+](|.*[^+])'))
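    # Illustrative sketch (not executed) of the title regex above:
    #   m = bank['title'].match('== My title ==[sec1]')
    #   m.group('id'), m.group('txt'), m.group('label')  ->  ('==', ' My title ', 'sec1')
    # The length of the 'id' group ('==') gives the title level (2).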
### Complicated regexes begin here ;)
#
# Textual descriptions on --help's style: [...] is optional, | is OR
### First, some auxiliary variables
#
# [image.EXT]
patt_img = r'\[([\w_,.+%$#@!?+~/-]+\.(png|jpe?g|gif|eps|bmp|svg))\]'
# Link things
# http://www.gbiv.com/protocols/uri/rfc/rfc3986.html
# pchar: A-Za-z._~- / %FF / !$&'()*+,;= / :@
    # Recommended order: scheme://user:pass@domain/path?query=foo#anchor
# Also works : scheme://user:pass@domain/path#anchor?query=foo
# TODO form: !'():
urlskel = {
'proto' : r'(https?|ftp|news|telnet|gopher|wais)://',
'guess' : r'(www[23]?|ftp)\.', # w/out proto, try to guess
'login' : r'A-Za-z0-9_.-', # for ftp://[email protected]
'pass' : r'[^ @]*', # for ftp://login:[email protected]
'chars' : r'A-Za-z0-9%._/~:,=$@&+-', # %20(space), :80(port), D&D
'anchor': r'A-Za-z0-9%._-', # %nn(encoded)
'form' : r'A-Za-z0-9/%&=+:;.,$@*_-', # .,@*_-(as is)
'punct' : r'.,;:!?'
}
# username [ :password ] @
patt_url_login = r'([%s]+(:%s)?@)?'%(urlskel['login'],urlskel['pass'])
# [ http:// ] [ username:password@ ] domain.com [ / ]
# [ #anchor | ?form=data ]
retxt_url = r'\b(%s%s|%s)[%s]+\b/*(\?[%s]+)?(#[%s]*)?'%(
urlskel['proto'],patt_url_login, urlskel['guess'],
urlskel['chars'],urlskel['form'],urlskel['anchor'])
# filename | [ filename ] #anchor
retxt_url_local = r'[%s]+|[%s]*(#[%s]*)'%(
urlskel['chars'],urlskel['chars'],urlskel['anchor'])
# user@domain [ ?form=data ]
patt_email = r'\b[%s]+@([A-Za-z0-9_-]+\.)+[A-Za-z]{2,4}\b(\?[%s]+)?'%(
urlskel['login'],urlskel['form'])
# Saving for future use
bank['_urlskel'] = urlskel
### And now the real regexes
#
bank['email'] = re.compile(patt_email,re.I)
# email | url
bank['link'] = re.compile(r'%s|%s'%(retxt_url,patt_email), re.I)
# \[ label | imagetag url | email | filename \]
bank['linkmark'] = re.compile(
r'\[(?P<label>%s|[^]]+) (?P<link>%s|%s|%s)\]'%(
patt_img, retxt_url, patt_email, retxt_url_local),
re.I)
# Image
bank['img'] = re.compile(patt_img, re.I)
# Special things
bank['special'] = re.compile(r'^%!\s*')
return bank
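# Illustrative sketch (not executed) of a few of the regexes above:
#   rgx = getRegexes()
#   rgx['list'].match('  - item').groups()     ->  ('  ', '-')
#   rgx['numlist'].match('+ first').groups()   ->  ('', '+')
#   rgx['bar'].match('--------------------')   matches (20 or more -/=/_ chars)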
### END OF regex nightmares
################# functions for the ASCII Art backend ########################
def aa_line(char, length):
return char * length
def aa_box(txt, length):
len_txt = len(txt)
    nspace = (length - len_txt - 4) // 2  # integer division, keeps str * int valid on Python 3
line_box = " " * nspace + AA['corner'] + AA['border'] * (len_txt + 2) + AA['corner']
# <----- nspace " " -----> "+" <----- len_txt+2 "-" -----> "+"
# +-------------------------------+
# | all theeeeeeeeeeeeeeeeee text |
# <----- nspace " " -----> "| " <--------- txt ---------> " |"
line_txt = " " * nspace + AA['side'] + ' ' + txt + ' ' + AA['side']
return [line_box, line_txt, line_box]
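# Illustrative sketch (not executed), assuming the default AA characters
# are '+' (corner), '-' (border) and '|' (side):
#   aa_box('Hi', 10)  ->  ['  +----+', '  | Hi |', '  +----+']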
def aa_header(header_data, length, n, end):
header = [aa_line(AA['bar2'], length)]
header.extend(['']*n)
for h in 'HEADER1', 'HEADER2', 'HEADER3':
if header_data[h]:
header.extend(aa_box(header_data[h], length))
header.extend(['']*n)
header.extend(['']*end)
header.append(aa_line(AA['bar2'], length))
return header
def aa_slide(title, length):
res = [aa_line(AA['bar2'], length)]
res.append('')
res.append(title.center(length))
res.append('')
res.append(aa_line(AA['bar2'], length))
return res
def aa_table(table):
data = [row[2:-2].split(' | ') for row in table]
n = max([len(line) for line in data])
data = [line + (n - len(line)) * [''] for line in data]
tab = []
for i in range(n):
tab.append([line[i] for line in data])
length = [max([len(el) for el in line]) for line in tab]
res = "+"
for i in range(n):
res = res + (length[i] + 2) * "-" + '+'
ret = []
for line in data:
aff = "|"
ret.append(res)
for j,el in enumerate(line):
aff = aff + " " + el + (length[j] - len(el) + 1) * " " + "|"
ret.append(aff)
ret.append(res)
return ret
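# Illustrative sketch (not executed):
#   aa_table(['| a | bb |', '| ccc | d |'])
#   ->  ['+-----+----+',
#        '| a   | bb |',
#        '+-----+----+',
#        '| ccc | d  |',
#        '+-----+----+']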
##############################################################################
class error(Exception):
pass
def echo(msg): # for quick debug
print('\033[32;1m%s\033[m'%msg)
def Quit(msg=''):
if msg: print(msg)
sys.exit(0)
def Error(msg):
msg = _("%s: Error: ")%my_name + msg
raise error(msg)
def getTraceback():
try:
from traceback import format_exception
etype, value, tb = sys.exc_info()
return ''.join(format_exception(etype, value, tb))
except: pass
def getUnknownErrorMessage():
msg = '%s\n%s (%s):\n\n%s'%(
_('Sorry! Txt2tags aborted by an unknown error.'),
_('Please send the following Error Traceback to the author'),
my_email, getTraceback())
return msg
def Message(msg,level):
if level <= VERBOSE and not QUIET:
prefix = '-'*5
print("%s %s"%(prefix*level, msg))
def Debug(msg,id_=0,linenr=None):
"Show debug messages, categorized (colored or not)"
if QUIET or not DEBUG: return
if int(id_) not in range(8): id_ = 0
# 0:black 1:red 2:green 3:yellow 4:blue 5:pink 6:cyan 7:white ;1:light
ids = ['INI','CFG','SRC','BLK','HLD','GUI','OUT','DET']
colors_bgdark = ['7;1','1;1','3;1','6;1','4;1','5;1','2;1','7;1']
colors_bglight = ['0' ,'1' ,'3' ,'6' ,'4' ,'5' ,'2' ,'0' ]
if linenr is not None: msg = "LINE %04d: %s"%(linenr,msg)
if COLOR_DEBUG:
if BG_LIGHT: color = colors_bglight[id_]
else : color = colors_bgdark[id_]
msg = '\033[3%sm%s\033[m'%(color,msg)
print("++ %s: %s"%(ids[id_],msg))
def Readfile(file_path, remove_linebreaks=0, ignore_error=0):
data = []
if file_path == '-':
try:
data = sys.stdin.readlines()
except:
if not ignore_error:
Error(_('You must feed me with data on STDIN!'))
else:
try:
f = open(file_path)
data = f.readlines()
f.close()
except:
if not ignore_error:
Error(_("Cannot read file:") + ' ' + file_path)
if remove_linebreaks:
data = [re.sub('[\n\r]+$', '', x) for x in data]
Message(_("File read (%d lines): %s") % (len(data), file_path), 2)
return data
def Savefile(file_path, lines):
try:
with open(file_path, "w") as f:
f.writelines(lines)
except IOError:
Error(_("Cannot open file for writing:") + ' ' + file_path)
def showdic(dic):
for k in dic.keys(): print("%15s : %s" % (k,dic[k]))
def dotted_spaces(txt=''):
return txt.replace(' ', '.')
# TIP: win env vars http://www.winnetmag.com/Article/ArticleID/23873/23873.html
def get_rc_path():
"Return the full path for the users' RC file"
    # Try to get the path from an env var; if it's set, we're done
user_defined = os.environ.get('T2TCONFIG')
if user_defined: return user_defined
# Env var not found, so perform automatic path composing
# Set default filename according system platform
rc_names = {'default':'.txt2tagsrc', 'win':'_t2trc'}
rc_file = rc_names.get(sys.platform[:3]) or rc_names['default']
# The file must be on the user directory, but where is this dir?
rc_dir_search = ['HOME', 'HOMEPATH']
for var in rc_dir_search:
rc_dir = os.environ.get(var)
if rc_dir: break
# rc dir found, now we must join dir+file to compose the full path
if rc_dir:
# Compose path and return it if the file exists
rc_path = os.path.join(rc_dir, rc_file)
# On windows, prefix with the drive (%homedrive%: 2k/XP/NT)
if sys.platform.startswith('win'):
rc_drive = os.environ.get('HOMEDRIVE')
rc_path = os.path.join(rc_drive,rc_path)
return rc_path
# Sorry, not found
return ''
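# Illustrative sketch (not executed): with T2TCONFIG unset and, say,
# HOME=/home/alice on a POSIX system, get_rc_path() returns
# '/home/alice/.txt2tagsrc'; on Windows it uses '_t2trc' under HOME or
# HOMEPATH, prefixed with HOMEDRIVE.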
##############################################################################
class CommandLine:
"""
Command Line class - Masters command line
    This class checks and extracts data from the provided command line.
The --long options and flags are taken from the global OPTIONS,
FLAGS and ACTIONS dictionaries. The short options are registered
here, and also their equivalence to the long ones.
_compose_short_opts() -> str
_compose_long_opts() -> list
Compose the valid short and long options list, on the
'getopt' format.
parse() -> (opts, args)
Call getopt to check and parse the command line.
It expects to receive the command line as a list, and
without the program name (sys.argv[1:]).
get_raw_config() -> [RAW config]
Scans command line and convert the data to the RAW config
format. See ConfigMaster class to the RAW format description.
Optional 'ignore' and 'filter_' arguments are used to filter
in or out specified keys.
compose_cmdline(dict) -> [Command line]
Compose a command line list from an already parsed config
dictionary, generated from RAW by ConfigMaster(). Use
this to compose an optimal command line for a group of
options.
The get_raw_config() calls parse(), so the typical use of this
class is:
raw = CommandLine().get_raw_config(sys.argv[1:])
"""
def __init__(self):
self.all_options = list(OPTIONS.keys())
self.all_flags = list(FLAGS.keys())
self.all_actions = list(ACTIONS.keys())
# short:long options equivalence
self.short_long = {
'C':'config-file',
'h':'help',
'H':'no-headers',
'i':'infile',
'n':'enum-title',
'o':'outfile',
'q':'quiet',
't':'target',
'v':'verbose',
'V':'version',
}
# Compose valid short and long options data for getopt
self.short_opts = self._compose_short_opts()
self.long_opts = self._compose_long_opts()
def _compose_short_opts(self):
"Returns a string like 'hVt:o' with all short options/flags"
ret = []
for opt in self.short_long.keys():
long_ = self.short_long[opt]
if long_ in self.all_options: # is flag or option?
opt = opt+':' # option: have param
ret.append(opt)
#Debug('Valid SHORT options: %s'%ret)
return ''.join(ret)
def _compose_long_opts(self):
"Returns a list with all the valid long options/flags"
ret = [x+'=' for x in self.all_options] # add =
ret.extend(self.all_flags) # flag ON
ret.extend(self.all_actions) # actions
ret.extend(['no-'+x for x in self.all_flags]) # add no-*
ret.extend(['no-style','no-encoding']) # turn OFF
ret.extend(['no-outfile','no-infile']) # turn OFF
ret.extend(['no-dump-config', 'no-dump-source']) # turn OFF
ret.extend(['no-targets']) # turn OFF
#Debug('Valid LONG options: %s'%ret)
return ret
def _tokenize(self, cmd_string=''):
"Convert a command line string to a list"
        #TODO protect quoted contents -- don't use this, pass cmdline as a list
return cmd_string.split()
def parse(self, cmdline=[]):
"Check/Parse a command line list TIP: no program name!"
# Get the valid options
short, long_ = self.short_opts, self.long_opts
# Parse it!
try:
opts, args = getopt.getopt(cmdline, short, long_)
except getopt.error as errmsg:
Error(_("%s (try --help)")%errmsg)
return (opts, args)
def get_raw_config(self, cmdline=[], ignore=[], filter_=[], relative=0):
"Returns the options/arguments found as RAW config"
if not cmdline: return []
ret = []
# We need lists, not strings (such as from %!options)
if isinstance(cmdline, str):
cmdline = self._tokenize(cmdline)
# Extract name/value pair of all configs, check for invalid names
options, arguments = self.parse(cmdline[:])
# Some cleanup on the raw config
for name, value in options:
# Remove leading - and --
name = re.sub('^--?', '', name)
# Fix old misspelled --suGGar, --no-suGGar
name = name.replace('suggar', 'sugar')
# Translate short option to long
if len(name) == 1:
name = self.short_long[name]
# Outfile exception: path relative to PWD
if name == 'outfile' and relative and value not in [STDOUT, MODULEOUT]:
value = os.path.abspath(value)
# -C, --config-file inclusion, path relative to PWD
if name == 'config-file':
ret.extend(ConfigLines().include_config_file(value))
continue
# Save this config
ret.append(['all', name, value])
# All configuration was read and saved
# Get infile, if any
while arguments:
infile = arguments.pop(0)
ret.append(['all', 'infile', infile])
# Apply 'ignore' and 'filter_' rules (filter_ is stronger)
if (ignore or filter_):
filtered = []
for target, name, value in ret:
if (filter_ and name in filter_) or \
(ignore and name not in ignore):
filtered.append([target, name, value])
ret = filtered[:]
# Add the original command line string as 'realcmdline'
ret.append( ['all', 'realcmdline', cmdline] )
return ret
def compose_cmdline(self, conf={}, no_check=0):
"compose a full (and diet) command line from CONF dict"
if not conf: return []
args = []
dft_options = OPTIONS.copy()
cfg = conf.copy()
valid_opts = self.all_options + self.all_flags
use_short = {'no-headers':'H', 'enum-title':'n'}
# Remove useless options
if not no_check and cfg.get('toc-only'):
if 'no-headers' in cfg:
del cfg['no-headers']
if 'outfile' in cfg:
del cfg['outfile'] # defaults to STDOUT
if cfg.get('target') == 'txt':
del cfg['target'] # already default
args.append('--toc-only') # must be the first
del cfg['toc-only']
# Add target type
if 'target' in cfg:
args.append('-t '+cfg['target'])
del cfg['target']
# Add other options
for key in cfg.keys():
if key not in valid_opts: continue # may be a %!setting
if key == 'outfile' or key == 'infile': continue # later
val = cfg[key]
if not val: continue
# Default values are useless on cmdline
if val == dft_options.get(key): continue
# -short format
if key in use_short.keys():
args.append('-'+use_short[key])
continue
# --long format
if key in self.all_flags: # add --option
args.append('--'+key)
else: # add --option=value
args.append('--%s=%s'%(key,val))
# The outfile using -o
if 'outfile' in cfg and \
cfg['outfile'] != dft_options.get('outfile'):
args.append('-o '+cfg['outfile'])
# Place input file(s) always at the end
if 'infile' in cfg:
args.append(' '.join(cfg['infile']))
# Return as a nice list
Debug("Diet command line: %s"%' '.join(args), 1)
return args
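# Illustrative sketch (not executed), assuming 'enum-title' is a flag that
# defaults to OFF and 'html' is not the default target:
#   CommandLine().compose_cmdline(
#       {'target': 'html', 'enum-title': 1, 'infile': ['doc.t2t']})
#   ->  ['-t html', '-n', 'doc.t2t']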
##############################################################################
class SourceDocument:
"""
SourceDocument class - scan document structure, extract data
It knows about full files. It reads a file and identify all
the areas beginning (Head,Conf,Body). With this info it can
extract each area contents.
Note: the original line break is removed.
DATA:
self.arearef - Save Head, Conf, Body init line number
self.areas - Store the area names which are not empty
self.buffer - The full file contents (with NO \\r, \\n)
METHODS:
get() - Access the contents of an Area. Example:
config = SourceDocument(file).get('conf')
split() - Get all the document Areas at once. Example:
head, conf, body = SourceDocument(file).split()
RULES:
* The document parts are sequential: Head, Conf and Body.
* One ends when the next begins.
* The Conf Area is optional, so a document can have just
Head and Body Areas.
These are the Areas limits:
- Head Area: the first three lines
- Body Area: from the first valid text line to the end
- Conf Area: the comments between Head and Body Areas
Exception: If the first line is blank, this means no
header info, so the Head Area is just the first line.
"""
def __init__(self, filename='', contents=[]):
self.areas = ['head','conf','body']
self.arearef = []
self.areas_fancy = ''
self.filename = filename
self.buffer = []
if filename:
self.scan_file(filename)
elif contents:
self.scan(contents)
def split(self):
"Returns all document parts, splitted into lists."
return self.get('head'), self.get('conf'), self.get('body')
def get(self, areaname):
"Returns head|conf|body contents from self.buffer"
# Sanity
if areaname not in self.areas: return []
if not self.buffer : return []
# Go get it
bufini = 1
bufend = len(self.buffer)
if areaname == 'head':
ini = bufini
end = self.arearef[1] or self.arearef[2] or bufend
elif areaname == 'conf':
ini = self.arearef[1]
end = self.arearef[2] or bufend
elif areaname == 'body':
ini = self.arearef[2]
end = bufend
else:
Error("Unknown Area name '%s'"%areaname)
lines = self.buffer[ini:end]
# Make sure head will always have 3 lines
while areaname == 'head' and len(lines) < 3:
lines.append('')
return lines
def scan_file(self, filename):
Debug("source file: %s"%filename)
Message(_("Loading source document"),1)
buf = Readfile(filename, remove_linebreaks=1)
self.scan(buf)
def scan(self, lines):
"Run through source file and identify head/conf/body areas"
buf = lines
if len(buf) == 0:
Error(_('The input file is empty: %s')%self.filename)
cfg_parser = ConfigLines().parse_line
buf.insert(0, '') # text start at pos 1
ref = [1,4,0]
if not buf[1].strip(): # no header
ref[0] = 0 ; ref[1] = 2
rgx = getRegexes()
on_comment_block = 0
for i in range(ref[1],len(buf)): # find body init:
# Handle comment blocks inside config area
if not on_comment_block \
and rgx['blockCommentOpen'].search(buf[i]):
on_comment_block = 1
continue
if on_comment_block \
and rgx['blockCommentOpen'].search(buf[i]):
on_comment_block = 0
continue
if on_comment_block: continue
if buf[i].strip() and ( # ... not blank and
buf[i][0] != '%' or # ... not comment or
rgx['macros'].match(buf[i]) or # ... %%macro
rgx['toc'].match(buf[i]) or # ... %%toc
cfg_parser(buf[i],'include')[1] or # ... %!include
cfg_parser(buf[i],'csv')[1] # ... %!csv
):
ref[2] = i ; break
if ref[1] == ref[2]: ref[1] = 0 # no conf area
for i in 0,1,2: # del !existent
if ref[i] >= len(buf): ref[i] = 0 # title-only
if not ref[i]: self.areas[i] = ''
Debug('Head,Conf,Body start line: %s'%ref)
self.arearef = ref # save results
self.buffer = buf
        # Fanciness sample: head conf body (1 4 8)
self.areas_fancy = "%s (%s)"%(
' '.join(self.areas),
' '.join(map(str, [x or '' for x in ref])))
Message(_("Areas found: %s")%self.areas_fancy, 2)
def get_raw_config(self):
"Handy method to get the CONF area RAW config (if any)"
if not self.areas.count('conf'): return []
Message(_("Scanning source document CONF area"),1)
raw = ConfigLines(
file_=self.filename, lines=self.get('conf'),
first_line=self.arearef[1]).get_raw_config()
Debug("document raw config: %s"%raw, 1)
return raw
##############################################################################
class ConfigMaster:
"""
ConfigMaster class - the configuration wizard
This class is the configuration master. It knows how to handle
the RAW and PARSED config format. It also performs the sanity
checking for a given configuration.
DATA:
self.raw - Stores the config on the RAW format
self.parsed - Stores the config on the PARSED format
self.defaults - Stores the default values for all keys
self.off - Stores the OFF values for all keys
self.multi - List of keys which can have multiple values
self.numeric - List of keys which value must be a number
self.incremental - List of keys which are incremental
RAW FORMAT:
The RAW format is a list of lists, being each mother list item
a full configuration entry. Any entry is a 3 item list, on
the following format: [ TARGET, KEY, VALUE ]
Being a list, the order is preserved, so it's easy to use
different kinds of configs, as CONF area and command line,
respecting the precedence.
The special target 'all' is used when no specific target was
defined on the original config.
PARSED FORMAT:
The PARSED format is a dictionary, with all the 'key : value'
found by reading the RAW config. The self.target contents
matters, so this dictionary only contains the target's
config. The configs of other targets are ignored.
The CommandLine and ConfigLines classes have the get_raw_config()
method which convert the configuration found to the RAW format.
Just feed it to parse() and get a brand-new ready-to-use config
dictionary. Example:
    >>> raw = CommandLine().get_raw_config(['-n', '-H'])
    >>> print(raw)
    [['all', 'enum-title', ''], ['all', 'no-headers', '']]
    >>> parsed = ConfigMaster(raw).parse()
    >>> print(parsed)
    {'enum-title': 1, 'headers': 0}
"""
def __init__(self, raw=[], target=''):
self.raw = raw
self.target = target
self.parsed = {}
self.dft_options = OPTIONS.copy()
self.dft_flags = FLAGS.copy()
self.dft_actions = ACTIONS.copy()
self.dft_settings = SETTINGS.copy()
self.defaults = self._get_defaults()
self.off = self._get_off()
self.incremental = ['verbose']
self.numeric = ['toc-level', 'split', 'width', 'height']
self.multi = ['infile', 'preproc', 'postproc', 'options', 'style']
def _get_defaults(self):
"Get the default values for all config/options/flags"
empty = {}
for kw in CONFIG_KEYWORDS: empty[kw] = ''
empty.update(self.dft_options)
empty.update(self.dft_flags)
empty.update(self.dft_actions)
empty.update(self.dft_settings)
empty['realcmdline'] = '' # internal use only
empty['sourcefile'] = '' # internal use only
return empty
def _get_off(self):
"Turns OFF all the config/options/flags"
off = {}
for key in self.defaults.keys():
kind = type(self.defaults[key])
if kind == type(9):
off[key] = 0
elif kind == type(''):
off[key] = ''
elif kind == type([]):
off[key] = []
else:
Error('ConfigMaster: %s: Unknown type' % key)
return off
def _check_target(self):
"Checks if the target is already defined. If not, do it"
if not self.target:
self.target = self.find_value('target')
def get_target_raw(self):
"Returns the raw config for self.target or 'all'"
ret = []
self._check_target()
for entry in self.raw:
if entry[0] == self.target or entry[0] == 'all':
ret.append(entry)
return ret
def add(self, key, val):
"Adds the key:value pair to the config dictionary (if needed)"
# %!options
if key == 'options':
ignoreme = list(self.dft_actions.keys()) + ['target']
ignoreme.remove('dump-config')
ignoreme.remove('dump-source')
ignoreme.remove('targets')
raw_opts = CommandLine().get_raw_config(
val, ignore=ignoreme)
for target, key, val in raw_opts:
self.add(key, val)
return
# The no- prefix turns OFF this key
if key.startswith('no-'):
key = key[3:] # remove prefix
val = self.off.get(key) # turn key OFF
# Is this key valid?
if key not in self.defaults.keys():
Debug('Bogus Config %s:%s'%(key,val),1)
return
# Is this value the default one?
if val == self.defaults.get(key):
# If default value, remove previous key:val
if key in self.parsed:
del self.parsed[key]
# Nothing more to do
return
        # Flags turned ON come in empty; we'll add the value 1 now
if val == '' and (
key in self.dft_flags.keys() or
key in self.dft_actions.keys()):
val = 1
# Multi value or single?
if key in self.multi:
# First one? start new list
if key not in self.parsed:
self.parsed[key] = []
self.parsed[key].append(val)
# Incremental value? so let's add it
elif key in self.incremental:
self.parsed[key] = (self.parsed.get(key) or 0) + val
else:
self.parsed[key] = val
fancykey = dotted_spaces("%12s"%key)
Message(_("Added config %s : %s")%(fancykey,val),3)
def get_outfile_name(self, config={}):
"Dirname is the same for {in,out}file"
infile, outfile = config['sourcefile'], config['outfile']
if outfile and outfile not in (STDOUT, MODULEOUT) \
and not os.path.isabs(outfile):
outfile = os.path.join(os.path.dirname(infile), outfile)
if infile == STDIN and not outfile: outfile = STDOUT
if infile == MODULEIN and not outfile: outfile = MODULEOUT
if not outfile and (infile and config.get('target')):
basename = re.sub('\.(txt|t2t)$','',infile)
outfile = "%s.%s"%(basename, config['target'])
Debug(" infile: '%s'"%infile , 1)
Debug("outfile: '%s'"%outfile, 1)
return outfile
def sanity(self, config, gui=0):
"Basic config sanity checking"
global AA
if not config: return {}
target = config.get('target')
# Some actions don't require target specification
if not target:
for action in NO_TARGET:
if config.get(action):
target = 'txt'
break
        # On GUI, some checks are skipped
if not gui:
# We *need* a target
if not target:
Error(_('No target specified (try --help)') + '\n\n' +
_('Please inform a target using the -t option or the %!target command.') + '\n' +
_('Example:') + ' %s -t html %s' % (my_name, _('file.t2t')) + '\n\n' +
_("Run 'txt2tags --targets' to see all the available targets."))
# And of course, an infile also
# TODO#1: It seems that this checking is never reached
if not config.get('infile'):
Error(_('Missing input file (try --help)'))
# Is the target valid?
if not TARGETS.count(target):
Error(_("Invalid target '%s'") % target + '\n\n' +
_("Run 'txt2tags --targets' to see all the available targets."))
# Ensure all keys are present
empty = self.defaults.copy() ; empty.update(config)
config = empty.copy()
# Check integers options
for key in config.keys():
if key in self.numeric:
try:
config[key] = int(config[key])
except ValueError:
Error(_('--%s value must be a number') % key)
# Check split level value
if config['split'] not in (0,1,2):
Error(_('Option --split must be 0, 1 or 2'))
# Slides needs width and height
if config['slides'] and target == 'art':
if not config['width']:
config['width'] = DFT_SLIDE_WIDTH
if not config['height']:
config['height'] = DFT_SLIDE_HEIGHT
# ASCII Art needs a width
if target == 'art' and not config['width']:
config['width'] = DFT_TEXT_WIDTH
# Check/set user ASCII Art formatting characters
if config['art-chars']:
if len(config['art-chars']) != len(AA_VALUES):
Error(_("--art-chars: Expected %i chars, got %i") % (
len(AA_VALUES), len(config['art-chars'])))
else:
AA = dict(zip(AA_KEYS, config['art-chars']))
# --toc-only is stronger than others
if config['toc-only']:
config['headers'] = 0
config['toc'] = 0
config['split'] = 0
config['gui'] = 0
config['outfile'] = config['outfile'] or STDOUT
        # Splitting is disabled for now (future: HTML only, no STDOUT)
config['split'] = 0
# Restore target
config['target'] = target
# Set output file name
config['outfile'] = self.get_outfile_name(config)
# Checking suicide
if config['sourcefile'] == config['outfile'] and \
config['outfile'] not in [STDOUT,MODULEOUT] and not gui:
Error(_("Input and Output files are the same: %s") % config['outfile'])
return config
def parse(self):
"Returns the parsed config for the current target"
raw = self.get_target_raw()
for target, key, value in raw:
self.add(key, value)
Message(_("Added the following keys: %s") % ', '.join(self.parsed.keys()), 2)
return self.parsed.copy()
def find_value(self, key='', target=''):
"Scans ALL raw config to find the desired key"
ret = []
# Scan and save all values found
for targ, k, val in self.raw:
if k == key and (targ == target or targ == 'all'):
ret.append(val)
if not ret: return ''
# If not multi value, return only the last found
if key in self.multi: return ret
else : return ret[-1]
########################################################################
class ConfigLines:
"""
ConfigLines class - the config file data extractor
This class reads and parse the config lines on the %!key:val
format, converting it to RAW config. It deals with user
config file (RC file), source document CONF area and
%!includeconf directives.
Call it passing a file name or feed the desired config lines.
Then just call the get_raw_config() method and wait to
receive the full config data on the RAW format. This method
also follows the possible %!includeconf directives found on
the config lines. Example:
        raw = ConfigLines(file_=".txt2tagsrc").get_raw_config()
The parse_line() method is also useful to be used alone,
to identify and tokenize a single config line. For example,
to get the %!include command components, on the source
document BODY:
target, key, value = ConfigLines().parse_line(body_line)
"""
def __init__(self, file_='', lines=[], first_line=1):
self.file = file_ or 'NOFILE'
self.lines = lines
self.first_line = first_line
def load_lines(self):
"Make sure we've loaded the file contents into buffer"
if not self.lines and not self.file:
Error("ConfigLines: No file or lines provided")
if not self.lines:
self.lines = self.read_config_file(self.file)
def read_config_file(self, filename=''):
"Read a Config File contents, aborting on invalid line"
if not filename: return []
errormsg = _("Invalid CONFIG line on %s")+"\n%03d:%s"
lines = Readfile(filename, remove_linebreaks=1)
# Sanity: try to find invalid config lines
for i in range(len(lines)):
line = lines[i].rstrip()
if not line: continue # empty
if line[0] != '%': Error(errormsg%(filename,i+1,line))
return lines
def include_config_file(self, file_=''):
"Perform the %!includeconf action, returning RAW config"
if not file_: return []
# Current dir relative to the current file (self.file)
current_dir = os.path.dirname(self.file)
file_ = os.path.join(current_dir, file_)
# Read and parse included config file contents
lines = self.read_config_file(file_)
return ConfigLines(file_=file_, lines=lines).get_raw_config()
def get_raw_config(self):
"Scan buffer and extract all config as RAW (including includes)"
ret = []
self.load_lines()
first = self.first_line
for i in range(len(self.lines)):
line = self.lines[i]
Message(_("Processing line %03d: %s")%(first+i,line),2)
target, key, val = self.parse_line(line)
if not key: continue # no config on this line
if key == 'includeconf':
err = _('A file cannot include itself (loop!)')
if val == self.file:
Error("%s: %%!includeconf: %s" % (err, self.file))
more_raw = self.include_config_file(val)
ret.extend(more_raw)
Message(_("Finished Config file inclusion: %s") % val, 2)
else:
ret.append([target, key, val])
Message(_("Added %s")%key,3)
return ret
def parse_line(self, line='', keyname='', target=''):
"Detects %!key:val config lines and extract data from it"
empty = ['', '', '']
if not line: return empty
no_target = ['target', 'includeconf']
re_name = keyname or '[a-z]+'
re_target = target or '[a-z]*'
# XXX TODO <value>\S.+? requires TWO chars, breaks %!include:a
cfgregex = re.compile("""
^%%!\s* # leading id with opt spaces
(?P<name>%s)\s* # config name
(\((?P<target>%s)\))? # optional target spec inside ()
\s*:\s* # key:value delimiter with opt spaces
(?P<value>\S.+?) # config value
\s*$ # rstrip() spaces and hit EOL
"""%(re_name, re_target), re.I+re.VERBOSE)
prepostregex = re.compile("""
# ---[ PATTERN ]---
^( "([^"]*)" # "double quoted" or
| '([^']*)' # 'single quoted' or
| ([^\s]+) # single_word
)
\s+ # separated by spaces
# ---[ REPLACE ]---
( "([^"]*)" # "double quoted" or
| '([^']*)' # 'single quoted' or
| (.*) # anything
)
\s*$
""", re.VERBOSE)
guicolors = re.compile("^([^\s]+\s+){3}[^\s]+") # 4 tokens
# Give me a match or get out
match = cfgregex.match(line)
if not match: return empty
# Save information about this config
name = (match.group('name') or '').lower()
target = (match.group('target') or 'all').lower()
value = match.group('value')
# %!keyword(target) not allowed for these
if name in no_target and match.group('target'):
Error(
_("You can't use (target) with %s") % ('%!' + name)
+ "\n%s" % line)
# Force no_target keywords to be valid for all targets
if name in no_target:
target = 'all'
# Special config for GUI colors
if name == 'guicolors':
valmatch = guicolors.search(value)
if not valmatch: return empty
value = re.split('\s+', value)
# Special config with two quoted values (%!preproc: "foo" 'bar')
if name == 'preproc' or name == 'postproc':
valmatch = prepostregex.search(value)
if not valmatch: return empty
getval = valmatch.group
patt = getval(2) or getval(3) or getval(4) or ''
repl = getval(6) or getval(7) or getval(8) or ''
value = (patt, repl)
return [target, name, value]
##############################################################################
class MaskMaster:
"(Un)Protect important structures from escaping and formatting"
def __init__(self):
self.linkmask = 'vvvLINKvvv'
self.monomask = 'vvvMONOvvv'
self.macromask = 'vvvMACROvvv'
self.rawmask = 'vvvRAWvvv'
self.taggedmask= 'vvvTAGGEDvvv'
self.tocmask = 'vvvTOCvvv'
self.macroman = MacroMaster()
self.reset()
def reset(self):
self.linkbank = []
self.monobank = []
self.macrobank = []
self.rawbank = []
self.taggedbank = []
def mask(self, line=''):
global AUTOTOC
# The verbatim, raw and tagged inline marks are mutually exclusive.
# This means that one can't appear inside the other.
# If found, the inner marks must be ignored.
# Example: ``foo ""bar"" ''baz''``
# In HTML: <code>foo ""bar"" ''baz''</code>
#
# The trick here is to protect the mark who appears first on the line.
# The three regexes are tried and the one with the lowest index wins.
# If none is found (else), we get out of the loop.
#
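        # Illustrative sketch (not executed): masking '``foo ""bar""``'
        # protects the verbatim span first (it starts earlier on the line),
        # so the line becomes 'vvvMONOvvv' and self.monobank keeps the
        # escaped inner text, raw marks included as literal characters.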
while True:
try:
t = regex['tagged'].search(line).start()
except:
t = -1
try:
r = regex['raw'].search(line).start()
except:
r = -1
try:
v = regex['fontMono'].search(line).start()
except:
v = -1
# Protect tagged text
if t >= 0 and (r == -1 or t < r) and (v == -1 or t < v):
txt = regex['tagged'].search(line).group(1)
## JS
if TARGET == 'tex':
txt = txt.replace('_', 'vvvUnderscoreInTaggedTextvvv')
self.taggedbank.append(txt)
line = regex['tagged'].sub(self.taggedmask,line,1)
# Protect raw text
elif r >= 0 and (t == -1 or r < t) and (v == -1 or r < v):
txt = regex['raw'].search(line).group(1)
txt = doEscape(TARGET,txt)
## JS
if TARGET == 'tex':
txt = txt.replace('_', 'vvvUnderscoreInRawTextvvv')
self.rawbank.append(txt)
line = regex['raw'].sub(self.rawmask,line,1)
# Protect verbatim text
elif v >= 0 and (t == -1 or v < t) and (r == -1 or v < r):
txt = regex['fontMono'].search(line).group(1)
txt = doEscape(TARGET,txt)
self.monobank.append(txt)
line = regex['fontMono'].sub(self.monomask,line,1)
else:
break
# Protect macros
while regex['macros'].search(line):
txt = regex['macros'].search(line).group()
self.macrobank.append(txt)
line = regex['macros'].sub(self.macromask,line,1)
# Protect TOC location
while regex['toc'].search(line):
line = regex['toc'].sub(self.tocmask,line)
AUTOTOC = 0
# Protect URLs and emails
while regex['linkmark'].search(line) or \
regex['link' ].search(line):
# Try to match plain or named links
match_link = regex['link'].search(line)
match_named = regex['linkmark'].search(line)
# Define the current match
if match_link and match_named:
# Both types found, which is the first?
m = match_link
if match_named.start() < match_link.start():
m = match_named
else:
# Just one type found, we're fine
m = match_link or match_named
# Extract link data and apply mask
if m == match_link: # plain link
link = m.group()
label = ''
link_re = regex['link']
else: # named link
link = m.group('link')
label = m.group('label').rstrip()
link_re = regex['linkmark']
line = link_re.sub(self.linkmask,line,1)
# Save link data to the link bank
self.linkbank.append((label, link))
return line
def undo(self, line):
# url & email
for label,url in self.linkbank:
link = get_tagged_link(label, url)
line = line.replace(self.linkmask, link, 1)
# Expand macros
for macro in self.macrobank:
macro = self.macroman.expand(macro)
line = line.replace(self.macromask, macro, 1)
# Expand verb
for mono in self.monobank:
open_,close = TAGS['fontMonoOpen'],TAGS['fontMonoClose']
line = line.replace(self.monomask, open_+mono+close, 1)
# Expand raw
for raw in self.rawbank:
line = line.replace(self.rawmask, raw, 1)
# Expand tagged
for tagged in self.taggedbank:
line = line.replace(self.taggedmask, tagged, 1)
return line
##############################################################################
class TitleMaster:
"Title things"
def __init__(self):
self.count = ['',0,0,0,0,0]
self.toc = []
self.level = 0
self.kind = ''
self.txt = ''
self.label = ''
self.tag = ''
self.tag_hold = []
self.last_level = 0
self.count_id = ''
self.user_labels = {}
self.anchor_count = 0
self.anchor_prefix = 'toc'
def _open_close_blocks(self):
"Open new title blocks, closing the previous (if any)"
if not rules['titleblocks']: return
tag = ''
last = self.last_level
curr = self.level
# Same level, just close the previous
if curr == last:
tag = TAGS.get('title%dClose'%last)
if tag: self.tag_hold.append(tag)
# Section -> subsection, more depth
while curr > last:
last += 1
# Open the new block of subsections
tag = TAGS.get('blockTitle%dOpen'%last)
if tag: self.tag_hold.append(tag)
# Jump from title1 to title3 or more
# Fill the gap with an empty section
if curr - last > 0:
tag = TAGS.get('title%dOpen'%last)
tag = regex['x'].sub('', tag) # del \a
if tag: self.tag_hold.append(tag)
# Section <- subsection, less depth
while curr < last:
# Close the current opened subsection
tag = TAGS.get('title%dClose'%last)
if tag: self.tag_hold.append(tag)
# Close the current opened block of subsections
tag = TAGS.get('blockTitle%dClose'%last)
if tag: self.tag_hold.append(tag)
last -= 1
# Close the previous section of the same level
# The subsections were under it
if curr == last:
tag = TAGS.get('title%dClose'%last)
if tag: self.tag_hold.append(tag)
def add(self, line):
"Parses a new title line."
if not line: return
self._set_prop(line)
self._open_close_blocks()
self._set_count_id()
self._set_label()
self._save_toc_info()
def close_all(self):
"Closes all opened title blocks"
ret = []
ret.extend(self.tag_hold)
while self.level:
tag = TAGS.get('title%dClose'%self.level)
if tag: ret.append(tag)
tag = TAGS.get('blockTitle%dClose'%self.level)
if tag: ret.append(tag)
self.level -= 1
return ret
def _save_toc_info(self):
"Save TOC info, used by self.dump_marked_toc()"
self.toc.append((self.level, self.count_id, self.txt, self.label))
def _set_prop(self, line=''):
"Extract info from original line and set data holders."
# Detect title type (numbered or not)
id_ = line.lstrip()[0]
if id_ == '=': kind = 'title'
elif id_ == '+': kind = 'numtitle'
else: Error("Unknown Title ID '%s'"%id_)
# Extract line info
match = regex[kind].search(line)
level = len(match.group('id'))
txt = match.group('txt').strip()
label = match.group('label')
# Parse info & save
if CONF['enum-title']: kind = 'numtitle' # force
if rules['titleblocks']:
self.tag = TAGS.get('%s%dOpen'%(kind,level)) or \
TAGS.get('title%dOpen'%level)
else:
self.tag = TAGS.get(kind+repr(level)) or \
TAGS.get('title'+repr(level))
self.last_level = self.level
self.kind = kind
self.level = level
self.txt = txt
self.label = label
def _set_count_id(self):
"Compose and save the title count identifier (if needed)."
count_id = ''
if self.kind == 'numtitle' and not rules['autonumbertitle']:
# Manually increase title count
self.count[self.level] += 1
# Reset sublevels count (if any)
max_levels = len(self.count)
if self.level < max_levels-1:
for i in range(self.level+1, max_levels):
self.count[i] = 0
# Compose count id from hierarchy
for i in range(self.level):
count_id= "%s%d."%(count_id, self.count[i+1])
self.count_id = count_id
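    # Illustrative note (comment only): with manual numbering (the target
    # can't autonumber), a '+++' title reached after sections 2 and 2.1
    # leaves the counters roughly as ['', 2, 1, 1, 0, 0] and composes the
    # count_id "2.1.1."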
def _set_label(self):
"Compose and save title label, used by anchors."
# Remove invalid chars from label set by user
self.label = re.sub('[^A-Za-z0-9_-]', '', self.label or '')
# Generate name as 15 first :alnum: chars
#TODO how to translate safely accented chars to plain?
#self.label = re.sub('[^A-Za-z0-9]', '', self.txt)[:15]
# 'tocN' label - sequential count, ignoring 'toc-level'
#self.label = self.anchor_prefix + str(len(self.toc)+1)
def _get_tagged_anchor(self):
"Return anchor if user defined a label, or TOC is on."
ret = ''
label = self.label
if CONF['toc'] and self.level <= CONF['toc-level']:
            # This count is needed because self.toc stores all
            # titles, regardless of the 'toc-level' setting,
            # so we can't use self.toc length to number anchors
self.anchor_count += 1
# Autonumber label (if needed)
label = label or '%s%s' % (self.anchor_prefix, self.anchor_count)
if label and TAGS['anchor']:
ret = regex['x'].sub(label,TAGS['anchor'])
return ret
def _get_full_title_text(self):
"Returns the full title contents, already escaped."
ret = self.txt
# Insert count_id (if any) before text
if self.count_id:
ret = '%s %s'%(self.count_id, ret)
# Escape specials
ret = doEscape(TARGET, ret)
        # Some targets need final escapes on title lines
        # It's here because there is a 'continue' after title
if rules['finalescapetitle']:
ret = doFinalEscape(TARGET, ret)
return ret
def get(self):
"Returns the tagged title as a list."
global AA_TITLE
ret = []
# Maybe some anchoring before?
anchor = self._get_tagged_anchor()
self.tag = regex['_anchor'].sub(anchor, self.tag)
### Compose & escape title text (TOC uses unescaped)
full_title = self._get_full_title_text()
# Close previous section area
ret.extend(self.tag_hold)
self.tag_hold = []
tagged = regex['x'].sub(full_title, self.tag)
# Adds "underline" on TXT target
if TARGET == 'txt':
if BLOCK.count > 1: ret.append('') # blank line before
ret.append(tagged)
# Get the right letter count for UTF
if CONF['encoding'].lower() == 'utf-8':
                # On Python 3 the title is already a unicode str; decode only if bytes
                i = len(full_title.decode('utf-8')) if isinstance(full_title, bytes) else len(full_title)
else:
i = len(full_title)
ret.append(regex['x'].sub('='*i, self.tag))
elif TARGET == 'art' and self.level == 1:
if CONF['slides'] :
AA_TITLE = tagged
else :
if BLOCK.count > 1: ret.append('') # blank line before
ret.extend(aa_box(tagged, CONF['width']))
elif TARGET == 'art':
level = 'level'+str(self.level)
if BLOCK.count > 1: ret.append('') # blank line before
ret.append(tagged)
ret.append(AA[level] * len(full_title))
else:
ret.append(tagged)
return ret
def dump_marked_toc(self, max_level=99):
"Dumps all toc itens as a valid t2t-marked list"
ret = []
toc_count = 1
for level, count_id, txt, label in self.toc:
if level > max_level: continue # ignore
indent = ' '*level
id_txt = ('%s %s'%(count_id, txt)).lstrip()
label = label or self.anchor_prefix+repr(toc_count)
toc_count += 1
# TOC will have crosslinks to anchors
if TAGS['anchor']:
if CONF['enum-title'] and level == 1:
                    # 1. [Foo #anchor] is more readable than [1. Foo #anchor] in level 1.
                    # This idea was stolen from Windows .CHM help files.
tocitem = '%s+ [""%s"" #%s]' % (indent, txt, label)
else:
tocitem = '%s- [""%s"" #%s]' % (indent, id_txt, label)
# TOC will be plain text (no links)
else:
if TARGET in ['txt', 'man', 'art']:
# For these, the list is not necessary, just dump the text
tocitem = '%s""%s""' % (indent, id_txt)
else:
tocitem = '%s- ""%s""' % (indent, id_txt)
ret.append(tocitem)
return ret
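    # Illustrative note (comment only): with anchors enabled, a level-1
    # title "Intro" is dumped as a t2t list item like ' - [""Intro"" #toc1]',
    # while text targets without anchors (txt, man, art) get the plain
    # ' ""Intro""' form, prefixed by the count_id when titles are numbered.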
##############################################################################
#TODO check all this table mess
# It uses parse_row properties for table lines
# BLOCK.table() replaces the cells by the parsed content
class TableMaster:
def __init__(self, line=''):
self.rows = []
self.border = 0
self.align = 'Left'
self.cellalign = []
self.colalign = []
self.cellspan = []
if line:
prop = self.parse_row(line)
self.border = prop['border']
self.align = prop['align']
self.cellalign = prop['cellalign']
self.cellspan = prop['cellspan']
self.colalign = self._get_col_align()
def _get_col_align(self):
colalign = []
for cell in range(0,len(self.cellalign)):
align = self.cellalign[cell]
span = self.cellspan[cell]
colalign.extend([align] * span)
return colalign
def _get_open_tag(self):
topen = TAGS['tableOpen']
tborder = TAGS['_tableBorder']
talign = TAGS['_tableAlign'+self.align]
calignsep = TAGS['tableColAlignSep']
calign = ''
# The first line defines if table has border or not
if not self.border: tborder = ''
# Set the columns alignment
if rules['tablecellaligntype'] == 'column':
calign = [TAGS['_tableColAlign%s'%x] for x in self.colalign]
calign = calignsep.join(calign)
# Align full table, set border and Column align (if any)
topen = regex['_tableAlign' ].sub(talign , topen)
topen = regex['_tableBorder' ].sub(tborder, topen)
topen = regex['_tableColAlign'].sub(calign , topen)
# Tex table spec, border or not: {|l|c|r|} , {lcr}
if calignsep and not self.border:
# Remove cell align separator
topen = topen.replace(calignsep, '')
return topen
def _get_cell_align(self, cells):
ret = []
for cell in cells:
align = 'Left'
if cell.strip():
if cell[0] == ' ' and cell[-1] == ' ':
align = 'Center'
elif cell[0] == ' ':
align = 'Right'
ret.append(align)
return ret
def _get_cell_span(self, cells):
ret = []
for cell in cells:
span = 1
m = re.search('\a(\|+)$', cell)
if m: span = len(m.group(1))+1
ret.append(span)
return ret
def _tag_cells(self, rowdata):
row = []
cells = rowdata['cells']
open_ = TAGS['tableCellOpen']
close = TAGS['tableCellClose']
sep = TAGS['tableCellSep']
calign = [TAGS['_tableCellAlign'+x] for x in rowdata['cellalign']]
calignsep = TAGS['tableColAlignSep']
ncolumns = len(self.colalign)
# Populate the span and multicol open tags
cspan = []
multicol = []
colindex = 0
for cellindex in range(0,len(rowdata['cellspan'])):
span = rowdata['cellspan'][cellindex]
align = rowdata['cellalign'][cellindex]
if span > 1:
cspan.append(regex['x'].sub(
str(span), TAGS['_tableCellColSpan']))
mcopen = regex['x'].sub(str(span), TAGS['_tableCellMulticolOpen'])
multicol.append(mcopen)
else:
cspan.append('')
if colindex < ncolumns and align != self.colalign[colindex]:
mcopen = regex['x'].sub('1', TAGS['_tableCellMulticolOpen'])
multicol.append(mcopen)
else:
multicol.append('')
if not self.border:
multicol[-1] = multicol[-1].replace(calignsep, '')
colindex += span
        # Maybe it is a title row?
if rowdata['title']:
open_ = TAGS['tableTitleCellOpen'] or open_
close = TAGS['tableTitleCellClose'] or close
sep = TAGS['tableTitleCellSep'] or sep
# Should we break the line on *each* table cell?
if rules['breaktablecell']: close = close+'\n'
# Cells pre processing
if rules['tablecellstrip']:
cells = [x.strip() for x in cells]
if rowdata['title'] and rules['tabletitlerowinbold']:
cells = [enclose_me('fontBold',x) for x in cells]
# Add cell BEGIN/END tags
for cell in cells:
copen = open_
cclose = close
# Make sure we will pop from some filled lists
# Fixes empty line bug '| |'
this_align = this_span = this_mcopen = ''
if calign: this_align = calign.pop(0)
if cspan : this_span = cspan.pop(0)
if multicol: this_mcopen = multicol.pop(0)
# Insert cell align into open tag (if cell is alignable)
if rules['tablecellaligntype'] == 'cell':
copen = regex['_tableCellAlign'].sub(
this_align, copen)
# Insert cell span into open tag (if cell is spannable)
if rules['tablecellspannable']:
copen = regex['_tableCellColSpan'].sub(
this_span, copen)
# Use multicol tags instead (if multicol supported, and if
# cell has a span or is aligned differently to column)
if rules['tablecellmulticol']:
if this_mcopen:
copen = regex['_tableColAlign'].sub(this_align, this_mcopen)
cclose = TAGS['_tableCellMulticolClose']
row.append(copen + cell + cclose)
# Maybe there are cell separators?
return sep.join(row)
def add_row(self, cells):
self.rows.append(cells)
def parse_row(self, line):
# Default table properties
ret = {
'border':0, 'title':0, 'align':'Left',
'cells':[], 'cellalign':[], 'cellspan':[]
}
# Detect table align (and remove spaces mark)
if line[0] == ' ': ret['align'] = 'Center'
line = line.lstrip()
# Detect title mark
if line[1] == '|': ret['title'] = 1
# Detect border mark and normalize the EOL
m = re.search(' (\|+) *$', line)
if m: line = line+' ' ; ret['border'] = 1
else: line = line+' | '
# Delete table mark
line = regex['table'].sub('', line)
# Detect colspan | foo | bar baz |||
line = re.sub(' (\|+)\| ', '\a\\1 | ', line)
# Split cells (the last is fake)
ret['cells'] = line.split(' | ')[:-1]
# Find cells span
ret['cellspan'] = self._get_cell_span(ret['cells'])
# Remove span ID
ret['cells'] = [re.sub('\a\|+$','',x) for x in ret['cells']]
# Find cells align
ret['cellalign'] = self._get_cell_align(ret['cells'])
# Hooray!
Debug('Table Prop: %s' % ret, 7)
return ret
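    # Illustrative note (comment only): a source row such as
    #     | foo | bar |||
    # has a border (trailing pipes) and its last cell spans the extra
    # columns, so parse_row() is expected to return roughly
    #     cells=['foo', 'bar'], cellspan=[1, 3], border=1, title=0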
def dump(self):
open_ = self._get_open_tag()
rows = self.rows
close = TAGS['tableClose']
rowopen = TAGS['tableRowOpen']
rowclose = TAGS['tableRowClose']
rowsep = TAGS['tableRowSep']
titrowopen = TAGS['tableTitleRowOpen'] or rowopen
titrowclose = TAGS['tableTitleRowClose'] or rowclose
if rules['breaktablelineopen']:
rowopen = rowopen + '\n'
titrowopen = titrowopen + '\n'
# Tex gotchas
if TARGET == 'tex':
if not self.border:
rowopen = titrowopen = ''
else:
close = rowopen + close
# Now we tag all the table cells on each row
#tagged_cells = map(lambda x: self._tag_cells(x), rows) #!py15
tagged_cells = []
for cell in rows: tagged_cells.append(self._tag_cells(cell))
# Add row separator tags between lines
tagged_rows = []
if rowsep:
#!py15
#tagged_rows = map(lambda x:x+rowsep, tagged_cells)
for cell in tagged_cells:
tagged_rows.append(cell+rowsep)
# Remove last rowsep, because the table is over
tagged_rows[-1] = tagged_rows[-1].replace(rowsep, '')
# Add row BEGIN/END tags for each line
else:
for rowdata in rows:
if rowdata['title']:
o,c = titrowopen, titrowclose
else:
o,c = rowopen, rowclose
row = tagged_cells.pop(0)
tagged_rows.append(o + row + c)
# Join the pieces together
fulltable = []
if open_: fulltable.append(open_)
fulltable.extend(tagged_rows)
if close: fulltable.append(close)
return fulltable
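    # Illustrative note (comment only): dump() joins the table open tag, one
    # tagged row per source line and the close tag. Assuming an HTML-like
    # target whose TAGS use <table>/<tr>/<td>, a two-cell row would come out
    # roughly as '<tr><td>foo</td><td>bar</td></tr>' (the exact strings come
    # from the current target's TAGS and rules).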
##############################################################################
class BlockMaster:
"TIP: use blockin/out to add/del holders"
def __init__(self):
self.BLK = []
self.HLD = []
self.PRP = []
self.depth = 0
self.count = 0
self.last = ''
self.tableparser = None
self.contains = {
'para' :['comment','raw','tagged'],
'verb' :[],
'table' :['comment'],
'raw' :[],
'tagged' :[],
'comment' :[],
'quote' :['quote','comment','raw','tagged'],
'list' :['list','numlist','deflist','para','verb','comment','raw','tagged'],
'numlist' :['list','numlist','deflist','para','verb','comment','raw','tagged'],
'deflist' :['list','numlist','deflist','para','verb','comment','raw','tagged'],
'bar' :[],
'title' :[],
'numtitle':[],
}
self.allblocks = list(self.contains.keys())
# If one is found inside another, ignore the marks
self.exclusive = ['comment','verb','raw','tagged']
# May we include bars inside quotes?
if rules['barinsidequote']:
self.contains['quote'].append('bar')
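        # Illustrative note (comment only): self.contains drives the automatic
        # closing done by blockin() - e.g. asking for a 'table' while a 'para'
        # is still open first closes the paragraph, because 'para' does not
        # list 'table' among the blocks it may contain.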
def block(self):
if not self.BLK: return ''
return self.BLK[-1]
def isblock(self, name=''):
return self.block() == name
def prop(self, key):
if not self.PRP: return ''
return self.PRP[-1].get(key) or ''
def propset(self, key, val):
self.PRP[-1][key] = val
#Debug('BLOCK prop ++: %s->%s'%(key,repr(val)), 1)
#Debug('BLOCK props: %s'%(repr(self.PRP)), 1)
def hold(self):
if not self.HLD: return []
return self.HLD[-1]
def holdadd(self, line):
if self.block().endswith('list'): line = [line]
self.HLD[-1].append(line)
Debug('HOLD add: %s'%repr(line), 4)
Debug('FULL HOLD: %s'%self.HLD, 4)
def holdaddsub(self, line):
self.HLD[-1][-1].append(line)
Debug('HOLD addsub: %s'%repr(line), 4)
Debug('FULL HOLD: %s'%self.HLD, 4)
def holdextend(self, lines):
if self.block().endswith('list'): lines = [lines]
self.HLD[-1].extend(lines)
Debug('HOLD extend: %s'%repr(lines), 4)
Debug('FULL HOLD: %s'%self.HLD, 4)
def blockin(self, block):
ret = []
if block not in self.allblocks:
Error("Invalid block '%s'"%block)
# First, let's close other possible open blocks
while self.block() and block not in self.contains[self.block()]:
ret.extend(self.blockout())
# Now we can gladly add this new one
self.BLK.append(block)
self.HLD.append([])
self.PRP.append({})
self.count += 1
if block == 'table': self.tableparser = TableMaster()
# Deeper and deeper
self.depth = len(self.BLK)
Debug('block ++ (%s): %s' % (block,self.BLK), 3)
return ret
def blockout(self):
global AA_COUNT
if not self.BLK: Error('No block to pop')
blockname = self.BLK.pop()
result = getattr(self, blockname)()
parsed = self.HLD.pop()
self.PRP.pop()
self.depth = len(self.BLK)
if blockname == 'table': del self.tableparser
# Inserting a nested block into mother
if self.block():
if blockname != 'comment': # ignore comment blocks
if self.block().endswith('list'):
self.HLD[-1][-1].append(result)
else:
self.HLD[-1].append(result)
# Reset now. Mother block will have it all
result = []
Debug('block -- (%s): %s' % (blockname,self.BLK), 3)
Debug('RELEASED (%s): %s' % (blockname,parsed), 3)
# Save this top level block name (produced output)
# The next block will use it
if result:
self.last = blockname
Debug('BLOCK: %s'%result, 6)
# ASCII Art processing
if TARGET == 'art' and CONF['slides'] and not CONF['toc-only'] and not CONF.get('art-no-title'):
n = (CONF['height'] - 1) - (AA_COUNT % (CONF['height'] - 1) + 1)
if n < len(result) and not (TITLE.level == 1 and blockname in ["title", "numtitle"]):
result = ([''] * n) + [aa_line(AA['bar1'], CONF['width'])] + aa_slide(AA_TITLE, CONF['width']) + [''] + result
if blockname in ["title", "numtitle"] and TITLE.level == 1:
aa_title = aa_slide(AA_TITLE, CONF['width']) + ['']
if AA_COUNT:
aa_title = ([''] * n) + [aa_line(AA['bar2'], CONF['width'])] + aa_title
result = aa_title + result
AA_COUNT += len(result)
return result
def _last_escapes(self, line):
return doFinalEscape(TARGET, line)
def _get_escaped_hold(self):
ret = []
for line in self.hold():
if isinstance(line, list):
ret.extend(line)
else:
ret.append(self._last_escapes(line))
return ret
def _remove_twoblanks(self, lastitem):
if len(lastitem) > 1 and lastitem[-2:] == ['','']:
return lastitem[:-2]
return lastitem
def _should_add_blank_line(self, where, blockname):
"Validates the blanksaround* rules"
# Nestable blocks: only mother blocks (level 1) are spaced
if blockname.endswith('list') and self.depth > 1:
return False
# The blank line after the block is always added
if where == 'after' \
and rules['blanksaround'+blockname]:
return True
# # No blank before if it's the first block of the body
# elif where == 'before' \
# and BLOCK.count == 1:
# return False
# # No blank before if it's the first block of this level (nested)
# elif where == 'before' \
# and self.count == 1:
# return False
# The blank line before the block is only added if
        # the previous block hasn't added a blank line
# (to avoid consecutive blanks)
elif where == 'before' \
and rules['blanksaround'+blockname] \
and not rules.get('blanksaround'+self.last):
return True
# Nested quotes are handled here,
# because the mother quote isn't closed yet
elif where == 'before' \
and blockname == 'quote' \
and rules['blanksaround'+blockname] \
and self.depth > 1:
return True
return False
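        # Illustrative note (comment only): when both 'para' and 'verb' have
        # their blanksaround* rule on, a verb block right after a paragraph
        # only adds its own 'after' blank - the 'before' blank is skipped
        # because the previous block already provided one, so these rules
        # never produce two consecutive blank lines.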
def comment(self):
return ''
def raw(self):
lines = self.hold()
return [doEscape(TARGET, x) for x in lines]
def tagged(self):
return self.hold()
def para(self):
result = []
open_ = TAGS['paragraphOpen']
close = TAGS['paragraphClose']
lines = self._get_escaped_hold()
# Blank line before?
if self._should_add_blank_line('before', 'para'): result.append('')
# Open tag
if open_: result.append(open_)
# Pagemaker likes a paragraph as a single long line
if rules['onelinepara']:
result.append(' '.join(lines))
# Others are normal :)
else:
result.extend(lines)
# Close tag
if close: result.append(close)
# Blank line after?
if self._should_add_blank_line('after', 'para'): result.append('')
# Very very very very very very very very very UGLY fix
# Needed because <center> can't appear inside <p>
try:
if len(lines) == 1 and \
TARGET in ('html', 'xhtml') and \
re.match('^\s*<center>.*</center>\s*$', lines[0]):
result = [lines[0]]
except: pass
return result
def verb(self):
"Verbatim lines are not masked, so there's no need to unmask"
result = []
open_ = TAGS['blockVerbOpen']
close = TAGS['blockVerbClose']
# Blank line before?
if self._should_add_blank_line('before', 'verb'): result.append('')
# Open tag
if open_: result.append(open_)
# Get contents
for line in self.hold():
if self.prop('mapped') == 'table':
line = MacroMaster().expand(line)
if not rules['verbblocknotescaped']:
line = doEscape(TARGET,line)
if rules['indentverbblock']:
line = ' '+line
if rules['verbblockfinalescape']:
line = doFinalEscape(TARGET, line)
result.append(line)
# Close tag
if close: result.append(close)
# Blank line after?
if self._should_add_blank_line('after', 'verb'): result.append('')
return result
def numtitle(self): return self.title('numtitle')
def title(self, name='title'):
result = []
# Blank line before?
if self._should_add_blank_line('before', name): result.append('')
# Get contents
result.extend(TITLE.get())
# Blank line after?
if self._should_add_blank_line('after', name): result.append('')
return result
def table(self):
result = []
# Blank line before?
if self._should_add_blank_line('before', 'table'): result.append('')
# Rewrite all table cells by the unmasked and escaped data
lines = self._get_escaped_hold()
for i in range(len(lines)):
cells = lines[i].split(SEPARATOR)
self.tableparser.rows[i]['cells'] = cells
result.extend(self.tableparser.dump())
# Blank line after?
if self._should_add_blank_line('after', 'table'): result.append('')
return result
def quote(self):
result = []
open_ = TAGS['blockQuoteOpen'] # block based
close = TAGS['blockQuoteClose']
qline = TAGS['blockQuoteLine'] # line based
indent = tagindent = '\t'*self.depth
# Apply rules
if rules['tagnotindentable']: tagindent = ''
if not rules['keepquoteindent']: indent = ''
# Blank line before?
if self._should_add_blank_line('before', 'quote'): result.append('')
# Open tag
if open_: result.append(tagindent+open_)
# Get contents
for item in self.hold():
if type(item) == type([]):
result.extend(item) # subquotes
else:
item = regex['quote'].sub('', item) # del TABs
item = self._last_escapes(item)
item = qline*self.depth + item
result.append(indent+item) # quote line
# Close tag
if close: result.append(tagindent+close)
# Blank line after?
if self._should_add_blank_line('after', 'quote'): result.append('')
return result
def bar(self):
result = []
bar_tag = ''
# Blank line before?
if self._should_add_blank_line('before', 'bar'): result.append('')
# Get the original bar chars
bar_chars = self.hold()[0].strip()
# Set bar type
if bar_chars.startswith('='): bar_tag = TAGS['bar2']
else : bar_tag = TAGS['bar1']
# To avoid comment tag confusion like <!-- ------ --> (sgml)
if TAGS['comment'].count('--'):
bar_chars = bar_chars.replace('--', '__')
# Get the bar tag (may contain \a)
result.append(regex['x'].sub(bar_chars, bar_tag))
# Blank line after?
if self._should_add_blank_line('after', 'bar'): result.append('')
return result
def deflist(self): return self.list('deflist')
def numlist(self): return self.list('numlist')
def list(self, name='list'):
result = []
items = self.hold()
indent = self.prop('indent')
tagindent = indent
listline = TAGS.get(name+'ItemLine')
itemcount = 0
if name == 'deflist':
itemopen = TAGS[name+'Item1Open']
itemclose = TAGS[name+'Item2Close']
itemsep = TAGS[name+'Item1Close']+\
TAGS[name+'Item2Open']
else:
itemopen = TAGS[name+'ItemOpen']
itemclose = TAGS[name+'ItemClose']
itemsep = ''
# Apply rules
if rules['tagnotindentable']: tagindent = ''
if not rules['keeplistindent']: indent = tagindent = ''
# ItemLine: number of leading chars identifies list depth
if listline:
itemopen = listline*self.depth + itemopen
# Adds trailing space on opening tags
if (name == 'list' and rules['spacedlistitemopen']) or \
(name == 'numlist' and rules['spacednumlistitemopen']):
itemopen = itemopen + ' '
# Remove two-blanks from list ending mark, to avoid <p>
items[-1] = self._remove_twoblanks(items[-1])
# Blank line before?
if self._should_add_blank_line('before', name): result.append('')
# Tag each list item (multiline items), store in listbody
itemopenorig = itemopen
listbody = []
widelist = 0
for item in items:
# Add "manual" item count for noautonum targets
itemcount += 1
if name == 'numlist' and not rules['autonumberlist']:
n = str(itemcount)
itemopen = regex['x'].sub(n, itemopenorig)
del n
# Tag it
item[0] = self._last_escapes(item[0])
if name == 'deflist':
z,term,rest = item[0].split(SEPARATOR, 2)
item[0] = rest
if not item[0]: del item[0] # to avoid <p>
listbody.append(tagindent+itemopen+term+itemsep)
else:
fullitem = tagindent+itemopen
listbody.append(item[0].replace(SEPARATOR, fullitem))
del item[0]
# Process next lines for this item (if any)
for line in item:
if type(line) == type([]): # sublist inside
listbody.extend(line)
else:
line = self._last_escapes(line)
# Blank lines turns to <p>
if not line and rules['parainsidelist']:
line = indent + TAGS['paragraphOpen'] + TAGS['paragraphClose']
line = line.rstrip()
widelist = 1
                # Some targets don't like indentation here (wiki)
if not rules['keeplistindent'] or (name == 'deflist' and rules['deflisttextstrip']):
line = line.lstrip()
# Maybe we have a line prefix to add? (wiki)
if name == 'deflist' and TAGS['deflistItem2LinePrefix']:
line = TAGS['deflistItem2LinePrefix'] + line
listbody.append(line)
# Close item (if needed)
if itemclose: listbody.append(tagindent+itemclose)
if not widelist and rules['compactlist']:
listopen = TAGS.get(name+'OpenCompact')
listclose = TAGS.get(name+'CloseCompact')
else:
listopen = TAGS.get(name+'Open')
listclose = TAGS.get(name+'Close')
# Open list (not nestable lists are only opened at mother)
if listopen and not \
(rules['listnotnested'] and BLOCK.depth != 1):
result.append(tagindent+listopen)
result.extend(listbody)
# Close list (not nestable lists are only closed at mother)
if listclose and not \
(rules['listnotnested'] and self.depth != 1):
result.append(tagindent+listclose)
# Blank line after?
if self._should_add_blank_line('after', name): result.append('')
return result
##############################################################################
class MacroMaster:
def __init__(self, config={}):
self.name = ''
self.config = config or CONF
self.infile = self.config['sourcefile']
self.outfile = self.config['outfile']
self.currdate = time.localtime(time.time())
self.rgx = regex.get('macros') or getRegexes()['macros']
self.fileinfo = { 'infile': None, 'outfile': None }
self.dft_fmt = MACROS
def walk_file_format(self, fmt):
"Walks the %%{in/out}file format string, expanding the % flags"
i = 0; ret = '' # counter/hold
while i < len(fmt): # char by char
c = fmt[i]; i += 1
if c == '%': # hot char!
if i == len(fmt): # % at the end
ret = ret + c
break
c = fmt[i]; i += 1 # read next
ret = ret + self.expand_file_flag(c)
else:
ret = ret +c # common char
return ret
def expand_file_flag(self, flag):
"%f: filename %F: filename (w/o extension)"
"%d: dirname %D: dirname (only parent dir)"
"%p: file path %e: extension"
info = self.fileinfo[self.name] # get dict
if flag == '%': x = '%' # %% -> %
elif flag == 'f': x = info['name']
elif flag == 'F': x = re.sub('\.[^.]*$','',info['name'])
elif flag == 'd': x = info['dir']
elif flag == 'D': x = os.path.split(info['dir'])[-1]
elif flag == 'p': x = info['path']
elif flag == 'e': x = re.search('.(\.([^.]+))?$', info['name']).group(2) or ''
#TODO simpler way for %e ?
else : x = '%'+flag # false alarm
return x
def set_file_info(self, macroname):
if self.fileinfo.get(macroname): return # already done
file_ = getattr(self, self.name) # self.infile
if file_ == STDOUT or file_ == MODULEOUT:
dir_ = ''
path = name = file_
else:
path = os.path.abspath(file_)
dir_ = os.path.dirname(path)
name = os.path.basename(path)
self.fileinfo[macroname] = {'path':path,'dir':dir_,'name':name}
def expand(self, line=''):
"Expand all macros found on the line"
while self.rgx.search(line):
m = self.rgx.search(line)
name = self.name = m.group('name').lower()
fmt = m.group('fmt') or self.dft_fmt.get(name)
if name == 'date':
txt = time.strftime(fmt,self.currdate)
elif name == 'mtime':
if self.infile in (STDIN, MODULEIN):
fdate = self.currdate
else:
mtime = os.path.getmtime(self.infile)
fdate = time.localtime(mtime)
txt = time.strftime(fmt,fdate)
elif name == 'infile' or name == 'outfile':
self.set_file_info(name)
txt = self.walk_file_format(fmt)
else:
Error("Unknown macro name '%s'"%name)
line = self.rgx.sub(txt,line,1)
return line
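    # Illustrative note (comment only): expand() rewrites every macro found
    # on the line, e.g. '%%date(%Y-%m-%d)' becomes the current date such as
    # '2024-01-01' and '%%infile(%f)' expands to the source file name; when
    # no format is given, the default from MACROS is used.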
##############################################################################
def listTargets():
"""list all available targets"""
targets = TARGETS
targets.sort()
for target in targets:
print("%s\t%s" % (target, TARGET_NAMES.get(target)))
def dumpConfig(source_raw, parsed_config):
onoff = {1:_('ON'), 0:_('OFF')}
data = [
(_('RC file') , RC_RAW ),
(_('source document'), source_raw ),
(_('command line') , CMDLINE_RAW)
]
# First show all RAW data found
for label, cfg in data:
print(_('RAW config for %s')%label)
for target,key,val in cfg:
target = '(%s)'%target
key = dotted_spaces("%-14s"%key)
val = val or _('ON')
print(' %-8s %s: %s'%(target,key,val))
print()
# Then the parsed results of all of them
print(_('Full PARSED config'))
keys = list(parsed_config.keys()) ; keys.sort() # sorted
for key in keys:
val = parsed_config[key]
        # Filters are shown last (below)
if key == 'preproc' or key == 'postproc':
continue
# Flag beautifier
if key in list(FLAGS.keys()) or key in list(ACTIONS.keys()):
val = onoff.get(val) or val
# List beautifier
if type(val) == type([]):
if key == 'options': sep = ' '
else : sep = ', '
val = sep.join(val)
print("%25s: %s"%(dotted_spaces("%-14s"%key),val))
print()
print(_('Active filters'))
for filter_ in ['preproc', 'postproc']:
for rule in parsed_config.get(filter_) or []:
print("%25s: %s -> %s" % (
dotted_spaces("%-14s"%filter_), rule[0], rule[1]))
def get_file_body(file_):
"Returns all the document BODY lines"
return process_source_file(file_, noconf=1)[1][2]
def finish_him(outlist, config):
"Writing output to screen or file"
outfile = config['outfile']
outlist = unmaskEscapeChar(outlist)
outlist = expandLineBreaks(outlist)
# Apply PostProc filters
if config['postproc']:
filters = compile_filters(config['postproc'],
_('Invalid PostProc filter regex'))
postoutlist = []
errmsg = _('Invalid PostProc filter replacement')
for line in outlist:
for rgx,repl in filters:
try: line = rgx.sub(repl, line)
except: Error("%s: '%s'"%(errmsg, repl))
postoutlist.append(line)
outlist = postoutlist[:]
if outfile == MODULEOUT:
return outlist
elif outfile == STDOUT:
if GUI:
return outlist, config
else:
for line in outlist: print(line)
else:
Savefile(outfile, addLineBreaks(outlist))
if not GUI and not QUIET:
print(_('%s wrote %s')%(my_name,outfile))
if config['split']:
if not QUIET: print("--- html...")
sgml2html = 'sgml2html -s %s -l %s %s' % (
config['split'], config['lang'] or lang, outfile)
if not QUIET: print("Running system command:", sgml2html)
os.system(sgml2html)
def toc_inside_body(body, toc, config):
ret = []
if AUTOTOC: return body # nothing to expand
toc_mark = MaskMaster().tocmask
# Expand toc mark with TOC contents
for line in body:
if line.count(toc_mark): # toc mark found
if config['toc']:
ret.extend(toc) # include if --toc
else:
pass # or remove %%toc line
else:
ret.append(line) # common line
return ret
def toc_tagger(toc, config):
"Returns the tagged TOC, as a single tag or a tagged list"
ret = []
# Convert the TOC list (t2t-marked) to the target's list format
if config['toc-only'] or (config['toc'] and not TAGS['TOC']):
fakeconf = config.copy()
fakeconf['headers'] = 0
fakeconf['toc-only'] = 0
fakeconf['mask-email'] = 0
fakeconf['preproc'] = []
fakeconf['postproc'] = []
fakeconf['css-sugar'] = 0
fakeconf['art-no-title'] = 1 # needed for --toc and --slides together, avoids slide title before TOC
ret,foo = convert(toc, fakeconf)
set_global_config(config) # restore config
# Our TOC list is not needed, the target already knows how to do a TOC
elif config['toc'] and TAGS['TOC']:
ret = [TAGS['TOC']]
return ret
def toc_formatter(toc, config):
"Formats TOC for automatic placement between headers and body"
if config['toc-only']: return toc # no formatting needed
if not config['toc'] : return [] # TOC disabled
ret = toc
# Art: An automatic "Table of Contents" header is added to the TOC slide
if config['target'] == 'art' and config['slides']:
n = (config['height'] - 1) - (len(toc) + 6) % (config['height'] - 1)
toc = aa_slide(_("Table of Contents"), config['width']) + toc + ([''] * n)
toc.append(aa_line(AA['bar2'], config['width']))
return toc
# TOC open/close tags (if any)
if TAGS['tocOpen' ]: ret.insert(0, TAGS['tocOpen'])
if TAGS['tocClose']: ret.append(TAGS['tocClose'])
# Autotoc specific formatting
if AUTOTOC:
if rules['autotocwithbars']: # TOC between bars
para = TAGS['paragraphOpen']+TAGS['paragraphClose']
bar = regex['x'].sub('-' * DFT_TEXT_WIDTH, TAGS['bar1'])
tocbar = [para, bar, para]
if config['target'] == 'art' and config['headers']:
# exception: header already printed a bar
ret = [para] + ret + tocbar
else:
ret = tocbar + ret + tocbar
if rules['blankendautotoc']: # blank line after TOC
ret.append('')
if rules['autotocnewpagebefore']: # page break before TOC
ret.insert(0,TAGS['pageBreak'])
if rules['autotocnewpageafter']: # page break after TOC
ret.append(TAGS['pageBreak'])
return ret
def doHeader(headers, config):
if not config['headers']: return []
if not headers: headers = ['','','']
target = config['target']
if target not in HEADER_TEMPLATE:
Error("doHeader: Unknown target '%s'"%target)
if target in ('html','xhtml') and config.get('css-sugar'):
template = HEADER_TEMPLATE[target+'css'].split('\n')
else:
template = HEADER_TEMPLATE[target].split('\n')
head_data = {'STYLE':[], 'ENCODING':''}
for key in head_data.keys():
val = config.get(key.lower())
# Remove .sty extension from each style filename (freaking tex)
# XXX Can't handle --style foo.sty,bar.sty
if target == 'tex' and key == 'STYLE':
val = [re.sub('(?i)\.sty$','',x) for x in val]
if key == 'ENCODING':
val = get_encoding_string(val, target)
head_data[key] = val
# Parse header contents
for i in 0,1,2:
# Expand macros
contents = MacroMaster(config=config).expand(headers[i])
# Escapes - on tex, just do it if any \tag{} present
if target != 'tex' or \
(target == 'tex' and re.search(r'\\\w+{', contents)):
contents = doEscape(target, contents)
if target == 'lout':
contents = doFinalEscape(target, contents)
head_data['HEADER%d'%(i+1)] = contents
# css-inside removes STYLE line
#XXX In tex, this also removes the modules call (%!style:amsfonts)
if target in ('html','xhtml') and config.get('css-inside') and \
config.get('style'):
head_data['STYLE'] = []
Debug("Header Data: %s"%head_data, 1)
# ASCII Art does not use a header template, aa_header() formats the header
if target == 'art':
n_h = len([v for v in head_data if v.startswith("HEADER") and head_data[v]])
if not n_h :
return []
if config['slides']:
x = config['height'] - 3 - (n_h * 3)
            n = x // (n_h + 1)  # integer division: n is used as a line count
end = x % (n_h + 1)
template = aa_header(head_data, config['width'], n, end)
else:
template = [''] + aa_header(head_data, config['width'], 2, 0)
# Header done, let's get out
return template
# Scan for empty dictionary keys
# If found, scan template lines for that key reference
# If found, remove the reference
# If there isn't any other key reference on the same line, remove it
#TODO loop by template line > key
for key in head_data.keys():
if head_data.get(key): continue
for line in template:
if line.count('%%(%s)s'%key):
sline = line.replace('%%(%s)s'%key, '')
if not re.search(r'%\([A-Z0-9]+\)s', sline):
template.remove(line)
# Style is a multiple tag.
# - If none or just one, use default template
# - If two or more, insert extra lines in a loop (and remove original)
styles = head_data['STYLE']
if len(styles) == 1:
head_data['STYLE'] = styles[0]
elif len(styles) > 1:
style_mark = '%(STYLE)s'
for i in range(len(template)):
if template[i].count(style_mark):
while styles:
template.insert(i+1, template[i].replace(style_mark, styles.pop()))
del template[i]
break
# Populate template with data (dict expansion)
template = '\n'.join(template) % head_data
# Adding CSS contents into template (for --css-inside)
# This code sux. Dirty++
if target in ('html','xhtml') and config.get('css-inside') and \
config.get('style'):
set_global_config(config) # usually on convert(), needed here
for i in range(len(config['style'])):
cssfile = config['style'][i]
if not os.path.isabs(cssfile):
infile = config.get('sourcefile')
cssfile = os.path.join(
os.path.dirname(infile), cssfile)
try:
contents = Readfile(cssfile, 1)
css = "\n%s\n%s\n%s\n%s\n" % (
doCommentLine("Included %s" % cssfile),
TAGS['cssOpen'],
'\n'.join(contents),
TAGS['cssClose'])
# Style now is content, needs escaping (tex)
#css = maskEscapeChar(css)
except:
errmsg = "CSS include failed for %s" % cssfile
css = "\n%s\n" % (doCommentLine(errmsg))
# Insert this CSS file contents on the template
template = re.sub('(?i)(</HEAD>)', css+r'\1', template)
# template = re.sub(r'(?i)(\\begin{document})',
# css+'\n'+r'\1', template) # tex
# The last blank line to keep everything separated
template = re.sub('(?i)(</HEAD>)', '\n'+r'\1', template)
return template.split('\n')
def doCommentLine(txt):
# The -- string ends a (h|sg|xht)ml comment :(
txt = maskEscapeChar(txt)
if TAGS['comment'].count('--') and txt.count('--'):
txt = re.sub('-(?=-)', r'-\\', txt)
if TAGS['comment']:
return regex['x'].sub(txt, TAGS['comment'])
return ''
def doFooter(config):
ret = []
# No footer. The --no-headers option hides header AND footer
if not config['headers']:
return []
    # Only add a blank line before the footer if the last block didn't add one itself
if not rules.get('blanksaround'+BLOCK.last):
ret.append('')
# Add txt2tags info at footer, if target supports comments
if TAGS['comment']:
# Not using TARGET_NAMES because it's i18n'ed.
        # It's best to always present this info in English.
target = config['target']
if config['target'] == 'tex':
target = 'LaTeX2e'
t2t_version = '%s code generated by %s %s (%s)' % (target, my_name, my_version, my_url)
cmdline = 'cmdline: %s %s' % (my_name, ' '.join(config['realcmdline']))
ret.append(doCommentLine(t2t_version))
ret.append(doCommentLine(cmdline))
# Maybe we have a specific tag to close the document?
if TAGS['EOD']:
ret.append(TAGS['EOD'])
return ret
def doEscape(target,txt):
"Target-specific special escapes. Apply *before* insert any tag."
tmpmask = 'vvvvThisEscapingSuxvvvv'
if target in ('html','sgml','xhtml','dbk'):
        txt = re.sub('&','&amp;',txt)
        txt = re.sub('<','&lt;',txt)
        txt = re.sub('>','&gt;',txt)
        if target == 'sgml':
            txt = re.sub('\xff','&yuml;',txt) # "+y
elif target == 'pm6':
txt = re.sub('<','<\#60>',txt)
elif target == 'mgp':
txt = re.sub('^%',' %',txt) # add leading blank to avoid parse
elif target == 'man':
txt = re.sub("^([.'])", '\\&\\1',txt) # command ID
txt = txt.replace(ESCCHAR, ESCCHAR+'e') # \e
elif target == 'lout':
# TIP: / moved to FinalEscape to avoid //italic//
# TIP: these are also converted by lout: ... --- --
txt = txt.replace(ESCCHAR, tmpmask) # \
txt = txt.replace('"', '"%s""'%ESCCHAR) # "\""
txt = re.sub('([|&{}@#^~])', '"\\1"', txt) # "@"
txt = txt.replace(tmpmask, '"%s"'%(ESCCHAR*2)) # "\\"
elif target == 'tex':
# Mark literal \ to be changed to $\backslash$ later
txt = txt.replace(ESCCHAR, tmpmask)
txt = re.sub('([#$&%{}])', ESCCHAR+r'\1' , txt) # \%
txt = re.sub('([~^])' , ESCCHAR+r'\1{}', txt) # \~{}
txt = re.sub('([<|>])' , r'$\1$', txt) # $>$
txt = txt.replace(tmpmask, maskEscapeChar(r'$\backslash$'))
# TIP the _ is escaped at the end
return txt
# TODO man: where - really needs to be escaped?
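# Illustrative note (comment only): for the html target,
# doEscape('html', 'x < y & z') is expected to return 'x &lt; y &amp; z'
# (the '&' substitution runs first, so entities are not double-escaped);
# for tex, '&' and friends get the escape-char mask prepended and only
# become '\&' etc. once the mask is undone at output time.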
def doFinalEscape(target, txt):
"Last escapes of each line"
if target == 'pm6' : txt = txt.replace(ESCCHAR+'<', r'<\#92><')
elif target == 'man' : txt = txt.replace('-', r'\-')
    elif target == 'sgml': txt = txt.replace('[', '&lsqb;')
elif target == 'lout': txt = txt.replace('/', '"/"')
elif target == 'tex' :
txt = txt.replace('_', r'\_')
txt = txt.replace('vvvvTexUndervvvv', '_') # shame!
## JS
txt = txt.replace('vvvUnderscoreInRawTextvvv', '_')
txt = txt.replace('vvvUnderscoreInTaggedTextvvv', '_')
return txt
def EscapeCharHandler(action, data):
"Mask/Unmask the Escape Char on the given string"
if not data.strip(): return data
if action not in ('mask','unmask'):
Error("EscapeCharHandler: Invalid action '%s'"%action)
if action == 'mask': return data.replace('\\', ESCCHAR)
else: return data.replace(ESCCHAR, '\\')
def maskEscapeChar(data):
"Replace any Escape Char \ with a text mask (Input: str or list)"
if type(data) == type([]):
return [EscapeCharHandler('mask', x) for x in data]
return EscapeCharHandler('mask',data)
def unmaskEscapeChar(data):
"Undo the Escape char \ masking (Input: str or list)"
if type(data) == type([]):
return [EscapeCharHandler('unmask', x) for x in data]
return EscapeCharHandler('unmask',data)
def addLineBreaks(mylist):
"use LB to respect sys.platform"
ret = []
for line in mylist:
line = line.replace('\n', LB) # embedded \n's
ret.append(line+LB) # add final line break
return ret
# Convert ['foo\nbar'] to ['foo', 'bar']
def expandLineBreaks(mylist):
ret = []
for line in mylist:
ret.extend(line.split('\n'))
return ret
def compile_filters(filters, errmsg='Filter'):
if filters:
for i in range(len(filters)):
patt,repl = filters[i]
try: rgx = re.compile(patt)
except: Error("%s: '%s'"%(errmsg, patt))
filters[i] = (rgx,repl)
return filters
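# Illustrative note (comment only): compile_filters() turns config pairs such
# as [('draft', 'final')] into [(re.compile('draft'), 'final')], raising a
# user-level Error when the pattern itself is not a valid regex.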
def enclose_me(tagname, txt):
return TAGS.get(tagname+'Open') + txt + TAGS.get(tagname+'Close')
def beautify_me(name, font, line):
"where name is: bold, italic, underline or strike"
    # Exception: Doesn't parse a horizontal bar as strike
if name == 'strike' and regex['bar'].search(line): return line
open_ = TAGS['%sOpen' % font]
close = TAGS['%sClose' % font]
txt = r'%s\1%s'%(open_, close)
line = regex[font].sub(txt, line)
return line
def get_tagged_link(label, url):
ret = ''
target = CONF['target']
image_re = regex['img']
# Set link type
if regex['email'].match(url):
linktype = 'email'
else:
linktype = 'url';
# Escape specials from TEXT parts
label = doEscape(target,label)
# Escape specials from link URL
if not rules['linkable'] or rules['escapeurl']:
url = doEscape(target, url)
# Adding protocol to guessed link
guessurl = ''
if linktype == 'url' and \
re.match('(?i)'+regex['_urlskel']['guess'], url):
if url[0] in 'Ww': guessurl = 'http://' +url
else : guessurl = 'ftp://' +url
# Not link aware targets -> protocol is useless
if not rules['linkable']: guessurl = ''
# Simple link (not guessed)
if not label and not guessurl:
if CONF['mask-email'] and linktype == 'email':
# Do the email mask feature (no TAGs, just text)
url = url.replace('@', ' (a) ')
url = url.replace('.', ' ')
url = "<%s>" % url
if rules['linkable']: url = doEscape(target, url)
ret = url
else:
# Just add link data to tag
tag = TAGS[linktype]
ret = regex['x'].sub(url,tag)
# Named link or guessed simple link
else:
# Adjusts for guessed link
if not label: label = url # no protocol
if guessurl : url = guessurl # with protocol
# Image inside link!
if image_re.match(label):
if rules['imglinkable']: # get image tag
label = parse_images(label)
else: # img@link !supported
label = "(%s)"%image_re.match(label).group(1)
# Putting data on the right appearance order
if rules['labelbeforelink'] or not rules['linkable']:
urlorder = [label, url] # label before link
else:
urlorder = [url, label] # link before label
# Add link data to tag (replace \a's)
ret = TAGS["%sMark"%linktype]
for data in urlorder:
ret = regex['x'].sub(data,ret,1)
return ret
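# Illustrative note (comment only): with --mask-email and no label,
# 'john@example.com' is not tagged at all - it becomes the plain text
# '<john (a) example com>'; a bare 'www.example.com' on a link-aware target
# is guessed as 'http://www.example.com' before the URL tag is applied.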
def parse_deflist_term(line):
"Extract and parse definition list term contents"
img_re = regex['img']
term = regex['deflist'].search(line).group(3)
# Mask image inside term as (image.jpg), where not supported
if not rules['imgasdefterm'] and img_re.search(term):
while img_re.search(term):
imgfile = img_re.search(term).group(1)
term = img_re.sub('(%s)'%imgfile, term, 1)
#TODO tex: escape ] on term. \], \rbrack{} and \verb!]! don't work :(
return term
def get_image_align(line):
"Return the image (first found) align for the given line"
# First clear marks that can mess align detection
line = re.sub(SEPARATOR+'$', '', line) # remove deflist sep
line = re.sub('^'+SEPARATOR, '', line) # remove list sep
line = re.sub('^[\t]+' , '', line) # remove quote mark
# Get image position on the line
m = regex['img'].search(line)
ini = m.start() ; head = 0
end = m.end() ; tail = len(line)
# The align detection algorithm
if ini == head and end != tail: align = 'left' # ^img + text$
elif ini != head and end == tail: align = 'right' # ^text + img$
else : align = 'center' # default align
# Some special cases
if BLOCK.isblock('table'): align = 'center' # ignore when table
# if TARGET == 'mgp' and align == 'center': align = 'center'
return align
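# Illustrative note (comment only): '[photo.png] some text' aligns the image
# left, 'some text [photo.png]' aligns it right, and an image with text on
# both sides (or alone on the line) falls back to center; inside a table
# the align is always forced to center.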
# Reference: http://www.iana.org/assignments/character-sets
# http://www.drclue.net/F1.cgi/HTML/META/META.html
def get_encoding_string(enc, target):
if not enc: return ''
# Target specific translation table
translate = {
'tex': {
# missing: ansinew , applemac , cp437 , cp437de , cp865
'utf-8' : 'utf8',
'us-ascii' : 'ascii',
'windows-1250': 'cp1250',
'windows-1252': 'cp1252',
'ibm850' : 'cp850',
'ibm852' : 'cp852',
'iso-8859-1' : 'latin1',
'iso-8859-2' : 'latin2',
'iso-8859-3' : 'latin3',
'iso-8859-4' : 'latin4',
'iso-8859-5' : 'latin5',
'iso-8859-9' : 'latin9',
'koi8-r' : 'koi8-r'
}
}
# Normalization
enc = re.sub('(?i)(us[-_]?)?ascii|us|ibm367','us-ascii' , enc)
enc = re.sub('(?i)(ibm|cp)?85([02])' ,'ibm85\\2' , enc)
enc = re.sub('(?i)(iso[_-]?)?8859[_-]?' ,'iso-8859-' , enc)
enc = re.sub('iso-8859-($|[^1-9]).*' ,'iso-8859-1', enc)
# Apply translation table
try: enc = translate[target][enc.lower()]
except: pass
return enc
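# Illustrative note (comment only): get_encoding_string('ISO8859-1', 'tex')
# normalizes the name to 'iso-8859-1' and then maps it to LaTeX's 'latin1';
# for targets without a translation table (e.g. 'html') the normalized name
# itself is returned.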
##############################################################################
##MerryChristmas,IdontwanttofighttonightwithyouImissyourbodyandIneedyourlove##
##############################################################################
def process_source_file(file_='', noconf=0, contents=[]):
"""
Find and Join all the configuration available for a source file.
    No sanity checking is done in this step.
It also extracts the source document parts into separate holders.
The config scan order is:
1. The user configuration file (i.e. $HOME/.txt2tagsrc)
2. The source document's CONF area
3. The command line options
The return data is a tuple of two items:
1. The parsed config dictionary
2. The document's parts, as a (head, conf, body) tuple
All the conversion process will be based on the data and
configuration returned by this function.
    The source file is read in this step only.
"""
if contents:
source = SourceDocument(contents=contents)
else:
source = SourceDocument(file_)
head, conf, body = source.split()
Message(_("Source document contents stored"),2)
if not noconf:
# Read document config
source_raw = source.get_raw_config()
# Join all the config directives found, then parse it
full_raw = RC_RAW + source_raw + CMDLINE_RAW
Message(_("Parsing and saving all config found (%03d items)") % (len(full_raw)), 1)
full_parsed = ConfigMaster(full_raw).parse()
# Add manually the filename to the conf dic
if contents:
full_parsed['sourcefile'] = MODULEIN
full_parsed['infile'] = MODULEIN
full_parsed['outfile'] = MODULEOUT
else:
full_parsed['sourcefile'] = file_
        # Maybe we should dump the config found?
if full_parsed.get('dump-config'):
dumpConfig(source_raw, full_parsed)
Quit()
        # The user just wants to know a single config value (hidden feature)
#TODO pick a better name than --show-config-value
elif full_parsed.get('show-config-value'):
config_value = full_parsed.get(full_parsed['show-config-value'])
if config_value:
if type(config_value) == type([]):
print('\n'.join(config_value))
else:
print(config_value)
Quit()
# Okay, all done
Debug("FULL config for this file: %s"%full_parsed, 1)
else:
full_parsed = {}
return full_parsed, (head,conf,body)
def get_infiles_config(infiles):
"""
Find and Join into a single list, all configuration available
for each input file. This function is supposed to be the very
first one to be called, before any processing.
"""
return list(map(process_source_file, infiles))
def convert_this_files(configs):
global CONF
for myconf,doc in configs: # multifile support
target_head = []
target_toc = []
target_body = []
target_foot = []
source_head, source_conf, source_body = doc
myconf = ConfigMaster().sanity(myconf)
# Compose the target file Headers
#TODO escape line before?
#TODO see exceptions by tex and mgp
Message(_("Composing target Headers"),1)
target_head = doHeader(source_head, myconf)
# Parse the full marked body into tagged target
first_body_line = (len(source_head) or 1)+ len(source_conf) + 1
Message(_("Composing target Body"),1)
target_body, marked_toc = convert(source_body, myconf, firstlinenr=first_body_line)
# If dump-source, we're done
if myconf['dump-source']:
for line in source_head+source_conf+target_body:
print(line)
return
# Close the last slide
if myconf['slides'] and not myconf['toc-only'] and myconf['target'] == 'art':
n = (myconf['height'] - 1) - (AA_COUNT % (myconf['height'] - 1) + 1)
target_body = target_body + ([''] * n) + [aa_line(AA['bar2'], myconf['width'])]
# Compose the target file Footer
Message(_("Composing target Footer"),1)
target_foot = doFooter(myconf)
# Make TOC (if needed)
Message(_("Composing target TOC"),1)
tagged_toc = toc_tagger(marked_toc, myconf)
target_toc = toc_formatter(tagged_toc, myconf)
target_body = toc_inside_body(target_body, target_toc, myconf)
if not AUTOTOC and not myconf['toc-only']: target_toc = []
# Finally, we have our document
outlist = target_head + target_toc + target_body + target_foot
# If on GUI, abort before finish_him
# If module, return finish_him as list
# Else, write results to file or STDOUT
if GUI:
return outlist, myconf
elif myconf.get('outfile') == MODULEOUT:
return finish_him(outlist, myconf), myconf
else:
Message(_("Saving results to the output file"),1)
finish_him(outlist, myconf)
def parse_images(line):
"Tag all images found"
while regex['img'].search(line) and TAGS['img'] != '[\a]':
txt = regex['img'].search(line).group(1)
tag = TAGS['img']
# If target supports image alignment, here we go
if rules['imgalignable']:
align = get_image_align(line) # right
align_name = align.capitalize() # Right
# The align is a full tag, or part of the image tag (~A~)
if TAGS['imgAlign'+align_name]:
tag = TAGS['imgAlign'+align_name]
else:
align_tag = TAGS['_imgAlign'+align_name]
tag = regex['_imgAlign'].sub(align_tag, tag, 1)
# Dirty fix to allow centered solo images
if align == 'center' and TARGET in ('html','xhtml'):
rest = regex['img'].sub('',line,1)
if re.match('^\s+$', rest):
tag = "<center>%s</center>" %tag
if TARGET == 'tex':
tag = re.sub(r'\\b',r'\\\\b',tag)
txt = txt.replace('_', 'vvvvTexUndervvvv')
# Ugly hack to avoid infinite loop when target's image tag contains []
tag = tag.replace('[', 'vvvvEscapeSquareBracketvvvv')
line = regex['img'].sub(tag,line,1)
line = regex['x'].sub(txt,line,1)
return line.replace('vvvvEscapeSquareBracketvvvv','[')
def add_inline_tags(line):
# Beautifiers
for beauti, font in [
('bold', 'fontBold'), ('italic', 'fontItalic'),
('underline', 'fontUnderline'), ('strike', 'fontStrike')]:
if regex[font].search(line):
line = beautify_me(beauti, font, line)
line = parse_images(line)
return line
def get_include_contents(file_, path=''):
"Parses %!include: value and extract file contents"
ids = {'`':'verb', '"':'raw', "'":'tagged' }
id_ = 't2t'
# Set include type and remove identifier marks
mark = file_[0]
if mark in ids.keys():
if file_[:2] == file_[-2:] == mark*2:
id_ = ids[mark] # set type
file_ = file_[2:-2] # remove marks
# Handle remote dir execution
filepath = os.path.join(path, file_)
# Read included file contents
lines = Readfile(filepath, remove_linebreaks=1)
# Default txt2tags marked text, just BODY matters
if id_ == 't2t':
lines = get_file_body(filepath)
#TODO fix images relative path if file has a path, ie.: chapter1/index.t2t (wait until tree parsing)
#TODO for the images path fix, also respect outfile path, if different from infile (wait until tree parsing)
lines.insert(0, '%%INCLUDED(%s) starts here: %s'%(id_,file_))
    # This would appear when the included file hits EOF with a verbatim area open
#lines.append('%%INCLUDED(%s) ends here: %s'%(id_,file_))
return id_, lines
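# Illustrative note (comment only): the include argument selects its type by
# the surrounding marks - '``file``' is read as verbatim, '""file""' as raw,
# "''file''" as tagged, and a bare name as regular t2t text whose BODY is
# extracted via get_file_body().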
def set_global_config(config):
global CONF, TAGS, regex, rules, TARGET
CONF = config
rules = getRules(CONF)
TAGS = getTags(CONF)
regex = getRegexes()
TARGET = config['target'] # save for buggy functions that need global
def convert(bodylines, config, firstlinenr=1):
global BLOCK, TITLE
set_global_config(config)
target = config['target']
BLOCK = BlockMaster()
MASK = MaskMaster()
TITLE = TitleMaster()
ret = []
dump_source = []
f_lastwasblank = 0
# Compiling all PreProc regexes
pre_filter = compile_filters(
CONF['preproc'], _('Invalid PreProc filter regex'))
# Let's mark it up!
linenr = firstlinenr-1
lineref = 0
while lineref < len(bodylines):
# Defaults
MASK.reset()
results_box = ''
untouchedline = bodylines[lineref]
dump_source.append(untouchedline)
line = re.sub('[\n\r]+$','',untouchedline) # del line break
# Apply PreProc filters
if pre_filter:
errmsg = _('Invalid PreProc filter replacement')
for rgx,repl in pre_filter:
try: line = rgx.sub(repl, line)
except: Error("%s: '%s'"%(errmsg, repl))
line = maskEscapeChar(line) # protect \ char
linenr += 1
lineref += 1
Debug(repr(line), 2, linenr) # heavy debug: show each line
#------------------[ Comment Block ]------------------------
# We're already on a comment block
if BLOCK.block() == 'comment':
# Closing comment
if regex['blockCommentClose'].search(line):
ret.extend(BLOCK.blockout() or [])
continue
# Normal comment-inside line. Ignore it.
continue
# Detecting comment block init
if regex['blockCommentOpen'].search(line) \
and BLOCK.block() not in BLOCK.exclusive:
ret.extend(BLOCK.blockin('comment'))
continue
#-------------------------[ Tagged Text ]----------------------
# We're already on a tagged block
if BLOCK.block() == 'tagged':
# Closing tagged
if regex['blockTaggedClose'].search(line):
ret.extend(BLOCK.blockout())
continue
# Normal tagged-inside line
BLOCK.holdadd(line)
continue
# Detecting tagged block init
if regex['blockTaggedOpen'].search(line) \
and BLOCK.block() not in BLOCK.exclusive:
ret.extend(BLOCK.blockin('tagged'))
continue
# One line tagged text
if regex['1lineTagged'].search(line) \
and BLOCK.block() not in BLOCK.exclusive:
ret.extend(BLOCK.blockin('tagged'))
line = regex['1lineTagged'].sub('',line)
BLOCK.holdadd(line)
ret.extend(BLOCK.blockout())
continue
#-------------------------[ Raw Text ]----------------------
# We're already on a raw block
if BLOCK.block() == 'raw':
# Closing raw
if regex['blockRawClose'].search(line):
ret.extend(BLOCK.blockout())
continue
# Normal raw-inside line
BLOCK.holdadd(line)
continue
# Detecting raw block init
if regex['blockRawOpen'].search(line) \
and BLOCK.block() not in BLOCK.exclusive:
ret.extend(BLOCK.blockin('raw'))
continue
# One line raw text
if regex['1lineRaw'].search(line) \
and BLOCK.block() not in BLOCK.exclusive:
ret.extend(BLOCK.blockin('raw'))
line = regex['1lineRaw'].sub('',line)
BLOCK.holdadd(line)
ret.extend(BLOCK.blockout())
continue
#------------------------[ Verbatim ]----------------------
#TIP We'll never support beautifiers inside verbatim
# Closing table mapped to verb
if BLOCK.block() == 'verb' \
and BLOCK.prop('mapped') == 'table' \
and not regex['table'].search(line):
ret.extend(BLOCK.blockout())
# We're already on a verb block
if BLOCK.block() == 'verb':
# Closing verb
if regex['blockVerbClose'].search(line):
ret.extend(BLOCK.blockout())
continue
# Normal verb-inside line
BLOCK.holdadd(line)
continue
# Detecting verb block init
if regex['blockVerbOpen'].search(line) \
and BLOCK.block() not in BLOCK.exclusive:
ret.extend(BLOCK.blockin('verb'))
f_lastwasblank = 0
continue
# One line verb-formatted text
if regex['1lineVerb'].search(line) \
and BLOCK.block() not in BLOCK.exclusive:
ret.extend(BLOCK.blockin('verb'))
line = regex['1lineVerb'].sub('',line)
BLOCK.holdadd(line)
ret.extend(BLOCK.blockout())
f_lastwasblank = 0
continue
# Tables are mapped to verb when target is not table-aware
if not rules['tableable'] and regex['table'].search(line):
if not BLOCK.isblock('verb'):
ret.extend(BLOCK.blockin('verb'))
BLOCK.propset('mapped', 'table')
BLOCK.holdadd(line)
continue
#---------------------[ blank lines ]-----------------------
if regex['blankline'].search(line):
# Close open paragraph
if BLOCK.isblock('para'):
ret.extend(BLOCK.blockout())
f_lastwasblank = 1
continue
# Close all open tables
if BLOCK.isblock('table'):
ret.extend(BLOCK.blockout())
f_lastwasblank = 1
continue
# Close all open quotes
while BLOCK.isblock('quote'):
ret.extend(BLOCK.blockout())
# Closing all open lists
if f_lastwasblank: # 2nd consecutive blank
if BLOCK.block().endswith('list'):
BLOCK.holdaddsub('') # helps parser
while BLOCK.depth: # closes list (if any)
ret.extend(BLOCK.blockout())
continue # ignore consecutive blanks
# Paragraph (if any) is wanted inside lists also
if BLOCK.block().endswith('list'):
BLOCK.holdaddsub('')
f_lastwasblank = 1
continue
#---------------------[ special ]---------------------------
if regex['special'].search(line):
targ, key, val = ConfigLines().parse_line(line, None, target)
if key:
Debug("Found config '%s', value '%s'" % (key, val), 1, linenr)
else:
Debug('Bogus Special Line', 1, linenr)
# %!include command
if key == 'include':
incpath = os.path.dirname(CONF['sourcefile'])
incfile = val
err = _('A file cannot include itself (loop!)')
if CONF['sourcefile'] == incfile:
Error("%s: %s"%(err,incfile))
inctype, inclines = get_include_contents(incfile, incpath)
# Verb, raw and tagged are easy
if inctype != 't2t':
ret.extend(BLOCK.blockin(inctype))
BLOCK.holdextend(inclines)
ret.extend(BLOCK.blockout())
else:
# Insert include lines into body
#TODO include maxdepth limit
bodylines = bodylines[:lineref] + inclines + bodylines[lineref:]
#TODO fix path if include@include
# Remove %!include call
if CONF['dump-source']:
dump_source.pop()
# This line is done, go to next
continue
# %!csv command
elif key == 'csv':
if not csv:
Error("Python module 'csv' not found, but needed for %!csv")
table = []
filename = val
reader = csv.reader(Readfile(filename))
# Convert each CSV line to a txt2tags' table line
# foo,bar,baz -> | foo | bar | baz |
try:
for row in reader:
table.append('| %s |' % ' | '.join(row))
except csv.Error as e:
Error('CSV: file %s: %s' % (filename, e))
# Parse and convert the new table
                # Note: cell contents are raw, no t2t marks are parsed
if rules['tableable']:
ret.extend(BLOCK.blockin('table'))
if table:
BLOCK.tableparser.__init__(table[0])
for row in table:
tablerow = TableMaster().parse_row(row)
BLOCK.tableparser.add_row(tablerow)
# Very ugly, but necessary for escapes
line = SEPARATOR.join(tablerow['cells'])
BLOCK.holdadd(doEscape(target, line))
ret.extend(BLOCK.blockout())
# Tables are mapped to verb when target is not table-aware
else:
if target == 'art' and table:
table = aa_table(table)
ret.extend(BLOCK.blockin('verb'))
BLOCK.propset('mapped', 'table')
for row in table:
BLOCK.holdadd(row)
ret.extend(BLOCK.blockout())
# This line is done, go to next
continue
#---------------------[ dump-source ]-----------------------
# We don't need to go any further
if CONF['dump-source']:
continue
#---------------------[ Comments ]--------------------------
# Just skip them (if not macro)
if regex['comment'].search(line) and not \
regex['macros'].match(line) and not \
regex['toc'].match(line):
continue
#---------------------[ Triggers ]--------------------------
# Valid line, reset blank status
f_lastwasblank = 0
# Any NOT quote line closes all open quotes
if BLOCK.isblock('quote') and not regex['quote'].search(line):
while BLOCK.isblock('quote'):
ret.extend(BLOCK.blockout())
# Any NOT table line closes an open table
if BLOCK.isblock('table') and not regex['table'].search(line):
ret.extend(BLOCK.blockout())
#---------------------[ Horizontal Bar ]--------------------
if regex['bar'].search(line):
# Bars inside quotes are handled on the Quote processing
# Otherwise we parse the bars right here
#
if not (BLOCK.isblock('quote') or regex['quote'].search(line)) \
or (BLOCK.isblock('quote') and not rules['barinsidequote']):
# Close all the opened blocks
ret.extend(BLOCK.blockin('bar'))
# Extract the bar chars (- or =)
m = regex['bar'].search(line)
bar_chars = m.group(2)
# Process and dump the tagged bar
BLOCK.holdadd(bar_chars)
ret.extend(BLOCK.blockout())
Debug("BAR: %s"%line, 6)
# We're done, nothing more to process
continue
#---------------------[ Title ]-----------------------------
if (regex['title'].search(line) or regex['numtitle'].search(line)) \
and not BLOCK.block().endswith('list'):
if regex['title'].search(line):
name = 'title'
else:
name = 'numtitle'
# Close all the opened blocks
ret.extend(BLOCK.blockin(name))
# Process title
TITLE.add(line)
ret.extend(BLOCK.blockout())
# We're done, nothing more to process
continue
#---------------------[ %%toc ]-----------------------
# %%toc line closes paragraph
if BLOCK.block() == 'para' and regex['toc'].search(line):
ret.extend(BLOCK.blockout())
#---------------------[ apply masks ]-----------------------
line = MASK.mask(line)
#XXX from here, only block-inside lines will pass
#---------------------[ Quote ]-----------------------------
if regex['quote'].search(line):
# Store number of leading TABS
quotedepth = len(regex['quote'].search(line).group(0))
# SGML doesn't support nested quotes
if rules['quotenotnested']: quotedepth = 1
# Don't cross depth limit
maxdepth = rules['quotemaxdepth']
if maxdepth and quotedepth > maxdepth:
quotedepth = maxdepth
# New quote
if not BLOCK.isblock('quote'):
ret.extend(BLOCK.blockin('quote'))
# New subquotes
while BLOCK.depth < quotedepth:
BLOCK.blockin('quote')
# Closing quotes
while quotedepth < BLOCK.depth:
ret.extend(BLOCK.blockout())
# Bar inside quote
if regex['bar'].search(line) and rules['barinsidequote']:
tempBlock = BlockMaster()
tagged_bar = []
tagged_bar.extend(tempBlock.blockin('bar'))
tempBlock.holdadd(line)
tagged_bar.extend(tempBlock.blockout())
BLOCK.holdextend(tagged_bar)
continue
#---------------------[ Lists ]-----------------------------
# An empty item also closes the current list
if BLOCK.block().endswith('list'):
m = regex['listclose'].match(line)
if m:
listindent = m.group(1)
listtype = m.group(2)
currlisttype = BLOCK.prop('type')
currlistindent = BLOCK.prop('indent')
if listindent == currlistindent and \
listtype == currlisttype:
ret.extend(BLOCK.blockout())
continue
if regex['list'].search(line) or \
regex['numlist'].search(line) or \
regex['deflist'].search(line):
listindent = BLOCK.prop('indent')
listids = ''.join(LISTNAMES.keys())
m = re.match('^( *)([%s]) ' % re.escape(listids), line)
listitemindent = m.group(1)
listtype = m.group(2)
listname = LISTNAMES[listtype]
results_box = BLOCK.holdadd
# Del list ID (and separate term from definition)
if listname == 'deflist':
term = parse_deflist_term(line)
line = regex['deflist'].sub(
SEPARATOR+term+SEPARATOR,line)
else:
line = regex[listname].sub(SEPARATOR,line)
# Don't cross depth limit
maxdepth = rules['listmaxdepth']
if maxdepth and BLOCK.depth == maxdepth:
if len(listitemindent) > len(listindent):
listitemindent = listindent
# List bumping (same indent, diff mark)
# Close the currently open list to clear the mess
if BLOCK.block().endswith('list') \
and listname != BLOCK.block() \
and len(listitemindent) == len(listindent):
ret.extend(BLOCK.blockout())
listindent = BLOCK.prop('indent')
# Open mother list or sublist
if not BLOCK.block().endswith('list') or \
len(listitemindent) > len(listindent):
ret.extend(BLOCK.blockin(listname))
BLOCK.propset('indent',listitemindent)
BLOCK.propset('type',listtype)
# Closing sublists
while len(listitemindent) < len(BLOCK.prop('indent')):
ret.extend(BLOCK.blockout())
# O-oh, sublist before list ("\n\n - foo\n- foo")
# Fix: close sublist (as mother), open another list
if not BLOCK.block().endswith('list'):
ret.extend(BLOCK.blockin(listname))
BLOCK.propset('indent',listitemindent)
BLOCK.propset('type',listtype)
#---------------------[ Table ]-----------------------------
#TODO escape undesired format inside table
#TODO add pm6 target
if regex['table'].search(line):
if not BLOCK.isblock('table'): # first table line!
ret.extend(BLOCK.blockin('table'))
BLOCK.tableparser.__init__(line)
tablerow = TableMaster().parse_row(line)
BLOCK.tableparser.add_row(tablerow) # save config
# Maintain line to unmask and inlines
# XXX Bug: | **bo | ld** | turns **bo\x01ld** and gets converted :(
# TODO isolate unmask+inlines parsing to use here
line = SEPARATOR.join(tablerow['cells'])
#---------------------[ Paragraph ]-------------------------
if not BLOCK.block() and \
not line.count(MASK.tocmask): # new para!
ret.extend(BLOCK.blockin('para'))
############################################################
############################################################
############################################################
#---------------------[ Final Parses ]----------------------
# The target-specific special char escapes for body lines
line = doEscape(target,line)
line = add_inline_tags(line)
line = MASK.undo(line)
#---------------------[ Hold or Return? ]-------------------
### Now we must choose where to put the parsed line
#
if not results_box:
# List item extra lines
if BLOCK.block().endswith('list'):
results_box = BLOCK.holdaddsub
# Other blocks
elif BLOCK.block():
results_box = BLOCK.holdadd
# No blocks
else:
line = doFinalEscape(target, line)
results_box = ret.append
results_box(line)
# EOF: close any open para/verb/lists/table/quotes
Debug('EOF',7)
while BLOCK.block():
ret.extend(BLOCK.blockout())
# Maybe close some opened title area?
if rules['titleblocks']:
ret.extend(TITLE.close_all())
# Maybe a major tag to enclose body? (like DIV for CSS)
if TAGS['bodyOpen' ]: ret.insert(0, TAGS['bodyOpen'])
if TAGS['bodyClose']: ret.append(TAGS['bodyClose'])
if CONF['toc-only']: ret = []
marked_toc = TITLE.dump_marked_toc(CONF['toc-level'])
# If dump-source, all parsing is ignored
if CONF['dump-source']: ret = dump_source[:]
return ret, marked_toc
##############################################################################
################################### GUI ######################################
##############################################################################
#
# Tk help: http://python.org/topics/tkinter/
# Tuto: http://ibiblio.org/obp/py4fun/gui/tkPhone.html
# /usr/lib/python*/lib-tk/Tkinter.py
#
# grid table : row=0, column=0, columnspan=2, rowspan=2
# grid align : sticky='n,s,e,w' (North, South, East, West)
# pack place : side='top,bottom,right,left'
# pack fill : fill='x,y,both,none', expand=1
# pack align : anchor='n,s,e,w' (North, South, East, West)
# padding : padx=10, pady=10, ipadx=10, ipady=10 (internal)
# checkbox : offvalue is returned if the _user_ deselected the box
# label align: justify=left,right,center
def load_GUI_resources():
"Load all extra modules and methods used by GUI"
global askopenfilename, showinfo, showwarning, showerror, Tkinter
from tkinter.filedialog import askopenfilename
from tkinter.messagebox import showinfo,showwarning,showerror
import tkinter
class Gui:
"Graphical Tk Interface"
def __init__(self, conf={}):
self.root = tkinter.Tk() # mother window, come to butthead
self.root.title(my_name) # window title bar text
self.window = self.root # variable "focus" for inclusion
self.row = 0 # row count for grid()
self.action_length = 150 # left column length (pixel)
self.frame_margin = 10 # frame margin size (pixel)
self.frame_border = 6 # frame border size (pixel)
# The default Gui colors, can be changed by %!guicolors
self.dft_gui_colors = ['#6c6','white','#cf9','#030']
self.gui_colors = []
self.bg1 = self.fg1 = self.bg2 = self.fg2 = ''
# On Tk, vars need to be set/get using setvar()/get()
self.infile = self.setvar('')
self.target = self.setvar('')
self.target_name = self.setvar('')
# The checks appearance order
self.checks = [
'headers', 'enum-title', 'toc', 'mask-email', 'toc-only', 'stdout'
]
# Creating variables for all checks
for check in self.checks:
setattr(self, 'f_'+check, self.setvar(''))
# Load RC config
self.conf = {}
if conf: self.load_config(conf)
def load_config(self, conf):
self.conf = conf
self.gui_colors = conf.get('guicolors') or self.dft_gui_colors
self.bg1, self.fg1, self.bg2, self.fg2 = self.gui_colors
self.root.config(bd=15,bg=self.bg1)
    ### Config as dict for Python 1.5 compat (**opts don't work :( )
def entry(self, **opts): return tkinter.Entry(self.window, opts)
def label(self, txt='', bg=None, **opts):
opts.update({'text':txt,'bg':bg or self.bg1})
return tkinter.Label(self.window, opts)
def button(self,name,cmd,**opts):
opts.update({'text':name,'command':cmd})
return tkinter.Button(self.window, opts)
def check(self,name,checked=0,**opts):
bg, fg = self.bg2, self.fg2
opts.update({
'text':name,
'onvalue':1,
'offvalue':0,
'activeforeground':fg,
'activebackground':bg,
'highlightbackground':bg,
'fg':fg,
'bg':bg,
'anchor':'w'
})
chk = tkinter.Checkbutton(self.window, opts)
if checked: chk.select()
chk.grid(columnspan=2, sticky='w', padx=0)
def menu(self,sel,items):
return tkinter.OptionMenu(*(self.window,sel)+tuple(items))
# Handy auxiliary functions
def action(self, txt):
self.label(
txt,
fg=self.fg1,
bg=self.bg1,
wraplength=self.action_length).grid(column=0,row=self.row)
def frame_open(self):
self.window = tkinter.Frame(
self.root,
bg=self.bg2,
borderwidth=self.frame_border)
def frame_close(self):
self.window.grid(
column=1,
row=self.row,
sticky='w',
padx=self.frame_margin)
self.window = self.root
self.label('').grid()
self.row += 2 # update row count
def target_name2key(self):
name = self.target_name.get()
target = [x for x in TARGETS if TARGET_NAMES[x] == name]
try : key = target[0]
except: key = ''
self.target = self.setvar(key)
def target_key2name(self):
key = self.target.get()
name = TARGET_NAMES.get(key) or key
self.target_name = self.setvar(name)
def exit(self): self.root.destroy()
def setvar(self, val): z = tkinter.StringVar() ; z.set(val) ; return z
def askfile(self):
ftypes= [(_('txt2tags files'), ('*.t2t','*.txt')), (_('All files'),'*')]
newfile = askopenfilename(filetypes=ftypes)
if newfile:
self.infile.set(newfile)
newconf = process_source_file(newfile)[0]
newconf = ConfigMaster().sanity(newconf, gui=1)
# Restate all checkboxes after file selection
#TODO how to make a refresh without killing it?
self.root.destroy()
self.__init__(newconf)
self.mainwindow()
def scrollwindow(self, txt='no text!', title=''):
# Create components
win = tkinter.Toplevel() ; win.title(title)
frame = tkinter.Frame(win)
scroll = tkinter.Scrollbar(frame)
text = tkinter.Text(frame,yscrollcommand=scroll.set)
button = tkinter.Button(win)
# Config
text.insert(tkinter.END, '\n'.join(txt))
scroll.config(command=text.yview)
button.config(text=_('Close'), command=win.destroy)
button.focus_set()
# Packing
text.pack(side='left', fill='both', expand=1)
scroll.pack(side='right', fill='y')
frame.pack(fill='both', expand=1)
button.pack(ipadx=30)
def runprogram(self):
global CMDLINE_RAW
# Prepare
self.target_name2key()
infile, target = self.infile.get(), self.target.get()
# Sanity
if not target:
showwarning(my_name,_("You must select a target type!"))
return
if not infile:
showwarning(my_name,_("You must provide the source file location!"))
return
# Compose cmdline
guiflags = []
real_cmdline_conf = ConfigMaster(CMDLINE_RAW).parse()
if 'infile' in real_cmdline_conf:
del real_cmdline_conf['infile']
if 'target' in real_cmdline_conf:
del real_cmdline_conf['target']
real_cmdline = CommandLine().compose_cmdline(real_cmdline_conf)
default_outfile = ConfigMaster().get_outfile_name(
{'sourcefile':infile, 'outfile':'', 'target':target})
for opt in self.checks:
val = int(getattr(self, 'f_%s'%opt).get() or "0")
if opt == 'stdout': opt = 'outfile'
on_config = self.conf.get(opt) or 0
on_cmdline = real_cmdline_conf.get(opt) or 0
if opt == 'outfile':
if on_config == STDOUT: on_config = 1
else: on_config = 0
if on_cmdline == STDOUT: on_cmdline = 1
else: on_cmdline = 0
if val != on_config or (
val == on_config == on_cmdline and
opt in real_cmdline_conf):
if val:
# Was not set, but user selected on GUI
Debug("user turned ON: %s"%opt)
if opt == 'outfile': opt = '-o-'
else: opt = '--%s'%opt
else:
# Was set, but user deselected on GUI
Debug("user turned OFF: %s"%opt)
if opt == 'outfile':
opt = "-o%s"%default_outfile
else: opt = '--no-%s'%opt
guiflags.append(opt)
cmdline = [my_name, '-t', target] + real_cmdline + guiflags + [infile]
Debug('Gui/Tk cmdline: %s' % cmdline, 5)
# Run!
cmdline_raw_orig = CMDLINE_RAW
try:
# Fake the GUI cmdline as the real one, and parse file
CMDLINE_RAW = CommandLine().get_raw_config(cmdline[1:])
data = process_source_file(infile)
# On GUI, convert_* returns the data, not finish_him()
outlist, config = convert_this_files([data])
# On GUI and STDOUT, finish_him() returns the data
result = finish_him(outlist, config)
            # Show outlist in a nice new window
if result:
outlist, config = result
title = _('%s: %s converted to %s') % (
my_name,
os.path.basename(infile),
config['target'].upper())
self.scrollwindow(outlist, title)
# Show the "file saved" message
else:
msg = "%s\n\n %s\n%s\n\n %s\n%s"%(
_('Conversion done!'),
_('FROM:'), infile,
_('TO:'), config['outfile'])
showinfo(my_name, msg)
except error: # common error (windowed), not quit
pass
except: # fatal error (windowed and printed)
errormsg = getUnknownErrorMessage()
print(errormsg)
showerror(_('%s FATAL ERROR!')%my_name,errormsg)
self.exit()
CMDLINE_RAW = cmdline_raw_orig
def mainwindow(self):
self.infile.set(self.conf.get('sourcefile') or '')
self.target.set(self.conf.get('target') or _('-- select one --'))
outfile = self.conf.get('outfile')
if outfile == STDOUT: # map -o-
self.conf['stdout'] = 1
if self.conf.get('headers') == None:
self.conf['headers'] = 1 # map default
action1 = _("Enter the source file location:")
action2 = _("Choose the target document type:")
action3 = _("Some options you may check:")
action4 = _("Some extra options:")
checks_txt = {
'headers' : _("Include headers on output"),
'enum-title': _("Number titles (1, 1.1, 1.1.1, etc)"),
'toc' : _("Do TOC also (Table of Contents)"),
'mask-email': _("Hide e-mails from SPAM robots"),
'toc-only' : _("Just do TOC, nothing more"),
'stdout' : _("Dump to screen (Don't save target file)")
}
targets_menu = [TARGET_NAMES[x] for x in TARGETS]
# Header
self.label("%s %s"%(my_name.upper(), my_version),
bg=self.bg2, fg=self.fg2).grid(columnspan=2, ipadx=10)
self.label(_("ONE source, MULTI targets")+'\n%s\n'%my_url,
bg=self.bg1, fg=self.fg1).grid(columnspan=2)
self.row = 2
# Choose input file
self.action(action1) ; self.frame_open()
e_infile = self.entry(textvariable=self.infile,width=25)
e_infile.grid(row=self.row, column=0, sticky='e')
if not self.infile.get(): e_infile.focus_set()
self.button(_("Browse"), self.askfile).grid(
row=self.row, column=1, sticky='w', padx=10)
# Show outfile name, style and encoding (if any)
txt = ''
if outfile:
txt = outfile
if outfile == STDOUT: txt = _('<screen>')
l_output = self.label(_('Output: ')+txt, fg=self.fg2, bg=self.bg2)
l_output.grid(columnspan=2, sticky='w')
for setting in ['style','encoding']:
if self.conf.get(setting):
name = setting.capitalize()
val = self.conf[setting]
self.label('%s: %s'%(name, val),
fg=self.fg2, bg=self.bg2).grid(
columnspan=2, sticky='w')
# Choose target
self.frame_close() ; self.action(action2)
self.frame_open()
self.target_key2name()
self.menu(self.target_name, targets_menu).grid(
columnspan=2, sticky='w')
# Options checkboxes label
self.frame_close() ; self.action(action3)
self.frame_open()
# Compose options check boxes, example:
# self.check(checks_txt['toc'],1,variable=self.f_toc)
for check in self.checks:
# Extra options label
if check == 'toc-only':
self.frame_close() ; self.action(action4)
self.frame_open()
txt = checks_txt[check]
var = getattr(self, 'f_'+check)
checked = self.conf.get(check)
self.check(txt,checked,variable=var)
self.frame_close()
# Spacer and buttons
self.label('').grid() ; self.row += 1
b_quit = self.button(_("Quit"), self.exit)
b_quit.grid(row=self.row, column=0, sticky='w', padx=30)
b_conv = self.button(_("Convert!"), self.runprogram)
b_conv.grid(row=self.row, column=1, sticky='e', padx=30)
if self.target.get() and self.infile.get():
b_conv.focus_set()
# As documentation told me
if sys.platform.startswith('win'):
self.root.iconify()
self.root.update()
self.root.deiconify()
self.root.mainloop()
##############################################################################
##############################################################################
def exec_command_line(user_cmdline=[]):
global CMDLINE_RAW, RC_RAW, DEBUG, VERBOSE, QUIET, GUI, Error
# Extract command line data
cmdline_data = user_cmdline or sys.argv[1:]
CMDLINE_RAW = CommandLine().get_raw_config(cmdline_data, relative=1)
cmdline_parsed = ConfigMaster(CMDLINE_RAW).parse()
DEBUG = cmdline_parsed.get('debug' ) or 0
VERBOSE = cmdline_parsed.get('verbose') or 0
QUIET = cmdline_parsed.get('quiet' ) or 0
GUI = cmdline_parsed.get('gui' ) or 0
infiles = cmdline_parsed.get('infile' ) or []
Message(_("Txt2tags %s processing begins")%my_version,1)
# The easy ones
if cmdline_parsed.get('help' ): Quit(USAGE)
if cmdline_parsed.get('version'): Quit(VERSIONSTR)
if cmdline_parsed.get('targets'):
listTargets()
Quit()
# Multifile haters
if len(infiles) > 1:
errmsg=_("Option --%s can't be used with multiple input files")
for option in NO_MULTI_INPUT:
if cmdline_parsed.get(option):
Error(errmsg%option)
Debug("system platform: %s"%sys.platform)
Debug("python version: %s"%(sys.version.split('(')[0]))
Debug("line break char: %s"%repr(LB))
Debug("command line: %s"%sys.argv)
Debug("command line raw config: %s"%CMDLINE_RAW,1)
# Extract RC file config
if cmdline_parsed.get('rc') == 0:
Message(_("Ignoring user configuration file"),1)
else:
rc_file = get_rc_path()
if os.path.isfile(rc_file):
Message(_("Loading user configuration file"),1)
RC_RAW = ConfigLines(file_=rc_file).get_raw_config()
Debug("rc file: %s"%rc_file)
Debug("rc file raw config: %s"%RC_RAW,1)
# Get all infiles config (if any)
infiles_config = get_infiles_config(infiles)
# Is GUI available?
# Try to load and start GUI interface for --gui
if GUI:
try:
load_GUI_resources()
Debug("GUI resources OK (Tk module is installed)")
winbox = Gui()
Debug("GUI display OK")
GUI = 1
except:
Debug("GUI Error: no Tk module or no DISPLAY")
GUI = 0
# User forced --gui, but it's not available
if cmdline_parsed.get('gui') and not GUI:
print(getTraceback()); print()
Error(
"Sorry, I can't run my Graphical Interface - GUI\n"
"- Check if Python Tcl/Tk module is installed (Tkinter)\n"
"- Make sure you are in a graphical environment (like X)")
# Okay, we will use GUI
if GUI:
Message(_("We are on GUI interface"),1)
        # Redefine the Error function to raise an exception instead of calling sys.exit()
def Error(msg):
showerror(_('txt2tags ERROR!'), msg)
raise error
# If no input file, get RC+cmdline config, else full config
if not infiles:
gui_conf = ConfigMaster(RC_RAW+CMDLINE_RAW).parse()
else:
try : gui_conf = infiles_config[0][0]
except: gui_conf = {}
# Sanity is needed to set outfile and other things
gui_conf = ConfigMaster().sanity(gui_conf, gui=1)
Debug("GUI config: %s"%gui_conf,5)
# Insert config and populate the nice window!
winbox.load_config(gui_conf)
winbox.mainwindow()
# Console mode rocks forever!
else:
Message(_("We are on Command Line interface"),1)
# Called with no arguments, show error
# TODO#1: this checking should be only in ConfigMaster.sanity()
if not infiles:
Error(_('Missing input file (try --help)') + '\n\n' +
_('Please inform an input file (.t2t) at the end of the command.') + '\n' +
_('Example:') + ' %s -t html %s' % (my_name, _('file.t2t')))
convert_this_files(infiles_config)
Message(_("Txt2tags finished successfully"),1)
if __name__ == '__main__':
try:
exec_command_line()
except error as msg:
sys.stderr.write("%s\n"%msg)
sys.stderr.flush()
sys.exit(1)
except SystemExit:
pass
except:
sys.stderr.write(getUnknownErrorMessage())
sys.stderr.flush()
sys.exit(1)
Quit()
# The End.
| 236,923 | 38.4676 | 126 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/autodoc/external/__init__.py
| 0 | 0 | 0 |
py
|
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/buildbot/regression_test.py
|
import logging
from downward.reports.absolute import AbsoluteReport
class Check:
"""
Compare the attribute values x and y of two runs and check whether
*min_rel* <= y/x <= *max_rel*. Even if the check fails, only report the
failure if the absolute difference is greater than *ignored_abs_diff*.
"""
def __init__(self, attribute, min_rel=None, max_rel=None, ignored_abs_diff=0):
self.attribute = attribute
self.min_rel = min_rel
self.max_rel = max_rel
self.ignored_abs_diff = ignored_abs_diff
def get_error(self, base, new):
val1 = base.get(self.attribute)
val2 = new.get(self.attribute)
if val1 is None and val2 is None:
# Some configs don't produce certain attributes.
return ''
if val1 is None:
return 'Attribute %s missing for %s' % (self.attribute, base['config'])
if val2 is None:
return 'Attribute %s missing for %s' % (self.attribute, new['config'])
if abs(val2 - val1) <= self.ignored_abs_diff or val1 == 0:
return ''
factor = val2 / float(val1)
msg = self.attribute + ' | %(val2).2f / %(val1).2f = %(factor).2f' % locals()
if self.min_rel and factor < self.min_rel:
return msg + ' < %.2f' % self.min_rel
if self.max_rel and factor > self.max_rel:
return msg + ' > %.2f' % self.max_rel
return ''
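# Illustrative sketch (not part of the original module): how Check behaves
# on two hypothetical run dictionaries. The attribute values are invented;
# the helper is defined but never called.
def _example_check_usage():
    base_run = {'config': 'base-lmcut', 'expansions': 1000}
    new_run = {'config': 'new-lmcut', 'expansions': 1200}
    check = Check('expansions', max_rel=1.05)
    # 1200 / 1000.0 = 1.20 exceeds the allowed factor 1.05, so get_error()
    # returns a message like 'expansions | 1200.00 / 1000.00 = 1.20 > 1.05'.
    # An empty string would mean the check passed.
    return check.get_error(base_run, new_run)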
class RegressionCheckReport(AbsoluteReport):
"""
Write a table with the regressions. If there are none, no table is generated
and therefore no output file is written.
"""
def __init__(self, baseline, checks, result_handler, **kwargs):
"""
*baseline* must be a global revision identifier.
        *checks* must be an iterable of Check instances.
"""
AbsoluteReport.__init__(self, **kwargs)
self.baseline = baseline
self.checks = checks
self.result_handler = result_handler
def _is_baseline_run(self, run):
return run['global_revision'].startswith(self.baseline)
def get_markup(self):
lines = []
for (domain, problem), runs in self.problem_runs.items():
runs_base = [run for run in runs if self._is_baseline_run(run)]
runs_new = [run for run in runs if not self._is_baseline_run(run)]
assert len(runs_base) == len(runs_new), (len(runs_base), len(runs_new), self.baseline)
for base, new in zip(runs_base, runs_new):
algo = new['algorithm']
for check in self.checks:
error = check.get_error(base, new)
if error:
lines.append('| %(domain)s:%(problem)s '
'| %(algo)s '
'| %(error)s |' % locals())
if lines:
# Add header.
lines.insert(0, '|| Task | Config | Attribute | Error |')
return '\n'.join(lines)
def write(self):
AbsoluteReport.write(self)
markup = self.get_markup()
if markup:
print('There has been a regression:')
print()
print(markup)
success = not markup
self.result_handler(success)
| 3,281 | 37.162791 | 98 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/buildbot/buildbot-exp.py
|
#! /usr/bin/env python3
USAGE = """\
1) Use via buildbot:
The buildbot weekly and nightly tests use this script to check for
performance regressions. To update the baseline:
* change BASELINE variable below
* push the change
* login to http://buildbot.fast-downward.org
* Under Builds > Builders > recreate-baseline-worker-gcc8-lp select
"force-recreate-baseline". Make sure to "force" a new build instead
of "rebuilding" an existing build. Rebuilding will regenerate the
old baseline.
* Wait for the next nightly build or force a nightly build (do not
rebuild an old build).
You can find the experiment data on the Linux build slave in the
docker volume "buildbot-experiments".
2) Use as commandline tool:
Create baseline data
./buildbot-exp.py --test nightly --rev baseline --all
./buildbot-exp.py --test weekly --rev baseline --all
Compare the current revision to the baseline (these commands exit
with 1 if a regression was found):
./buildbot-exp.py --test nightly --all
./buildbot-exp.py --test weekly --all
You can adapt the experiment by changing the values for BASELINE,
CONFIGS, SUITES and RELATIVE_CHECKS below.
"""
import logging
import os
import shutil
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import cached_revision, tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from regression_test import Check, RegressionCheckReport
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.abspath(os.path.join(DIR, '../../'))
BENCHMARKS_DIR = os.path.join(REPO, "misc", "tests", "benchmarks")
DEFAULT_BASE_DIR = os.path.dirname(tools.get_script_path())
BASE_DIR = os.getenv("BUILDBOT_EXP_BASE_DIR", DEFAULT_BASE_DIR)
EXPERIMENTS_DIR = os.path.join(BASE_DIR, 'data')
REVISION_CACHE = os.path.join(BASE_DIR, 'revision-cache')
REGRESSIONS_DIR = os.path.join(BASE_DIR, 'regressions')
BASELINE = '0b4344f8f5a8'
CONFIGS = {}
CONFIGS['nightly'] = [
('lmcut', ['--search', 'astar(lmcut())']),
('lazy-greedy-ff', ['--evaluator', 'h=ff()', '--search', 'lazy_greedy([h], preferred=[h])']),
('lazy-greedy-cea', ['--evaluator', 'h=cea()', '--search', 'lazy_greedy([h], preferred=[h])']),
('lazy-greedy-ff-cea', ['--evaluator', 'hff=ff()', '--heuristic', 'hcea=cea()',
'--search', 'lazy_greedy([hff, hcea], preferred=[hff, hcea])']),
('blind', ['--search', 'astar(blind())']),
# TODO: Revert to optimal=true.
('lmcount-optimal', ['--search',
'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=false))']),
]
CONFIGS['weekly'] = CONFIGS['nightly']
SUITES = {
'nightly': ['gripper:prob01.pddl', 'miconic:s1-0.pddl'],
'weekly': ['gripper:prob01.pddl', 'miconic:s1-0.pddl'],
}
TRANSLATOR_ATTRIBUTES = [
'auxiliary_atoms', 'axioms', 'derived_variables',
'effect_conditions_simplified', 'facts', 'final_queue_length',
'implied_preconditions_added', 'mutex_groups',
'operators', 'operators_removed', 'propositions_removed',
'relevant_atoms', 'task_size', 'total_mutex_groups_size',
'total_queue_pushes', 'uncovered_facts', 'variables']
TIME_ATTRIBUTES = (
['search_time', 'total_time'] + ['translator_time_%s' % attr for attr in (
'building_dictionary_for_full_mutex_groups', 'building_mutex_information',
'building_strips_to_sas_dictionary', 'building_translation_key',
'checking_invariant_weight', 'choosing_groups', 'collecting_mutex_groups',
'completing_instantiation', 'computing_fact_groups', 'computing_model',
'detecting_unreachable_propositions', 'done', 'finding_invariants',
'generating_datalog_program', 'instantiating', 'instantiating_groups',
'normalizing_datalog_program', 'normalizing_task', 'parsing',
'preparing_model', 'processing_axioms', 'simplifying_axioms',
'translating_task', 'writing_output')])
SEARCH_ATTRIBUTES = ['dead_ends', 'evaluations', 'expansions',
'expansions_until_last_jump', 'generated', 'reopened']
MEMORY_ATTRIBUTES = ['translator_peak_memory', 'memory']
RELATIVE_CHECKS = ([
Check('initial_h_value', min_rel=1.0),
Check('cost', max_rel=1.0),
Check('plan_length', max_rel=1.0)] +
[Check('translator_%s' % attr, max_rel=1.0) for attr in TRANSLATOR_ATTRIBUTES] +
[Check(attr, max_rel=1.05, ignored_abs_diff=1) for attr in TIME_ATTRIBUTES] +
[Check(attr, max_rel=1.05) for attr in SEARCH_ATTRIBUTES] +
[Check(attr, max_rel=1.05, ignored_abs_diff=1024) for attr in MEMORY_ATTRIBUTES])
# Absolute attributes are reported, but not checked.
ABSOLUTE_ATTRIBUTES = [check.attribute for check in RELATIVE_CHECKS]
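# Illustrative sketch (not part of the original script): how one of the
# relative checks above reads in practice. The timings are invented; the
# helper is defined but never called.
def _example_time_check():
    check = Check('total_time', max_rel=1.05, ignored_abs_diff=1)
    # A slowdown from 10s to 12s is both larger than 1 second and more than
    # 5 percent, so it is reported; a slowdown from 10s to 10.8s is within
    # the 1-second tolerance and yields an empty string.
    flagged = check.get_error({'total_time': 10.0}, {'total_time': 12.0})
    ignored = check.get_error({'total_time': 10.0}, {'total_time': 10.8})
    return flagged, ignored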
def parse_custom_args():
ARGPARSER.description = USAGE
ARGPARSER.add_argument('--rev', dest='revision', default='main',
help='Fast Downward revision or "baseline".')
ARGPARSER.add_argument('--test', choices=['nightly', 'weekly'], default='nightly',
help='Select whether "nightly" or "weekly" tests should be run.')
return ARGPARSER.parse_args()
def get_exp_dir(name, test):
return os.path.join(EXPERIMENTS_DIR, '%s-%s' % (name, test))
def regression_test_handler(test, rev, success):
if not success:
tools.makedirs(REGRESSIONS_DIR)
tarball = os.path.join(REGRESSIONS_DIR, "{test}-{rev}.tar.gz".format(**locals()))
subprocess.check_call(
["tar", "-czf", tarball, "-C", BASE_DIR, os.path.relpath(EXPERIMENTS_DIR, start=BASE_DIR)])
logging.error(
"Regression found. To inspect the experiment data for the failed regression test, run\n"
"sudo ./extract-regression-experiment.sh {test}-{rev}\n"
"in the ~/infrastructure/hosts/linux-buildbot-worker directory "
"on the Linux buildbot computer.".format(**locals()))
exp_dir = get_exp_dir(rev, test)
eval_dir = exp_dir + "-eval"
shutil.rmtree(exp_dir)
shutil.rmtree(eval_dir)
if not success:
sys.exit(1)
def main():
args = parse_custom_args()
if args.revision.lower() == 'baseline':
rev = BASELINE
name = 'baseline'
else:
rev = cached_revision.get_global_rev(REPO, rev=args.revision)
name = rev
exp = FastDownwardExperiment(path=get_exp_dir(name, args.test), revision_cache=REVISION_CACHE)
exp.add_suite(BENCHMARKS_DIR, SUITES[args.test])
for config_nick, config in CONFIGS[args.test]:
exp.add_algorithm(rev + "-" + config_nick, REPO, rev, config)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES), name='report')
# Only compare results if we are not running the baseline experiment.
if rev != BASELINE:
def result_handler(success):
regression_test_handler(args.test, rev, success)
exp.add_fetcher(
src=get_exp_dir('baseline', args.test) + '-eval',
dest=exp.eval_dir,
merge=True,
name='fetch-baseline-results')
exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES), name='comparison')
exp.add_report(
RegressionCheckReport(BASELINE, RELATIVE_CHECKS, result_handler),
name='regression-check')
exp.run_steps()
main()
| 7,557 | 39.202128 | 103 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/style/run-clang-tidy.py
|
#! /usr/bin/env python3
import json
import os
import pipes
import re
import subprocess
import sys
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
SRC_DIR = os.path.join(REPO, "src")
import utils
def check_search_code_with_clang_tidy():
# clang-tidy needs the CMake files.
build_dir = os.path.join(REPO, "builds", "clang-tidy")
if not os.path.exists(build_dir):
os.makedirs(build_dir)
with open(os.devnull, 'w') as devnull:
subprocess.check_call(["cmake", "../../src"], cwd=build_dir, stdout=devnull)
# Create custom compilation database file. CMake outputs part of this information
# when passing -DCMAKE_EXPORT_COMPILE_COMMANDS=ON, but the resulting file
# contains no header files.
search_dir = os.path.join(REPO, "src/search")
src_files = utils.get_src_files(search_dir, (".h", ".cc"))
compile_commands = [{
"directory": os.path.join(build_dir, "search"),
"command": "g++ -I{}/ext -std=c++11 -c {}".format(search_dir, src_file),
"file": src_file}
for src_file in src_files
]
with open(os.path.join(build_dir, "compile_commands.json"), "w") as f:
json.dump(compile_commands, f, indent=2)
# See https://clang.llvm.org/extra/clang-tidy/checks/list.html for
# an explanation of the checks. We comment out inactive checks of some
# categories instead of deleting them to see which additional checks
# we could activate.
checks = [
# Enable with CheckTriviallyCopyableMove=0 when we require
# clang-tidy >= 6.0 (see issue856).
# "misc-move-const-arg",
"misc-move-constructor-init",
"misc-use-after-move",
"performance-for-range-copy",
"performance-implicit-cast-in-loop",
"performance-inefficient-vector-operation",
"readability-avoid-const-params-in-decls",
# "readability-braces-around-statements",
"readability-container-size-empty",
"readability-delete-null-pointer",
"readability-deleted-default",
# "readability-else-after-return",
# "readability-function-size",
# "readability-identifier-naming",
# "readability-implicit-bool-cast",
# Disabled since we prefer a clean interface over consistent names.
# "readability-inconsistent-declaration-parameter-name",
"readability-misleading-indentation",
"readability-misplaced-array-index",
# "readability-named-parameter",
# "readability-non-const-parameter",
"readability-redundant-control-flow",
"readability-redundant-declaration",
"readability-redundant-function-ptr-dereference",
"readability-redundant-member-init",
"readability-redundant-smartptr-get",
"readability-redundant-string-cstr",
"readability-redundant-string-init",
"readability-simplify-boolean-expr",
"readability-static-definition-in-anonymous-namespace",
"readability-uniqueptr-delete-release",
]
cmd = [
"run-clang-tidy-8",
"-quiet",
"-p", build_dir,
"-clang-tidy-binary=clang-tidy-8",
# Include all non-system headers (.*) except the ones from search/ext/.
"-header-filter=.*,-tree.hh,-tree_util.hh",
"-checks=-*," + ",".join(checks)]
print("Running clang-tidy: " + " ".join(pipes.quote(x) for x in cmd))
print()
try:
output = subprocess.check_output(cmd, cwd=DIR, stderr=subprocess.STDOUT).decode("utf-8")
except subprocess.CalledProcessError as err:
print("Failed to run clang-tidy-8. Is it on the PATH?")
print("Output:", err.stdout)
return False
errors = re.findall(r"^(.*:\d+:\d+: (?:warning|error): .*)$", output, flags=re.M)
for error in errors:
print(error)
if errors:
fix_cmd = cmd + [
"-clang-apply-replacements-binary=clang-apply-replacements-8", "-fix"]
print()
print("You may be able to fix these issues with the following command: " +
" ".join(pipes.quote(x) for x in fix_cmd))
sys.exit(1)
check_search_code_with_clang_tidy()
| 4,198 | 37.522936 | 96 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/style/check-cc-file.py
|
#! /usr/bin/env python3
import argparse
import re
import subprocess
import sys
STD_REGEX = r"(^|\s|\W)std::"
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("cc_file", nargs="+")
return parser.parse_args()
def check_std(cc_file):
source_without_comments = subprocess.check_output(
["gcc", "-fpreprocessed", "-dD", "-E", cc_file]).decode("utf-8")
errors = []
for line in source_without_comments.splitlines():
if re.search(STD_REGEX, line):
errors.append("Remove std:: from {}: {}".format(
cc_file, line.strip()))
return errors
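# Illustrative sketch (not part of the original script): what STD_REGEX
# flags. Both code lines are hypothetical; the helper is defined but
# never called.
def _example_std_regex():
    # Matches: "std::" preceded by whitespace (or a non-word character,
    # or the start of the line).
    flagged = bool(re.search(STD_REGEX, "    std::vector<int> numbers;"))
    # No match: "std::" embedded directly after other word characters.
    not_flagged = bool(re.search(STD_REGEX, "mystd::value"))
    return flagged, not_flagged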
def main():
args = parse_args()
errors = []
for cc_file in args.cc_file:
errors.extend(check_std(cc_file))
for error in errors:
print(error)
if errors:
sys.exit(1)
if __name__ == "__main__":
main()
| 881 | 19.511628 | 72 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/style/check-include-guard-convention.py
|
#! /usr/bin/env python3
import glob
import os.path
import sys
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
SRC_DIR = os.path.join(REPO, "src")
def check_header_files(component):
component_dir = os.path.join(SRC_DIR, component)
header_files = (glob.glob(os.path.join(component_dir, "*.h")) +
glob.glob(os.path.join(component_dir, "*", "*.h")))
assert header_files
errors = []
for filename in header_files:
assert filename.endswith(".h"), filename
rel_filename = os.path.relpath(filename, start=component_dir)
guard = rel_filename.replace(".", "_").replace("/", "_").replace("-", "_").upper()
expected = "#ifndef " + guard
for line in open(filename):
line = line.rstrip("\n")
if line.startswith("#ifndef"):
if line != expected:
errors.append('%s uses guard "%s" but should use "%s"' %
(filename, line, expected))
break
return errors
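# Illustrative sketch (not part of the original script): the guard name
# derived for a hypothetical header "utils/rng-options.h" relative to the
# component directory. The helper is defined but never called.
def _example_expected_guard():
    rel_filename = "utils/rng-options.h"
    guard = rel_filename.replace(".", "_").replace("/", "_").replace("-", "_").upper()
    # guard == "UTILS_RNG_OPTIONS_H", so the header is expected to start
    # with the line "#ifndef UTILS_RNG_OPTIONS_H".
    return "#ifndef " + guard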
def main():
errors = []
errors.extend(check_header_files("search"))
for error in errors:
print(error)
if errors:
sys.exit(1)
if __name__ == "__main__":
main()
| 1,278 | 27.422222 | 90 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/style/utils.py
|
import os
import os.path
def get_src_files(path, extensions, ignore_dirs=None):
ignore_dirs = ignore_dirs or []
src_files = []
for root, dirs, files in os.walk(path):
for ignore_dir in ignore_dirs:
if ignore_dir in dirs:
dirs.remove(ignore_dir)
src_files.extend([
os.path.join(root, file)
for file in files if file.endswith(extensions)])
return src_files
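# Illustrative sketch (not part of the original module): a typical call,
# using a hypothetical repository layout. The helper is defined but never
# called.
def _example_get_src_files():
    # Collect all C++ headers and sources under src/search while skipping
    # the bundled "ext" directory.
    return get_src_files("src/search", (".h", ".cc"), ignore_dirs=["ext"])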
| 441 | 26.625 | 60 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/style/run-all-style-checks.py
|
#! /usr/bin/env python3
"""
Run syntax checks on Python and C++ files.
Exit with 0 if all tests pass and with 1 otherwise.
"""
import errno
import os
import subprocess
import sys
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
SRC_DIR = os.path.join(REPO, "src")
import utils
def check_python_style():
try:
subprocess.check_call([
"flake8",
# https://flake8.pycqa.org/en/latest/user/error-codes.html
"--extend-ignore", "E128,E129,E131,E261,E266,E301,E302,E305,E306,E402,E501,E741,F401",
"--exclude", "run-clang-tidy.py,txt2tags.py,.tox",
"src/translate/", "driver/", "misc/",
"build.py", "build_configs.py", "fast-downward.py"], cwd=REPO)
except FileNotFoundError:
sys.exit('Error: flake8 not found. Try "tox -e style".')
except subprocess.CalledProcessError:
return False
else:
return True
def check_include_guard_convention():
return subprocess.call("./check-include-guard-convention.py", cwd=DIR) == 0
def check_cc_files():
"""
Currently, we only check that there is no "std::" in .cc files.
"""
search_dir = os.path.join(SRC_DIR, "search")
cc_files = utils.get_src_files(search_dir, (".cc",))
print("Checking style of {} *.cc files".format(len(cc_files)))
return subprocess.call(["./check-cc-file.py"] + cc_files, cwd=DIR) == 0
def check_cplusplus_style():
return subprocess.call(["./run-uncrustify.py"], cwd=DIR) == 0
def main():
results = []
for test_name, test in sorted(globals().items()):
if test_name.startswith("check_"):
print("Running {}".format(test_name))
results.append(test())
if all(results):
print("All style checks passed")
else:
sys.exit("Style checks failed")
if __name__ == "__main__":
main()
| 1,904 | 25.830986 | 98 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/misc/style/run-uncrustify.py
|
#! /usr/bin/env python3
"""
Run uncrustify on all C++ files in the repository.
"""
import argparse
import os.path
import subprocess
import sys
import utils
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
SEARCH_DIR = os.path.join(REPO, "src", "search")
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"-m", "--modify", action="store_true",
help="modify the files that need to be uncrustified")
parser.add_argument(
"-f", "--force", action="store_true",
help="modify files even if there are uncommited changes")
return parser.parse_args()
def search_files_are_dirty():
if os.path.exists(os.path.join(REPO, ".git")):
cmd = ["git", "status", "--porcelain", SEARCH_DIR]
elif os.path.exists(os.path.join(REPO, ".hg")):
cmd = ["hg", "status", SEARCH_DIR]
else:
sys.exit("Error: repo must contain a .git or .hg directory.")
return bool(subprocess.check_output(cmd, cwd=REPO))
def main():
args = parse_args()
if not args.force and args.modify and search_files_are_dirty():
sys.exit(f"Error: {SEARCH_DIR} has uncommited changes.")
src_files = utils.get_src_files(SEARCH_DIR, (".h", ".cc"))
print(f"Checking {len(src_files)} files with uncrustify.")
config_file = os.path.join(REPO, ".uncrustify.cfg")
executable = "uncrustify"
cmd = [executable, "-q", "-c", config_file] + src_files
if args.modify:
cmd.append("--no-backup")
else:
cmd.append("--check")
try:
# Hide clean files printed on stdout.
returncode = subprocess.call(cmd, stdout=subprocess.PIPE)
except FileNotFoundError:
sys.exit(f"Error: {executable} not found. Is it on the PATH?")
if not args.modify and returncode != 0:
print('Run "tox -e fix-style" in the misc/ directory to fix the C++ style.')
return returncode
if __name__ == "__main__":
sys.exit(main())
| 2,016 | 30.030769 | 84 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue643/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
import suites
configs = [
IssueConfig(
"cegar-landmarks-10k",
["--search", "astar(cegar(subtasks=[landmarks()],max_states=10000))"]),
IssueConfig(
"cegar-landmarks-goals-900s",
["--search", "astar(cegar(subtasks=[landmarks(),goals()],max_time=900))"]),
]
revisions = ["issue643-base", "issue643-v1"]
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suites.suite_optimal_strips(),
test_suite=["depot:pfile1"],
email="[email protected]",
)
exp.add_comparison_table_step()
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
| 1,151 | 27.097561 | 86 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue643/suites.py
|
# Benchmark suites from the Fast Downward benchmark collection.
def suite_alternative_formulations():
return ['airport-adl', 'no-mprime', 'no-mystery']
def suite_ipc98_to_ipc04_adl():
return [
'assembly', 'miconic-fulladl', 'miconic-simpleadl',
'optical-telegraphs', 'philosophers', 'psr-large',
'psr-middle', 'schedule',
]
def suite_ipc98_to_ipc04_strips():
return [
'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid',
'gripper', 'logistics00', 'logistics98', 'miconic', 'movie',
'mprime', 'mystery', 'pipesworld-notankage', 'psr-small',
'satellite', 'zenotravel',
]
def suite_ipc98_to_ipc04():
# All IPC1-4 domains, including the trivial Movie.
return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips())
def suite_ipc06_adl():
return [
'openstacks',
'pathways',
'trucks',
]
def suite_ipc06_strips_compilations():
return [
'openstacks-strips',
'pathways-noneg',
'trucks-strips',
]
def suite_ipc06_strips():
return [
'pipesworld-tankage',
'rovers',
'storage',
'tpp',
]
def suite_ipc06():
return sorted(suite_ipc06_adl() + suite_ipc06_strips())
def suite_ipc08_common_strips():
return [
'parcprinter-08-strips',
'pegsol-08-strips',
'scanalyzer-08-strips',
]
def suite_ipc08_opt_adl():
return ['openstacks-opt08-adl']
def suite_ipc08_opt_strips():
return sorted(suite_ipc08_common_strips() + [
'elevators-opt08-strips',
'openstacks-opt08-strips',
'sokoban-opt08-strips',
'transport-opt08-strips',
'woodworking-opt08-strips',
])
def suite_ipc08_opt():
return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl())
def suite_ipc08_sat_adl():
return ['openstacks-sat08-adl']
def suite_ipc08_sat_strips():
return sorted(suite_ipc08_common_strips() + [
# Note: cyber-security is missing.
'elevators-sat08-strips',
'openstacks-sat08-strips',
'sokoban-sat08-strips',
'transport-sat08-strips',
'woodworking-sat08-strips',
])
def suite_ipc08_sat():
return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl())
def suite_ipc08():
return sorted(set(suite_ipc08_opt() + suite_ipc08_sat()))
def suite_ipc11_opt():
return [
'barman-opt11-strips',
'elevators-opt11-strips',
'floortile-opt11-strips',
'nomystery-opt11-strips',
'openstacks-opt11-strips',
'parcprinter-opt11-strips',
'parking-opt11-strips',
'pegsol-opt11-strips',
'scanalyzer-opt11-strips',
'sokoban-opt11-strips',
'tidybot-opt11-strips',
'transport-opt11-strips',
'visitall-opt11-strips',
'woodworking-opt11-strips',
]
def suite_ipc11_sat():
return [
'barman-sat11-strips',
'elevators-sat11-strips',
'floortile-sat11-strips',
'nomystery-sat11-strips',
'openstacks-sat11-strips',
'parcprinter-sat11-strips',
'parking-sat11-strips',
'pegsol-sat11-strips',
'scanalyzer-sat11-strips',
'sokoban-sat11-strips',
'tidybot-sat11-strips',
'transport-sat11-strips',
'visitall-sat11-strips',
'woodworking-sat11-strips',
]
def suite_ipc11():
return sorted(suite_ipc11_opt() + suite_ipc11_sat())
def suite_ipc14_agl_adl():
return [
'cavediving-agl14-adl',
'citycar-agl14-adl',
'maintenance-agl14-adl',
]
def suite_ipc14_agl_strips():
return [
'barman-agl14-strips',
'childsnack-agl14-strips',
'floortile-agl14-strips',
'ged-agl14-strips',
'hiking-agl14-strips',
'openstacks-agl14-strips',
'parking-agl14-strips',
'tetris-agl14-strips',
'thoughtful-agl14-strips',
'transport-agl14-strips',
'visitall-agl14-strips',
]
def suite_ipc14_agl():
return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips())
def suite_ipc14_mco_adl():
return [
'cavediving-mco14-adl',
'citycar-mco14-adl',
'maintenance-mco14-adl',
]
def suite_ipc14_mco_strips():
return [
'barman-mco14-strips',
'childsnack-mco14-strips',
'floortile-mco14-strips',
'ged-mco14-strips',
'hiking-mco14-strips',
'openstacks-mco14-strips',
'parking-mco14-strips',
'tetris-mco14-strips',
'thoughtful-mco14-strips',
'transport-mco14-strips',
'visitall-mco14-strips',
]
def suite_ipc14_mco():
return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips())
def suite_ipc14_opt_adl():
return [
'cavediving-opt14-adl',
'citycar-opt14-adl',
'maintenance-opt14-adl',
]
def suite_ipc14_opt_strips():
return [
'barman-opt14-strips',
'childsnack-opt14-strips',
'floortile-opt14-strips',
'ged-opt14-strips',
'hiking-opt14-strips',
'openstacks-opt14-strips',
'parking-opt14-strips',
'tetris-opt14-strips',
'tidybot-opt14-strips',
'transport-opt14-strips',
'visitall-opt14-strips',
]
def suite_ipc14_opt():
return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips())
def suite_ipc14_sat_adl():
return [
'cavediving-sat14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_sat_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_sat():
return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips())
def suite_ipc14():
return sorted(
suite_ipc14_agl() + suite_ipc14_mco() +
suite_ipc14_opt() + suite_ipc14_sat())
def suite_unsolvable():
# TODO: Add other unsolvable problems (Miconic-FullADL).
# TODO: Add 'fsc-grid-r:prize5x5_R.pddl' and 't0-uts:uts_r-02.pddl'
# if the extra-domains branch is merged.
return sorted(
['mystery:prob%02d.pddl' % index
for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] +
['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl'])
def suite_optimal_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_opt_adl())
def suite_optimal_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() +
suite_ipc11_opt())
def suite_optimal():
return sorted(suite_optimal_adl() + suite_optimal_strips())
def suite_satisficing_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_sat_adl())
def suite_satisficing_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() +
suite_ipc11_sat())
def suite_satisficing():
return sorted(suite_satisficing_adl() + suite_satisficing_strips())
def suite_all():
return sorted(
suite_ipc98_to_ipc04() + suite_ipc06() +
suite_ipc06_strips_compilations() + suite_ipc08() +
suite_ipc11() + suite_alternative_formulations())
| 7,695 | 23.35443 | 77 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue643/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, suite, revisions=[], configs={}, grid_priority=None,
path=None, test_suite=None, email=None, processes=None,
**kwargs):
"""
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
*configs* must be a non-empty list of IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(..., suite=suites.suite_all())
IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(..., suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(..., grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
Specify *test_suite* to set the benchmarks for experiment test
runs. By default the first gripper task is used.
IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])
If *email* is specified, it should be an email address. This
email address will be notified upon completion of the experiments
if it is run on the cluster.
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment(processes=processes)
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(
priority=grid_priority, email=email)
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
repo = get_repo_base()
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
repo,
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self.add_suite(os.path.join(repo, "benchmarks"), suite)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(self.eval_dir,
get_experiment_name() + "." +
report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(Step('publish-absolute-report',
subprocess.call,
['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ "." + report.output_format)
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ ".html")
subprocess.call(['publish', outfile])
self.add_step(Step("make-comparison-tables", make_comparison_tables))
self.add_step(Step("publish-comparison-tables", publish_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,481 | 33.963585 | 83 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue643/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
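        # For example, if report.ylim_top were 32, tick_step would be 2.0 and
        # the resulting ticks 1/32, 1/16, ..., 1, ..., 16, 32 (eleven in total).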
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
    Generate a scatter plot that shows how a specific attribute compares
    between two configurations. The attribute value in config 1 is shown
    on the x-axis and the ratio of the value in config 2 to the value in
    config 1 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['config'] == self.configs[0] and
run2['config'] == self.configs[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.configs[0], val1)
assert val2 > 0, (domain, problem, self.configs[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlots use log-scaling on the x-axis by default.
default_xscale = 'log'
if self.attribute and self.attribute in self.LINEAR:
default_xscale = 'linear'
PlotReport._set_scales(self, xscale or default_xscale, 'log')
| 3,921 | 35.654206 | 84 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue425/opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites, configs
from downward.reports.compare import CompareConfigsReport
import common_setup
REVISIONS = ["issue425-base", "issue425-v1"]
CONFIGS = configs.default_configs_optimal()
# remove config that is disabled in this branch
del CONFIGS['astar_selmax_lmcut_lmcount']
exp = common_setup.IssueExperiment(
search_revisions=REVISIONS,
configs=CONFIGS,
suite=suites.suite_optimal_with_ipc11(),
limits={"search_time": 300}
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
def grouped_configs_to_compare(config_nicks):
grouped_configs = []
for config_nick in config_nicks:
col_names = ['%s-%s' % (r, config_nick) for r in REVISIONS]
grouped_configs.append((col_names[0], col_names[1],
'Diff - %s' % config_nick))
return grouped_configs
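# Illustrative example (the config nick is made up): with REVISIONS as above,
# grouped_configs_to_compare(["astar_lmcut"]) returns
# [("issue425-base-astar_lmcut", "issue425-v1-astar_lmcut", "Diff - astar_lmcut")].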
exp.add_report(CompareConfigsReport(
compared_configs=grouped_configs_to_compare(configs.configs_optimal_core()),
attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES,
),
outfile="issue425-opt-compare-core-configs.html"
)
def add_first_run_search_time(run):
if run.get("search_time_all", []):
run["first_run_search_time"] = run["search_time_all"][0]
return run
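# Illustrative example: a run with run["search_time_all"] == [2.5, 30.1] gets
# run["first_run_search_time"] == 2.5; runs without that attribute are passed
# through unchanged.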
exp.add_report(CompareConfigsReport(
compared_configs=grouped_configs_to_compare(configs.configs_optimal_ipc()),
attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["first_run_search_time"],
filter=add_first_run_search_time,
),
outfile="issue425-opt-compare-portfolio-configs.html"
)
exp()
| 1,764 | 32.301887 | 113 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue425/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from downward.experiments import DownwardExperiment, _get_rev_nick
from downward.checkouts import Translator, Preprocessor, Planner
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (node.endswith("cluster.bc2.ch") or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and
not is_running_on_cluster())
class IssueExperiment(DownwardExperiment):
"""Wrapper for DownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
# TODO: Once we have reference results, we should add "quality".
# TODO: Add something about errors/exit codes.
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"plan_length",
]
def __init__(self, configs, suite, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
test_suite=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
*configs* must be a non-empty dict of {nick: cmdline} pairs
that sets the planner configurations to test. ::
IssueExperiment(configs={
"lmcut": ["--search", "astar(lmcut())"],
"ipdb": ["--search", "astar(ipdb())"]})
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(suite=suites.suite_all())
IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
If *repo* is specified, it must be the path to the root of a
local Fast Downward repository. If omitted, the repository
is derived automatically from the main script's path. Example::
script = /path/to/fd-repo/experiments/issue123/exp01.py -->
repo = /path/to/fd-repo
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"])
If *search_revisions* is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All runs use the
translator and preprocessor component of the first
revision. ::
IssueExperiment(search_revisions=["default", "issue123"])
If you really need to specify the (translator, preprocessor,
planner) triples manually, use the *combinations* parameter
from the base class (might be deprecated soon). The options
*revisions*, *search_revisions* and *combinations* can be
freely mixed, but at least one of them must be given.
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment()
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
kwargs.setdefault("combinations", [])
if not any([revisions, search_revisions, kwargs["combinations"]]):
raise ValueError('At least one of "revisions", "search_revisions" '
'or "combinations" must be given')
if revisions:
kwargs["combinations"].extend([
(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions])
if search_revisions:
base_rev = search_revisions[0]
# Use the same nick for all parts to get short revision nick.
kwargs["combinations"].extend([
(Translator(repo, base_rev, nick=rev),
Preprocessor(repo, base_rev, nick=rev),
Planner(repo, rev, nick=rev))
for rev in search_revisions])
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
self._config_nicks = []
for nick, config in configs.items():
self.add_config(nick, config)
self.add_suite(suite)
@property
def revision_nicks(self):
# TODO: Once the add_algorithm() API is available we should get
# rid of the call to _get_rev_nick() and avoid inspecting the
# list of combinations by setting and saving the algorithm nicks.
return [_get_rev_nick(*combo) for combo in self.combinations]
def add_config(self, nick, config, timeout=None):
DownwardExperiment.add_config(self, nick, config, timeout=timeout)
self._config_nicks.append(nick)
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = get_experiment_name() + "." + report.output_format
self.add_report(report, outfile=outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revision triples. Each report pairs up the runs of the same
config and lists the two absolute attribute values and their
difference for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareRevisionsReport
class. If the keyword argument *attributes* is not
specified, a default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revision_nicks, 2):
report = CompareRevisionsReport(rev1, rev2, **kwargs)
outfile = os.path.join(self.eval_dir,
"%s-%s-compare.html" % (rev1, rev2))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def is_portfolio(config_nick):
return "fdss" in config_nick
def make_scatter_plots():
for config_nick in self._config_nicks:
for rev1, rev2 in itertools.combinations(
self.revision_nicks, 2):
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
if is_portfolio(config_nick):
valid_attributes = [
attr for attr in attributes
if attr in self.PORTFOLIO_ATTRIBUTES]
else:
valid_attributes = attributes
for attribute in valid_attributes:
name = "-".join([rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir, os.path.join(scatter_dir, name))
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,741 | 35.614943 | 79 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue425/sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites, configs
from downward.reports.compare import CompareConfigsReport
import common_setup
REVISIONS = ["issue425-base", "issue425-v1"]
exp = common_setup.IssueExperiment(
search_revisions=REVISIONS,
configs=configs.default_configs_satisficing(),
suite=suites.suite_satisficing_with_ipc11(),
limits={"search_time": 300}
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
def grouped_configs_to_compare(config_nicks):
grouped_configs = []
for config_nick in config_nicks:
col_names = ['%s-%s' % (r, config_nick) for r in REVISIONS]
grouped_configs.append((col_names[0], col_names[1],
'Diff - %s' % config_nick))
return grouped_configs
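# Illustrative example (the config nick is made up): with REVISIONS as above,
# grouped_configs_to_compare(["astar_lmcut"]) returns
# [("issue425-base-astar_lmcut", "issue425-v1-astar_lmcut", "Diff - astar_lmcut")].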
exp.add_report(CompareConfigsReport(
compared_configs=grouped_configs_to_compare(configs.configs_satisficing_core()),
attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES,
),
outfile="issue425-sat-compare-core-configs.html"
)
def add_first_run_search_time(run):
if run.get("search_time_all", []):
run["first_run_search_time"] = run["search_time_all"][0]
return run
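# Illustrative example: a run with run["search_time_all"] == [2.5, 30.1] gets
# run["first_run_search_time"] == 2.5; runs without that attribute are passed
# through unchanged.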
exp.add_report(CompareConfigsReport(
compared_configs=grouped_configs_to_compare(configs.configs_satisficing_ipc()),
attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["first_run_search_time"],
filter=add_first_run_search_time,
),
outfile="issue425-sat-compare-portfolio-configs.html"
)
exp()
| 1,671 | 33.122449 | 113 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue416/v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
from common_setup_no_benchmarks import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
def main(revisions=None):
suite = suites.suite_optimal_with_ipc11()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())']),
IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']),
IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']),
IssueConfig('astar-seq_opt_bjolp', ['--search', 'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true), mpd=true)']),
}
exp = IssueExperiment(
benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/",
revisions=revisions,
configs=configs,
suite=suite,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
for config in configs:
nick = config.nick
exp.add_report(
RelativeScatterPlotReport(
attributes=["memory"],
filter_config=["issue416-v2-base-%s" % nick, "issue416-v2-%s" % nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue416_base_v2_memory_%s.png' % nick
)
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
filter_config=["issue416-v2-base-%s" % nick, "issue416-v2-%s" % nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue416_base_v2_total_time_%s.png' % nick
)
exp()
main(revisions=['issue416-v2-base', 'issue416-v2'])
| 1,806 | 31.854545 | 135 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue416/common_setup_no_benchmarks.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, benchmarks_dir, suite, revisions=[], configs={},
grid_priority=None, path=None, test_suite=None,
email=None, processes=None,
**kwargs):
"""
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
*configs* must be a non-empty list of IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(..., suite=suites.suite_all())
IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(..., suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(..., grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])
If *email* is specified, it should be an email address. This
email address will be notified upon completion of the experiments
if it is run on the cluster.
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment(processes=processes)
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(
priority=grid_priority, email=email)
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
repo = get_repo_base()
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
repo,
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self.add_suite(benchmarks_dir, suite)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
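    # Illustrative example ("seq-sat-fdss-1" is a made-up nick): it contains
    # "fdss", so it counts as a portfolio and ["coverage", "expansions"] is
    # reduced to ["coverage"]; non-portfolio nicks keep all attributes.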
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(self.eval_dir,
get_experiment_name() + "." +
report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(Step('publish-absolute-report',
subprocess.call,
['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ "." + report.output_format)
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ ".html")
subprocess.call(['publish', outfile])
self.add_step(Step("make-comparison-tables", make_comparison_tables))
self.add_step(Step("publish-comparison-tables", publish_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,496 | 33.907821 | 83 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue416/v2-lama.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
from common_setup_no_benchmarks import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
def main(revisions=None):
suite = suites.suite_satisficing_with_ipc11()
configs = {
IssueConfig('seq_sat_lama_2011', [], driver_options=['--alias', 'seq-sat-lama-2011']),
IssueConfig('lama_first', [], driver_options=['--alias', 'lama-first']),
IssueConfig('ehc_lm_zhu', ['--search', 'ehc(lmcount(lm_zg()))']),
}
exp = IssueExperiment(
benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/",
revisions=revisions,
configs=configs,
suite=suite,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
for config in configs:
nick = config.nick
exp.add_report(
RelativeScatterPlotReport(
attributes=["memory"],
filter_config=["issue416-v2-base-%s" % nick, "issue416-v2-%s" % nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue416_base_v2_memory_%s.png' % nick
)
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
filter_config=["issue416-v2-base-%s" % nick, "issue416-v2-%s" % nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue416_base_v2_total_time_%s.png' % nick
)
exp()
main(revisions=['issue416-v2-base', 'issue416-v2'])
| 1,722 | 30.907407 | 94 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue416/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
def main(revisions=None):
suite = suites.suite_optimal_with_ipc11()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())']),
IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']),
IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']),
IssueConfig('astar-seq_opt_bjolp', ['--search', 'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true), mpd=true)']),
}
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suite,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
for config in configs:
nick = config.nick
exp.add_report(
RelativeScatterPlotReport(
attributes=["memory"],
filter_config=["issue416-base-%s" % nick, "issue416-v1-%s" % nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue416_base_v1_memory_%s.png' % nick
)
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
filter_config=["issue416-base-%s" % nick, "issue416-v1-%s" % nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue416_base_v1_total_time_%s.png' % nick
)
exp()
main(revisions=['issue416-base', 'issue416-v1'])
| 1,719 | 30.851852 | 135 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue416/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Wrapper for FastDownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, suite, revisions=[], configs={}, grid_priority=None,
path=None, test_suite=None, email=None, processes=1,
**kwargs):
"""Create a DownwardExperiment with some convenience features.
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
*configs* must be a non-empty list of IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(..., suite=suites.suite_all())
IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(..., suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(..., grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])
If *email* is specified, it should be an email address. This
email address will be notified upon completion of the experiments
if it is run on the cluster.
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment(processes=processes)
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(
priority=grid_priority, email=email)
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
repo = get_repo_base()
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
repo,
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self.add_suite(os.path.join(repo, "benchmarks"), suite)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
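    # Illustrative example ("seq-sat-fdss-1" is a made-up nick): it contains
    # "fdss", so it counts as a portfolio and ["coverage", "expansions"] is
    # reduced to ["coverage"]; non-portfolio nicks keep all attributes.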
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(self.eval_dir,
get_experiment_name() + "." +
report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(Step('publish-absolute-report',
subprocess.call,
['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ "." + report.output_format)
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ ".html")
subprocess.call(['publish', outfile])
self.add_step(Step("make-comparison-tables", make_comparison_tables))
self.add_step(Step("publish-comparison-tables", publish_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,539 | 34.027933 | 83 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue416/v1-lama.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
def main(revisions=None):
suite = suites.suite_satisficing_with_ipc11()
configs = {
IssueConfig('seq_sat_lama_2011', [], driver_options=['--alias', 'seq-sat-lama-2011']),
IssueConfig('lama_first', [], driver_options=['--alias', 'lama-first']),
IssueConfig('ehc_lm_zhu', ['--search', 'ehc(lmcount(lm_zg()))']),
}
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suite,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
for config in configs:
nick = config.nick
exp.add_report(
RelativeScatterPlotReport(
attributes=["memory"],
filter_config=["issue416-base-%s" % nick, "issue416-v1-%s" % nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue416_base_v1_memory_%s.png' % nick
)
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
filter_config=["issue416-base-%s" % nick, "issue416-v1-%s" % nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue416_base_v1_total_time_%s.png' % nick
)
exp()
main(revisions=['issue416-base', 'issue416-v1'])
| 1,635 | 29.867925 | 94 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue416/relativescatter.py
|
# -*- coding: utf-8 -*-
#
# downward uses the lab package to conduct experiments with the
# Fast Downward planning system.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
import os
from lab import tools
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
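        # For example, if report.ylim_top were 32, tick_step would be 2.0 and
        # the resulting ticks 1/32, 1/16, ..., 1, ..., 16, 32 (eleven in total).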
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
    Generate a scatter plot that shows how a specific attribute compares
    between two configurations. The attribute value in config 1 is shown
    on the x-axis and the ratio of the value in config 2 to the value in
    config 1 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['config'] == self.configs[0] and
run2['config'] == self.configs[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.configs[0], val1)
assert val2 > 0, (domain, problem, self.configs[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlots use log-scaling on the x-axis by default.
default_xscale = 'log'
if self.attribute and self.attribute in self.LINEAR:
default_xscale = 'linear'
PlotReport._set_scales(self, xscale or default_xscale, 'log')
| 4,690 | 35.937008 | 84 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue658/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
    print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks')
suite=suites.suite_optimal_strips()
configs = {
IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']),
IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']),
IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']),
IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']),
IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']),
IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
exp.add_command('ms-parser', ['ms_parser'])
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm])
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
extra_attributes = [
perfect_heuristic,
proved_unsolvability,
actual_search_time,
ms_construction_time,
ms_abstraction_constructed,
ms_final_size,
ms_out_of_memory,
ms_out_of_time,
search_out_of_memory,
search_out_of_time,
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)
exp.add_comparison_table_step()
if matplotlib:
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
main(revisions=['issue658-base', 'issue658-v1'])
| 5,267 | 56.89011 | 293 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue658/v2-dfp.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
    print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks')
suite=suites.suite_optimal_strips()
configs = {
IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']),
IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
exp.add_command('ms-parser', ['ms_parser'])
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm])
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
extra_attributes = [
perfect_heuristic,
proved_unsolvability,
actual_search_time,
ms_construction_time,
ms_abstraction_constructed,
ms_final_size,
ms_out_of_memory,
ms_out_of_time,
search_out_of_memory,
search_out_of_time,
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)
exp.add_comparison_table_step()
if matplotlib:
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
main(revisions=['issue658-base', 'issue658-v2'])
| 3,611 | 41.494118 | 259 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue658/suites.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import textwrap
HELP = "Convert suite name to list of domains or tasks."
def suite_alternative_formulations():
return ['airport-adl', 'no-mprime', 'no-mystery']
def suite_ipc98_to_ipc04_adl():
return [
'assembly', 'miconic-fulladl', 'miconic-simpleadl',
'optical-telegraphs', 'philosophers', 'psr-large',
'psr-middle', 'schedule',
]
def suite_ipc98_to_ipc04_strips():
return [
'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid',
'gripper', 'logistics00', 'logistics98', 'miconic', 'movie',
'mprime', 'mystery', 'pipesworld-notankage', 'psr-small',
'satellite', 'zenotravel',
]
def suite_ipc98_to_ipc04():
# All IPC1-4 domains, including the trivial Movie.
return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips())
def suite_ipc06_adl():
return [
'openstacks',
'pathways',
'trucks',
]
def suite_ipc06_strips_compilations():
return [
'openstacks-strips',
'pathways-noneg',
'trucks-strips',
]
def suite_ipc06_strips():
return [
'pipesworld-tankage',
'rovers',
'storage',
'tpp',
]
def suite_ipc06():
return sorted(suite_ipc06_adl() + suite_ipc06_strips())
def suite_ipc08_common_strips():
return [
'parcprinter-08-strips',
'pegsol-08-strips',
'scanalyzer-08-strips',
]
def suite_ipc08_opt_adl():
return ['openstacks-opt08-adl']
def suite_ipc08_opt_strips():
return sorted(suite_ipc08_common_strips() + [
'elevators-opt08-strips',
'openstacks-opt08-strips',
'sokoban-opt08-strips',
'transport-opt08-strips',
'woodworking-opt08-strips',
])
def suite_ipc08_opt():
return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl())
def suite_ipc08_sat_adl():
return ['openstacks-sat08-adl']
def suite_ipc08_sat_strips():
return sorted(suite_ipc08_common_strips() + [
# Note: cyber-security is missing.
'elevators-sat08-strips',
'openstacks-sat08-strips',
'sokoban-sat08-strips',
'transport-sat08-strips',
'woodworking-sat08-strips',
])
def suite_ipc08_sat():
return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl())
def suite_ipc08():
return sorted(set(suite_ipc08_opt() + suite_ipc08_sat()))
def suite_ipc11_opt():
return [
'barman-opt11-strips',
'elevators-opt11-strips',
'floortile-opt11-strips',
'nomystery-opt11-strips',
'openstacks-opt11-strips',
'parcprinter-opt11-strips',
'parking-opt11-strips',
'pegsol-opt11-strips',
'scanalyzer-opt11-strips',
'sokoban-opt11-strips',
'tidybot-opt11-strips',
'transport-opt11-strips',
'visitall-opt11-strips',
'woodworking-opt11-strips',
]
def suite_ipc11_sat():
return [
'barman-sat11-strips',
'elevators-sat11-strips',
'floortile-sat11-strips',
'nomystery-sat11-strips',
'openstacks-sat11-strips',
'parcprinter-sat11-strips',
'parking-sat11-strips',
'pegsol-sat11-strips',
'scanalyzer-sat11-strips',
'sokoban-sat11-strips',
'tidybot-sat11-strips',
'transport-sat11-strips',
'visitall-sat11-strips',
'woodworking-sat11-strips',
]
def suite_ipc11():
return sorted(suite_ipc11_opt() + suite_ipc11_sat())
def suite_ipc14_agl_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_agl_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-agl14-strips',
'openstacks-agl14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_agl():
return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips())
def suite_ipc14_mco_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_mco_strips():
return [
'barman-mco14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-mco14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_mco():
return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips())
def suite_ipc14_opt_adl():
return [
'cavediving-14-adl',
'citycar-opt14-adl',
'maintenance-opt14-adl',
]
def suite_ipc14_opt_strips():
return [
'barman-opt14-strips',
'childsnack-opt14-strips',
'floortile-opt14-strips',
'ged-opt14-strips',
'hiking-opt14-strips',
'openstacks-opt14-strips',
'parking-opt14-strips',
'tetris-opt14-strips',
'tidybot-opt14-strips',
'transport-opt14-strips',
'visitall-opt14-strips',
]
def suite_ipc14_opt():
return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips())
def suite_ipc14_sat_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_sat_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_sat():
return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips())
def suite_ipc14():
return sorted(set(
suite_ipc14_agl() + suite_ipc14_mco() +
suite_ipc14_opt() + suite_ipc14_sat()))
def suite_unsolvable():
return sorted(
['mystery:prob%02d.pddl' % index
for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] +
['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl'])
def suite_optimal_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_opt_adl() + suite_ipc14_opt_adl())
def suite_optimal_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() +
suite_ipc11_opt() + suite_ipc14_opt_strips())
def suite_optimal():
return sorted(suite_optimal_adl() + suite_optimal_strips())
def suite_satisficing_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_sat_adl() + suite_ipc14_sat_adl())
def suite_satisficing_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() +
suite_ipc11_sat() + suite_ipc14_sat_strips())
def suite_satisficing():
return sorted(suite_satisficing_adl() + suite_satisficing_strips())
def suite_all():
return sorted(
suite_ipc98_to_ipc04() + suite_ipc06() +
suite_ipc06_strips_compilations() + suite_ipc08() +
suite_ipc11() + suite_ipc14() + suite_alternative_formulations())
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("suite", help="suite name")
return parser.parse_args()
def main():
prefix = "suite_"
suite_names = [
name[len(prefix):] for name in sorted(globals().keys())
if name.startswith(prefix)]
parser = argparse.ArgumentParser(description=HELP)
parser.add_argument("suite", choices=suite_names, help="suite name")
parser.add_argument(
"--width", default=72, type=int,
help="output line width (default: %(default)s). Use 1 for single "
"column.")
args = parser.parse_args()
suite_func = globals()[prefix + args.suite]
print(textwrap.fill(
str(suite_func()),
width=args.width,
break_long_words=False,
break_on_hyphens=False))
if __name__ == "__main__":
main()
| 8,551 | 23.364672 | 77 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue658/ms-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
parser.add_pattern('ms_final_size', r'Final transition system size: (\d+)', required=False, type=int)
parser.add_pattern('ms_construction_time', r'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float)
parser.add_pattern('ms_memory_delta', r'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int)
parser.add_pattern('actual_search_time', r'Actual search time: (.+)s \[t=.+s\]', required=False, type=float)
def check_ms_constructed(content, props):
ms_construction_time = props.get('ms_construction_time')
abstraction_constructed = False
if ms_construction_time is not None:
abstraction_constructed = True
props['ms_abstraction_constructed'] = abstraction_constructed
parser.add_function(check_ms_constructed)
def check_planner_exit_reason(content, props):
ms_abstraction_constructed = props.get('ms_abstraction_constructed')
error = props.get('error')
if error != 'none' and error != 'timeout' and error != 'out-of-memory':
        print('error: %s' % error)
return
# Check whether merge-and-shrink computation or search ran out of
# time or memory.
ms_out_of_time = False
ms_out_of_memory = False
search_out_of_time = False
search_out_of_memory = False
if ms_abstraction_constructed == False:
if error == 'timeout':
ms_out_of_time = True
elif error == 'out-of-memory':
ms_out_of_memory = True
elif ms_abstraction_constructed == True:
if error == 'timeout':
search_out_of_time = True
elif error == 'out-of-memory':
search_out_of_memory = True
props['ms_out_of_time'] = ms_out_of_time
props['ms_out_of_memory'] = ms_out_of_memory
props['search_out_of_time'] = search_out_of_time
props['search_out_of_memory'] = search_out_of_memory
parser.add_function(check_planner_exit_reason)
def check_perfect_heuristic(content, props):
plan_length = props.get('plan_length')
expansions = props.get('expansions')
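    # The heuristic is counted as "perfect" when the search expanded exactly
    # plan_length + 1 states, i.e. (presumably) only the states along the
    # reported plan, including the initial state.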
    if plan_length is not None:
perfect_heuristic = False
if plan_length + 1 == expansions:
perfect_heuristic = True
props['perfect_heuristic'] = perfect_heuristic
parser.add_function(check_perfect_heuristic)
def check_proved_unsolvability(content, props):
proved_unsolvability = False
if props['coverage'] == 0:
for line in content.splitlines():
if line == 'Completely explored state space -- no solution!':
proved_unsolvability = True
break
props['proved_unsolvability'] = proved_unsolvability
parser.add_function(check_proved_unsolvability)
parser.parse()
| 2,784 | 37.150685 | 135 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue658/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, benchmarks_dir, suite, revisions=[], configs={},
grid_priority=None, path=None, test_suite=None,
email=None, processes=None,
**kwargs):
"""
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
*configs* must be a non-empty list of IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(..., suite=suites.suite_all())
IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(..., suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(..., grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
Specify *test_suite* to set the benchmarks for experiment test
runs. By default the first gripper task is used.
IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])
If *email* is specified, it should be an email address. This
email address will be notified upon completion of the experiments
if it is run on the cluster.
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment(processes=processes)
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(
priority=grid_priority, email=email)
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
repo = get_repo_base()
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
repo,
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self.add_suite(benchmarks_dir, suite)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
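        # Portfolio configurations are recognized purely by the "fdss"
        # substring in their nick.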
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(self.eval_dir,
get_experiment_name() + "." +
report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(Step('publish-absolute-report',
subprocess.call,
['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ "." + report.output_format)
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ ".html")
subprocess.call(['publish', outfile])
self.add_step(Step("make-comparison-tables", make_comparison_tables))
self.add_step(Step("publish-comparison-tables", publish_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,496 | 33.907821 | 83 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue658/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
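        # (tick_step is the 5th root of ylim_top, so the ticks are evenly
        # spaced on a log scale and symmetric around 1.)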
yticks = []
tick_step = report.ylim_top**(1/5.0)
        for i in range(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows how a specific attribute in two
configurations. The attribute value in config 1 is shown on the
x-axis and the relation to the value in config 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['config'] == self.configs[0] and
run2['config'] == self.configs[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.configs[0], val1)
assert val2 > 0, (domain, problem, self.configs[1], val2)
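            # x-axis: absolute value under the first config; y-axis: the
            # second config's value relative to the first (val2 / val1).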
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlots use log-scaling on the x-axis by default.
default_xscale = 'log'
if self.attribute and self.attribute in self.LINEAR:
default_xscale = 'linear'
PlotReport._set_scales(self, xscale or default_xscale, 'log')
| 3,921 | 35.654206 | 84 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue915/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute, geometric_mean
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue915-v1"]
BUILDS = ["debug"]
CONFIG_NICKS = [
('b50k-dfp-t900', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']),
('b50k-rl-t900', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']),
('b50k-sccs-dfp-t900', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']),
]
CONFIGS = [
IssueConfig(
config_nick,
config,
build_options=[build],
driver_options=["--build", build])
for build in BUILDS
for config_nick, config in CONFIG_NICKS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser('ms-parser.py')
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean])
ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
extra_attributes = [
perfect_heuristic,
ms_construction_time,
ms_atomic_construction_time,
ms_abstraction_constructed,
ms_atomic_fts_constructed,
ms_out_of_memory,
ms_out_of_time,
search_out_of_memory,
search_out_of_time,
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)
exp.add_absolute_report_step(attributes=attributes)
exp.run_steps()
| 4,038 | 41.968085 | 479 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue915/ms-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
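# All patterns are optional (required=False) because a run may fail before the
# merge-and-shrink statistics are printed.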
parser.add_pattern('ms_construction_time', r'Merge-and-shrink algorithm runtime: (.+)s', required=False, type=float)
parser.add_pattern('ms_atomic_construction_time', r'M&S algorithm timer: (.+)s \(after computation of atomic factors\)', required=False, type=float)
parser.add_pattern('ms_memory_delta', r'Final peak memory increase of merge-and-shrink algorithm: (\d+) KB', required=False, type=int)
def check_ms_constructed(content, props):
ms_construction_time = props.get('ms_construction_time')
abstraction_constructed = False
if ms_construction_time is not None:
abstraction_constructed = True
props['ms_abstraction_constructed'] = abstraction_constructed
parser.add_function(check_ms_constructed)
def check_atomic_fts_constructed(content, props):
ms_atomic_construction_time = props.get('ms_atomic_construction_time')
ms_atomic_fts_constructed = False
if ms_atomic_construction_time is not None:
ms_atomic_fts_constructed = True
props['ms_atomic_fts_constructed'] = ms_atomic_fts_constructed
parser.add_function(check_atomic_fts_constructed)
def check_planner_exit_reason(content, props):
ms_abstraction_constructed = props.get('ms_abstraction_constructed')
error = props.get('error')
if error != 'success' and error != 'search-out-of-time' and error != 'search-out-of-memory':
        print('error: %s' % error)
return
# Check whether merge-and-shrink computation or search ran out of
# time or memory.
ms_out_of_time = False
ms_out_of_memory = False
search_out_of_time = False
search_out_of_memory = False
if ms_abstraction_constructed == False:
if error == 'search-out-of-time':
ms_out_of_time = True
elif error == 'search-out-of-memory':
ms_out_of_memory = True
elif ms_abstraction_constructed == True:
if error == 'search-out-of-time':
search_out_of_time = True
elif error == 'search-out-of-memory':
            search_out_of_memory = True
props['ms_out_of_time'] = ms_out_of_time
props['ms_out_of_memory'] = ms_out_of_memory
props['search_out_of_time'] = search_out_of_time
props['search_out_of_memory'] = search_out_of_memory
parser.add_function(check_planner_exit_reason)
def check_perfect_heuristic(content, props):
plan_length = props.get('plan_length')
expansions = props.get('expansions')
    if plan_length is not None:
perfect_heuristic = False
if plan_length + 1 == expansions:
perfect_heuristic = True
props['perfect_heuristic'] = perfect_heuristic
parser.add_function(check_perfect_heuristic)
parser.parse()
| 2,775 | 38.098592 | 147 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue915/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
'data-network-opt18-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'organic-synthesis-opt18-strips',
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'snake-opt18-strips', 'sokoban-opt08-strips',
'sokoban-opt11-strips', 'spider-opt18-strips', 'storage',
'termes-opt18-strips', 'tetris-opt14-strips',
'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt08-strips', 'transport-opt11-strips',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
'visitall-opt14-strips', 'woodworking-opt08-strips',
'woodworking-opt11-strips', 'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'agricola-sat18-strips', 'airport', 'assembly',
'barman-sat11-strips', 'barman-sat14-strips', 'blocks',
'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl',
'data-network-sat18-strips', 'depot', 'driverlog',
'elevators-sat08-strips', 'elevators-sat11-strips',
'flashfill-sat18-adl', 'floortile-sat11-strips',
'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
'miconic-simpleadl', 'movie', 'mprime', 'mystery',
'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips',
'sokoban-sat11-strips', 'spider-sat18-strips', 'storage',
'termes-sat18-strips', 'tetris-sat14-strips',
'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp',
'transport-sat08-strips', 'transport-sat11-strips',
'transport-sat14-strips', 'trucks', 'trucks-strips',
'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print("Make scatter plot for", name)
algo1 = get_algo_nick(rev1, config_nick)
algo2 = get_algo_nick(rev2, config_nick)
report = report_class(
filter_algorithm=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"])
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 14,744 | 36.423858 | 82 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue915/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
        for i in range(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
| 3,875 | 35.566038 | 78 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue870/base-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue870-base"]
BUILDS = ["release64", "release64dynamic"]
CONFIG_NICKS = [
("blind", ["--search", "astar(blind())"]),
("lmcut", ["--search", "astar(lmcut())"]),
#("seq", ["--search", "astar(operatorcounting([state_equation_constraints()]))"]),
]
CONFIGS = [
IssueConfig(
config_nick + ":" + build,
config,
build_options=[build],
driver_options=["--build", build])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_NICKS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parse_again_step()
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
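# Compare each configuration with itself across the two builds
# (static "release64" vs. dynamic "release64dynamic") for every revision.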
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{nick}:{build1}".format(**locals()),
"{rev}-{nick}:{build2}".format(**locals()),
"Diff ({rev}-{nick})".format(**locals()))
for build1, build2 in itertools.combinations(BUILDS, 2)
for nick, config in CONFIG_NICKS]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue839-opt-static-vs-dynamic")
exp.run_steps()
| 2,330 | 27.777778 | 86 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue870/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
'data-network-opt18-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'organic-synthesis-opt18-strips',
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'snake-opt18-strips', 'sokoban-opt08-strips',
'sokoban-opt11-strips', 'spider-opt18-strips', 'storage',
'termes-opt18-strips', 'tetris-opt14-strips',
'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt08-strips', 'transport-opt11-strips',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
'visitall-opt14-strips', 'woodworking-opt08-strips',
'woodworking-opt11-strips', 'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'agricola-sat18-strips', 'airport', 'assembly',
'barman-sat11-strips', 'barman-sat14-strips', 'blocks',
'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl',
'data-network-sat18-strips', 'depot', 'driverlog',
'elevators-sat08-strips', 'elevators-sat11-strips',
'flashfill-sat18-adl', 'floortile-sat11-strips',
'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
'miconic-simpleadl', 'movie', 'mprime', 'mystery',
'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips',
'sokoban-sat11-strips', 'spider-sat18-strips', 'storage',
'termes-sat18-strips', 'tetris-sat14-strips',
'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp',
'transport-sat08-strips', 'transport-sat11-strips',
'transport-sat14-strips', 'trucks', 'trucks-strips',
'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
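if __name__ == "__main__":
    # Editor's usage sketch -- not part of the original module. It shows how a
    # typical issue870 experiment script wires the convenience steps above
    # together (compare the sibling v1-seq.py). The revision names, the config,
    # the benchmark task and the DOWNWARD_BENCHMARKS environment variable are
    # placeholder assumptions, not values prescribed by this module.
    from lab.environments import LocalEnvironment
    exp = IssueExperiment(
        revisions=["issue870-base", "issue870-v1"],
        configs=[IssueConfig("blind", ["--search", "astar(blind())"])],
        environment=LocalEnvironment(processes=1))
    exp.add_suite(os.environ["DOWNWARD_BENCHMARKS"], ["gripper:prob01.pddl"])
    exp.add_parser(exp.EXITCODE_PARSER)
    exp.add_parser(exp.SINGLE_SEARCH_PARSER)
    exp.add_parser(exp.PLANNER_PARSER)
    exp.add_step("build", exp.build)
    exp.add_step("start", exp.start_runs)
    exp.add_fetcher(name="fetch")
    exp.add_absolute_report_step()
    exp.add_comparison_table_step()
    exp.add_scatter_plot_step(relative=True)
    exp.run_steps()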
| 14,786 | 36.435443 | 82 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue870/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
            raise ValueError("output format 'tex' is not supported")
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
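if __name__ == "__main__":
    # Editor's sketch -- not part of the original module. It reproduces, on toy
    # numbers, the relative value that _fill_categories() plots: a run where
    # algorithm 1 needs 100 expansions and algorithm 2 needs 250 is drawn at
    # x = 100 (absolute value of algorithm 1) and y = 2.5 (algorithm 2 needs
    # 2.5 times as many); y < 1 would mean algorithm 2 did better.
    val1, val2 = 100.0, 250.0
    x, y = val1, val2 / float(val1)
    assert (x, y) == (100.0, 2.5)
    # The y-limits start at the values used in _fill_categories() and are then
    # widened so that the plotted range is symmetric around y = 1 on the log scale.
    ylim_bottom, ylim_top = 2, 0.5
    ylim_top = max(ylim_top, y)
    ylim_bottom = min(ylim_bottom, y)
    if ylim_bottom < 1:
        ylim_top = max(ylim_top, 1 / float(ylim_bottom))
    if ylim_top > 1:
        ylim_bottom = min(ylim_bottom, 1 / float(ylim_top))
    assert (ylim_bottom, ylim_top) == (0.4, 2.5)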
| 3,875 | 35.566038 | 78 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue870/v1-seq.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
BUILDS_AND_REVISIONS = [("release64", "issue870-base"), ("release64dynamic", "issue870-v1")]
CONFIG_NICKS = [
("seq", ["--search", "astar(operatorcounting([state_equation_constraints()]))"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=[],
configs=[],
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
for build, rev in BUILDS_AND_REVISIONS:
for config_nick, config in CONFIG_NICKS:
exp.add_algorithm(
":".join([config_nick, build, rev]),
common_setup.get_repo_base(),
rev,
config,
build_options=[build],
driver_options=["--build", build])
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
algorithm_pairs = [
("seq:release64:issue870-base",
"seq:release64dynamic:issue870-v1",
"Diff (seq)")
]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue870-seq-static-vs-dynamic")
exp.run_steps()
| 2,087 | 28 | 92 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue660/v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks')
suite = [
'assembly',
'miconic-fulladl',
'openstacks',
'openstacks-sat08-adl',
'optical-telegraphs',
'philosophers',
'psr-large',
'psr-middle',
'trucks',
]
configs = {
IssueConfig('lazy-greedy-ff', [
'--heuristic',
'h=ff()',
'--search',
'lazy_greedy(h, preferred=h)'
]),
IssueConfig('lama-first', [],
driver_options=['--alias', 'lama-first']
),
IssueConfig('eager_greedy_cg', [
'--heuristic',
'h=cg()',
'--search',
'eager_greedy(h, preferred=h)'
]),
IssueConfig('eager_greedy_cea', [
'--heuristic',
'h=cea()',
'--search',
'eager_greedy(h, preferred=h)'
]),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
if matplotlib:
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
main(revisions=['issue660-v2-base', 'issue660-v2'])
| 2,211 | 26.308642 | 94 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue660/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks')
suite=suites.suite_satisficing()
configs = {
IssueConfig('lazy-greedy-ff', [
'--heuristic',
'h=ff()',
'--search',
'lazy_greedy(h, preferred=h)'
]),
IssueConfig('lama-first', [],
driver_options=['--alias', 'lama-first']
),
IssueConfig('eager_greedy_cg', [
'--heuristic',
'h=cg()',
'--search',
'eager_greedy(h, preferred=h)'
]),
IssueConfig('eager_greedy_cea', [
'--heuristic',
'h=cea()',
'--search',
'eager_greedy(h, preferred=h)'
]),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step(
filter_domain=[
'assembly',
'miconic-fulladl',
'openstacks',
'openstacks-sat08-adl',
'optical-telegraphs',
'philosophers',
'psr-large',
'psr-middle',
'trucks',
],
)
if matplotlib:
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
main(revisions=['issue660-base', 'issue660-v1'])
| 2,301 | 26.73494 | 94 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue660/suites.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import textwrap
HELP = "Convert suite name to list of domains or tasks."
def suite_alternative_formulations():
return ['airport-adl', 'no-mprime', 'no-mystery']
def suite_ipc98_to_ipc04_adl():
return [
'assembly', 'miconic-fulladl', 'miconic-simpleadl',
'optical-telegraphs', 'philosophers', 'psr-large',
'psr-middle', 'schedule',
]
def suite_ipc98_to_ipc04_strips():
return [
'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid',
'gripper', 'logistics00', 'logistics98', 'miconic', 'movie',
'mprime', 'mystery', 'pipesworld-notankage', 'psr-small',
'satellite', 'zenotravel',
]
def suite_ipc98_to_ipc04():
# All IPC1-4 domains, including the trivial Movie.
return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips())
def suite_ipc06_adl():
return [
'openstacks',
'pathways',
'trucks',
]
def suite_ipc06_strips_compilations():
return [
'openstacks-strips',
'pathways-noneg',
'trucks-strips',
]
def suite_ipc06_strips():
return [
'pipesworld-tankage',
'rovers',
'storage',
'tpp',
]
def suite_ipc06():
return sorted(suite_ipc06_adl() + suite_ipc06_strips())
def suite_ipc08_common_strips():
return [
'parcprinter-08-strips',
'pegsol-08-strips',
'scanalyzer-08-strips',
]
def suite_ipc08_opt_adl():
return ['openstacks-opt08-adl']
def suite_ipc08_opt_strips():
return sorted(suite_ipc08_common_strips() + [
'elevators-opt08-strips',
'openstacks-opt08-strips',
'sokoban-opt08-strips',
'transport-opt08-strips',
'woodworking-opt08-strips',
])
def suite_ipc08_opt():
return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl())
def suite_ipc08_sat_adl():
return ['openstacks-sat08-adl']
def suite_ipc08_sat_strips():
return sorted(suite_ipc08_common_strips() + [
# Note: cyber-security is missing.
'elevators-sat08-strips',
'openstacks-sat08-strips',
'sokoban-sat08-strips',
'transport-sat08-strips',
'woodworking-sat08-strips',
])
def suite_ipc08_sat():
return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl())
def suite_ipc08():
return sorted(set(suite_ipc08_opt() + suite_ipc08_sat()))
def suite_ipc11_opt():
return [
'barman-opt11-strips',
'elevators-opt11-strips',
'floortile-opt11-strips',
'nomystery-opt11-strips',
'openstacks-opt11-strips',
'parcprinter-opt11-strips',
'parking-opt11-strips',
'pegsol-opt11-strips',
'scanalyzer-opt11-strips',
'sokoban-opt11-strips',
'tidybot-opt11-strips',
'transport-opt11-strips',
'visitall-opt11-strips',
'woodworking-opt11-strips',
]
def suite_ipc11_sat():
return [
'barman-sat11-strips',
'elevators-sat11-strips',
'floortile-sat11-strips',
'nomystery-sat11-strips',
'openstacks-sat11-strips',
'parcprinter-sat11-strips',
'parking-sat11-strips',
'pegsol-sat11-strips',
'scanalyzer-sat11-strips',
'sokoban-sat11-strips',
'tidybot-sat11-strips',
'transport-sat11-strips',
'visitall-sat11-strips',
'woodworking-sat11-strips',
]
def suite_ipc11():
return sorted(suite_ipc11_opt() + suite_ipc11_sat())
def suite_ipc14_agl_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_agl_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-agl14-strips',
'openstacks-agl14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_agl():
return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips())
def suite_ipc14_mco_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_mco_strips():
return [
'barman-mco14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-mco14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_mco():
return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips())
def suite_ipc14_opt_adl():
return [
'cavediving-14-adl',
'citycar-opt14-adl',
'maintenance-opt14-adl',
]
def suite_ipc14_opt_strips():
return [
'barman-opt14-strips',
'childsnack-opt14-strips',
'floortile-opt14-strips',
'ged-opt14-strips',
'hiking-opt14-strips',
'openstacks-opt14-strips',
'parking-opt14-strips',
'tetris-opt14-strips',
'tidybot-opt14-strips',
'transport-opt14-strips',
'visitall-opt14-strips',
]
def suite_ipc14_opt():
return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips())
def suite_ipc14_sat_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_sat_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_sat():
return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips())
def suite_ipc14():
return sorted(set(
suite_ipc14_agl() + suite_ipc14_mco() +
suite_ipc14_opt() + suite_ipc14_sat()))
def suite_unsolvable():
return sorted(
['mystery:prob%02d.pddl' % index
for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] +
['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl'])
def suite_optimal_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_opt_adl() + suite_ipc14_opt_adl())
def suite_optimal_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() +
suite_ipc11_opt() + suite_ipc14_opt_strips())
def suite_optimal():
return sorted(suite_optimal_adl() + suite_optimal_strips())
def suite_satisficing_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_sat_adl() + suite_ipc14_sat_adl())
def suite_satisficing_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() +
suite_ipc11_sat() + suite_ipc14_sat_strips())
def suite_satisficing():
return sorted(suite_satisficing_adl() + suite_satisficing_strips())
def suite_all():
return sorted(
suite_ipc98_to_ipc04() + suite_ipc06() +
suite_ipc06_strips_compilations() + suite_ipc08() +
suite_ipc11() + suite_ipc14() + suite_alternative_formulations())
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("suite", help="suite name")
return parser.parse_args()
def main():
prefix = "suite_"
suite_names = [
name[len(prefix):] for name in sorted(globals().keys())
if name.startswith(prefix)]
parser = argparse.ArgumentParser(description=HELP)
parser.add_argument("suite", choices=suite_names, help="suite name")
parser.add_argument(
"--width", default=72, type=int,
help="output line width (default: %(default)s). Use 1 for single "
"column.")
args = parser.parse_args()
suite_func = globals()[prefix + args.suite]
print(textwrap.fill(
str(suite_func()),
width=args.width,
break_long_words=False,
break_on_hyphens=False))
if __name__ == "__main__":
main()
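# Example invocation (editor's note, not part of the original script):
#
#     python suites.py ipc11_opt
#
# prints the domain list returned by suite_ipc11_opt(), wrapped to the default
# line width of 72 characters.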
| 8,551 | 23.364672 | 77 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue660/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, benchmarks_dir, suite, revisions=[], configs={},
grid_priority=None, path=None, test_suite=None,
email=None, processes=None,
**kwargs):
"""
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
*configs* must be a non-empty list of IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(..., suite=suites.suite_all())
IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(..., suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(..., grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])
If *email* is specified, it should be an email address. This
email address will be notified upon completion of the experiments
if it is run on the cluster.
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment(processes=processes)
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(
priority=grid_priority, email=email)
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
repo = get_repo_base()
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
repo,
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self.add_suite(benchmarks_dir, suite)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(self.eval_dir,
get_experiment_name() + "." +
report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(Step('publish-absolute-report',
subprocess.call,
['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ "." + report.output_format)
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ ".html")
subprocess.call(['publish', outfile])
self.add_step(Step("make-comparison-tables", make_comparison_tables))
self.add_step(Step("publish-comparison-tables", publish_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,496 | 33.907821 | 83 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue660/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
    Generate a scatter plot that shows how a specific attribute compares
    across two configurations. The attribute value in config 1 is shown
    on the x-axis and its relation to the value in config 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
            raise ValueError("output format 'tex' is not supported")
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['config'] == self.configs[0] and
run2['config'] == self.configs[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.configs[0], val1)
assert val2 > 0, (domain, problem, self.configs[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlots use log-scaling on the x-axis by default.
default_xscale = 'log'
if self.attribute and self.attribute in self.LINEAR:
default_xscale = 'linear'
PlotReport._set_scales(self, xscale or default_xscale, 'log')
| 3,921 | 35.654206 | 84 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue602/v1-agl.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
SUITE_AGL14 = [
'barman-agl14-strips',
'cavediving-agl14-adl',
'childsnack-agl14-strips',
'citycar-agl14-adl',
'floortile-agl14-strips',
'ged-agl14-strips',
'hiking-agl14-strips',
'maintenance-agl14-adl',
'openstacks-agl14-strips',
'parking-agl14-strips',
'tetris-agl14-strips',
'thoughtful-agl14-strips',
'transport-agl14-strips',
'visitall-agl14-strips',
]
def main(revisions=None):
suite = SUITE_AGL14
configs = [
IssueConfig("astar_goalcount", [
"--search",
"astar(goalcount)"]),
IssueConfig("eager_greedy_ff", [
"--heuristic",
"h=ff()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("eager_greedy_add", [
"--heuristic",
"h=add()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("eager_greedy_cg", [
"--heuristic",
"h=cg()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("eager_greedy_cea", [
"--heuristic",
"h=cea()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("lazy_greedy_ff", [
"--heuristic",
"h=ff()",
"--search",
"lazy_greedy(h, preferred=h)"]),
IssueConfig("lazy_greedy_add", [
"--heuristic",
"h=add()",
"--search",
"lazy_greedy(h, preferred=h)"]),
IssueConfig("lazy_greedy_cg", [
"--heuristic",
"h=cg()",
"--search",
"lazy_greedy(h, preferred=h)"]),
IssueConfig("seq_sat_lama_2011", [], driver_options=[
"--alias", "seq-sat-lama-2011"]),
IssueConfig("seq_sat_fdss_1", [], driver_options=[
"--alias", "seq-sat-fdss-1"]),
IssueConfig("seq_sat_fdss_2", [], driver_options=[
"--alias", "seq-sat-fdss-2"]),
]
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suite,
test_suite=[
#'cavediving-sat14-adl:testing01_easy.pddl',
#'childsnack-sat14-strips:child-snack_pfile05.pddl',
#'citycar-sat14-adl:p3-2-2-0-1.pddl',
#'ged-sat14-strips:d-3-6.pddl',
'hiking-sat14-strips:ptesting-1-2-7.pddl',
#'maintenance-sat14-adl:maintenance-1-3-060-180-5-000.pddl',
#'tetris-sat14-strips:p020.pddl',
#'thoughtful-sat14-strips:bootstrap-typed-01.pddl',
#'transport-sat14-strips:p01.pddl',
],
processes=4,
email='[email protected]',
)
exp.add_absolute_report_step()
exp()
main(revisions=['issue602-v1'])
| 2,976 | 28.77 | 72 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue602/v1-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
SUITE_SAT14 = [
'barman-sat14-strips',
'cavediving-sat14-adl',
'childsnack-sat14-strips',
'citycar-sat14-adl',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'maintenance-sat14-adl',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def main(revisions=None):
suite = SUITE_SAT14
configs = [
IssueConfig("astar_goalcount", [
"--search",
"astar(goalcount)"]),
IssueConfig("eager_greedy_ff", [
"--heuristic",
"h=ff()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("eager_greedy_add", [
"--heuristic",
"h=add()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("eager_greedy_cg", [
"--heuristic",
"h=cg()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("eager_greedy_cea", [
"--heuristic",
"h=cea()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("lazy_greedy_ff", [
"--heuristic",
"h=ff()",
"--search",
"lazy_greedy(h, preferred=h)"]),
IssueConfig("lazy_greedy_add", [
"--heuristic",
"h=add()",
"--search",
"lazy_greedy(h, preferred=h)"]),
IssueConfig("lazy_greedy_cg", [
"--heuristic",
"h=cg()",
"--search",
"lazy_greedy(h, preferred=h)"]),
IssueConfig("seq_sat_lama_2011", [], driver_options=[
"--alias", "seq-sat-lama-2011"]),
IssueConfig("seq_sat_fdss_1", [], driver_options=[
"--alias", "seq-sat-fdss-1"]),
IssueConfig("seq_sat_fdss_2", [], driver_options=[
"--alias", "seq-sat-fdss-2"]),
]
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suite,
test_suite=[
#'cavediving-sat14-adl:testing01_easy.pddl',
#'childsnack-sat14-strips:child-snack_pfile05.pddl',
#'citycar-sat14-adl:p3-2-2-0-1.pddl',
#'ged-sat14-strips:d-3-6.pddl',
'hiking-sat14-strips:ptesting-1-2-7.pddl',
#'maintenance-sat14-adl:maintenance-1-3-060-180-5-000.pddl',
#'tetris-sat14-strips:p020.pddl',
#'thoughtful-sat14-strips:bootstrap-typed-01.pddl',
#'transport-sat14-strips:p01.pddl',
],
processes=4,
email='[email protected]',
)
exp.add_absolute_report_step()
exp()
main(revisions=['issue602-v1'])
| 2,976 | 28.77 | 72 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue602/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Wrapper for FastDownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions, configs, suite, grid_priority=None,
path=None, test_suite=None, email=None, processes=1,
**kwargs):
"""Create a DownwardExperiment with some convenience features.
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
*configs* must be a non-empty list of IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(..., suite=suites.suite_all())
IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(..., suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(..., grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])
If *email* is specified, it should be an email address. This
email address will be notified upon completion of the experiments
if it is run on the cluster.
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment(processes=processes)
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(
priority=grid_priority, email=email)
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
repo = get_repo_base()
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
repo,
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self.add_suite(os.path.join(repo, "benchmarks"), suite)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(self.eval_dir,
get_experiment_name() + "." +
report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(Step('publish-absolute-report',
subprocess.call,
['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ "." + report.output_format)
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ ".html")
subprocess.call(['publish', outfile])
self.add_step(Step("make-comparison-tables", make_comparison_tables))
self.add_step(Step("publish-comparison-tables", publish_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,533 | 34.011173 | 83 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue602/v1-mco.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
SUITE_MCO14 = [
'barman-mco14-strips',
'cavediving-mco14-adl',
'childsnack-mco14-strips',
'citycar-mco14-adl',
'floortile-mco14-strips',
'ged-mco14-strips',
'hiking-mco14-strips',
'maintenance-mco14-adl',
'openstacks-mco14-strips',
'parking-mco14-strips',
'tetris-mco14-strips',
'thoughtful-mco14-strips',
'transport-mco14-strips',
'visitall-mco14-strips',
]
def main(revisions=None):
suite = SUITE_MCO14
configs = [
IssueConfig("astar_goalcount", [
"--search",
"astar(goalcount)"]),
IssueConfig("eager_greedy_ff", [
"--heuristic",
"h=ff()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("eager_greedy_add", [
"--heuristic",
"h=add()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("eager_greedy_cg", [
"--heuristic",
"h=cg()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("eager_greedy_cea", [
"--heuristic",
"h=cea()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("lazy_greedy_ff", [
"--heuristic",
"h=ff()",
"--search",
"lazy_greedy(h, preferred=h)"]),
IssueConfig("lazy_greedy_add", [
"--heuristic",
"h=add()",
"--search",
"lazy_greedy(h, preferred=h)"]),
IssueConfig("lazy_greedy_cg", [
"--heuristic",
"h=cg()",
"--search",
"lazy_greedy(h, preferred=h)"]),
IssueConfig("seq_sat_lama_2011", [], driver_options=[
"--alias", "seq-sat-lama-2011"]),
IssueConfig("seq_sat_fdss_1", [], driver_options=[
"--alias", "seq-sat-fdss-1"]),
IssueConfig("seq_sat_fdss_2", [], driver_options=[
"--alias", "seq-sat-fdss-2"]),
]
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suite,
test_suite=[
#'cavediving-sat14-adl:testing01_easy.pddl',
#'childsnack-sat14-strips:child-snack_pfile05.pddl',
#'citycar-sat14-adl:p3-2-2-0-1.pddl',
#'ged-sat14-strips:d-3-6.pddl',
'hiking-sat14-strips:ptesting-1-2-7.pddl',
#'maintenance-sat14-adl:maintenance-1-3-060-180-5-000.pddl',
#'tetris-sat14-strips:p020.pddl',
#'thoughtful-sat14-strips:bootstrap-typed-01.pddl',
#'transport-sat14-strips:p01.pddl',
],
processes=4,
email='[email protected]',
)
exp.add_absolute_report_step()
exp()
main(revisions=['issue602-v1'])
| 2,976 | 28.77 | 72 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue837/v1-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue837-base", "issue837-v1"]
BUILDS = ["debug64"]
SEARCHES = [
("bjolp", [
"--evaluator",
"lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search",
"astar(lmc,lazy_evaluator=lmc)"]),
("blind", ["--search", "astar(blind())"]),
("cegar", ["--search", "astar(cegar())"]),
("divpot", ["--search", "astar(diverse_potentials())"]),
("ipdb", ["--search", "astar(ipdb())"]),
("lmcut", ["--search", "astar(lmcut())"]),
("mas",
["--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
" merge_strategy=merge_sccs(order_of_sccs=topological,"
" merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order])),"
" label_reduction=exact(before_shrinking=true, before_merging=false),"
" max_states=50000, threshold_before_merge=1))"]),
("occ", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]),
("blind-sss-simple", ["--search", "astar(blind(), pruning=stubborn_sets_simple())"]),
("blind-sss-ec", ["--search", "astar(blind(), pruning=stubborn_sets_ec())"]),
("h2", ["--search", "astar(hm(m=2))"]),
("hmax", ["--search", "astar(hmax())"]),
]
CONFIGS = [
IssueConfig(
"-".join([search_nick, build]),
search,
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "5m"])
for build in BUILDS
for search_nick, search in SEARCHES
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 2,820 | 33.402439 | 106 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue837/v1-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue837-base", "issue837-v1"]
BUILDS = ["debug64"]
CONFIG_DICT = {
"eager_greedy_ff": [
"--heuristic",
"h=ff()",
"--search",
"eager_greedy([h], preferred=[h])"],
"eager_greedy_cea": [
"--heuristic",
"h=cea()",
"--search",
"eager_greedy([h], preferred=[h])"],
"lazy_greedy_add": [
"--heuristic",
"h=add()",
"--search",
"lazy_greedy([h], preferred=[h])"],
"lazy_greedy_cg": [
"--heuristic",
"h=cg()",
"--search",
"lazy_greedy([h], preferred=[h])"],
"lama-first": [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""],
"ff-typed": [
"--heuristic", "hff=ff()",
"--search",
"lazy(alt([single(hff), single(hff, pref_only=true),"
" type_based([hff, g()])], boost=1000),"
" preferred=[hff], cost_type=one)"],
}
CONFIGS = [
IssueConfig(
"-".join([config_nick, build]),
config,
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "5m"])
for build in BUILDS
for config_nick, config in CONFIG_DICT.items()
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 2,747 | 28.548387 | 103 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue837/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks',
'childsnack-opt14-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'pipesworld-notankage',
'pipesworld-tankage', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage',
'tetris-opt14-strips', 'tidybot-opt11-strips',
'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips',
'transport-opt11-strips', 'transport-opt14-strips',
'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips',
'woodworking-opt08-strips', 'woodworking-opt11-strips',
'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'airport', 'assembly', 'barman-sat11-strips',
'barman-sat14-strips', 'blocks', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot',
'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips',
'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell',
'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips',
'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic',
'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime',
'mystery', 'nomystery-sat11-strips', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage',
'tetris-sat14-strips', 'thoughtful-sat14-strips',
'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips',
'transport-sat11-strips', 'transport-sat14-strips', 'trucks',
'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
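# Illustrative sketch only (the nick and options below are made-up examples,
# not taken from any experiment in this repository):
#
#   config = IssueConfig(
#       "blind", ["--search", "astar(blind())"],
#       driver_options=["--overall-time-limit", "5m"])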
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
        for rev in revisions or []:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 14,203 | 35.893506 | 82 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue837/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
        for i in range(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
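            # Plot the raw value of algorithm 1 on x and the ratio
            # val2 / val1 on y, so y > 1 means algorithm 2 produced the
            # larger value for this attribute.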
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
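# Illustrative usage sketch (algorithm names are placeholders); an experiment
# script would pass a report like this to exp.add_report():
#
#   exp.add_report(
#       RelativeScatterPlotReport(
#           attributes=["total_time"],
#           filter_algorithm=["issue-base", "issue-patched"],
#           get_category=lambda run1, run2: run1["domain"]),
#       outfile="scatter-total-time.png")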
| 3,875 | 35.566038 | 78 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue752/v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue752-v2"]
CONFIGS = [
IssueConfig("opcount-seq-lmcut-soplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=soplex))"]),
IssueConfig("diverse-potentials-soplex", ["--search", "astar(diverse_potentials(lpsolver=soplex))"]),
IssueConfig("optimal-lmcount-soplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=soplex))"]),
IssueConfig("opcount-seq-lmcut-cplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=cplex))"]),
IssueConfig("diverse-potentials-cplex", ["--search", "astar(diverse_potentials(lpsolver=cplex))"]),
IssueConfig("optimal-lmcount-cplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=cplex))"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(partition="infai_2", email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
for nick in ["opcount-seq-lmcut", "diverse-potentials", "optimal-lmcount"]:
exp.add_report(RelativeScatterPlotReport(
attributes=["total_time"],
filter_algorithm=["issue752-v2-%s-%s" % (nick, solver) for solver in ["cplex", "soplex"]],
get_category=lambda r1, r2: r1["domain"]),
outfile="issue752-v2-scatter-total-time-%s.png" % nick)
exp.run_steps()
| 2,310 | 40.267857 | 156 |
py
|